git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge branch 'devel-stable' into for-linus
author Russell King <rmk+kernel@arm.linux.org.uk>
Thu, 5 Jan 2012 13:24:33 +0000 (13:24 +0000)
committer Russell King <rmk+kernel@arm.linux.org.uk>
Thu, 5 Jan 2012 13:24:33 +0000 (13:24 +0000)
Conflicts:
arch/arm/kernel/setup.c
arch/arm/mach-shmobile/board-kota2.c

757 files changed:
CREDITS
Documentation/ABI/testing/sysfs-bus-rbd
Documentation/kernel-parameters.txt
Documentation/networking/ip-sysctl.txt
Documentation/sound/alsa/soc/machine.txt
Documentation/usb/linux-cdc-acm.inf
Documentation/virtual/kvm/api.txt
MAINTAINERS
Makefile
arch/arm/Kconfig
arch/arm/boot/compressed/Makefile
arch/arm/common/pl330.c
arch/arm/common/timer-sp.c
arch/arm/configs/imx_v4_v5_defconfig
arch/arm/configs/omap1_defconfig
arch/arm/include/asm/bug.h
arch/arm/include/asm/edac.h [new file with mode: 0644]
arch/arm/include/asm/gpio.h
arch/arm/include/asm/hardirq.h
arch/arm/include/asm/opcodes.h [new file with mode: 0644]
arch/arm/include/asm/pgtable.h
arch/arm/include/asm/processor.h
arch/arm/include/asm/setup.h
arch/arm/include/asm/swab.h
arch/arm/include/asm/unwind.h
arch/arm/kernel/Makefile
arch/arm/kernel/kprobes-test.c
arch/arm/kernel/opcodes.c [new file with mode: 0644]
arch/arm/kernel/perf_event.c
arch/arm/kernel/setup.c
arch/arm/kernel/smp_twd.c
arch/arm/kernel/swp_emulate.c
arch/arm/kernel/tcm.c
arch/arm/kernel/unwind.c
arch/arm/mach-at91/at91rm9200_devices.c
arch/arm/mach-at91/at91sam9260.c
arch/arm/mach-at91/at91sam9260_devices.c
arch/arm/mach-at91/at91sam9261_devices.c
arch/arm/mach-at91/at91sam9263_devices.c
arch/arm/mach-at91/include/mach/system_rev.h
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-davinci/board-dm365-evm.c
arch/arm/mach-davinci/board-dm646x-evm.c
arch/arm/mach-davinci/dm646x.c
arch/arm/mach-davinci/include/mach/psc.h
arch/arm/mach-davinci/psc.c
arch/arm/mach-exynos/cpu.c
arch/arm/mach-exynos/mct.c
arch/arm/mach-imx/Kconfig
arch/arm/mach-imx/Makefile
arch/arm/mach-imx/clock-imx35.c
arch/arm/mach-imx/mach-cpuimx35.c
arch/arm/mach-imx/mach-imx6q.c
arch/arm/mach-integrator/Kconfig
arch/arm/mach-integrator/core.c
arch/arm/mach-kirkwood/sheevaplug-setup.c
arch/arm/mach-msm/devices-iommu.c
arch/arm/mach-mx5/board-mx51_babbage.c
arch/arm/mach-mx5/board-mx53_evk.c
arch/arm/mach-mx5/board-mx53_loco.c
arch/arm/mach-mx5/board-mx53_smd.c
arch/arm/mach-mx5/imx51-dt.c
arch/arm/mach-mx5/imx53-dt.c
arch/arm/mach-mxs/include/mach/mx28.h
arch/arm/mach-mxs/include/mach/mxs.h
arch/arm/mach-mxs/mach-m28evk.c
arch/arm/mach-mxs/mach-stmp378x_devb.c
arch/arm/mach-mxs/module-tx28.c
arch/arm/mach-omap1/clock_data.c
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-omap2/mcbsp.c
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-orion5x/ts209-setup.c
arch/arm/mach-prima2/pm.c
arch/arm/mach-prima2/prima2.c
arch/arm/mach-s3c64xx/dev-spi.c
arch/arm/mach-s3c64xx/s3c6400.c
arch/arm/mach-s3c64xx/setup-fb-24bpp.c
arch/arm/mach-s5pv210/mach-smdkv210.c
arch/arm/mach-sa1100/Makefile.boot
arch/arm/mach-sa1100/nanoengine.c
arch/arm/mach-shmobile/board-ag5evm.c
arch/arm/mach-shmobile/board-kota2.c
arch/arm/mach-shmobile/clock-sh73a0.c
arch/arm/mach-shmobile/include/mach/gpio.h
arch/arm/mach-ux500/include/mach/gpio.h
arch/arm/mm/fault.c
arch/arm/mm/mmap.c
arch/arm/mm/proc-v7.S
arch/arm/nwfpe/entry.S
arch/arm/nwfpe/fpopcode.c
arch/arm/nwfpe/fpopcode.h
arch/arm/oprofile/common.c
arch/arm/plat-mxc/cpufreq.c
arch/arm/plat-mxc/include/mach/uncompress.h
arch/arm/plat-mxc/pwm.c
arch/arm/plat-orion/gpio.c
arch/arm/plat-samsung/dev-backlight.c
arch/arm/plat-samsung/include/plat/cpu-freq-core.h
arch/arm/tools/mach-types
arch/ia64/include/asm/cputime.h
arch/m68k/include/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/mips/kernel/perf_event_mipsxx.c
arch/powerpc/include/asm/cputime.h
arch/powerpc/include/asm/kvm_book3s.h
arch/powerpc/include/asm/kvm_book3s_64.h
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/e500.c
arch/s390/include/asm/cputime.h
arch/s390/include/asm/pgtable.h
arch/s390/kernel/ptrace.c
arch/s390/kernel/setup.c
arch/s390/kernel/signal.c
arch/s390/oprofile/init.c
arch/sh/boards/board-sh7757lcr.c
arch/sh/oprofile/common.c
arch/sparc/kernel/ds.c
arch/sparc/kernel/pci_sun4v.c
arch/sparc/kernel/prom_common.c
arch/sparc/mm/btfixup.c
arch/tile/include/asm/irq.h
arch/tile/kernel/irq.c
arch/tile/kernel/pci-dma.c
arch/tile/kernel/pci.c
arch/tile/kernel/sysfs.c
arch/tile/lib/exports.c
arch/tile/mm/homecache.c
arch/x86/Kconfig
arch/x86/include/asm/intel_scu_ipc.h
arch/x86/include/asm/mrst.h
arch/x86/include/asm/msr.h
arch/x86/include/asm/system.h
arch/x86/include/asm/timer.h
arch/x86/include/asm/uv/uv_mmrs.h
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/mtrr/generic.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_amd_ibs.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/hpet.c
arch/x86/kernel/irq_64.c
arch/x86/kernel/microcode_core.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/process.c
arch/x86/kernel/quirks.c
arch/x86/kernel/reboot.c
arch/x86/kernel/rtc.c
arch/x86/kvm/i8254.c
arch/x86/kvm/x86.c
arch/x86/mm/gup.c
arch/x86/mm/highmem_32.c
arch/x86/net/bpf_jit_comp.c
arch/x86/oprofile/init.c
arch/x86/platform/efi/efi_32.c
arch/x86/platform/mrst/mrst.c
arch/x86/xen/setup.c
block/blk-core.c
block/blk-map.c
block/blk-tag.c
block/cfq-iosched.c
drivers/ata/Kconfig
drivers/base/core.c
drivers/block/cciss.c
drivers/block/loop.c
drivers/block/rbd.c
drivers/block/swim3.c
drivers/bluetooth/Kconfig
drivers/bluetooth/btmrvl_sdio.c
drivers/bluetooth/btusb.c
drivers/char/ipmi/ipmi_watchdog.c
drivers/dma/Kconfig
drivers/firmware/iscsi_ibft.c
drivers/firmware/iscsi_ibft_find.c
drivers/firmware/sigma.c
drivers/gpio/Makefile
drivers/gpio/gpio-da9052.c
drivers/gpio/gpio-ml-ioh.c
drivers/gpio/gpio-mpc8xxx.c
drivers/gpio/gpio-pl061.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drv.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_object.c
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/nouveau/nvc0_graph.c
drivers/gpu/drm/nouveau/nvd0_display.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/evergreen_reg.h
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/radeon_acpi.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hwmon/jz4740-hwmon.c
drivers/i2c/busses/i2c-eg20t.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/infiniband/core/cma.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/qib/qib_file_ops.c
drivers/input/misc/cma3000_d0x.c
drivers/input/mouse/sentelic.c
drivers/input/mouse/sentelic.h
drivers/input/mouse/synaptics.c
drivers/input/tablet/wacom_wac.c
drivers/iommu/intel-iommu.c
drivers/iommu/intr_remapping.c
drivers/iommu/iommu.c
drivers/isdn/divert/divert_procfs.c
drivers/isdn/i4l/isdn_net.c
drivers/md/bitmap.c
drivers/md/linear.c
drivers/md/md.c
drivers/md/raid5.c
drivers/media/common/tuners/mxl5007t.c
drivers/media/common/tuners/tda18218.c
drivers/media/rc/ati_remote.c
drivers/media/rc/keymaps/rc-ati-x10.c
drivers/media/rc/keymaps/rc-medion-x10.c
drivers/media/rc/keymaps/rc-snapstream-firefly.c
drivers/media/video/au0828/au0828-cards.c
drivers/media/video/gspca/gspca.c
drivers/media/video/m5mols/m5mols.h
drivers/media/video/m5mols/m5mols_core.c
drivers/media/video/mt9m111.c
drivers/media/video/mt9t112.c
drivers/media/video/omap/omap_vout.c
drivers/media/video/omap1_camera.c
drivers/media/video/omap24xxcam-dma.c
drivers/media/video/omap3isp/ispccdc.c
drivers/media/video/omap3isp/ispstat.c
drivers/media/video/omap3isp/ispvideo.c
drivers/media/video/ov6650.c
drivers/media/video/s5p-fimc/fimc-capture.c
drivers/media/video/s5p-fimc/fimc-core.c
drivers/media/video/s5p-fimc/fimc-core.h
drivers/media/video/s5p-fimc/fimc-mdevice.c
drivers/media/video/s5p-fimc/fimc-reg.c
drivers/media/video/s5p-mfc/s5p_mfc_enc.c
drivers/media/video/s5p-tv/mixer_video.c
drivers/media/video/sh_mobile_ceu_camera.c
drivers/media/video/sh_mobile_csi2.c
drivers/media/video/soc_camera.c
drivers/mfd/ab5500-debugfs.c
drivers/mfd/ab8500-core.c
drivers/mfd/adp5520.c
drivers/mfd/da903x.c
drivers/mfd/jz4740-adc.c
drivers/mfd/tps6586x.c
drivers/mfd/tps65910.c
drivers/mfd/twl-core.c
drivers/mfd/twl4030-irq.c
drivers/mfd/wm8994-core.c
drivers/mmc/card/block.c
drivers/mmc/core/core.c
drivers/mmc/core/host.c
drivers/mmc/core/mmc.c
drivers/mmc/host/mmci.c
drivers/mmc/host/mxcmmc.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/sdhci-cns3xxx.c
drivers/mmc/host/sdhci-dove.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-of-esdhc.c
drivers/mmc/host/sdhci-of-hlwd.c
drivers/mmc/host/sdhci-pci.c
drivers/mmc/host/sdhci-pltfm.c
drivers/mmc/host/sdhci-pltfm.h
drivers/mmc/host/sdhci-pxav2.c
drivers/mmc/host/sdhci-pxav3.c
drivers/mmc/host/sdhci-s3c.c
drivers/mmc/host/sdhci-tegra.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/sdhci.h
drivers/mmc/host/sh_mmcif.c
drivers/mmc/host/tmio_mmc_pio.c
drivers/mmc/host/vub300.c
drivers/mtd/maps/plat-ram.c
drivers/mtd/maps/pxa2xx-flash.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/ndfc.c
drivers/net/arcnet/Kconfig
drivers/net/bonding/bond_main.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/freescale/Kconfig
drivers/net/ethernet/freescale/fec.c
drivers/net/ethernet/freescale/fsl_pq_mdio.c
drivers/net/ethernet/ibm/ehea/ehea.h
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/iseries_veth.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/jme.h
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/mellanox/mlx4/en_cq.c
drivers/net/ethernet/pasemi/Makefile
drivers/net/ethernet/qlogic/qlge/qlge.h
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/ethernet/tile/tilepro.c
drivers/net/phy/Kconfig
drivers/net/ppp/pptp.c
drivers/net/usb/asix.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/rc.c
drivers/net/wireless/iwlwifi/iwl-1000.c
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
drivers/net/wireless/iwlwifi/iwl-agn-sta.c
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-agn.h
drivers/net/wireless/iwlwifi/iwl-core.c
drivers/net/wireless/iwlwifi/iwl-core.h
drivers/net/wireless/iwlwifi/iwl-shared.h
drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/p54/p54spi.c
drivers/net/wireless/prism54/isl_ioctl.c
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rtlwifi/ps.c
drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
drivers/net/wireless/rtlwifi/rtl8192cu/phy.c
drivers/net/wireless/rtlwifi/rtl8192de/phy.c
drivers/net/wireless/rtlwifi/rtl8192se/phy.c
drivers/net/xen-netback/netback.c
drivers/of/irq.c
drivers/of/platform.c
drivers/oprofile/oprof.c
drivers/oprofile/oprofile_files.c
drivers/oprofile/oprofilefs.c
drivers/oprofile/timer_int.c
drivers/pci/ats.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/iov.c
drivers/pci/pci.c
drivers/platform/x86/toshiba_acpi.c
drivers/power/intel_mid_battery.c
drivers/ptp/ptp_clock.c
drivers/rapidio/devices/tsi721.c
drivers/rapidio/devices/tsi721.h
drivers/rtc/class.c
drivers/rtc/interface.c
drivers/rtc/rtc-m41t80.c
drivers/rtc/rtc-s3c.c
drivers/s390/cio/chsc.c
drivers/s390/cio/cio.h
drivers/s390/cio/css.c
drivers/s390/cio/device.c
drivers/s390/cio/device_fsm.c
drivers/s390/cio/device_ops.c
drivers/s390/cio/io_sch.h
drivers/s390/crypto/ap_bus.c
drivers/s390/scsi/zfcp_scsi.c
drivers/sbus/char/bbc_i2c.c
drivers/sbus/char/display7seg.c
drivers/sbus/char/envctrl.c
drivers/sbus/char/flash.c
drivers/sbus/char/uctrl.c
drivers/scsi/bnx2i/bnx2i_hwi.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe_ctlr.c
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_nx.h
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/qla4xxx/ql4_def.h
drivers/scsi/qla4xxx/ql4_fw.h
drivers/scsi/qla4xxx/ql4_glbl.h
drivers/scsi/qla4xxx/ql4_init.c
drivers/scsi/qla4xxx/ql4_mbx.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qla4xxx/ql4_version.h
drivers/spi/Kconfig
drivers/spi/spi-ath79.c
drivers/spi/spi-gpio.c
drivers/spi/spi-nuc900.c
drivers/ssb/driver_pcicore.c
drivers/staging/comedi/comedi_fops.c
drivers/staging/comedi/drivers/usbduxsigma.c
drivers/staging/rtl8712/usb_intf.c
drivers/staging/rts_pstor/rtsx.c
drivers/staging/tidspbridge/core/dsp-clock.c
drivers/staging/tidspbridge/rmgr/drv_interface.c
drivers/staging/usbip/vhci_rx.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/iscsi/iscsi_target_core.h
drivers/target/iscsi/iscsi_target_erl1.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/loopback/tcm_loop.c
drivers/target/target_core_alua.c
drivers/target/target_core_cdb.c
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_pr.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_rd.c
drivers/target/target_core_tmr.c
drivers/target/target_core_transport.c
drivers/target/tcm_fc/tfc_cmd.c
drivers/target/tcm_fc/tfc_conf.c
drivers/usb/class/cdc-acm.c
drivers/usb/dwc3/core.c
drivers/usb/gadget/amd5536udc.c
drivers/usb/gadget/epautoconf.c
drivers/usb/gadget/f_mass_storage.c
drivers/usb/gadget/f_serial.c
drivers/usb/gadget/fsl_mxc_udc.c
drivers/usb/gadget/fsl_qe_udc.c
drivers/usb/gadget/fsl_udc_core.c
drivers/usb/gadget/fsl_usb2_udc.h
drivers/usb/gadget/m66592-udc.c
drivers/usb/gadget/net2280.c
drivers/usb/gadget/r8a66597-udc.c
drivers/usb/gadget/s3c-hsotg.c
drivers/usb/gadget/s3c-hsudc.c
drivers/usb/host/ehci-sched.c
drivers/usb/host/isp1760-if.c
drivers/usb/host/whci/qset.c
drivers/usb/host/xhci.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_host.c
drivers/usb/renesas_usbhs/mod.c
drivers/usb/renesas_usbhs/mod_gadget.c
drivers/usb/renesas_usbhs/mod_host.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/option.c
drivers/usb/storage/unusual_devs.h
drivers/watchdog/coh901327_wdt.c
drivers/watchdog/hpwdt.c
drivers/watchdog/iTCO_wdt.c
drivers/watchdog/sp805_wdt.c
drivers/xen/swiotlb-xen.c
drivers/xen/xenbus/xenbus_xs.c
firmware/README.AddingFirmware
fs/btrfs/async-thread.c
fs/btrfs/async-thread.h
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/super.c
fs/btrfs/volumes.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/ioctl.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/snap.c
fs/ceph/super.c
fs/ceph/super.h
fs/ceph/xattr.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/readdir.c
fs/cifs/smbencrypt.c
fs/configfs/inode.c
fs/configfs/mount.c
fs/dcache.c
fs/ext4/extents.c
fs/ext4/inode.c
fs/ext4/page-io.c
fs/ext4/super.c
fs/fs-writeback.c
fs/fuse/dev.c
fs/fuse/file.c
fs/fuse/inode.c
fs/locks.c
fs/namespace.c
fs/ncpfs/inode.c
fs/nfs/file.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nilfs2/ioctl.c
fs/proc/meminfo.c
fs/proc/root.c
fs/proc/stat.c
fs/seq_file.c
fs/ubifs/super.c
fs/xfs/xfs_acl.c
fs/xfs/xfs_attr_leaf.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_export.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_log.c
fs/xfs/xfs_super.c
fs/xfs/xfs_sync.c
fs/xfs/xfs_sync.h
fs/xfs/xfs_trace.h
include/asm-generic/cputime.h
include/asm-generic/unistd.h
include/drm/drm_pciids.h
include/linux/blkdev.h
include/linux/clocksource.h
include/linux/compat.h
include/linux/dcache.h
include/linux/dma_remapping.h
include/linux/fs.h
include/linux/ftrace_event.h
include/linux/init_task.h
include/linux/kvm.h
include/linux/lglock.h
include/linux/log2.h
include/linux/mm.h
include/linux/mmc/card.h
include/linux/netdevice.h
include/linux/pci_ids.h
include/linux/perf_event.h
include/linux/pkt_sched.h
include/linux/shrinker.h
include/linux/sigma.h
include/media/soc_camera.h
include/net/dst.h
include/net/dst_ops.h
include/net/flow.h
include/net/inet_sock.h
include/net/inetpeer.h
include/net/ip_vs.h
include/net/netfilter/nf_conntrack_ecache.h
include/net/netns/conntrack.h
include/net/red.h
include/net/route.h
include/net/sctp/structs.h
include/net/sock.h
include/scsi/libfcoe.h
include/target/target_core_base.h
include/target/target_core_transport.h
include/trace/events/writeback.h
include/xen/interface/io/xs_wire.h
ipc/mqueue.c
ipc/msgutil.c
kernel/cgroup.c
kernel/cpuset.c
kernel/events/core.c
kernel/events/internal.h
kernel/events/ring_buffer.c
kernel/futex.c
kernel/irq/manage.c
kernel/jump_label.c
kernel/lockdep.c
kernel/printk.c
kernel/sched.c
kernel/sched_fair.c
kernel/sched_features.h
kernel/sched_rt.c
kernel/sysctl_binary.c
kernel/time/alarmtimer.c
kernel/time/clocksource.c
kernel/time/tick-broadcast.c
kernel/timer.c
kernel/trace/ftrace.c
kernel/trace/trace_events.c
kernel/trace/trace_events_filter.c
lib/dma-debug.c
mm/filemap.c
mm/huge_memory.c
mm/hugetlb.c
mm/memcontrol.c
mm/mempolicy.c
mm/migrate.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_alloc.c
mm/percpu.c
mm/slab.c
mm/vmalloc.c
mm/vmscan.c
net/batman-adv/translation-table.c
net/bluetooth/bnep/core.c
net/bluetooth/cmtp/core.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_event.c
net/bluetooth/l2cap_core.c
net/bluetooth/rfcomm/core.c
net/bridge/br_netfilter.c
net/bridge/br_netlink.c
net/bridge/br_stp.c
net/caif/cffrml.c
net/ceph/crush/mapper.c
net/core/dev.c
net/core/dev_addr_lists.c
net/core/flow.c
net/core/neighbour.c
net/core/net-sysfs.c
net/core/request_sock.c
net/core/secure_seq.c
net/core/skbuff.c
net/core/sock.c
net/dccp/ipv4.c
net/decnet/dn_route.c
net/decnet/dn_timer.c
net/ipv4/devinet.c
net/ipv4/igmp.c
net/ipv4/inet_diag.c
net/ipv4/ip_forward.c
net/ipv4/ip_options.c
net/ipv4/ipconfig.c
net/ipv4/ipip.c
net/ipv4/netfilter.c
net/ipv4/netfilter/Kconfig
net/ipv4/route.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/inet6_connection_sock.c
net/ipv6/ip6_output.c
net/ipv6/ipv6_sockglue.c
net/ipv6/ndisc.c
net/ipv6/netfilter/Kconfig
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/l2tp/l2tp_core.c
net/llc/af_llc.c
net/mac80211/agg-tx.c
net/mac80211/debugfs_sta.c
net/mac80211/main.c
net/mac80211/status.c
net/mac80211/util.c
net/netfilter/Kconfig
net/netfilter/ipset/ip_set_hash_ipport.c
net/netfilter/ipset/ip_set_hash_ipportip.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/nf_conntrack_ecache.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/xt_connbytes.c
net/netlabel/netlabel_kapi.c
net/nfc/nci/core.c
net/packet/af_packet.c
net/sched/sch_gred.c
net/sched/sch_mqprio.c
net/sched/sch_netem.c
net/sched/sch_red.c
net/sched/sch_teql.c
net/sctp/associola.c
net/sctp/auth.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/protocol.c
net/sctp/socket.c
net/sctp/sysctl.c
net/sunrpc/sched.c
net/sunrpc/xprt.c
net/unix/af_unix.c
net/wireless/nl80211.c
net/wireless/reg.c
net/xfrm/xfrm_policy.c
scripts/kconfig/Makefile
security/apparmor/path.c
security/integrity/evm/evm_crypto.c
security/selinux/netport.c
security/tomoyo/realpath.c
sound/atmel/ac97c.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/sis7019.c
sound/soc/atmel/Kconfig
sound/soc/atmel/Makefile
sound/soc/atmel/playpaq_wm8510.c [deleted file]
sound/soc/codecs/Kconfig
sound/soc/codecs/ad1836.h
sound/soc/codecs/cs4270.c
sound/soc/codecs/cs42l51.c
sound/soc/codecs/jz4740.c
sound/soc/codecs/max9877.c
sound/soc/codecs/uda1380.c
sound/soc/codecs/wm8776.c
sound/soc/codecs/wm8958-dsp2.c
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm8996.c
sound/soc/fsl/mpc8610_hpcd.c
sound/soc/imx/Kconfig
sound/soc/kirkwood/Kconfig
sound/soc/mxs/mxs-pcm.c
sound/soc/mxs/mxs-sgtl5000.c
sound/soc/pxa/Kconfig
sound/soc/pxa/hx4700.c
sound/soc/samsung/jive_wm8750.c
sound/soc/samsung/smdk2443_wm9710.c
sound/soc/samsung/smdk_wm8994.c
sound/soc/samsung/speyside.c
sound/soc/soc-core.c
sound/soc/soc-utils.c
sound/usb/quirks-table.h
tools/perf/builtin-stat.c
tools/perf/util/evsel.c
tools/perf/util/header.c
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/session.c
tools/perf/util/trace-event-parse.c
virt/kvm/assigned-dev.c

diff --git a/CREDITS b/CREDITS
index 07e32a87d956808fbb8b979fb38d434d93c879fc..44fce988eaac8cd22bfe5a5e753ae1bb58b3476d 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -688,10 +688,13 @@ S: Oxfordshire, UK.
 
 N: Kees Cook
 E: kees@outflux.net
-W: http://outflux.net/
-P: 1024D/17063E6D 9FA3 C49C 23C9 D1BC 2E30  1975 1FFF 4BA9 1706 3E6D
-D: Minor updates to SCSI types, added /proc/pid/maps protection
+E: kees@ubuntu.com
+E: keescook@chromium.org
+W: http://outflux.net/blog/
+P: 4096R/DC6DC026 A5C3 F68F 229D D60F 723E  6E13 8972 F4DF DC6D C026
+D: Various security things, bug fixes, and documentation.
 S: (ask for current address)
+S: Portland, Oregon
 S: USA
 
 N: Robin Cornelius
index fa72ccb2282e77c879c0a7a135f6d86828a143cc..dbedafb095e24d3d3a8e2d93b6cbd727268d1754 100644 (file)
@@ -57,13 +57,6 @@ create_snap
 
         $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_create
 
-rollback_snap
-
-       Rolls back data to the specified snapshot. This goes over the entire
-       list of rados blocks and sends a rollback command to each.
-
-        $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_rollback
-
 snap_*
 
        A directory per each snapshot
index a0c5c5f4fce6e9587346a4a049c9725e5ca45de5..81c287fad79d6370d0d697d5ddf33b8af756a036 100644 (file)
@@ -315,12 +315,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        CPU-intensive style benchmark, and it can vary highly in
                        a microbenchmark depending on workload and compiler.
 
-                       1: only for 32-bit processes
-                       2: only for 64-bit processes
+                       32: only for 32-bit processes
+                       64: only for 64-bit processes
                        on: enable for both 32- and 64-bit processes
                        off: disable for both 32- and 64-bit processes
 
-       amd_iommu=      [HW,X86-84]
+       amd_iommu=      [HW,X86-64]
                        Pass parameters to the AMD IOMMU driver in the system.
                        Possible values are:
                        fullflush - enable flushing of IO/TLB entries when
index f049a1ca186fbf6eb5e55ed9eb3a65bb8601b1f8..589f2da5d5454dd96f828c01168aecb84770473f 100644 (file)
@@ -282,11 +282,11 @@ tcp_max_ssthresh - INTEGER
        Default: 0 (off)
 
 tcp_max_syn_backlog - INTEGER
-       Maximal number of remembered connection requests, which are
-       still did not receive an acknowledgment from connecting client.
-       Default value is 1024 for systems with more than 128Mb of memory,
-       and 128 for low memory machines. If server suffers of overload,
-       try to increase this number.
+       Maximal number of remembered connection requests, which have not
+       received an acknowledgment from connecting client.
+       The minimal value is 128 for low memory machines, and it will
+       increase in proportion to the memory of machine.
+       If server suffers from overload, try increasing this number.
 
 tcp_max_tw_buckets - INTEGER
        Maximal number of timewait sockets held by system simultaneously.
index 3e2ec9cbf3976d0d21c6ee90d7fe075a210a33eb..d50c14df34112ed2095942062bcaab90d90697bd 100644 (file)
@@ -50,8 +50,7 @@ Machine DAI Configuration
 The machine DAI configuration glues all the codec and CPU DAIs together. It can
 also be used to set up the DAI system clock and for any machine related DAI
 initialisation e.g. the machine audio map can be connected to the codec audio
-map, unconnected codec pins can be set as such. Please see corgi.c, spitz.c
-for examples.
+map, unconnected codec pins can be set as such.
 
 struct snd_soc_dai_link is used to set up each DAI in your machine. e.g.
 
@@ -83,8 +82,7 @@ Machine Power Map
 The machine driver can optionally extend the codec power map and to become an
 audio power map of the audio subsystem. This allows for automatic power up/down
 of speaker/HP amplifiers, etc. Codec pins can be connected to the machines jack
-sockets in the machine init function. See soc/pxa/spitz.c and dapm.txt for
-details.
+sockets in the machine init function.
 
 
 Machine Controls
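
The machine.txt hunks above describe glueing CPU and codec DAIs together with struct snd_soc_dai_link. As a rough illustration only (not part of the patch; the board, DAI and codec device names below are hypothetical), a 3.2-era machine driver entry might look like this:

#include <sound/soc.h>

/* Machine-specific init hook: wire up the audio map, codec pins, etc. */
static int myboard_wm8994_init(struct snd_soc_pcm_runtime *rtd)
{
	return 0;
}

/* One DAI link: CPU I2S interface <-> codec AIF1 (names are hypothetical). */
static struct snd_soc_dai_link myboard_dai_link = {
	.name		= "WM8994 HiFi",
	.stream_name	= "HiFi Playback",
	.cpu_dai_name	= "samsung-i2s.0",
	.codec_dai_name	= "wm8994-aif1",
	.platform_name	= "samsung-audio",
	.codec_name	= "wm8994-codec",
	.init		= myboard_wm8994_init,
};

/* The card points at the link array; registration is done by the driver. */
static struct snd_soc_card myboard_card = {
	.name		= "myboard",
	.dai_link	= &myboard_dai_link,
	.num_links	= 1,
};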
index 37a02ce5484176670fde42a5bb915427316587f3..f0ffc27d4c0ac9d52efa62ecc0f775fd877b7387 100644 (file)
@@ -90,10 +90,10 @@ ServiceBinary=%12%\USBSER.sys
 [SourceDisksFiles]
 [SourceDisksNames]
 [DeviceList]
-%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02
+%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02, USB\VID_1D6B&PID_0106&MI_00
 
 [DeviceList.NTamd64]
-%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02
+%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02, USB\VID_1D6B&PID_0106&MI_00
 
 
 ;------------------------------------------------------------------------------
index 7945b0bd35e2ad50d7561ffa88a428c21175b50b..e2a4b5287361d25c0800954cbc79eccea88291d3 100644 (file)
@@ -1100,6 +1100,15 @@ emulate them efficiently. The fields in each entry are defined as follows:
    eax, ebx, ecx, edx: the values returned by the cpuid instruction for
          this function/index combination
 
+The TSC deadline timer feature (CPUID leaf 1, ecx[24]) is always returned
+as false, since the feature depends on KVM_CREATE_IRQCHIP for local APIC
+support.  Instead it is reported via
+
+  ioctl(KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER)
+
+if that returns true and you use KVM_CREATE_IRQCHIP, or if you emulate the
+feature in userspace, then you can enable the feature for KVM_SET_CPUID2.
+
 4.47 KVM_PPC_GET_PVINFO
 
 Capability: KVM_CAP_PPC_GET_PVINFO
@@ -1151,6 +1160,13 @@ following flags are specified:
 /* Depends on KVM_CAP_IOMMU */
 #define KVM_DEV_ASSIGN_ENABLE_IOMMU    (1 << 0)
 
+The KVM_DEV_ASSIGN_ENABLE_IOMMU flag is a mandatory option to ensure
+isolation of the device.  Usages not specifying this flag are deprecated.
+
+Only PCI header type 0 devices with PCI BAR resources are supported by
+device assignment.  The user requesting this ioctl must have read/write
+access to the PCI sysfs resource files associated with the device.
+
 4.49 KVM_DEASSIGN_PCI_DEVICE
 
 Capability: KVM_CAP_DEVICE_DEASSIGNMENT
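
The api.txt text added above documents probing KVM_CAP_TSC_DEADLINE_TIMER via KVM_CHECK_EXTENSION before advertising CPUID.1:ECX[24] through KVM_SET_CPUID2. As a rough illustration (not part of the patch; it assumes a host with /dev/kvm and 3.2-era <linux/kvm.h> headers), a userspace check might look like this:

/*
 * Minimal sketch: probe the TSC deadline timer capability on the
 * global /dev/kvm file descriptor.  Illustrative only.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* KVM_CHECK_EXTENSION returns a positive value if the cap exists. */
	int ret = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER);

	printf("TSC deadline timer: %s\n", ret > 0 ? "available" : "not available");

	/* If available and KVM_CREATE_IRQCHIP is used, the VMM may then set
	 * CPUID.1:ECX[24] in the entries passed to KVM_SET_CPUID2. */
	return 0;
}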
index 447560284996b3dfe8025180efdc87b1371906fd..0e7a80aefa0c27d52e9dec4e2a29d181ff7575ac 100644 (file)
@@ -511,8 +511,8 @@ M:  Joerg Roedel <joerg.roedel@amd.com>
 L:     iommu@lists.linux-foundation.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu.git
 S:     Supported
-F:     arch/x86/kernel/amd_iommu*.c
-F:     arch/x86/include/asm/amd_iommu*.h
+F:     drivers/iommu/amd_iommu*.[ch]
+F:     include/linux/amd-iommu.h
 
 AMD MICROCODE UPDATE SUPPORT
 M:     Andreas Herrmann <andreas.herrmann3@amd.com>
@@ -1054,35 +1054,18 @@ ARM/SAMSUNG ARM ARCHITECTURES
 M:     Ben Dooks <ben-linux@fluff.org>
 M:     Kukjin Kim <kgene.kim@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 W:     http://www.fluff.org/ben/linux/
 S:     Maintained
 F:     arch/arm/plat-samsung/
 F:     arch/arm/plat-s3c24xx/
 F:     arch/arm/plat-s5p/
+F:     arch/arm/mach-s3c24*/
+F:     arch/arm/mach-s3c64xx/
 F:     drivers/*/*s3c2410*
 F:     drivers/*/*/*s3c2410*
-
-ARM/S3C2410 ARM ARCHITECTURE
-M:     Ben Dooks <ben-linux@fluff.org>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.fluff.org/ben/linux/
-S:     Maintained
-F:     arch/arm/mach-s3c2410/
-
-ARM/S3C244x ARM ARCHITECTURE
-M:     Ben Dooks <ben-linux@fluff.org>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.fluff.org/ben/linux/
-S:     Maintained
-F:     arch/arm/mach-s3c2440/
-F:     arch/arm/mach-s3c2443/
-
-ARM/S3C64xx ARM ARCHITECTURE
-M:     Ben Dooks <ben-linux@fluff.org>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.fluff.org/ben/linux/
-S:     Maintained
-F:     arch/arm/mach-s3c64xx/
+F:     drivers/spi/spi-s3c*
+F:     sound/soc/samsung/*
 
 ARM/S5P EXYNOS ARM ARCHITECTURES
 M:     Kukjin Kim <kgene.kim@samsung.com>
@@ -2717,7 +2700,7 @@ FIREWIRE SUBSYSTEM
 M:     Stefan Richter <stefanr@s5r6.in-berlin.de>
 L:     linux1394-devel@lists.sourceforge.net
 W:     http://ieee1394.wiki.kernel.org/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394.git
 S:     Maintained
 F:     drivers/firewire/
 F:     include/linux/firewire*.h
@@ -3118,6 +3101,7 @@ F:        include/linux/hid*
 
 HIGH-RESOLUTION TIMERS, CLOCKEVENTS, DYNTICKS
 M:     Thomas Gleixner <tglx@linutronix.de>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:     Maintained
 F:     Documentation/timers/
 F:     kernel/hrtimer.c
@@ -3627,7 +3611,7 @@ F:        net/irda/
 IRQ SUBSYSTEM
 M:     Thomas Gleixner <tglx@linutronix.de>
 S:     Maintained
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 F:     kernel/irq/
 
 ISAPNP
@@ -4115,7 +4099,7 @@ F:        drivers/hwmon/lm90.c
 LOCKDEP AND LOCKSTAT
 M:     Peter Zijlstra <peterz@infradead.org>
 M:     Ingo Molnar <mingo@redhat.com>
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/locking
 S:     Maintained
 F:     Documentation/lockdep*.txt
 F:     Documentation/lockstat.txt
@@ -4297,7 +4281,9 @@ T:        git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:     Maintained
 F:     Documentation/dvb/
 F:     Documentation/video4linux/
+F:     Documentation/DocBook/media/
 F:     drivers/media/
+F:     drivers/staging/media/
 F:     include/media/
 F:     include/linux/dvb/
 F:     include/linux/videodev*.h
@@ -4319,8 +4305,9 @@ F:        include/linux/mm.h
 F:     mm/
 
 MEMORY RESOURCE CONTROLLER
+M:     Johannes Weiner <hannes@cmpxchg.org>
+M:     Michal Hocko <mhocko@suse.cz>
 M:     Balbir Singh <bsingharora@gmail.com>
-M:     Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
 M:     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
 L:     cgroups@vger.kernel.org
 L:     linux-mm@kvack.org
@@ -5102,6 +5089,7 @@ M:        Peter Zijlstra <a.p.zijlstra@chello.nl>
 M:     Paul Mackerras <paulus@samba.org>
 M:     Ingo Molnar <mingo@elte.hu>
 M:     Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
 S:     Supported
 F:     kernel/events/*
 F:     include/linux/perf_event.h
@@ -5181,6 +5169,7 @@ F:        drivers/scsi/pm8001/
 
 POSIX CLOCKS and TIMERS
 M:     Thomas Gleixner <tglx@linutronix.de>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:     Supported
 F:     fs/timerfd.c
 F:     include/linux/timer*
@@ -5696,6 +5685,7 @@ F:        drivers/dma/dw_dmac.c
 TIMEKEEPING, NTP
 M:     John Stultz <johnstul@us.ibm.com>
 M:     Thomas Gleixner <tglx@linutronix.de>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S:     Supported
 F:     include/linux/clocksource.h
 F:     include/linux/time.h
@@ -5720,6 +5710,7 @@ F:        drivers/watchdog/sc1200wdt.c
 SCHEDULER
 M:     Ingo Molnar <mingo@elte.hu>
 M:     Peter Zijlstra <peterz@infradead.org>
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
 S:     Maintained
 F:     kernel/sched*
 F:     include/linux/sched.h
@@ -6647,7 +6638,7 @@ TRACING
 M:     Steven Rostedt <rostedt@goodmis.org>
 M:     Frederic Weisbecker <fweisbec@gmail.com>
 M:     Ingo Molnar <mingo@redhat.com>
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git perf/core
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
 S:     Maintained
 F:     Documentation/trace/ftrace.txt
 F:     arch/*/*/*/ftrace.h
@@ -7397,7 +7388,7 @@ M:        Thomas Gleixner <tglx@linutronix.de>
 M:     Ingo Molnar <mingo@redhat.com>
 M:     "H. Peter Anvin" <hpa@zytor.com>
 M:     x86@kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
 S:     Maintained
 F:     Documentation/x86/
 F:     arch/x86/
index 12aafc20efbd0d610430d3a88df4e6359093cfc1..ea51081812f38d5ee8dfaeaab060a9fb4a86ba67 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc7
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
index 3ee1818540e71cb718e73918dd5befcbef73a9f4..afcb76ecea53bf519d8ea92ef836843b84d71db7 100644 (file)
@@ -220,8 +220,9 @@ config NEED_MACH_MEMORY_H
          be avoided when possible.
 
 config PHYS_OFFSET
-       hex "Physical address of main memory"
+       hex "Physical address of main memory" if MMU
        depends on !ARM_PATCH_PHYS_VIRT && !NEED_MACH_MEMORY_H
+       default DRAM_BASE if !MMU
        help
          Please provide the physical address corresponding to the
          location of main memory in your system.
@@ -257,6 +258,7 @@ config ARCH_INTEGRATOR
        select ARCH_HAS_CPUFREQ
        select CLKDEV_LOOKUP
        select HAVE_MACH_CLKDEV
+       select HAVE_TCM
        select ICST
        select GENERIC_CLOCKEVENTS
        select PLAT_VERSATILE
@@ -1137,6 +1139,11 @@ config ARM_TIMER_SP804
 
 source arch/arm/mm/Kconfig
 
+config ARM_NR_BANKS
+       int
+       default 16 if ARCH_EP93XX
+       default 8
+
 config IWMMXT
        bool "Enable iWMMXt support"
        depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4
@@ -1257,7 +1264,7 @@ config PL310_ERRATA_588369
 
 config ARM_ERRATA_720789
        bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
-       depends on CPU_V7 && SMP
+       depends on CPU_V7
        help
          This option enables the workaround for the 720789 Cortex-A9 (prior to
          r2p0) erratum. A faulty ASID can be sent to the other CPUs for the
@@ -1293,7 +1300,7 @@ config ARM_ERRATA_743622
 
 config ARM_ERRATA_751472
        bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation"
-       depends on CPU_V7 && SMP
+       depends on CPU_V7
        help
          This option enables the workaround for the 751472 Cortex-A9 (prior
          to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the
@@ -1577,6 +1584,16 @@ config LOCAL_TIMERS
          accounting to be spread across the timer interval, preventing a
          "thundering herd" at every timer tick.
 
+config ARCH_NR_GPIO
+       int
+       default 1024 if ARCH_SHMOBILE || ARCH_TEGRA
+       default 350 if ARCH_U8500
+       default 0
+       help
+         Maximum number of GPIOs in the system.
+
+         If unsure, leave the default value.
+
 source kernel/Kconfig.preempt
 
 config HZ
index 21f56ff32797eaef80af79c49db60ce2c89912fa..cf0a64ce4b83ad73b21b5cd6d59ab4c96fc5f2f3 100644 (file)
@@ -126,7 +126,8 @@ ccflags-y := -fpic -fno-builtin -I$(obj)
 asflags-y := -Wa,-march=all
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
-KBSS_SZ = $(shell size $(obj)/../../../../vmlinux | awk 'END{print $$3}')
+KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \
+               awk 'END{print $$3}')
 LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ)
 # Supply ZRELADDR to the decompressor via a linker symbol.
 ifneq ($(CONFIG_AUTO_ZRELADDR),y)
index f407a6b35d3dd1e6ad1e72afec38837dcfdd796a..d8e44a43047ce5cce3a2db34b9368eafe157f579 100644 (file)
  */
 #define MCODE_BUFF_PER_REQ     256
 
-/*
- * Mark a _pl330_req as free.
- * We do it by writing DMAEND as the first instruction
- * because no valid request is going to have DMAEND as
- * its first instruction to execute.
- */
-#define MARK_FREE(req) do { \
-                               _emit_END(0, (req)->mc_cpu); \
-                               (req)->mc_len = 0; \
-                       } while (0)
-
 /* If the _pl330_req is available to the client */
 #define IS_FREE(req)   (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)
 
@@ -301,8 +290,10 @@ struct pl330_thread {
        struct pl330_dmac *dmac;
        /* Only two at a time */
        struct _pl330_req req[2];
-       /* Index of the last submitted request */
+       /* Index of the last enqueued request */
        unsigned lstenq;
+       /* Index of the last submitted request or -1 if the DMA is stopped */
+       int req_running;
 };
 
 enum pl330_dmac_state {
@@ -778,6 +769,22 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd,
        writel(0, regs + DBGCMD);
 }
 
+/*
+ * Mark a _pl330_req as free.
+ * We do it by writing DMAEND as the first instruction
+ * because no valid request is going to have DMAEND as
+ * its first instruction to execute.
+ */
+static void mark_free(struct pl330_thread *thrd, int idx)
+{
+       struct _pl330_req *req = &thrd->req[idx];
+
+       _emit_END(0, req->mc_cpu);
+       req->mc_len = 0;
+
+       thrd->req_running = -1;
+}
+
 static inline u32 _state(struct pl330_thread *thrd)
 {
        void __iomem *regs = thrd->dmac->pinfo->base;
@@ -836,31 +843,6 @@ static inline u32 _state(struct pl330_thread *thrd)
        }
 }
 
-/* If the request 'req' of thread 'thrd' is currently active */
-static inline bool _req_active(struct pl330_thread *thrd,
-               struct _pl330_req *req)
-{
-       void __iomem *regs = thrd->dmac->pinfo->base;
-       u32 buf = req->mc_bus, pc = readl(regs + CPC(thrd->id));
-
-       if (IS_FREE(req))
-               return false;
-
-       return (pc >= buf && pc <= buf + req->mc_len) ? true : false;
-}
-
-/* Returns 0 if the thread is inactive, ID of active req + 1 otherwise */
-static inline unsigned _thrd_active(struct pl330_thread *thrd)
-{
-       if (_req_active(thrd, &thrd->req[0]))
-               return 1; /* First req active */
-
-       if (_req_active(thrd, &thrd->req[1]))
-               return 2; /* Second req active */
-
-       return 0;
-}
-
 static void _stop(struct pl330_thread *thrd)
 {
        void __iomem *regs = thrd->dmac->pinfo->base;
@@ -892,17 +874,22 @@ static bool _trigger(struct pl330_thread *thrd)
        struct _arg_GO go;
        unsigned ns;
        u8 insn[6] = {0, 0, 0, 0, 0, 0};
+       int idx;
 
        /* Return if already ACTIVE */
        if (_state(thrd) != PL330_STATE_STOPPED)
                return true;
 
-       if (!IS_FREE(&thrd->req[1 - thrd->lstenq]))
-               req = &thrd->req[1 - thrd->lstenq];
-       else if (!IS_FREE(&thrd->req[thrd->lstenq]))
-               req = &thrd->req[thrd->lstenq];
-       else
-               req = NULL;
+       idx = 1 - thrd->lstenq;
+       if (!IS_FREE(&thrd->req[idx]))
+               req = &thrd->req[idx];
+       else {
+               idx = thrd->lstenq;
+               if (!IS_FREE(&thrd->req[idx]))
+                       req = &thrd->req[idx];
+               else
+                       req = NULL;
+       }
 
        /* Return if no request */
        if (!req || !req->r)
@@ -933,6 +920,8 @@ static bool _trigger(struct pl330_thread *thrd)
        /* Only manager can execute GO */
        _execute_DBGINSN(thrd, insn, true);
 
+       thrd->req_running = idx;
+
        return true;
 }
 
@@ -1382,8 +1371,8 @@ static void pl330_dotask(unsigned long data)
 
                        thrd->req[0].r = NULL;
                        thrd->req[1].r = NULL;
-                       MARK_FREE(&thrd->req[0]);
-                       MARK_FREE(&thrd->req[1]);
+                       mark_free(thrd, 0);
+                       mark_free(thrd, 1);
 
                        /* Clear the reset flag */
                        pl330->dmac_tbd.reset_chan &= ~(1 << i);
@@ -1461,14 +1450,12 @@ int pl330_update(const struct pl330_info *pi)
 
                        thrd = &pl330->channels[id];
 
-                       active = _thrd_active(thrd);
-                       if (!active) /* Aborted */
+                       active = thrd->req_running;
+                       if (active == -1) /* Aborted */
                                continue;
 
-                       active -= 1;
-
                        rqdone = &thrd->req[active];
-                       MARK_FREE(rqdone);
+                       mark_free(thrd, active);
 
                        /* Get going again ASAP */
                        _start(thrd);
@@ -1480,13 +1467,19 @@ int pl330_update(const struct pl330_info *pi)
 
        /* Now that we are in no hurry, do the callbacks */
        while (!list_empty(&pl330->req_done)) {
+               struct pl330_req *r;
+
                rqdone = container_of(pl330->req_done.next,
                                        struct _pl330_req, rqd);
 
                list_del_init(&rqdone->rqd);
 
+               /* Detach the req */
+               r = rqdone->r;
+               rqdone->r = NULL;
+
                spin_unlock_irqrestore(&pl330->lock, flags);
-               _callback(rqdone->r, PL330_ERR_NONE);
+               _callback(r, PL330_ERR_NONE);
                spin_lock_irqsave(&pl330->lock, flags);
        }
 
@@ -1509,7 +1502,7 @@ int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
        struct pl330_thread *thrd = ch_id;
        struct pl330_dmac *pl330;
        unsigned long flags;
-       int ret = 0, active;
+       int ret = 0, active = thrd->req_running;
 
        if (!thrd || thrd->free || thrd->dmac->state == DYING)
                return -EINVAL;
@@ -1525,28 +1518,24 @@ int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
 
                thrd->req[0].r = NULL;
                thrd->req[1].r = NULL;
-               MARK_FREE(&thrd->req[0]);
-               MARK_FREE(&thrd->req[1]);
+               mark_free(thrd, 0);
+               mark_free(thrd, 1);
                break;
 
        case PL330_OP_ABORT:
-               active = _thrd_active(thrd);
-
                /* Make sure the channel is stopped */
                _stop(thrd);
 
                /* ABORT is only for the active req */
-               if (!active)
+               if (active == -1)
                        break;
 
-               active--;
-
                thrd->req[active].r = NULL;
-               MARK_FREE(&thrd->req[active]);
+               mark_free(thrd, active);
 
                /* Start the next */
        case PL330_OP_START:
-               if (!_thrd_active(thrd) && !_start(thrd))
+               if ((active == -1) && !_start(thrd))
                        ret = -EIO;
                break;
 
@@ -1587,14 +1576,13 @@ int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus)
        else
                pstatus->faulting = false;
 
-       active = _thrd_active(thrd);
+       active = thrd->req_running;
 
-       if (!active) {
+       if (active == -1) {
                /* Indicate that the thread is not running */
                pstatus->top_req = NULL;
                pstatus->wait_req = NULL;
        } else {
-               active--;
                pstatus->top_req = thrd->req[active].r;
                pstatus->wait_req = !IS_FREE(&thrd->req[1 - active])
                                        ? thrd->req[1 - active].r : NULL;
@@ -1659,9 +1647,9 @@ void *pl330_request_channel(const struct pl330_info *pi)
                                thrd->free = false;
                                thrd->lstenq = 1;
                                thrd->req[0].r = NULL;
-                               MARK_FREE(&thrd->req[0]);
+                               mark_free(thrd, 0);
                                thrd->req[1].r = NULL;
-                               MARK_FREE(&thrd->req[1]);
+                               mark_free(thrd, 1);
                                break;
                        }
                }
@@ -1767,14 +1755,14 @@ static inline void _reset_thread(struct pl330_thread *thrd)
        thrd->req[0].mc_bus = pl330->mcode_bus
                                + (thrd->id * pi->mcbufsz);
        thrd->req[0].r = NULL;
-       MARK_FREE(&thrd->req[0]);
+       mark_free(thrd, 0);
 
        thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
                                + pi->mcbufsz / 2;
        thrd->req[1].mc_bus = thrd->req[0].mc_bus
                                + pi->mcbufsz / 2;
        thrd->req[1].r = NULL;
-       MARK_FREE(&thrd->req[1]);
+       mark_free(thrd, 1);
 }
 
 static int dmac_alloc_threads(struct pl330_dmac *pl330)
index 2393b5bc96fa12845846bffe613a01c61db176b2..8794a34eae61d87b34d433dc1266c555c7687fca 100644 (file)
@@ -143,7 +143,6 @@ static int sp804_set_next_event(unsigned long next,
 }
 
 static struct clock_event_device sp804_clockevent = {
-       .shift          = 32,
        .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
        .set_mode       = sp804_set_mode,
        .set_next_event = sp804_set_next_event,
@@ -169,13 +168,9 @@ void __init sp804_clockevents_init(void __iomem *base, unsigned int irq,
 
        clkevt_base = base;
        clkevt_reload = DIV_ROUND_CLOSEST(rate, HZ);
-
        evt->name = name;
        evt->irq = irq;
-       evt->mult = div_sc(rate, NSEC_PER_SEC, evt->shift);
-       evt->max_delta_ns = clockevent_delta2ns(0xffffffff, evt);
-       evt->min_delta_ns = clockevent_delta2ns(0xf, evt);
 
        setup_irq(irq, &sp804_timer_irq);
-       clockevents_register_device(evt);
+       clockevents_config_and_register(evt, rate, 0xf, 0xffffffff);
 }
index 11a4192197c8fbaef84a10a53ce24fb497ae3947..cf497ce41dfe725bf5f4549faab3bc4c82a5728c 100644 (file)
@@ -18,9 +18,10 @@ CONFIG_ARCH_MXC=y
 CONFIG_ARCH_IMX_V4_V5=y
 CONFIG_ARCH_MX1ADS=y
 CONFIG_MACH_SCB9328=y
+CONFIG_MACH_APF9328=y
 CONFIG_MACH_MX21ADS=y
 CONFIG_MACH_MX25_3DS=y
-CONFIG_MACH_EUKREA_CPUIMX25=y
+CONFIG_MACH_EUKREA_CPUIMX25SD=y
 CONFIG_MACH_MX27ADS=y
 CONFIG_MACH_PCM038=y
 CONFIG_MACH_CPUIMX27=y
@@ -72,17 +73,16 @@ CONFIG_MTD_CFI_GEOMETRY=y
 CONFIG_MTD_CFI_INTELEXT=y
 CONFIG_MTD_PHYSMAP=y
 CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_MXC=y
 CONFIG_MTD_UBI=y
 CONFIG_MISC_DEVICES=y
 CONFIG_EEPROM_AT24=y
 CONFIG_EEPROM_AT25=y
 CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMC91X=y
 CONFIG_DM9000=y
+CONFIG_SMC91X=y
 CONFIG_SMC911X=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+CONFIG_SMSC_PHY=y
 # CONFIG_INPUT_MOUSEDEV is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_KEYBOARD is not set
@@ -100,6 +100,7 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_IMX=y
 CONFIG_SPI=y
 CONFIG_SPI_IMX=y
+CONFIG_SPI_SPIDEV=y
 CONFIG_W1=y
 CONFIG_W1_MASTER_MXC=y
 CONFIG_W1_SLAVE_THERM=y
@@ -139,6 +140,7 @@ CONFIG_MMC=y
 CONFIG_MMC_MXC=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
 CONFIG_LEDS_MC13783=y
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=y
index a7e77758137881bf52067b145ae220f0447b7fdc..945a34f2a34dbd9e711ac440500d1e17cfad2ac0 100644 (file)
@@ -48,12 +48,7 @@ CONFIG_MACH_SX1=y
 CONFIG_MACH_NOKIA770=y
 CONFIG_MACH_AMS_DELTA=y
 CONFIG_MACH_OMAP_GENERIC=y
-CONFIG_OMAP_ARM_216MHZ=y
-CONFIG_OMAP_ARM_195MHZ=y
-CONFIG_OMAP_ARM_192MHZ=y
 CONFIG_OMAP_ARM_182MHZ=y
-CONFIG_OMAP_ARM_168MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
 # CONFIG_ARM_THUMB is not set
 CONFIG_PCCARD=y
 CONFIG_OMAP_CF=y
index 9abe7a07d5acdde3eea377cda3634e5f5c838115..fac79dceb7361b411eafad341d824f1eef7b7bfb 100644 (file)
@@ -32,7 +32,6 @@
 
 #define __BUG(__file, __line, __value)                         \
 do {                                                           \
-       BUILD_BUG_ON(sizeof(struct bug_entry) != 12);           \
        asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n"        \
                ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
                "2:\t.asciz " #__file "\n"                      \
diff --git a/arch/arm/include/asm/edac.h b/arch/arm/include/asm/edac.h
new file mode 100644 (file)
index 0000000..0df7a2c
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2011 Calxeda, Inc.
+ * Based on PPC version Copyright 2007 MontaVista Software, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef ASM_EDAC_H
+#define ASM_EDAC_H
+/*
+ * ECC atomic, DMA, SMP and interrupt safe scrub function.
+ * Implements the per arch atomic_scrub() that EDAC use for software
+ * ECC scrubbing.  It reads memory and then writes back the original
+ * value, allowing the hardware to detect and correct memory errors.
+ */
+static inline void atomic_scrub(void *va, u32 size)
+{
+#if __LINUX_ARM_ARCH__ >= 6
+       unsigned int *virt_addr = va;
+       unsigned int temp, temp2;
+       unsigned int i;
+
+       for (i = 0; i < size / sizeof(*virt_addr); i++, virt_addr++) {
+               /* Very carefully read and write to memory atomically
+                * so we are interrupt, DMA and SMP safe.
+                */
+               __asm__ __volatile__("\n"
+                       "1:     ldrex   %0, [%2]\n"
+                       "       strex   %1, %0, [%2]\n"
+                       "       teq     %1, #0\n"
+                       "       bne     1b\n"
+                       : "=&r"(temp), "=&r"(temp2)
+                       : "r"(virt_addr)
+                       : "cc");
+       }
+#endif
+}
+
+#endif
index 11ad0bfbb0ad67ca3c3865d25a216b22a13c10f5..7151753b0989f922c5a0e9d29fce5e057ae2ad5e 100644 (file)
@@ -1,6 +1,10 @@
 #ifndef _ARCH_ARM_GPIO_H
 #define _ARCH_ARM_GPIO_H
 
+#if CONFIG_ARCH_NR_GPIO > 0
+#define ARCH_NR_GPIO CONFIG_ARCH_NR_GPIO
+#endif
+
 /* not all ARM platforms necessarily support this API ... */
 #include <mach/gpio.h>
 
index ddf07a92a6c8484029927aa51fa09816ce38c337..436e60b2cf7a1bad5893ef33e3efac1791cb7ef5 100644 (file)
@@ -27,23 +27,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu);
 
 #define arch_irq_stat_cpu      smp_irq_stat_cpu
 
-#if NR_IRQS > 512
-#define HARDIRQ_BITS   10
-#elif NR_IRQS > 256
-#define HARDIRQ_BITS   9
-#else
-#define HARDIRQ_BITS   8
-#endif
-
-/*
- * The hardirq mask has to be large enough to have space
- * for potentially all IRQ sources in the system nesting
- * on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
 #define __ARCH_IRQ_EXIT_IRQS_DISABLED  1
 
 #endif /* __ASM_HARDIRQ_H */
diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h
new file mode 100644 (file)
index 0000000..c0efdd6
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ *  arch/arm/include/asm/opcodes.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARM_OPCODES_H
+#define __ASM_ARM_OPCODES_H
+
+#ifndef __ASSEMBLY__
+extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
+#endif
+
+#define ARM_OPCODE_CONDTEST_FAIL   0
+#define ARM_OPCODE_CONDTEST_PASS   1
+#define ARM_OPCODE_CONDTEST_UNCOND 2
+
+#endif /* __ASM_ARM_OPCODES_H */
index 111979057e6a2036537fb3c4484d0bdd3cabbab1..f66626d71e7d1a304ad2c750fc4b00be9a436901 100644 (file)
@@ -299,6 +299,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  * We provide our own arch_get_unmapped_area to cope with VIPT caches.
  */
 #define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
 /*
  * remap a physical page `pfn' of size `size' with page protection `prot'
index b2d9df5667af937476baf5e64ded8520ea3fd5d4..ce280b8d613cbc7821a2adedfa32186988caf1e0 100644 (file)
@@ -123,6 +123,8 @@ static inline void prefetch(const void *ptr)
 
 #endif
 
+#define HAVE_ARCH_PICK_MMAP_LAYOUT
+
 #endif
 
 #endif /* __ASM_ARM_PROCESSOR_H */
index 915696dd9c7c32d4d9702d54c933d50bbc4ca1e0..23ebc0c82a3975ae5c455dd39598e93ab33922e7 100644 (file)
@@ -192,11 +192,7 @@ static const struct tagtable __tagtable_##fn __tag = { tag, fn }
 /*
  * Memory map description
  */
-#ifdef CONFIG_ARCH_EP93XX
-# define NR_BANKS 16
-#else
-# define NR_BANKS 8
-#endif
+#define NR_BANKS       CONFIG_ARM_NR_BANKS
 
 struct membank {
        phys_addr_t start;
index 9997ad20eff11d04906882b0a8da3c7c47ac4f76..32ee164a2f6bb73e066dd34debd6b1355f118f2a 100644 (file)
 
 #if defined(__KERNEL__) && __LINUX_ARM_ARCH__ >= 6
 
-static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+static inline __attribute_const__ __u32 __arch_swahb32(__u32 x)
 {
        __asm__ ("rev16 %0, %1" : "=r" (x) : "r" (x));
        return x;
 }
-#define __arch_swab16 __arch_swab16
+#define __arch_swahb32 __arch_swahb32
+#define __arch_swab16(x) ((__u16)__arch_swahb32(x))
 
 static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 {
index a5edf421005cce0d043cbb75be394253127a2582..d1c3f3a71c9454dd665b48a0cfc51bc954576a05 100644 (file)
@@ -30,14 +30,15 @@ enum unwind_reason_code {
 };
 
 struct unwind_idx {
-       unsigned long addr;
+       unsigned long addr_offset;
        unsigned long insn;
 };
 
 struct unwind_table {
        struct list_head list;
-       struct unwind_idx *start;
-       struct unwind_idx *stop;
+       const struct unwind_idx *start;
+       const struct unwind_idx *origin;
+       const struct unwind_idx *stop;
        unsigned long begin_addr;
        unsigned long end_addr;
 };
@@ -49,15 +50,6 @@ extern struct unwind_table *unwind_table_add(unsigned long start,
 extern void unwind_table_del(struct unwind_table *tab);
 extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);
 
-#ifdef CONFIG_ARM_UNWIND
-extern int __init unwind_init(void);
-#else
-static inline int __init unwind_init(void)
-{
-       return 0;
-}
-#endif
-
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_ARM_UNWIND
index 16eed6aebfa45b9b1812001a782d5f9c8247976c..43b740d0e3744ab65b3c6036f342d734753b97de 100644 (file)
@@ -13,7 +13,7 @@ CFLAGS_REMOVE_return_address.o = -pg
 
 # Object file lists.
 
-obj-y          := elf.o entry-armv.o entry-common.o irq.o \
+obj-y          := elf.o entry-armv.o entry-common.o irq.o opcodes.o \
                   process.o ptrace.o return_address.o setup.o signal.o \
                   sys_arm.o stacktrace.o time.o traps.o
 
index e17cdd6d90d8734553835e5b49bd87027226c1ad..1862d8f2fd44c00d1d535a1b447a5ab6cdf333b7 100644 (file)
 #include <linux/slab.h>
 #include <linux/kprobes.h>
 
+#include <asm/opcodes.h>
+
 #include "kprobes.h"
 #include "kprobes-test.h"
 
@@ -1050,65 +1052,9 @@ static int test_instance;
 
 static unsigned long test_check_cc(int cc, unsigned long cpsr)
 {
-       unsigned long temp;
-
-       switch (cc) {
-       case 0x0: /* eq */
-               return cpsr & PSR_Z_BIT;
-
-       case 0x1: /* ne */
-               return (~cpsr) & PSR_Z_BIT;
-
-       case 0x2: /* cs */
-               return cpsr & PSR_C_BIT;
-
-       case 0x3: /* cc */
-               return (~cpsr) & PSR_C_BIT;
-
-       case 0x4: /* mi */
-               return cpsr & PSR_N_BIT;
-
-       case 0x5: /* pl */
-               return (~cpsr) & PSR_N_BIT;
-
-       case 0x6: /* vs */
-               return cpsr & PSR_V_BIT;
-
-       case 0x7: /* vc */
-               return (~cpsr) & PSR_V_BIT;
+       int ret = arm_check_condition(cc << 28, cpsr);
 
-       case 0x8: /* hi */
-               cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
-               return cpsr & PSR_C_BIT;
-
-       case 0x9: /* ls */
-               cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
-               return (~cpsr) & PSR_C_BIT;
-
-       case 0xa: /* ge */
-               cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
-               return (~cpsr) & PSR_N_BIT;
-
-       case 0xb: /* lt */
-               cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
-               return cpsr & PSR_N_BIT;
-
-       case 0xc: /* gt */
-               temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
-               temp |= (cpsr << 1);       /* PSR_N_BIT |= PSR_Z_BIT */
-               return (~temp) & PSR_N_BIT;
-
-       case 0xd: /* le */
-               temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
-               temp |= (cpsr << 1);       /* PSR_N_BIT |= PSR_Z_BIT */
-               return temp & PSR_N_BIT;
-
-       case 0xe: /* al */
-       case 0xf: /* unconditional */
-               return true;
-       }
-       BUG();
-       return false;
+       return (ret != ARM_OPCODE_CONDTEST_FAIL);
 }
 
 static int is_last_scenario;
@@ -1128,7 +1074,9 @@ static unsigned long test_context_cpsr(int scenario)
 
        if (!test_case_is_thumb) {
                /* Testing ARM code */
-               probe_should_run = test_check_cc(current_instruction >> 28, cpsr) != 0;
+               int cc = current_instruction >> 28;
+
+               probe_should_run = test_check_cc(cc, cpsr) != 0;
                if (scenario == 15)
                        is_last_scenario = true;
 
diff --git a/arch/arm/kernel/opcodes.c b/arch/arm/kernel/opcodes.c
new file mode 100644 (file)
index 0000000..f8179c6
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ *  linux/arch/arm/kernel/opcodes.c
+ *
+ *  A32 condition code lookup feature moved from nwfpe/fpopcode.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <asm/opcodes.h>
+
+#define ARM_OPCODE_CONDITION_UNCOND 0xf
+
+/*
+ * condition code lookup table
+ * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
+ *
+ * bit position in short is condition code: NZCV
+ */
+static const unsigned short cc_map[16] = {
+       0xF0F0,                 /* EQ == Z set            */
+       0x0F0F,                 /* NE                     */
+       0xCCCC,                 /* CS == C set            */
+       0x3333,                 /* CC                     */
+       0xFF00,                 /* MI == N set            */
+       0x00FF,                 /* PL                     */
+       0xAAAA,                 /* VS == V set            */
+       0x5555,                 /* VC                     */
+       0x0C0C,                 /* HI == C set && Z clear */
+       0xF3F3,                 /* LS == C clear || Z set */
+       0xAA55,                 /* GE == (N==V)           */
+       0x55AA,                 /* LT == (N!=V)           */
+       0x0A05,                 /* GT == (!Z && (N==V))   */
+       0xF5FA,                 /* LE == (Z || (N!=V))    */
+       0xFFFF,                 /* AL always              */
+       0                       /* NV                     */
+};
+
+/*
+ * Returns:
+ * ARM_OPCODE_CONDTEST_FAIL   - if condition fails
+ * ARM_OPCODE_CONDTEST_PASS   - if condition passes (including AL)
+ * ARM_OPCODE_CONDTEST_UNCOND - if NV condition, or separate unconditional
+ *                              opcode space from v5 onwards
+ *
+ * Code that tests whether a conditional instruction would pass its condition
+ * check should check that return value == ARM_OPCODE_CONDTEST_PASS.
+ *
+ * Code that tests if a condition means that the instruction would be executed
+ * (regardless of conditional or unconditional) should instead check that the
+ * return value != ARM_OPCODE_CONDTEST_FAIL.
+ */
+asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr)
+{
+       u32 cc_bits  = opcode >> 28;
+       u32 psr_cond = psr >> 28;
+       unsigned int ret;
+
+       if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
+               if ((cc_map[cc_bits] >> (psr_cond)) & 1)
+                       ret = ARM_OPCODE_CONDTEST_PASS;
+               else
+                       ret = ARM_OPCODE_CONDTEST_FAIL;
+       } else {
+               ret = ARM_OPCODE_CONDTEST_UNCOND;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(arm_check_condition);
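
The table-driven check can be exercised in isolation. The standalone sketch below (user-space C that mirrors the kernel code above, not part of the patch) shows how a concrete PSR indexes cc_map: with only the Z flag set, psr >> 28 is 0x4, so bit 4 of cc_map[cond] decides the outcome.

#include <stdio.h>
#include <stdint.h>

#define ARM_OPCODE_CONDTEST_FAIL   0
#define ARM_OPCODE_CONDTEST_PASS   1
#define ARM_OPCODE_CONDTEST_UNCOND 2

static const unsigned short cc_map[16] = {
	0xF0F0, 0x0F0F, 0xCCCC, 0x3333, 0xFF00, 0x00FF, 0xAAAA, 0x5555,
	0x0C0C, 0xF3F3, 0xAA55, 0x55AA, 0x0A05, 0xF5FA, 0xFFFF, 0x0000,
};

static unsigned int check_condition(uint32_t opcode, uint32_t psr)
{
	uint32_t cc_bits = opcode >> 28;	/* condition field */
	uint32_t psr_cond = psr >> 28;		/* NZCV flags */

	if (cc_bits == 0xf)
		return ARM_OPCODE_CONDTEST_UNCOND;
	return ((cc_map[cc_bits] >> psr_cond) & 1) ?
		ARM_OPCODE_CONDTEST_PASS : ARM_OPCODE_CONDTEST_FAIL;
}

int main(void)
{
	uint32_t psr_z_set = 0x40000000;	/* Z=1, N=C=V=0 */

	/* MOVEQ r0, r0 (cond 0x0): cc_map[0] = 0xF0F0, bit 4 set -> 1 (pass) */
	printf("EQ with Z set: %u\n", check_condition(0x01A00000, psr_z_set));
	/* MOVNE r0, r0 (cond 0x1): cc_map[1] = 0x0F0F, bit 4 clear -> 0 (fail) */
	printf("NE with Z set: %u\n", check_condition(0x11A00000, psr_z_set));
	return 0;
}
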
index 172101ac97de5a1125a014a02f1c1d71922a1640..5bb91bf3d47f24c9c6fb75324535c39e54b2c62e 100644 (file)
@@ -346,15 +346,15 @@ validate_group(struct perf_event *event)
        fake_pmu.used_mask = fake_used_mask;
 
        if (!validate_event(&fake_pmu, leader))
-               return -ENOSPC;
+               return -EINVAL;
 
        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!validate_event(&fake_pmu, sibling))
-                       return -ENOSPC;
+                       return -EINVAL;
        }
 
        if (!validate_event(&fake_pmu, event))
-               return -ENOSPC;
+               return -EINVAL;
 
        return 0;
 }
@@ -639,6 +639,9 @@ static struct platform_device_id armpmu_plat_device_ids[] = {
 
 static int __devinit armpmu_device_probe(struct platform_device *pdev)
 {
+       if (!cpu_pmu)
+               return -ENODEV;
+
        cpu_pmu->plat_device = pdev;
        return 0;
 }
index 5c7094e8f6e9e5f765e0ef2ac79c3711fc7fc923..095d6611c84e60443ae429e2c5e36b4074a02df7 100644 (file)
@@ -902,8 +902,6 @@ void __init setup_arch(char **cmdline_p)
 {
        struct machine_desc *mdesc;
 
-       unwind_init();
-
        setup_processor();
        mdesc = setup_machine_fdt(__atags_pointer);
        if (!mdesc)
@@ -911,6 +909,12 @@ void __init setup_arch(char **cmdline_p)
        machine_desc = mdesc;
        machine_name = mdesc->name;
 
+#ifdef CONFIG_ZONE_DMA
+       if (mdesc->dma_zone_size) {
+               extern unsigned long arm_dma_zone_size;
+               arm_dma_zone_size = mdesc->dma_zone_size;
+       }
+#endif
        if (mdesc->restart_mode)
                reboot_setup(&mdesc->restart_mode);
 
@@ -945,12 +949,6 @@ void __init setup_arch(char **cmdline_p)
 
        tcm_init();
 
-#ifdef CONFIG_ZONE_DMA
-       if (mdesc->dma_zone_size) {
-               extern unsigned long arm_dma_zone_size;
-               arm_dma_zone_size = mdesc->dma_zone_size;
-       }
-#endif
 #ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
 #endif
index a8a6682d6b52f38ce464d8f500f91c416c101ee3..c8e938553d478015d57d436ec43908b81b460d3a 100644 (file)
  */
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/err.h>
 #include <linux/smp.h>
 #include <linux/jiffies.h>
 #include <linux/clockchips.h>
@@ -25,6 +28,7 @@
 /* set up by the platform code */
 void __iomem *twd_base;
 
+static struct clk *twd_clk;
 static unsigned long twd_timer_rate;
 
 static struct clock_event_device __percpu **twd_evt;
@@ -89,6 +93,52 @@ void twd_timer_stop(struct clock_event_device *clk)
        disable_percpu_irq(clk->irq);
 }
 
+#ifdef CONFIG_CPU_FREQ
+
+/*
+ * Updates clockevent frequency when the cpu frequency changes.
+ * Called on the cpu that is changing frequency with interrupts disabled.
+ */
+static void twd_update_frequency(void *data)
+{
+       twd_timer_rate = clk_get_rate(twd_clk);
+
+       clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate);
+}
+
+static int twd_cpufreq_transition(struct notifier_block *nb,
+       unsigned long state, void *data)
+{
+       struct cpufreq_freqs *freqs = data;
+
+       /*
+        * The twd clock events must be reprogrammed to account for the new
+        * frequency.  The timer is local to a cpu, so cross-call to the
+        * changing cpu.
+        */
+       if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
+               smp_call_function_single(freqs->cpu, twd_update_frequency,
+                       NULL, 1);
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block twd_cpufreq_nb = {
+       .notifier_call = twd_cpufreq_transition,
+};
+
+static int twd_cpufreq_init(void)
+{
+       if (!IS_ERR(twd_clk))
+               return cpufreq_register_notifier(&twd_cpufreq_nb,
+                       CPUFREQ_TRANSITION_NOTIFIER);
+
+       return 0;
+}
+core_initcall(twd_cpufreq_init);
+
+#endif
+
 static void __cpuinit twd_calibrate_rate(void)
 {
        unsigned long count;
@@ -140,6 +190,35 @@ static irqreturn_t twd_handler(int irq, void *dev_id)
        return IRQ_NONE;
 }
 
+static struct clk *twd_get_clock(void)
+{
+       struct clk *clk;
+       int err;
+
+       clk = clk_get_sys("smp_twd", NULL);
+       if (IS_ERR(clk)) {
+               pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk));
+               return clk;
+       }
+
+       err = clk_prepare(clk);
+       if (err) {
+               pr_err("smp_twd: clock failed to prepare: %d\n", err);
+               clk_put(clk);
+               return ERR_PTR(err);
+       }
+
+       err = clk_enable(clk);
+       if (err) {
+               pr_err("smp_twd: clock failed to enable: %d\n", err);
+               clk_unprepare(clk);
+               clk_put(clk);
+               return ERR_PTR(err);
+       }
+
+       return clk;
+}
+
 /*
  * Setup the local clock events for a CPU.
  */
@@ -165,7 +244,13 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
                }
        }
 
-       twd_calibrate_rate();
+       if (!twd_clk)
+               twd_clk = twd_get_clock();
+
+       if (!IS_ERR_OR_NULL(twd_clk))
+               twd_timer_rate = clk_get_rate(twd_clk);
+       else
+               twd_calibrate_rate();
 
        clk->name = "local_timer";
        clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
@@ -173,15 +258,11 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
        clk->rating = 350;
        clk->set_mode = twd_set_mode;
        clk->set_next_event = twd_set_next_event;
-       clk->shift = 20;
-       clk->mult = div_sc(twd_timer_rate, NSEC_PER_SEC, clk->shift);
-       clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
-       clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
 
        this_cpu_clk = __this_cpu_ptr(twd_evt);
        *this_cpu_clk = clk;
 
-       clockevents_register_device(clk);
-
+       clockevents_config_and_register(clk, twd_timer_rate,
+                                       0xf, 0xffffffff);
        enable_percpu_irq(clk->irq, 0);
 }
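
For twd_get_clock() above to find anything, the platform has to make the TWD input clock visible under the "smp_twd" device id; otherwise the code falls back to the old calibration loop. A minimal sketch of the clkdev registration a platform would add (periph_clk is an assumed name, not taken from this patch):

/* Sketch of the platform side; requires <linux/clkdev.h>. */
static struct clk_lookup twd_lookup =
	CLKDEV_INIT("smp_twd", NULL, &periph_clk);	/* periph_clk: the TWD input clock (illustrative) */

static void __init board_register_twd_clock(void)
{
	clkdev_add(&twd_lookup);
}
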
index 5f452f8fde0569d140e0d6055220f29be08396f1..df745188f5de4abdf3bd04faaa5d31b653b0f210 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/syscalls.h>
 #include <linux/perf_event.h>
 
+#include <asm/opcodes.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
 
@@ -185,6 +186,21 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr)
 
        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
 
+       res = arm_check_condition(instr, regs->ARM_cpsr);
+       switch (res) {
+       case ARM_OPCODE_CONDTEST_PASS:
+               break;
+       case ARM_OPCODE_CONDTEST_FAIL:
+               /* Condition failed - return to next instruction */
+               regs->ARM_pc += 4;
+               return 0;
+       case ARM_OPCODE_CONDTEST_UNCOND:
+               /* If unconditional encoding - not a SWP, undef */
+               return -EFAULT;
+       default:
+               return -EINVAL;
+       }
+
        if (current->pid != previous_pid) {
                pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
                         current->comm, (unsigned long)current->pid);
index 30e302d33e0add9d51e5aeb1261ad350d935c9d4..01ec453bb924de3f48cab22c3a61362384a8957a 100644 (file)
@@ -180,9 +180,9 @@ static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks,
  */
 void __init tcm_init(void)
 {
-       u32 tcm_status = read_cpuid_tcmstatus();
-       u8 dtcm_banks = (tcm_status >> 16) & 0x03;
-       u8 itcm_banks = (tcm_status & 0x03);
+       u32 tcm_status;
+       u8 dtcm_banks;
+       u8 itcm_banks;
        size_t dtcm_code_sz = &__edtcm_data - &__sdtcm_data;
        size_t itcm_code_sz = &__eitcm_text - &__sitcm_text;
        char *start;
@@ -191,6 +191,22 @@ void __init tcm_init(void)
        int ret;
        int i;
 
+       /*
+        * Prior to ARMv5 there is no TCM, and trying to read the status
+        * register will hang the processor.
+        */
+       if (cpu_architecture() < CPU_ARCH_ARMv5) {
+               if (dtcm_code_sz || itcm_code_sz)
+                       pr_info("CPU TCM: %u bytes of DTCM and %u bytes of "
+                               "ITCM code compiled in, but no TCM present "
+                               "in pre-v5 CPU\n", dtcm_code_sz, itcm_code_sz);
+               return;
+       }
+
+       tcm_status = read_cpuid_tcmstatus();
+       dtcm_banks = (tcm_status >> 16) & 0x03;
+       itcm_banks = (tcm_status & 0x03);
+
        /* Values greater than 2 for D/ITCM banks are "reserved" */
        if (dtcm_banks > 2)
                dtcm_banks = 0;
index e7e8365795c3d3272a4ef56d34d41662acd608b8..00df012c46784ac8c510466f1a4124158c3f86a3 100644 (file)
@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);
 
 struct unwind_ctrl_block {
        unsigned long vrs[16];          /* virtual register set */
-       unsigned long *insn;            /* pointer to the current instructions word */
+       const unsigned long *insn;      /* pointer to the current instructions word */
        int entries;                    /* number of entries left to interpret */
        int byte;                       /* current byte number in the instructions word */
 };
@@ -83,8 +83,9 @@ enum regs {
        PC = 15
 };
 
-extern struct unwind_idx __start_unwind_idx[];
-extern struct unwind_idx __stop_unwind_idx[];
+extern const struct unwind_idx __start_unwind_idx[];
+static const struct unwind_idx *__origin_unwind_idx;
+extern const struct unwind_idx __stop_unwind_idx[];
 
 static DEFINE_SPINLOCK(unwind_lock);
 static LIST_HEAD(unwind_tables);
@@ -98,45 +99,99 @@ static LIST_HEAD(unwind_tables);
 })
 
 /*
- * Binary search in the unwind index. The entries entries are
+ * Binary search in the unwind index. The entries are
  * guaranteed to be sorted in ascending order by the linker.
+ *
+ * start = first entry
+ * origin = first entry with positive offset (or stop if there is no such entry)
+ * stop - 1 = last entry
  */
-static struct unwind_idx *search_index(unsigned long addr,
-                                      struct unwind_idx *first,
-                                      struct unwind_idx *last)
+static const struct unwind_idx *search_index(unsigned long addr,
+                                      const struct unwind_idx *start,
+                                      const struct unwind_idx *origin,
+                                      const struct unwind_idx *stop)
 {
-       pr_debug("%s(%08lx, %p, %p)\n", __func__, addr, first, last);
+       unsigned long addr_prel31;
+
+       pr_debug("%s(%08lx, %p, %p, %p)\n",
+                       __func__, addr, start, origin, stop);
+
+       /*
+        * only search in the section with the matching sign. This way the
+        * prel31 numbers can be compared as unsigned longs.
+        */
+       if (addr < (unsigned long)start)
+               /* negative offsets: [start; origin) */
+               stop = origin;
+       else
+               /* positive offsets: [origin; stop) */
+               start = origin;
+
+        * prel31 for address relative to start */
+       addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff;
 
-       if (addr < first->addr) {
+       while (start < stop - 1) {
+               const struct unwind_idx *mid = start + ((stop - start) >> 1);
+
+               /*
+                * As addr_prel31 is relative to start an offset is needed to
+                * make it relative to mid.
+                */
+               if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) <
+                               mid->addr_offset)
+                       stop = mid;
+               else {
+                       /* keep addr_prel31 relative to start */
+                       addr_prel31 -= ((unsigned long)mid -
+                                       (unsigned long)start);
+                       start = mid;
+               }
+       }
+
+       if (likely(start->addr_offset <= addr_prel31))
+               return start;
+       else {
                pr_warning("unwind: Unknown symbol address %08lx\n", addr);
                return NULL;
-       } else if (addr >= last->addr)
-               return last;
+       }
+}
 
-       while (first < last - 1) {
-               struct unwind_idx *mid = first + ((last - first + 1) >> 1);
+static const struct unwind_idx *unwind_find_origin(
+               const struct unwind_idx *start, const struct unwind_idx *stop)
+{
+       pr_debug("%s(%p, %p)\n", __func__, start, stop);
+       while (start < stop) {
+               const struct unwind_idx *mid = start + ((stop - start) >> 1);
 
-               if (addr < mid->addr)
-                       last = mid;
+               if (mid->addr_offset >= 0x40000000)
+                       /* negative offset */
+                       start = mid + 1;
                else
-                       first = mid;
+                       /* positive offset */
+                       stop = mid;
        }
-
-       return first;
+       pr_debug("%s -> %p\n", __func__, stop);
+       return stop;
 }
 
-static struct unwind_idx *unwind_find_idx(unsigned long addr)
+static const struct unwind_idx *unwind_find_idx(unsigned long addr)
 {
-       struct unwind_idx *idx = NULL;
+       const struct unwind_idx *idx = NULL;
        unsigned long flags;
 
        pr_debug("%s(%08lx)\n", __func__, addr);
 
-       if (core_kernel_text(addr))
+       if (core_kernel_text(addr)) {
+               if (unlikely(!__origin_unwind_idx))
+                       __origin_unwind_idx =
+                               unwind_find_origin(__start_unwind_idx,
+                                               __stop_unwind_idx);
+
                /* main unwind table */
                idx = search_index(addr, __start_unwind_idx,
-                                  __stop_unwind_idx - 1);
-       else {
+                                  __origin_unwind_idx,
+                                  __stop_unwind_idx);
+       } else {
                /* module unwind tables */
                struct unwind_table *table;
 
@@ -145,7 +200,8 @@ static struct unwind_idx *unwind_find_idx(unsigned long addr)
                        if (addr >= table->begin_addr &&
                            addr < table->end_addr) {
                                idx = search_index(addr, table->start,
-                                                  table->stop - 1);
+                                                  table->origin,
+                                                  table->stop);
                                /* Move-to-front to exploit common traces */
                                list_move(&table->list, &unwind_tables);
                                break;
@@ -274,7 +330,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
 int unwind_frame(struct stackframe *frame)
 {
        unsigned long high, low;
-       struct unwind_idx *idx;
+       const struct unwind_idx *idx;
        struct unwind_ctrl_block ctrl;
 
        /* only go to a higher address on the stack */
@@ -399,7 +455,6 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
                                      unsigned long text_size)
 {
        unsigned long flags;
-       struct unwind_idx *idx;
        struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);
 
        pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
@@ -408,15 +463,12 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
        if (!tab)
                return tab;
 
-       tab->start = (struct unwind_idx *)start;
-       tab->stop = (struct unwind_idx *)(start + size);
+       tab->start = (const struct unwind_idx *)start;
+       tab->stop = (const struct unwind_idx *)(start + size);
+       tab->origin = unwind_find_origin(tab->start, tab->stop);
        tab->begin_addr = text_addr;
        tab->end_addr = text_addr + text_size;
 
-       /* Convert the symbol addresses to absolute values */
-       for (idx = tab->start; idx < tab->stop; idx++)
-               idx->addr = prel31_to_addr(&idx->addr);
-
        spin_lock_irqsave(&unwind_lock, flags);
        list_add_tail(&tab->list, &unwind_tables);
        spin_unlock_irqrestore(&unwind_lock, flags);
@@ -437,16 +489,3 @@ void unwind_table_del(struct unwind_table *tab)
 
        kfree(tab);
 }
-
-int __init unwind_init(void)
-{
-       struct unwind_idx *idx;
-
-       /* Convert the symbol addresses to absolute values */
-       for (idx = __start_unwind_idx; idx < __stop_unwind_idx; idx++)
-               idx->addr = prel31_to_addr(&idx->addr);
-
-       pr_debug("unwind: ARM stack unwinding initialised\n");
-
-       return 0;
-}
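
The reworked lookup depends on the prel31 encoding of the unwind index: addr_offset is a 31-bit place-relative offset whose sign sits in bit 30, which is why unwind_find_origin() treats values >= 0x40000000 as negative and why the entries no longer need to be rewritten to absolute addresses at boot. A standalone illustration of the sign handling (plain C, not kernel code; like the kernel macro it relies on an arithmetic right shift of signed values):

#include <stdio.h>
#include <stdint.h>

/* Decode a prel31 field: move the sign bit (bit 30) up to bit 31,
 * then arithmetic-shift back down to sign-extend.
 */
static int32_t prel31_offset(uint32_t field)
{
	return (int32_t)(field << 1) >> 1;
}

int main(void)
{
	printf("%d\n", prel31_offset(0x00000010));  /* prints  16: positive offset */
	printf("%d\n", prel31_offset(0x7ffffff0));  /* prints -16: >= 0x40000000, so negative */
	return 0;
}
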
index 66591fa53e057d59011a533a2037cfc976116790..ad930688358ca1c5683e984dc1b85799b582c5c0 100644 (file)
@@ -83,7 +83,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
index b84a9f642f5953a5ff527d3b593b1a628b61232b..0d20677fbef027591c91c2d442d528f7fa6c73f0 100644 (file)
@@ -195,9 +195,9 @@ static struct clk_lookup periph_clocks_lookups[] = {
        CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
        CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
        CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
-       CLKDEV_CON_DEV_ID("t3_clk", "atmel_tcb.1", &tc3_clk),
-       CLKDEV_CON_DEV_ID("t4_clk", "atmel_tcb.1", &tc4_clk),
-       CLKDEV_CON_DEV_ID("t5_clk", "atmel_tcb.1", &tc5_clk),
+       CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
+       CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
+       CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
        CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
        /* more usart lookup table for DT entries */
        CLKDEV_CON_DEV_ID("usart", "fffff200.serial", &mck),
index 25e3464fb07f1fabe1714d009efd9ef8bb4783e2..629fa977497239f171d66ef47563c3da9d2b0127 100644 (file)
@@ -84,7 +84,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
index ae78f4d03b738851b5e0ef191c26997c34304d9a..a178b58b0b9c8d59850ca91f0ecab9203198bbcc 100644 (file)
@@ -87,7 +87,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
index ad017eb1f8df4c2ff9514cae10e0d555269ac09c..d5fbac9ff4faed0da1c112869b5c5144c4cfe2f6 100644 (file)
@@ -92,7 +92,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
  *  USB Device (Gadget)
  * -------------------------------------------------------------------- */
 
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
 static struct at91_udc_data udc_data;
 
 static struct resource udc_resources[] = {
index 8f4866045b41e213172c761f522ffca720a5802e..ec164a4124c9b27aa0c3b72325dc91ec13ddc673 100644 (file)
@@ -19,7 +19,7 @@
 #define BOARD_HAVE_NAND_16BIT  (1 << 31)
 static inline int board_have_nand_16bit(void)
 {
-       return system_rev & BOARD_HAVE_NAND_16BIT;
+       return (system_rev & BOARD_HAVE_NAND_16BIT) ? 1 : 0;
 }
 
 #endif /* __ARCH_SYSTEM_REV_H__ */
index 1d7d2499522674f94143771e84aa26f323869ac7..6659a90dbcadafffdc6ae4988f1a1c6148a955b5 100644 (file)
@@ -753,7 +753,7 @@ static struct snd_platform_data da850_evm_snd_data = {
        .num_serializer = ARRAY_SIZE(da850_iis_serializer_direction),
        .tdm_slots      = 2,
        .serial_dir     = da850_iis_serializer_direction,
-       .asp_chan_q     = EVENTQ_1,
+       .asp_chan_q     = EVENTQ_0,
        .version        = MCASP_VERSION_2,
        .txnumevt       = 1,
        .rxnumevt       = 1,
index 1918ae711428b3d5b9cf2556c212bfc156f90664..46e1f4173b9735c622c8a95c5c9a605782c1eda6 100644 (file)
@@ -107,7 +107,7 @@ static struct mtd_partition davinci_nand_partitions[] = {
                /* UBL (a few copies) plus U-Boot */
                .name           = "bootloader",
                .offset         = 0,
-               .size           = 28 * NAND_BLOCK_SIZE,
+               .size           = 30 * NAND_BLOCK_SIZE,
                .mask_flags     = MTD_WRITEABLE, /* force read-only */
        }, {
                /* U-Boot environment */
index e574d7f837a850e4ddd5efb02f102c47d4e10cd9..635bf7740157bb7ea88b94580c98ead839f44068 100644 (file)
@@ -564,7 +564,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
        int val;
        u32 value;
 
-       if (!vpif_vsclkdis_reg || !cpld_client)
+       if (!vpif_vidclkctl_reg || !cpld_client)
                return -ENXIO;
 
        val = i2c_smbus_read_byte(cpld_client);
@@ -572,7 +572,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
                return val;
 
        spin_lock_irqsave(&vpif_reg_lock, flags);
-       value = __raw_readl(vpif_vsclkdis_reg);
+       value = __raw_readl(vpif_vidclkctl_reg);
        if (mux_mode) {
                val &= VPIF_INPUT_TWO_CHANNEL;
                value |= VIDCH1CLK;
@@ -580,7 +580,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
                val |= VPIF_INPUT_ONE_CHANNEL;
                value &= ~VIDCH1CLK;
        }
-       __raw_writel(value, vpif_vsclkdis_reg);
+       __raw_writel(value, vpif_vidclkctl_reg);
        spin_unlock_irqrestore(&vpif_reg_lock, flags);
 
        err = i2c_smbus_write_byte(cpld_client, val);
index 0b68ed534f8e6d3d0a16effc5cc4811a8f94f85d..af27c130595fb6897cb104253ad157f567d53f04 100644 (file)
@@ -161,7 +161,6 @@ static struct clk dsp_clk = {
        .name = "dsp",
        .parent = &pll1_sysclk1,
        .lpsc = DM646X_LPSC_C64X_CPU,
-       .flags = PSC_DSP,
        .usecount = 1,                  /* REVISIT how to disable? */
 };
 
index fa59c097223dc85ec965224e9ebfc0ac8dccf2c2..8bc3fc2561711801610461c46b803a7fbfbfde9a 100644 (file)
 #define PTCMD          0x120
 #define PTSTAT         0x128
 #define PDSTAT         0x200
-#define PDCTL1         0x304
+#define PDCTL          0x300
 #define MDSTAT         0x800
 #define MDCTL          0xA00
 
 #define PSC_STATE_ENABLE       3
 
 #define MDSTAT_STATE_MASK      0x3f
+#define PDSTAT_STATE_MASK      0x1f
 #define MDCTL_FORCE            BIT(31)
+#define PDCTL_NEXT             BIT(1)
+#define PDCTL_EPCGOOD          BIT(8)
 
 #ifndef __ASSEMBLER__
 
index 1fb6bdff38c1f5e9ff796e72c4c57be824919209..d7e210f4b55c85d7e2a2fc4831fb3f0a77f72990 100644 (file)
@@ -52,7 +52,7 @@ int __init davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id)
 void davinci_psc_config(unsigned int domain, unsigned int ctlr,
                unsigned int id, bool enable, u32 flags)
 {
-       u32 epcpr, ptcmd, ptstat, pdstat, pdctl1, mdstat, mdctl;
+       u32 epcpr, ptcmd, ptstat, pdstat, pdctl, mdstat, mdctl;
        void __iomem *psc_base;
        struct davinci_soc_info *soc_info = &davinci_soc_info;
        u32 next_state = PSC_STATE_ENABLE;
@@ -79,11 +79,11 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
                mdctl |= MDCTL_FORCE;
        __raw_writel(mdctl, psc_base + MDCTL + 4 * id);
 
-       pdstat = __raw_readl(psc_base + PDSTAT);
-       if ((pdstat & 0x00000001) == 0) {
-               pdctl1 = __raw_readl(psc_base + PDCTL1);
-               pdctl1 |= 0x1;
-               __raw_writel(pdctl1, psc_base + PDCTL1);
+       pdstat = __raw_readl(psc_base + PDSTAT + 4 * domain);
+       if ((pdstat & PDSTAT_STATE_MASK) == 0) {
+               pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
+               pdctl |= PDCTL_NEXT;
+               __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
 
                ptcmd = 1 << domain;
                __raw_writel(ptcmd, psc_base + PTCMD);
@@ -92,9 +92,9 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
                        epcpr = __raw_readl(psc_base + EPCPR);
                } while ((((epcpr >> domain) & 1) == 0));
 
-               pdctl1 = __raw_readl(psc_base + PDCTL1);
-               pdctl1 |= 0x100;
-               __raw_writel(pdctl1, psc_base + PDCTL1);
+               pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
+               pdctl |= PDCTL_EPCGOOD;
+               __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
        } else {
                ptcmd = 1 << domain;
                __raw_writel(ptcmd, psc_base + PTCMD);
index 22316cb31a8c4ff136d0f8b52c4df79f05f2669c..699774cbf11232cd295e85c26b31833c95b01180 100644 (file)
@@ -109,11 +109,6 @@ static struct map_desc exynos4_iodesc[] __initdata = {
                .pfn            = __phys_to_pfn(EXYNOS4_PA_DMC0),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
-       }, {
-               .virtual        = (unsigned long)S5P_VA_SROMC,
-               .pfn            = __phys_to_pfn(EXYNOS4_PA_SROMC),
-               .length         = SZ_4K,
-               .type           = MT_DEVICE,
        }, {
                .virtual        = (unsigned long)S3C_VA_USB_HSPHY,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_HSPHY),
index 97343df8f13227c371f8a906f371a7eb1d7c3c74..85b5527d0918e4bea1ca7b9abea9362dd64c357d 100644 (file)
@@ -44,8 +44,6 @@ struct mct_clock_event_device {
        char name[10];
 };
 
-static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
-
 static void exynos4_mct_write(unsigned int value, void *addr)
 {
        void __iomem *stat_addr;
@@ -264,6 +262,9 @@ static void exynos4_clockevent_init(void)
 }
 
 #ifdef CONFIG_LOCAL_TIMERS
+
+static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
+
 /* Clock event handling */
 static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
 {
@@ -428,9 +429,13 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
 
 void local_timer_stop(struct clock_event_device *evt)
 {
+       unsigned int cpu = smp_processor_id();
        evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
        if (mct_int_type == MCT_INT_SPI)
-               disable_irq(evt->irq);
+               if (cpu == 0)
+                       remove_irq(evt->irq, &mct_tick0_event_irq);
+               else
+                       remove_irq(evt->irq, &mct_tick1_event_irq);
        else
                disable_percpu_irq(IRQ_MCT_LOCALTIMER);
 }
@@ -443,6 +448,7 @@ static void __init exynos4_timer_resources(void)
 
        clk_rate = clk_get_rate(mct_clk);
 
+#ifdef CONFIG_LOCAL_TIMERS
        if (mct_int_type == MCT_INT_PPI) {
                int err;
 
@@ -452,6 +458,7 @@ static void __init exynos4_timer_resources(void)
                WARN(err, "MCT: can't request IRQ %d (%d)\n",
                     IRQ_MCT_LOCALTIMER, err);
        }
+#endif /* CONFIG_LOCAL_TIMERS */
 }
 
 static void __init exynos4_timer_init(void)
index 188ecc00915b1d3103c49308edf2aa733b91ce67..35a218cb5c7e96db49f67587606e6425c0ae9083 100644 (file)
@@ -132,7 +132,7 @@ config MACH_MX25_3DS
        select IMX_HAVE_PLATFORM_MXC_NAND
        select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
 
-config MACH_EUKREA_CPUIMX25
+config MACH_EUKREA_CPUIMX25SD
        bool "Support Eukrea CPUIMX25 Platform"
        select SOC_IMX25
        select IMX_HAVE_PLATFORM_FLEXCAN
@@ -148,7 +148,7 @@ config MACH_EUKREA_CPUIMX25
 
 choice
        prompt "Baseboard"
-       depends on MACH_EUKREA_CPUIMX25
+       depends on MACH_EUKREA_CPUIMX25SD
        default MACH_EUKREA_MBIMXSD25_BASEBOARD
 
 config MACH_EUKREA_MBIMXSD25_BASEBOARD
@@ -542,7 +542,7 @@ config MACH_MX35_3DS
          Include support for MX35PDK platform. This includes specific
          configurations for the board and its peripherals.
 
-config MACH_EUKREA_CPUIMX35
+config MACH_EUKREA_CPUIMX35SD
        bool "Support Eukrea CPUIMX35 Platform"
        select SOC_IMX35
        select IMX_HAVE_PLATFORM_FLEXCAN
@@ -560,7 +560,7 @@ config MACH_EUKREA_CPUIMX35
 
 choice
        prompt "Baseboard"
-       depends on MACH_EUKREA_CPUIMX35
+       depends on MACH_EUKREA_CPUIMX35SD
        default MACH_EUKREA_MBIMXSD35_BASEBOARD
 
 config MACH_EUKREA_MBIMXSD35_BASEBOARD
index aba73214c2a8cd640e0f4371caf4ce99ac53dc0f..d97f409ce98be4a9d796dc21d3dc30fb9c7de9c7 100644 (file)
@@ -24,7 +24,7 @@ obj-$(CONFIG_MACH_MX21ADS) += mach-mx21ads.o
 
 # i.MX25 based machines
 obj-$(CONFIG_MACH_MX25_3DS) += mach-mx25_3ds.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX25) += mach-eukrea_cpuimx25.o
+obj-$(CONFIG_MACH_EUKREA_CPUIMX25SD) += mach-eukrea_cpuimx25.o
 obj-$(CONFIG_MACH_EUKREA_MBIMXSD25_BASEBOARD) += eukrea_mbimxsd25-baseboard.o
 
 # i.MX27 based machines
@@ -57,7 +57,7 @@ obj-$(CONFIG_MACH_BUG) += mach-bug.o
 # i.MX35 based machines
 obj-$(CONFIG_MACH_PCM043) += mach-pcm043.o
 obj-$(CONFIG_MACH_MX35_3DS) += mach-mx35_3ds.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX35) += mach-cpuimx35.o
+obj-$(CONFIG_MACH_EUKREA_CPUIMX35SD) += mach-cpuimx35.o
 obj-$(CONFIG_MACH_EUKREA_MBIMXSD35_BASEBOARD) += eukrea_mbimxsd35-baseboard.o
 obj-$(CONFIG_MACH_VPR200) += mach-vpr200.o
 
index 8116f119517d8065ca0653a603863275e29be71b..ac8238caecb98a98bf6c326e267fae24d88bb861 100644 (file)
@@ -507,7 +507,7 @@ static struct clk_lookup lookups[] = {
 
 int __init mx35_clocks_init()
 {
-       unsigned int cgr2 = 3 << 26, cgr3 = 0;
+       unsigned int cgr2 = 3 << 26;
 
 #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
        cgr2 |= 3 << 16;
@@ -521,6 +521,12 @@ int __init mx35_clocks_init()
        __raw_writel((3 << 18), CCM_BASE + CCM_CGR0);
        __raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
                        CCM_BASE + CCM_CGR1);
+       __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
+       __raw_writel(0, CCM_BASE + CCM_CGR3);
+
+       clk_enable(&iim_clk);
+       imx_print_silicon_rev("i.MX35", mx35_revision());
+       clk_disable(&iim_clk);
 
        /*
         * Check if we came up in internal boot mode. If yes, we need some
@@ -529,17 +535,11 @@ int __init mx35_clocks_init()
         */
        if (!(__raw_readl(CCM_BASE + CCM_RCSR) & (3 << 10))) {
                /* Additionally turn on UART1, SCC, and IIM clocks */
-               cgr2 |= 3 << 16 | 3 << 4;
-               cgr3 |= 3 << 2;
+               clk_enable(&iim_clk);
+               clk_enable(&uart1_clk);
+               clk_enable(&scc_clk);
        }
 
-       __raw_writel(cgr2, CCM_BASE + CCM_CGR2);
-       __raw_writel(cgr3, CCM_BASE + CCM_CGR3);
-
-       clk_enable(&iim_clk);
-       imx_print_silicon_rev("i.MX35", mx35_revision());
-       clk_disable(&iim_clk);
-
 #ifdef CONFIG_MXC_USE_EPIT
        epit_timer_init(&epit1_clk,
                        MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
index 66af2e8f7e576dffd5372862653d397d22811069..362aae780601efc41e346a8a7beade651f610d83 100644 (file)
@@ -53,12 +53,18 @@ static const struct imxi2c_platform_data
        .bitrate =              100000,
 };
 
+#define TSC2007_IRQGPIO                IMX_GPIO_NR(3, 2)
+static int tsc2007_get_pendown_state(void)
+{
+       return !gpio_get_value(TSC2007_IRQGPIO);
+}
+
 static struct tsc2007_platform_data tsc2007_info = {
        .model                  = 2007,
        .x_plate_ohms           = 180,
+       .get_pendown_state = tsc2007_get_pendown_state,
 };
 
-#define TSC2007_IRQGPIO                IMX_GPIO_NR(3, 2)
 static struct i2c_board_info eukrea_cpuimx35_i2c_devices[] = {
        {
                I2C_BOARD_INFO("pcf8563", 0x51),
index 9cd860a27af5f797cff2b9e5ceaf1798e09e8777..8deb012189b5a7e185f7299a8ad8c8503edad587 100644 (file)
@@ -37,14 +37,15 @@ static void __init imx6q_map_io(void)
        imx6q_clock_map_io();
 }
 
-static void __init imx6q_gpio_add_irq_domain(struct device_node *np,
+static int __init imx6q_gpio_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
-       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
-                                  32 * 7; /* imx6q gets 7 gpio ports */
+       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
 
+       gpio_irq_base -= 32;
        irq_domain_add_simple(np, gpio_irq_base);
-       gpio_irq_base += 32;
+
+       return 0;
 }
 
 static const struct of_device_id imx6q_irq_match[] __initconst = {
index dfd18f3b50e8a4eb27eddd5029855eb682377b74..350e26636a06c8b0c21a3552377ef268f788aca2 100644 (file)
@@ -6,6 +6,8 @@ config ARCH_INTEGRATOR_AP
        bool "Support Integrator/AP and Integrator/PP2 platforms"
        select CLKSRC_MMIO
        select MIGHT_HAVE_PCI
+       select SERIAL_AMBA_PL010
+       select SERIAL_AMBA_PL010_CONSOLE
        help
          Include support for the ARM(R) Integrator/AP and
          Integrator/PP2 platforms.
@@ -15,6 +17,8 @@ config ARCH_INTEGRATOR_CP
        select ARCH_CINTEGRATOR
        select ARM_TIMER_SP804
        select PLAT_VERSATILE_CLCD
+       select SERIAL_AMBA_PL011
+       select SERIAL_AMBA_PL011_CONSOLE
        help
          Include support for the ARM(R) Integrator CP platform.
 
index 4b38e13667acf0f316e8881305c065f939c5d2cb..18584beda5367aa25f2ab8d41aeb5800fc60a4d0 100644 (file)
@@ -29,6 +29,7 @@
 #include <mach/cm.h>
 #include <asm/system.h>
 #include <asm/leds.h>
+#include <asm/mach-types.h>
 #include <asm/mach/time.h>
 #include <asm/pgtable.h>
 
@@ -44,7 +45,6 @@ static struct amba_device rtc_device = {
                .flags  = IORESOURCE_MEM,
        },
        .irq            = { IRQ_RTCINT, NO_IRQ },
-       .periphid       = 0x00041030,
 };
 
 static struct amba_device uart0_device = {
@@ -58,7 +58,6 @@ static struct amba_device uart0_device = {
                .flags  = IORESOURCE_MEM,
        },
        .irq            = { IRQ_UARTINT0, NO_IRQ },
-       .periphid       = 0x0041010,
 };
 
 static struct amba_device uart1_device = {
@@ -72,7 +71,6 @@ static struct amba_device uart1_device = {
                .flags  = IORESOURCE_MEM,
        },
        .irq            = { IRQ_UARTINT1, NO_IRQ },
-       .periphid       = 0x0041010,
 };
 
 static struct amba_device kmi0_device = {
@@ -85,7 +83,6 @@ static struct amba_device kmi0_device = {
                .flags  = IORESOURCE_MEM,
        },
        .irq            = { IRQ_KMIINT0, NO_IRQ },
-       .periphid       = 0x00041050,
 };
 
 static struct amba_device kmi1_device = {
@@ -98,7 +95,6 @@ static struct amba_device kmi1_device = {
                .flags  = IORESOURCE_MEM,
        },
        .irq            = { IRQ_KMIINT1, NO_IRQ },
-       .periphid       = 0x00041050,
 };
 
 static struct amba_device *amba_devs[] __initdata = {
@@ -157,6 +153,19 @@ static int __init integrator_init(void)
 {
        int i;
 
+       /*
+        * The Integrator/AP lacks necessary AMBA PrimeCell IDs, so we need to
+        * hard-code them. The Integrator/CP and later have proper cell IDs.
+        * Otherwise we leave them undefined so the bus driver can autoprobe them.
+        */
+       if (machine_is_integrator()) {
+               rtc_device.periphid     = 0x00041030;
+               uart0_device.periphid   = 0x00041010;
+               uart1_device.periphid   = 0x00041010;
+               kmi0_device.periphid    = 0x00041050;
+               kmi1_device.periphid    = 0x00041050;
+       }
+
        for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
                struct amba_device *d = amba_devs[i];
                amba_device_register(d, &iomem_resource);
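
For reference, these periphid values pack the standard AMBA PrimeCell fields (part number, designer, revision, configuration), which is how 0x00041010 names an ARM PL010 UART and 0x00041050 a PL050 keyboard/mouse interface. A standalone decoding sketch (not part of the patch):

#include <stdio.h>

int main(void)
{
	unsigned int id = 0x00041010;	/* one of the UART ids hard-coded above */

	printf("part   0x%03x\n", id & 0xfff);		/* 0x010 -> PrimeCell PL010 */
	printf("manf   0x%02x\n", (id >> 12) & 0xff);	/* 0x41  -> ARM Ltd */
	printf("rev    0x%x\n",   (id >> 20) & 0xf);	/* silicon revision */
	printf("config 0x%02x\n", (id >> 24) & 0xff);	/* configuration field */
	return 0;
}
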
index 8b102d62e82c7b7a3a5e2fb6dde9c7393e13171e..046eeb6b9f5491b86ab764c75559344cb66005fe 100644 (file)
@@ -107,7 +107,7 @@ static void __init sheevaplug_init(void)
        kirkwood_init();
 
        /* setup gpio pin select */
-       if (machine_is_sheeva_esata())
+       if (machine_is_esata_sheevaplug())
                kirkwood_mpp_conf(sheeva_esata_mpp_config);
        else
                kirkwood_mpp_conf(sheevaplug_mpp_config);
@@ -123,11 +123,11 @@ static void __init sheevaplug_init(void)
        kirkwood_ge00_init(&sheevaplug_ge00_data);
 
        /* honor lower power consumption for plugs without eSATA */
-       if (machine_is_sheeva_esata())
+       if (machine_is_esata_sheevaplug())
                kirkwood_sata_init(&sheeva_esata_sata_data);
 
        /* enable sd wp and sd cd on plugs with esata */
-       if (machine_is_sheeva_esata())
+       if (machine_is_esata_sheevaplug())
                kirkwood_sdio_init(&sheeva_esata_mvsdio_data);
        else
                kirkwood_sdio_init(&sheevaplug_mvsdio_data);
index 24030d0da6e3c59ec9ddfa2a76995001f747bc62..0fb7a17df3987e259297d21de210971d2f65fb80 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/bootmem.h>
+#include <linux/module.h>
 #include <mach/irqs.h>
 #include <mach/iommu.h>
 
index 5c837603ff0fc3b6a173ab677bdfda2a75bec09b..24994bb521475b7ac728dc9a0616abbea7586ca7 100644 (file)
@@ -362,7 +362,7 @@ static void __init mx51_babbage_init(void)
 {
        iomux_v3_cfg_t usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP;
        iomux_v3_cfg_t power_key = NEW_PAD_CTRL(MX51_PAD_EIM_A27__GPIO2_21,
-               PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP);
+               PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH);
 
        imx51_soc_init();
 
index 6bea31ab8f8581ee637a7f9a998157cd982a1b1b..64bbfcea6f350cd781bbe875e29a2d63bc8a3b61 100644 (file)
@@ -106,7 +106,7 @@ static inline void mx53_evk_fec_reset(void)
        gpio_set_value(MX53_EVK_FEC_PHY_RST, 1);
 }
 
-static struct fec_platform_data mx53_evk_fec_pdata = {
+static const struct fec_platform_data mx53_evk_fec_pdata __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
index 7678f7734db631ab5163bc1144520311e2666e18..237bdecd933180f7e8579ccb08e458064a5ca8fa 100644 (file)
@@ -242,7 +242,7 @@ static inline void mx53_loco_fec_reset(void)
        gpio_set_value(LOCO_FEC_PHY_RST, 1);
 }
 
-static struct fec_platform_data mx53_loco_fec_data = {
+static const struct fec_platform_data mx53_loco_fec_data __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
index 59c0845eb4a6321badc4f6b1f58943e64c68a33e..d42132a80e8ffc8e7d4e5f34c99fe8f83da984c6 100644 (file)
@@ -104,7 +104,7 @@ static inline void mx53_smd_fec_reset(void)
        gpio_set_value(SMD_FEC_PHY_RST, 1);
 }
 
-static struct fec_platform_data mx53_smd_fec_data = {
+static const struct fec_platform_data mx53_smd_fec_data __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
index ccc61585659bdb17d55bb91bfc1cbb2ebf6cca1c..596edd967dbfef9a21b009b327f42cdda3ad60b0 100644 (file)
@@ -44,20 +44,22 @@ static const struct of_dev_auxdata imx51_auxdata_lookup[] __initconst = {
        { /* sentinel */ }
 };
 
-static void __init imx51_tzic_add_irq_domain(struct device_node *np,
+static int __init imx51_tzic_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
        irq_domain_add_simple(np, 0);
+       return 0;
 }
 
-static void __init imx51_gpio_add_irq_domain(struct device_node *np,
+static int __init imx51_gpio_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
-       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
-                                  32 * 4; /* imx51 gets 4 gpio ports */
+       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
 
+       gpio_irq_base -= 32;
        irq_domain_add_simple(np, gpio_irq_base);
-       gpio_irq_base += 32;
+
+       return 0;
 }
 
 static const struct of_device_id imx51_irq_match[] __initconst = {
index ccaa0b81b7683f86b7750ca852f6b8b67d80eabd..85bfd5ff21b0bb925583679260321fbc7e25731b 100644 (file)
@@ -48,20 +48,22 @@ static const struct of_dev_auxdata imx53_auxdata_lookup[] __initconst = {
        { /* sentinel */ }
 };
 
-static void __init imx53_tzic_add_irq_domain(struct device_node *np,
+static int __init imx53_tzic_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
        irq_domain_add_simple(np, 0);
+       return 0;
 }
 
-static void __init imx53_gpio_add_irq_domain(struct device_node *np,
+static int __init imx53_gpio_add_irq_domain(struct device_node *np,
                                struct device_node *interrupt_parent)
 {
-       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
-                                  32 * 7; /* imx53 gets 7 gpio ports */
+       static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
 
+       gpio_irq_base -= 32;
        irq_domain_add_simple(np, gpio_irq_base);
-       gpio_irq_base += 32;
+
+       return 0;
 }
 
 static const struct of_device_id imx53_irq_match[] __initconst = {
index 75d86118b76a2c524cea92c7320d25bb7d4225a5..30c7990f3c01d65c1f05ad9d425a090381661c9b 100644 (file)
 #define MX28_INT_CAN1                  9
 #define MX28_INT_LRADC_TOUCH           10
 #define MX28_INT_HSADC                 13
-#define MX28_INT_IRADC_THRESH0         14
-#define MX28_INT_IRADC_THRESH1         15
+#define MX28_INT_LRADC_THRESH0         14
+#define MX28_INT_LRADC_THRESH1         15
 #define MX28_INT_LRADC_CH0             16
 #define MX28_INT_LRADC_CH1             17
 #define MX28_INT_LRADC_CH2             18
index 0d2d2b470998a9d2cfac229654a9ee2135272a2f..bde5f6634747c639af514ecf9d699b1176891802 100644 (file)
@@ -30,6 +30,7 @@
  */
 #define cpu_is_mx23()          (                                       \
                machine_is_mx23evk() ||                                 \
+               machine_is_stmp378x() ||                                \
                0)
 #define cpu_is_mx28()          (                                       \
                machine_is_mx28evk() ||                                 \
index 3b1681e4f49a1ae633e5ac4b3d31137f2f92b014..6b00577b70256254e29951bdf8828a2ecd613fdb 100644 (file)
@@ -361,6 +361,6 @@ static struct sys_timer m28evk_timer = {
 MACHINE_START(M28EVK, "DENX M28 EVK")
        .map_io         = mx28_map_io,
        .init_irq       = mx28_init_irq,
-       .init_machine   = m28evk_init,
        .timer          = &m28evk_timer,
+       .init_machine   = m28evk_init,
 MACHINE_END
index 177e53123a02e5b67f617e5031c047354703103c..6834dea38c04cce77e20bffc2baa3019327d9610 100644 (file)
@@ -115,6 +115,6 @@ static struct sys_timer stmp378x_dvb_timer = {
 MACHINE_START(STMP378X, "STMP378X")
        .map_io         = mx23_map_io,
        .init_irq       = mx23_init_irq,
-       .init_machine   = stmp378x_dvb_init,
        .timer          = &stmp378x_dvb_timer,
+       .init_machine   = stmp378x_dvb_init,
 MACHINE_END
index 0fcff47009cf13ff53db9757e58c9dc0cbf5b927..9a7b08b2a92559caf1028df076040c5e5dc798ae 100644 (file)
@@ -66,11 +66,11 @@ static const iomux_cfg_t tx28_fec1_pads[] __initconst = {
        MX28_PAD_ENET0_CRS__ENET1_RX_EN,
 };
 
-static struct fec_platform_data tx28_fec0_data = {
+static const struct fec_platform_data tx28_fec0_data __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
-static struct fec_platform_data tx28_fec1_data = {
+static const struct fec_platform_data tx28_fec1_data __initconst = {
        .phy = PHY_INTERFACE_MODE_RMII,
 };
 
index 1297bb58869cf72dcda88e7329b9f83f5cd3da13..9ff90a744a2140a0bf5168403301ac9cec68b927 100644 (file)
@@ -16,6 +16,8 @@
 
 #include <linux/kernel.h>
 #include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
 #include <linux/io.h>
 
 #include <asm/mach-types.h>  /* for machine_is_* */
@@ -927,16 +929,22 @@ int __init omap1_clk_init(void)
 
 void __init omap1_clk_late_init(void)
 {
-       if (ck_dpll1.rate >= OMAP1_DPLL1_SANE_VALUE)
+       unsigned long rate = ck_dpll1.rate;
+
+       if (rate >= OMAP1_DPLL1_SANE_VALUE)
                return;
 
+       /* System booting at unusable rate, force reprogramming of DPLL1 */
+       ck_dpll1_p->rate = 0;
+
        /* Find the highest supported frequency and enable it */
        if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
                pr_err("System frequencies not set, using default. Check your config.\n");
                omap_writew(0x2290, DPLL_CTL);
-               omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL);
+               omap_writew(cpu_is_omap7xx() ? 0x2005 : 0x0005, ARM_CKCTL);
                ck_dpll1.rate = OMAP1_DPLL1_SANE_VALUE;
        }
        propagate_rate(&ck_dpll1);
        omap1_show_rates();
+       loops_per_jiffy = cpufreq_scale(loops_per_jiffy, rate, ck_dpll1.rate);
 }
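
Since DPLL1 may have just been reprogrammed, loops_per_jiffy is rescaled in proportion to the rate change; cpufreq_scale(lpj, old, new) amounts to lpj * new / old. A tiny standalone illustration of that arithmetic (values are hypothetical, not from the patch):

#include <stdio.h>

int main(void)
{
	unsigned long long lpj = 299008;	/* hypothetical value calibrated at 60 MHz */
	unsigned long old_rate = 60000000, new_rate = 216000000;

	/* same proportional scaling that cpufreq_scale() performs */
	printf("%llu\n", lpj * new_rate / old_rate);	/* prints 1076428 */
	return 0;
}
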
index bd18d691c6ad9e9558703feff60c02b82b237a73..108fee6146fc45d4e615b342aa0f47a06ea213b9 100644 (file)
@@ -193,7 +193,7 @@ static struct platform_device rx51_charger_device = {
 static void __init rx51_charger_init(void)
 {
        WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
-               GPIOF_OUT_INIT_LOW, "isp1704_reset"));
+               GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
 
        platform_device_register(&rx51_charger_device);
 }
index 292eee3be15fdc10096b6833fc438b2ecab9ccea..28fcb27005d2912ec13dec5be59d777e721f67a6 100644 (file)
@@ -145,6 +145,9 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
                pdata->reg_size = 4;
                pdata->has_ccr = true;
        }
+       pdata->set_clk_src = omap2_mcbsp_set_clk_src;
+       if (id == 1)
+               pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
 
        if (oh->class->rev == MCBSP_CONFIG_TYPE3) {
                if (id == 2)
@@ -174,9 +177,6 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
                                        name, oh->name);
                return PTR_ERR(pdev);
        }
-       pdata->set_clk_src = omap2_mcbsp_set_clk_src;
-       if (id == 1)
-               pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
        omap_mcbsp_count++;
        return 0;
 }
index 7f8915ad50990b1af2e877ed169c32cd0d15cd02..eef43e2e163e92224e23ea3c5b6250c14d2747fb 100644 (file)
@@ -3247,18 +3247,14 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
 
 /* 3430ES1-only hwmods */
 static __initdata struct omap_hwmod *omap3430es1_hwmods[] = {
-       &omap3xxx_iva_hwmod,
        &omap3430es1_dss_core_hwmod,
-       &omap3xxx_mailbox_hwmod,
        NULL
 };
 
 /* 3430ES2+-only hwmods */
 static __initdata struct omap_hwmod *omap3430es2plus_hwmods[] = {
-       &omap3xxx_iva_hwmod,
        &omap3xxx_dss_core_hwmod,
        &omap3xxx_usbhsotg_hwmod,
-       &omap3xxx_mailbox_hwmod,
        NULL
 };
 
index 31e51f9b4b6402b3c5131168719cd0aed6c9d4ac..fbf66ea8c77fec7696e1a1bf57e5840865a95963 100644 (file)
@@ -178,7 +178,7 @@ static struct hw_pci qnap_ts209_pci __initdata = {
 
 static int __init qnap_ts209_pci_init(void)
 {
-       if (machine_is_ts_x09())
+       if (machine_is_ts209())
                pci_common_init(&qnap_ts209_pci);
 
        return 0;
index cb53160f6c5d3127b961edbf102e70ab34733712..26ebb57719df5d3fbf389e46cb723b92029b8191 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/suspend.h>
 #include <linux/slab.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
index ef555c041962983814be50c03d606b91bcd15b0d..a12b689a87026c9e06af50351b2b20d8c7e01d7d 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <asm/sizes.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <linux/of.h>
index 5e6b42089eb44d7048b39cc2855403cd44cc0ddc..3341fd118723ff11cc7a21df95241c1252c2f6a0 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/kernel.h>
 #include <linux/string.h>
+#include <linux/export.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/gpio.h>
index 7a3bc32df425a3426016ec5448e884ad0e0443ea..51c00f2453c6c62ef9537143c0f31ad8ea416424 100644 (file)
@@ -70,7 +70,7 @@ void __init s3c6400_init_irq(void)
        s3c64xx_init_irq(~0 & ~(0xf << 5), ~0);
 }
 
-struct sysdev_class s3c6400_sysclass = {
+static struct sysdev_class s3c6400_sysclass = {
        .name   = "s3c6400-core",
 };
 
index 83d2afb79e9f88370fbced458f6ffbf9a5b1b1c8..2cf80026c58d470c5f328da8b6df696fa4514bae 100644 (file)
@@ -20,7 +20,7 @@
 #include <plat/fb.h>
 #include <plat/gpio-cfg.h>
 
-extern void s3c64xx_fb_gpio_setup_24bpp(void)
+void s3c64xx_fb_gpio_setup_24bpp(void)
 {
        s3c_gpio_cfgrange_nopull(S3C64XX_GPI(0), 16, S3C_GPIO_SFN(2));
        s3c_gpio_cfgrange_nopull(S3C64XX_GPJ(0), 12, S3C_GPIO_SFN(2));
index 4ca77c41d4996fc3074ff55dd91ab6efc3bca2a5..3ac9e57d9705ce252d804d4755673f5e9576fb2f 100644 (file)
@@ -274,6 +274,7 @@ static struct samsung_bl_gpio_info smdkv210_bl_gpio_info = {
 
 static struct platform_pwm_backlight_data smdkv210_bl_data = {
        .pwm_id = 3,
+       .pwm_period_ns = 1000,
 };
 
 static void __init smdkv210_map_io(void)
index 5a616f6e56120c850c9afa93af19513a4b80ef1c..f7951aa0456287eadb4356ef5d1a3ecd3266abba 100644 (file)
@@ -1,5 +1,5 @@
-ifeq ($(CONFIG_ARCH_SA1100),y)
-   zreladdr-$(CONFIG_SA1111)           += 0xc0208000
+ifeq ($(CONFIG_SA1111),y)
+   zreladdr-y  += 0xc0208000
 else
    zreladdr-y  += 0xc0008000
 endif
index 032f3881d145174cf00838fda285d893492a87dd..d480438579dd11aa58e23557570633bf35899cd0 100644 (file)
@@ -19,6 +19,7 @@
 
 #include <asm/mach-types.h>
 #include <asm/setup.h>
+#include <asm/page.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/flash.h>
index 83c270949465d1b49734759460e18d1ffdeae1e1..a4e6ca04e319534dfc4a0a94fa2a8fd7bb3e5caa 100644 (file)
@@ -605,6 +605,7 @@ struct sys_timer ag5evm_timer = {
 
 MACHINE_START(AG5EVM, "ag5evm")
        .map_io         = ag5evm_map_io,
+       .nr_irqs        = NR_IRQS_LEGACY,
        .init_irq       = sh73a0_init_irq,
        .handle_irq     = gic_handle_irq,
        .init_machine   = ag5evm_init,
index 1b4439d3f9d51e41b9c3c0824eb5990389cd1343..857ceeec1bb0e9975eb589deb82fbde7a9be856a 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/input/sh_keysc.h>
 #include <linux/gpio_keys.h>
 #include <linux/leds.h>
+#include <linux/platform_data/leds-renesas-tpu.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/sh_mmcif.h>
 #include <linux/mfd/tmio.h>
@@ -56,7 +57,7 @@ static struct resource smsc9220_resources[] = {
                .flags          = IORESOURCE_MEM,
        },
        [1] = {
-               .start          = gic_spi(33), /* PINTA2 @ PORT144 */
+               .start          = SH73A0_PINT0_IRQ(2), /* PINTA2 */
                .flags          = IORESOURCE_IRQ,
        },
 };
@@ -157,10 +158,6 @@ static struct platform_device gpio_keys_device = {
 #define GPIO_LED(n, g) { .name = n, .gpio = g }
 
 static struct gpio_led gpio_leds[] = {
-       GPIO_LED("V2513", GPIO_PORT153), /* PORT153 [TPU1T02] -> V2513 */
-       GPIO_LED("V2514", GPIO_PORT199), /* PORT199 [TPU4TO1] -> V2514 */
-       GPIO_LED("V2515", GPIO_PORT197), /* PORT197 [TPU2TO1] -> V2515 */
-       GPIO_LED("KEYLED", GPIO_PORT163), /* PORT163 [TPU3TO0] -> KEYLED */
        GPIO_LED("G", GPIO_PORT20), /* PORT20 [GPO0] -> LED7 -> "G" */
        GPIO_LED("H", GPIO_PORT21), /* PORT21 [GPO1] -> LED8 -> "H" */
        GPIO_LED("J", GPIO_PORT22), /* PORT22 [GPO2] -> LED9 -> "J" */
@@ -179,6 +176,119 @@ static struct platform_device gpio_leds_device = {
        },
 };
 
+/* TPU LED */
+static struct led_renesas_tpu_config led_renesas_tpu12_pdata = {
+       .name           = "V2513",
+       .pin_gpio_fn    = GPIO_FN_TPU1TO2,
+       .pin_gpio       = GPIO_PORT153,
+       .channel_offset = 0x90,
+       .timer_bit = 2,
+       .max_brightness = 1000,
+};
+
+static struct resource tpu12_resources[] = {
+       [0] = {
+               .name   = "TPU12",
+               .start  = 0xe6610090,
+               .end    = 0xe66100b5,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static struct platform_device leds_tpu12_device = {
+       .name = "leds-renesas-tpu",
+       .id = 12,
+       .dev = {
+               .platform_data  = &led_renesas_tpu12_pdata,
+       },
+       .num_resources  = ARRAY_SIZE(tpu12_resources),
+       .resource       = tpu12_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu41_pdata = {
+       .name           = "V2514",
+       .pin_gpio_fn    = GPIO_FN_TPU4TO1,
+       .pin_gpio       = GPIO_PORT199,
+       .channel_offset = 0x50,
+       .timer_bit = 1,
+       .max_brightness = 1000,
+};
+
+static struct resource tpu41_resources[] = {
+       [0] = {
+               .name   = "TPU41",
+               .start  = 0xe6640050,
+               .end    = 0xe6640075,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static struct platform_device leds_tpu41_device = {
+       .name = "leds-renesas-tpu",
+       .id = 41,
+       .dev = {
+               .platform_data  = &led_renesas_tpu41_pdata,
+       },
+       .num_resources  = ARRAY_SIZE(tpu41_resources),
+       .resource       = tpu41_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu21_pdata = {
+       .name           = "V2515",
+       .pin_gpio_fn    = GPIO_FN_TPU2TO1,
+       .pin_gpio       = GPIO_PORT197,
+       .channel_offset = 0x50,
+       .timer_bit = 1,
+       .max_brightness = 1000,
+};
+
+static struct resource tpu21_resources[] = {
+       [0] = {
+               .name   = "TPU21",
+               .start  = 0xe6620050,
+               .end    = 0xe6620075,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static struct platform_device leds_tpu21_device = {
+       .name = "leds-renesas-tpu",
+       .id = 21,
+       .dev = {
+               .platform_data  = &led_renesas_tpu21_pdata,
+       },
+       .num_resources  = ARRAY_SIZE(tpu21_resources),
+       .resource       = tpu21_resources,
+};
+
+static struct led_renesas_tpu_config led_renesas_tpu30_pdata = {
+       .name           = "KEYLED",
+       .pin_gpio_fn    = GPIO_FN_TPU3TO0,
+       .pin_gpio       = GPIO_PORT163,
+       .channel_offset = 0x10,
+       .timer_bit = 0,
+       .max_brightness = 1000,
+};
+
+static struct resource tpu30_resources[] = {
+       [0] = {
+               .name   = "TPU30",
+               .start  = 0xe6630010,
+               .end    = 0xe6630035,
+               .flags  = IORESOURCE_MEM,
+       },
+};
+
+static struct platform_device leds_tpu30_device = {
+       .name = "leds-renesas-tpu",
+       .id = 30,
+       .dev = {
+               .platform_data  = &led_renesas_tpu30_pdata,
+       },
+       .num_resources  = ARRAY_SIZE(tpu30_resources),
+       .resource       = tpu30_resources,
+};
+
 /* MMCIF */
 static struct resource mmcif_resources[] = {
        [0] = {
@@ -291,6 +401,10 @@ static struct platform_device *kota2_devices[] __initdata = {
        &keysc_device,
        &gpio_keys_device,
        &gpio_leds_device,
+       &leds_tpu12_device,
+       &leds_tpu41_device,
+       &leds_tpu21_device,
+       &leds_tpu30_device,
        &mmcif_device,
        &sdhi0_device,
        &sdhi1_device,
@@ -317,18 +431,6 @@ static void __init kota2_map_io(void)
        shmobile_setup_console();
 }
 
-#define PINTER0A       0xe69000a0
-#define PINTCR0A       0xe69000b0
-
-void __init kota2_init_irq(void)
-{
-       sh73a0_init_irq();
-
-       /* setup PINT: enable PINTA2 as active low */
-       __raw_writel(1 << 29, PINTER0A);
-       __raw_writew(2 << 10, PINTCR0A);
-}
-
 static void __init kota2_init(void)
 {
        sh73a0_pinmux_init();
@@ -447,7 +549,8 @@ struct sys_timer kota2_timer = {
 
 MACHINE_START(KOTA2, "kota2")
        .map_io         = kota2_map_io,
-       .init_irq       = kota2_init_irq,
+       .nr_irqs        = NR_IRQS_LEGACY,
+       .init_irq       = sh73a0_init_irq,
        .handle_irq     = gic_handle_irq,
        .init_machine   = kota2_init,
        .timer          = &kota2_timer,
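
The board-kota2.c hunks above replace four GPIO-driven LEDs with per-channel leds-renesas-tpu platform devices, each carrying a channel_offset and one small MEM resource. A user-space sketch of that address arithmetic, assuming (inferred from the resources above, not from a datasheet) that each TPU unit's register window starts at a base such as 0xe6610000 and that a channel window begins at base + channel_offset and spans 0x26 bytes:

        #include <stdio.h>

        /* Sketch only: the TPU unit base addresses are inferred from the
         * resources in the hunk above (e.g. TPU1 at 0xe6610000), not taken
         * from documentation. Each channel window spans 0x26 bytes. */
        static void tpu_window(unsigned long unit_base, unsigned long channel_offset)
        {
                unsigned long start = unit_base + channel_offset;
                unsigned long end = start + 0x26 - 1;

                printf("%#lx-%#lx\n", start, end);
        }

        int main(void)
        {
                tpu_window(0xe6610000UL, 0x90); /* TPU1, timer bit 2: 0xe6610090-0xe66100b5 */
                tpu_window(0xe6640000UL, 0x50); /* TPU4, timer bit 1: 0xe6640050-0xe6640075 */
                return 0;
        }
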
index 61a846bb30f2034ec3ae69253aea2d2d6d695aa8..1370a89ca358ba548c80ae5ed3ec29b52156b501 100644 (file)
@@ -113,6 +113,12 @@ static struct clk main_clk = {
        .ops            = &main_clk_ops,
 };
 
+/* Divide Main clock by two */
+static struct clk main_div2_clk = {
+       .ops            = &div2_clk_ops,
+       .parent         = &main_clk,
+};
+
 /* PLL0, PLL1, PLL2, PLL3 */
 static unsigned long pll_recalc(struct clk *clk)
 {
@@ -181,6 +187,7 @@ static struct clk *main_clks[] = {
        &extal1_div2_clk,
        &extal2_div2_clk,
        &main_clk,
+       &main_div2_clk,
        &pll0_clk,
        &pll1_clk,
        &pll2_clk,
@@ -243,7 +250,7 @@ static struct clk div6_clks[DIV6_NR] = {
        [DIV6_VCK1] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR1, 0),
        [DIV6_VCK2] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR2, 0),
        [DIV6_VCK3] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR3, 0),
-       [DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, 0),
+       [DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, CLK_ENABLE_ON_INIT),
        [DIV6_FLCTL] = SH_CLK_DIV6(&pll1_div2_clk, FLCKCR, 0),
        [DIV6_SDHI0] = SH_CLK_DIV6(&pll1_div2_clk, SD0CKCR, 0),
        [DIV6_SDHI1] = SH_CLK_DIV6(&pll1_div2_clk, SD1CKCR, 0),
@@ -268,6 +275,7 @@ enum { MSTP001,
        MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
        MSTP331, MSTP329, MSTP325, MSTP323, MSTP318,
        MSTP314, MSTP313, MSTP312, MSTP311,
+       MSTP303, MSTP302, MSTP301, MSTP300,
        MSTP411, MSTP410, MSTP403,
        MSTP_NR };
 
@@ -301,6 +309,10 @@ static struct clk mstp_clks[MSTP_NR] = {
        [MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
        [MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
        [MSTP311] = MSTP(&div6_clks[DIV6_SDHI2], SMSTPCR3, 11, 0), /* SDHI2 */
+       [MSTP303] = MSTP(&main_div2_clk, SMSTPCR3, 3, 0), /* TPU1 */
+       [MSTP302] = MSTP(&main_div2_clk, SMSTPCR3, 2, 0), /* TPU2 */
+       [MSTP301] = MSTP(&main_div2_clk, SMSTPCR3, 1, 0), /* TPU3 */
+       [MSTP300] = MSTP(&main_div2_clk, SMSTPCR3, 0, 0), /* TPU4 */
        [MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */
        [MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */
        [MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
@@ -350,6 +362,10 @@ static struct clk_lookup lookups[] = {
        CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
        CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
        CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP311]), /* SDHI2 */
+       CLKDEV_DEV_ID("leds-renesas-tpu.12", &mstp_clks[MSTP303]), /* TPU1 */
+       CLKDEV_DEV_ID("leds-renesas-tpu.21", &mstp_clks[MSTP302]), /* TPU2 */
+       CLKDEV_DEV_ID("leds-renesas-tpu.30", &mstp_clks[MSTP301]), /* TPU3 */
+       CLKDEV_DEV_ID("leds-renesas-tpu.41", &mstp_clks[MSTP300]), /* TPU4 */
        CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */
        CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */
        CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
index 7bf0890e16ba43963171bf5dea4b3345c27e25d8..de795b42232a8940720ed8e44ca3bc7da7431372 100644 (file)
@@ -12,8 +12,6 @@
 
 #include <linux/kernel.h>
 #include <linux/errno.h>
-
-#define ARCH_NR_GPIOS 1024
 #include <linux/sh_pfc.h>
 
 #ifdef CONFIG_GPIOLIB
index 7389df911b1ac8639e6e1dc3c5975d9ed5c112dc..c01ef66537f34a344f4fcf41ea6a9f67740ffb4c 100644 (file)
@@ -1,10 +1,5 @@
 #ifndef __ASM_ARCH_GPIO_H
 #define __ASM_ARCH_GPIO_H
 
-/*
- * 288 (#267 is the highest one actually hooked up) onchip GPIOs, plus enough
- * room for a couple of GPIO expanders.
- */
-#define ARCH_NR_GPIOS  350
 
 #endif /* __ASM_ARCH_GPIO_H */
index eb5520fc755facac778bfac4da1383cb23794cfb..bb7eac381a8e60f619591e7c80c4ad30cc972d62 100644 (file)
@@ -220,7 +220,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
 
 static int __kprobes
 __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
-               struct task_struct *tsk)
+               unsigned int flags, struct task_struct *tsk)
 {
        struct vm_area_struct *vma;
        int fault;
@@ -242,18 +242,7 @@ good_area:
                goto out;
        }
 
-       /*
-        * If for any reason at all we couldn't handle the fault, make
-        * sure we exit gracefully rather than endlessly redo the fault.
-        */
-       fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0);
-       if (unlikely(fault & VM_FAULT_ERROR))
-               return fault;
-       if (fault & VM_FAULT_MAJOR)
-               tsk->maj_flt++;
-       else
-               tsk->min_flt++;
-       return fault;
+       return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
 
 check_stack:
        if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -268,6 +257,9 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        struct task_struct *tsk;
        struct mm_struct *mm;
        int fault, sig, code;
+       int write = fsr & FSR_WRITE;
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+                               (write ? FAULT_FLAG_WRITE : 0);
 
        if (notify_page_fault(regs, fsr))
                return 0;
@@ -294,6 +286,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        if (!down_read_trylock(&mm->mmap_sem)) {
                if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
                        goto no_context;
+retry:
                down_read(&mm->mmap_sem);
        } else {
                /*
@@ -309,14 +302,41 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 #endif
        }
 
-       fault = __do_page_fault(mm, addr, fsr, tsk);
-       up_read(&mm->mmap_sem);
+       fault = __do_page_fault(mm, addr, fsr, flags, tsk);
+
+       /* If we need to retry but a fatal signal is pending, handle the
+        * signal first. We do not need to release the mmap_sem because
+        * it would already be released in __lock_page_or_retry in
+        * mm/filemap.c. */
+       if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+               return 0;
+
+       /*
+        * Major/minor page fault accounting is only done on the
+        * initial attempt. If we go through a retry, it is extremely
+        * likely that the page will be found in page cache at that point.
+        */
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
-       if (fault & VM_FAULT_MAJOR)
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
-       else if (fault & VM_FAULT_MINOR)
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
+       if (flags & FAULT_FLAG_ALLOW_RETRY) {
+               if (fault & VM_FAULT_MAJOR) {
+                       tsk->maj_flt++;
+                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+                                       regs, addr);
+               } else {
+                       tsk->min_flt++;
+                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+                                       regs, addr);
+               }
+               if (fault & VM_FAULT_RETRY) {
+                       /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+                       * of starvation. */
+                       flags &= ~FAULT_FLAG_ALLOW_RETRY;
+                       goto retry;
+               }
+       }
+
+       up_read(&mm->mmap_sem);
 
        /*
         * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
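
The fault.c changes switch ARM over to the retry-capable fault path: the first attempt runs with FAULT_FLAG_ALLOW_RETRY (and FAULT_FLAG_KILLABLE), major/minor accounting happens only on that attempt, and if the handler returns VM_FAULT_RETRY the flag is cleared before retrying so the fault cannot loop forever. A stand-alone sketch of that control flow, with a stub in place of handle_mm_fault and made-up flag values:

        #include <stdio.h>

        /* Made-up values for illustration; only the control flow mirrors the hunk. */
        #define FAULT_FLAG_ALLOW_RETRY  0x01
        #define VM_FAULT_RETRY          0x02

        /* Stub fault handler: pretend the first attempt has to drop the lock
         * and asks the caller to retry. */
        static int stub_handle_fault(unsigned int flags, int *attempts)
        {
                ++*attempts;
                if (*attempts == 1 && (flags & FAULT_FLAG_ALLOW_RETRY))
                        return VM_FAULT_RETRY;
                return 0;
        }

        int main(void)
        {
                unsigned int flags = FAULT_FLAG_ALLOW_RETRY;
                int attempts = 0;
                int fault;

        retry:
                fault = stub_handle_fault(flags, &attempts);
                if (fault & VM_FAULT_RETRY) {
                        /* clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of starvation */
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        goto retry;
                }
                printf("fault resolved after %d attempt(s)\n", attempts);
                return 0;
        }
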
index 44b628e4d6ea9c0121acf892ffbcfb30d0fc40ad..ce8cb1970d7ae393da39208c929e8a1d983f91c2 100644 (file)
 #include <linux/random.h>
 #include <asm/cachetype.h>
 
+static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
+                                             unsigned long pgoff)
+{
+       unsigned long base = addr & ~(SHMLBA-1);
+       unsigned long off = (pgoff << PAGE_SHIFT) & (SHMLBA-1);
+
+       if (base + off <= addr)
+               return base + off;
+
+       return base - off;
+}
+
 #define COLOUR_ALIGN(addr,pgoff)               \
        ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +      \
         (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
 
+/* gap between mmap and stack */
+#define MIN_GAP (128*1024*1024UL)
+#define MAX_GAP ((TASK_SIZE)/6*5)
+
+static int mmap_is_legacy(void)
+{
+       if (current->personality & ADDR_COMPAT_LAYOUT)
+               return 1;
+
+       if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
+               return 1;
+
+       return sysctl_legacy_va_layout;
+}
+
+static unsigned long mmap_base(unsigned long rnd)
+{
+       unsigned long gap = rlimit(RLIMIT_STACK);
+
+       if (gap < MIN_GAP)
+               gap = MIN_GAP;
+       else if (gap > MAX_GAP)
+               gap = MAX_GAP;
+
+       return PAGE_ALIGN(TASK_SIZE - gap - rnd);
+}
+
 /*
  * We need to ensure that shared mappings are correctly aligned to
  * avoid aliasing issues with VIPT caches.  We need to ensure that
@@ -68,13 +107,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
-               start_addr = addr = TASK_UNMAPPED_BASE;
+               start_addr = addr = mm->mmap_base;
                mm->cached_hole_size = 0;
        }
-       /* 8 bits of randomness in 20 address space bits */
-       if ((current->flags & PF_RANDOMIZE) &&
-           !(current->personality & ADDR_NO_RANDOMIZE))
-               addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
 
 full_search:
        if (do_align)
@@ -111,6 +146,134 @@ full_search:
        }
 }
 
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+                       const unsigned long len, const unsigned long pgoff,
+                       const unsigned long flags)
+{
+       struct vm_area_struct *vma;
+       struct mm_struct *mm = current->mm;
+       unsigned long addr = addr0;
+       int do_align = 0;
+       int aliasing = cache_is_vipt_aliasing();
+
+       /*
+        * We only need to do colour alignment if either the I or D
+        * caches alias.
+        */
+       if (aliasing)
+               do_align = filp || (flags & MAP_SHARED);
+
+       /* requested length too big for entire address space */
+       if (len > TASK_SIZE)
+               return -ENOMEM;
+
+       if (flags & MAP_FIXED) {
+               if (aliasing && flags & MAP_SHARED &&
+                   (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+                       return -EINVAL;
+               return addr;
+       }
+
+       /* requesting a specific address */
+       if (addr) {
+               if (do_align)
+                       addr = COLOUR_ALIGN(addr, pgoff);
+               else
+                       addr = PAGE_ALIGN(addr);
+               vma = find_vma(mm, addr);
+               if (TASK_SIZE - len >= addr &&
+                               (!vma || addr + len <= vma->vm_start))
+                       return addr;
+       }
+
+       /* check if free_area_cache is useful for us */
+       if (len <= mm->cached_hole_size) {
+               mm->cached_hole_size = 0;
+               mm->free_area_cache = mm->mmap_base;
+       }
+
+       /* either no address requested or can't fit in requested address hole */
+       addr = mm->free_area_cache;
+       if (do_align) {
+               unsigned long base = COLOUR_ALIGN_DOWN(addr - len, pgoff);
+               addr = base + len;
+       }
+
+       /* make sure it can fit in the remaining address space */
+       if (addr > len) {
+               vma = find_vma(mm, addr-len);
+               if (!vma || addr <= vma->vm_start)
+                       /* remember the address as a hint for next time */
+                       return (mm->free_area_cache = addr-len);
+       }
+
+       if (mm->mmap_base < len)
+               goto bottomup;
+
+       addr = mm->mmap_base - len;
+       if (do_align)
+               addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+
+       do {
+               /*
+                * Lookup failure means no vma is above this address,
+                * else if new region fits below vma->vm_start,
+                * return with success:
+                */
+               vma = find_vma(mm, addr);
+               if (!vma || addr+len <= vma->vm_start)
+                       /* remember the address as a hint for next time */
+                       return (mm->free_area_cache = addr);
+
+               /* remember the largest hole we saw so far */
+               if (addr + mm->cached_hole_size < vma->vm_start)
+                       mm->cached_hole_size = vma->vm_start - addr;
+
+               /* try just below the current vma->vm_start */
+               addr = vma->vm_start - len;
+               if (do_align)
+                       addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+       } while (len < vma->vm_start);
+
+bottomup:
+       /*
+        * A failed mmap() very likely causes application failure,
+        * so fall back to the bottom-up function here. This scenario
+        * can happen with large stack limits and large mmap()
+        * allocations.
+        */
+       mm->cached_hole_size = ~0UL;
+       mm->free_area_cache = TASK_UNMAPPED_BASE;
+       addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+       /*
+        * Restore the topdown base:
+        */
+       mm->free_area_cache = mm->mmap_base;
+       mm->cached_hole_size = ~0UL;
+
+       return addr;
+}
+
+void arch_pick_mmap_layout(struct mm_struct *mm)
+{
+       unsigned long random_factor = 0UL;
+
+       /* 8 bits of randomness in 20 address space bits */
+       if ((current->flags & PF_RANDOMIZE) &&
+           !(current->personality & ADDR_NO_RANDOMIZE))
+               random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT;
+
+       if (mmap_is_legacy()) {
+               mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+               mm->get_unmapped_area = arch_get_unmapped_area;
+               mm->unmap_area = arch_unmap_area;
+       } else {
+               mm->mmap_base = mmap_base(random_factor);
+               mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+               mm->unmap_area = arch_unmap_area_topdown;
+       }
+}
 
 /*
  * You really shouldn't be using read() or write() on /dev/mem.  This
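
The mmap.c additions move ARM from a fixed bottom-up TASK_UNMAPPED_BASE to a legacy/top-down split: arch_pick_mmap_layout() computes the random offset once and, in the top-down case, places mmap_base a stack-sized gap below TASK_SIZE, clamped between MIN_GAP and MAX_GAP. A user-space sketch of that clamping, assuming 4 KiB pages and the common 3G/1G ARM split (TASK_SIZE = 0xbf000000):

        #include <stdio.h>

        /* Assumed values for the sketch: 4 KiB pages, TASK_SIZE for a 3G/1G split. */
        #define PAGE_SIZE       4096UL
        #define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
        #define TASK_SIZE       0xbf000000UL
        #define MIN_GAP         (128 * 1024 * 1024UL)
        #define MAX_GAP         ((TASK_SIZE) / 6 * 5)

        static unsigned long mmap_base(unsigned long stack_rlimit, unsigned long rnd)
        {
                unsigned long gap = stack_rlimit;

                if (gap < MIN_GAP)
                        gap = MIN_GAP;
                else if (gap > MAX_GAP)
                        gap = MAX_GAP;

                return PAGE_ALIGN(TASK_SIZE - gap - rnd);
        }

        int main(void)
        {
                /* an 8 MiB stack limit is below MIN_GAP, so the base sits 128 MiB under TASK_SIZE */
                printf("%#lx\n", mmap_base(8UL * 1024 * 1024, 0));
                return 0;
        }
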
index 7efa2a721d5dd9dbc0853eab8b9c8980c47c5e17..7e9b5bf910c199cba4fc2fce9bead46e91d470b8 100644 (file)
@@ -161,6 +161,7 @@ __v7_ca5mp_setup:
 __v7_ca9mp_setup:
        mov     r10, #(1 << 0)                  @ TLB ops broadcasting
        b       1f
+__v7_ca7mp_setup:
 __v7_ca15mp_setup:
        mov     r10, #0
 1:
@@ -240,11 +241,13 @@ __v7_setup:
        orreq   r10, r10, #1 << 6               @ set bit #6
        mcreq   p15, 0, r10, c15, c0, 1         @ write diagnostic register
 #endif
-#ifdef CONFIG_ARM_ERRATA_751472
-       cmp     r6, #0x30                       @ present prior to r3p0
+#if defined(CONFIG_ARM_ERRATA_751472) && defined(CONFIG_SMP)
+       ALT_SMP(cmp r6, #0x30)                  @ present prior to r3p0
+       ALT_UP_B(1f)
        mrclt   p15, 0, r10, c15, c0, 1         @ read diagnostic register
        orrlt   r10, r10, #1 << 11              @ set bit #11
        mcrlt   p15, 0, r10, c15, c0, 1         @ write diagnostic register
+1:
 #endif
 
 3:     mov     r10, #0
@@ -326,6 +329,16 @@ __v7_ca5mp_proc_info:
        __v7_proc __v7_ca5mp_setup
        .size   __v7_ca5mp_proc_info, . - __v7_ca5mp_proc_info
 
+       /*
+        * ARM Ltd. Cortex A7 processor.
+        */
+       .type   __v7_ca7mp_proc_info, #object
+__v7_ca7mp_proc_info:
+       .long   0x410fc070
+       .long   0xff0ffff0
+       __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
+       .size   __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
+
        /*
         * ARM Ltd. Cortex A9 processor.
         */
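
The new __v7_ca7mp_proc_info entry is selected by masking the CPU ID register: a core matches when (MIDR & 0xff0ffff0) == 0x410fc070, i.e. ARM (0x41) as implementer, part number 0xC07, any variant or revision. A quick check of that match, using a made-up MIDR value:

        #include <stdio.h>

        int main(void)
        {
                unsigned int midr  = 0x412fc075;        /* hypothetical Cortex-A7 r2p5 */
                unsigned int mask  = 0xff0ffff0;        /* from __v7_ca7mp_proc_info */
                unsigned int match = 0x410fc070;

                printf("%s\n", (midr & mask) == match ? "matches" : "no match");
                return 0;
        }
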
index cafa1835433977808d47ce5900d85bbc19a3ea7f..d18dde95b8aae6a12dd319e71747ff58dfad8d84 100644 (file)
@@ -20,6 +20,8 @@
     Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
+#include <asm/opcodes.h>
+
 /* This is the kernel's entry point into the floating point emulator.
 It is called from the kernel with code similar to this:
 
@@ -81,11 +83,11 @@ nwfpe_enter:
        mov     r6, r0                  @ save the opcode
 emulate:
        ldr     r1, [sp, #S_PSR]        @ fetch the PSR
-       bl      checkCondition          @ check the condition
-       cmp     r0, #0                  @ r0 = 0 ==> condition failed
+       bl      arm_check_condition     @ check the condition
+       cmp     r0, #ARM_OPCODE_CONDTEST_PASS   @ condition passed?
 
        @ if condition code failed to match, next insn
-       beq     next                    @ get the next instruction;
+       bne     next                    @ get the next instruction;
 
        mov     r0, r6                  @ prepare for EmulateAll()
        bl      EmulateAll              @ emulate the instruction
index 922b81107585f324ccf7e5878643232729ad5d79..ff9834673085cb72c403fc97ee4398f59e2b087c 100644 (file)
@@ -61,29 +61,3 @@ const float32 float32Constant[] = {
        0x41200000              /* single 10.0 */
 };
 
-/* condition code lookup table
- index into the table is test code: EQ, NE, ... LT, GT, AL, NV
- bit position in short is condition code: NZCV */
-static const unsigned short aCC[16] = {
-       0xF0F0,                 // EQ == Z set
-       0x0F0F,                 // NE
-       0xCCCC,                 // CS == C set
-       0x3333,                 // CC
-       0xFF00,                 // MI == N set
-       0x00FF,                 // PL
-       0xAAAA,                 // VS == V set
-       0x5555,                 // VC
-       0x0C0C,                 // HI == C set && Z clear
-       0xF3F3,                 // LS == C clear || Z set
-       0xAA55,                 // GE == (N==V)
-       0x55AA,                 // LT == (N!=V)
-       0x0A05,                 // GT == (!Z && (N==V))
-       0xF5FA,                 // LE == (Z || (N!=V))
-       0xFFFF,                 // AL always
-       0                       // NV
-};
-
-unsigned int checkCondition(const unsigned int opcode, const unsigned int ccodes)
-{
-       return (aCC[opcode >> 28] >> (ccodes >> 28)) & 1;
-}
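
The entry.S and fpopcode.c hunks drop NWFPE's private checkCondition()/aCC[] table in favour of the shared arm_check_condition() helper, whose result is compared against ARM_OPCODE_CONDTEST_PASS. For reference, the removed table logic can be reproduced stand-alone: the condition field (opcode bits 31:28) indexes the table and the NZCV flags (PSR bits 31:28) pick one bit out of the entry:

        #include <stdio.h>

        /* The aCC[] table removed above: a set bit means "condition passes"
         * for that combination of condition field and NZCV flags. */
        static const unsigned short aCC[16] = {
                0xF0F0, 0x0F0F, 0xCCCC, 0x3333,
                0xFF00, 0x00FF, 0xAAAA, 0x5555,
                0x0C0C, 0xF3F3, 0xAA55, 0x55AA,
                0x0A05, 0xF5FA, 0xFFFF, 0x0000,
        };

        static unsigned int check_condition(unsigned int opcode, unsigned int psr)
        {
                return (aCC[opcode >> 28] >> (psr >> 28)) & 1;
        }

        int main(void)
        {
                /* EQ (condition field 0x0) passes only when the Z flag (PSR bit 30) is set */
                printf("Z set: %u, Z clear: %u\n",
                       check_condition(0x00000000u, 1u << 30),
                       check_condition(0x00000000u, 0u));
                return 0;
        }
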
index 786e4c96156dcea95429d0ebca6fd9c0c2bfad23..78f02dbfaa8ffea4efda621044b119367f1e57e4 100644 (file)
@@ -475,9 +475,6 @@ static inline unsigned int getDestinationSize(const unsigned int opcode)
        return (nRc);
 }
 
-extern unsigned int checkCondition(const unsigned int opcode,
-                                  const unsigned int ccodes);
-
 extern const float64 float64Constant[];
 extern const float32 float32Constant[];
 
index c074e66ad224e83d18d1f278afc78aa00e18b494..4e0a371630b38fb3a950b9063f30a53d3ed0b5a7 100644 (file)
@@ -116,7 +116,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
        return oprofile_perf_init(ops);
 }
 
-void __exit oprofile_arch_exit(void)
+void oprofile_arch_exit(void)
 {
        oprofile_perf_exit();
 }
index 74aac96cda2007f9edcd0aea03c9fd40520952a1..73db34bf588ae7985fc50c71e9e2a4aa3af7a5f8 100644 (file)
@@ -17,6 +17,7 @@
  * the CPU clock speed on the fly.
  */
 
+#include <linux/module.h>
 #include <linux/cpufreq.h>
 #include <linux/clk.h>
 #include <linux/err.h>
@@ -97,7 +98,7 @@ static int mxc_set_target(struct cpufreq_policy *policy,
        return ret;
 }
 
-static int __init mxc_cpufreq_init(struct cpufreq_policy *policy)
+static int mxc_cpufreq_init(struct cpufreq_policy *policy)
 {
        int ret;
        int i;
index 88fd40452567a30aeb6f2b55ba1e3f4684959a78..477971b009308a929c53c19ce40c4b35c425d623 100644 (file)
@@ -98,6 +98,7 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id)
        case MACH_TYPE_PCM043:
        case MACH_TYPE_LILLY1131:
        case MACH_TYPE_VPR200:
+       case MACH_TYPE_EUKREA_CPUIMX35SD:
                uart_base = MX3X_UART1_BASE_ADDR;
                break;
        case MACH_TYPE_MAGX_ZN5:
index 42d74ea590848fe2fc31fbdf16dfb19d77cf78ab..e032717f7d02c211ee8cad0d0200cec5f83974cf 100644 (file)
@@ -32,6 +32,9 @@
 #define MX3_PWMSAR                0x0C    /* PWM Sample Register */
 #define MX3_PWMPR                 0x10    /* PWM Period Register */
 #define MX3_PWMCR_PRESCALER(x)    (((x - 1) & 0xFFF) << 4)
+#define MX3_PWMCR_DOZEEN                (1 << 24)
+#define MX3_PWMCR_WAITEN                (1 << 23)
+#define MX3_PWMCR_DBGEN                        (1 << 22)
 #define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
 #define MX3_PWMCR_CLKSRC_IPG      (1 << 16)
 #define MX3_PWMCR_EN              (1 << 0)
@@ -74,10 +77,21 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
                do_div(c, period_ns);
                duty_cycles = c;
 
+               /*
+                * according to imx pwm RM, the real period value should be
+                * PERIOD value in PWMPR plus 2.
+                */
+               if (period_cycles > 2)
+                       period_cycles -= 2;
+               else
+                       period_cycles = 0;
+
                writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
                writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
 
-               cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN;
+               cr = MX3_PWMCR_PRESCALER(prescale) |
+                       MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
+                       MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
 
                if (cpu_is_mx25())
                        cr |= MX3_PWMCR_CLKSRC_IPG;
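
Besides enabling the PWM in doze/wait/debug modes, the pwm.c hunk compensates for the i.MX PWM counting PERIOD + 2 ticks per cycle: the value written to MX3_PWMPR is two less than the requested period in counter ticks, clamped at zero. A trivial sketch of just that adjustment:

        #include <stdio.h>

        /* Sketch of the hunk's adjustment only: the hardware period is
         * PWMPR + 2 counter ticks, so program two less than requested. */
        static unsigned long mx3_pwmpr_value(unsigned long period_cycles)
        {
                return period_cycles > 2 ? period_cycles - 2 : 0;
        }

        int main(void)
        {
                printf("%lu\n", mx3_pwmpr_value(1000)); /* a 1000-tick period is programmed as 998 */
                return 0;
        }
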
index 41ab97ebe4cfc8877fc58cd09f446c02636a030d..10d160888133c72101101f8e1dfafb53ddaf1af9 100644 (file)
@@ -384,12 +384,16 @@ void __init orion_gpio_init(int gpio_base, int ngpio,
        struct orion_gpio_chip *ochip;
        struct irq_chip_generic *gc;
        struct irq_chip_type *ct;
+       char gc_label[16];
 
        if (orion_gpio_chip_count == ARRAY_SIZE(orion_gpio_chips))
                return;
 
+       snprintf(gc_label, sizeof(gc_label), "orion_gpio%d",
+               orion_gpio_chip_count);
+
        ochip = orion_gpio_chips + orion_gpio_chip_count;
-       ochip->chip.label = "orion_gpio";
+       ochip->chip.label = kstrdup(gc_label, GFP_KERNEL);
        ochip->chip.request = orion_gpio_request;
        ochip->chip.direction_input = orion_gpio_direction_input;
        ochip->chip.get = orion_gpio_get;
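
The orion_gpio_init() change gives each chip a unique label ("orion_gpio0", "orion_gpio1", ...) instead of the shared string literal; because the name is assembled in a stack buffer, it has to be copied with kstrdup() before being stored in the chip. The same pattern in user space, with strdup standing in for kstrdup:

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        /* The label is built in a stack buffer, so the stored pointer must be
         * a private copy (kstrdup in the kernel, strdup here). */
        static char *make_label(int chip_count)
        {
                char buf[16];

                snprintf(buf, sizeof(buf), "orion_gpio%d", chip_count);
                return strdup(buf);
        }

        int main(void)
        {
                char *label = make_label(0);

                printf("%s\n", label);  /* orion_gpio0 */
                free(label);
                return 0;
        }
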
index e657305644cc27140f8c878d99e34ac2ea668ac0..a976c023b286b4a1b991cfa63c2dc93e9912151d 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/pwm_backlight.h>
-#include <linux/slab.h>
 
 #include <plat/devs.h>
 #include <plat/gpio-cfg.h>
index dac4760c0f0aeb58b4c68b5b01913f41dc1430b4..95509d8eb140fda1367658d048660790253b35d5 100644 (file)
@@ -202,14 +202,6 @@ extern int s3c_plltab_register(struct cpufreq_frequency_table *plls,
 extern struct s3c_cpufreq_config *s3c_cpufreq_getconfig(void);
 extern struct s3c_iotimings *s3c_cpufreq_getiotimings(void);
 
-extern void s3c2410_iotiming_debugfs(struct seq_file *seq,
-                                    struct s3c_cpufreq_config *cfg,
-                                    union s3c_iobank *iob);
-
-extern void s3c2412_iotiming_debugfs(struct seq_file *seq,
-                                    struct s3c_cpufreq_config *cfg,
-                                    union s3c_iobank *iob);
-
 #ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUGFS
 #define s3c_cpufreq_debugfs_call(x) x
 #else
@@ -226,6 +218,10 @@ extern void s3c2410_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg);
 extern void s3c2410_set_fvco(struct s3c_cpufreq_config *cfg);
 
 #ifdef CONFIG_S3C2410_IOTIMING
+extern void s3c2410_iotiming_debugfs(struct seq_file *seq,
+                                    struct s3c_cpufreq_config *cfg,
+                                    union s3c_iobank *iob);
+
 extern int s3c2410_iotiming_calc(struct s3c_cpufreq_config *cfg,
                                 struct s3c_iotimings *iot);
 
@@ -235,6 +231,7 @@ extern int s3c2410_iotiming_get(struct s3c_cpufreq_config *cfg,
 extern void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg,
                                 struct s3c_iotimings *iot);
 #else
+#define s3c2410_iotiming_debugfs NULL
 #define s3c2410_iotiming_calc NULL
 #define s3c2410_iotiming_get NULL
 #define s3c2410_iotiming_set NULL
@@ -242,8 +239,10 @@ extern void s3c2410_iotiming_set(struct s3c_cpufreq_config *cfg,
 
 /* S3C2412 compatible routines */
 
-extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg,
-                               struct s3c_iotimings *timings);
+#ifdef CONFIG_S3C2412_IOTIMING
+extern void s3c2412_iotiming_debugfs(struct seq_file *seq,
+                                    struct s3c_cpufreq_config *cfg,
+                                    union s3c_iobank *iob);
 
 extern int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg,
                                struct s3c_iotimings *timings);
@@ -253,6 +252,12 @@ extern int s3c2412_iotiming_calc(struct s3c_cpufreq_config *cfg,
 
 extern void s3c2412_iotiming_set(struct s3c_cpufreq_config *cfg,
                                 struct s3c_iotimings *iot);
+#else
+#define s3c2412_iotiming_debugfs NULL
+#define s3c2412_iotiming_calc NULL
+#define s3c2412_iotiming_get NULL
+#define s3c2412_iotiming_set NULL
+#endif /* CONFIG_S3C2412_IOTIMING */
 
 #ifdef CONFIG_CPU_FREQ_S3C24XX_DEBUG
 #define s3c_freq_dbg(x...) printk(KERN_INFO x)
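
The header reshuffle above moves the s3c2410/s3c2412 iotiming declarations under their own CONFIG_S3C2410_IOTIMING / CONFIG_S3C2412_IOTIMING guards and, when a feature is configured out, #defines the symbols to NULL so code that references them still compiles and can test for a missing hook at runtime. A stand-alone sketch of that stub pattern, with a made-up FEATURE switch:

        #include <stdio.h>

        /* Toggle this to mimic the Kconfig option being set or not. */
        /* #define FEATURE */

        #ifdef FEATURE
        static int feature_calc(int x)
        {
                return x * 2;
        }
        #define feature_calc_op feature_calc
        #else
        #define feature_calc_op NULL
        #endif

        struct ops {
                int (*calc)(int);
        };

        int main(void)
        {
                struct ops ops = { .calc = feature_calc_op };

                if (ops.calc)
                        printf("calc(21) = %d\n", ops.calc(21));
                else
                        printf("feature not built in\n");
                return 0;
        }
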
index ccbe16f47227e2ebb171032b2a5de66289014cd0..f9c9f33f8cbe2f651277793400d8287eec0073c9 100644 (file)
@@ -16,7 +16,7 @@
 # are merged into mainline or have been edited in the machine database
 # within the last 12 months.  References to machine_is_NAME() do not count!
 #
-# Last update: Sat May 7 08:48:24 2011
+# Last update: Tue Dec 6 11:07:38 2011
 #
 # machine_is_xxx       CONFIG_xxxx             MACH_TYPE_xxx           number
 #
@@ -269,7 +269,7 @@ dns323                      MACH_DNS323             DNS323                  1542
 omap3_beagle           MACH_OMAP3_BEAGLE       OMAP3_BEAGLE            1546
 nokia_n810             MACH_NOKIA_N810         NOKIA_N810              1548
 pcm038                 MACH_PCM038             PCM038                  1551
-ts_x09                 MACH_TS209              TS209                   1565
+ts209                  MACH_TS209              TS209                   1565
 at91cap9adk            MACH_AT91CAP9ADK        AT91CAP9ADK             1566
 mx31moboard            MACH_MX31MOBOARD        MX31MOBOARD             1574
 vision_ep9307          MACH_VISION_EP9307      VISION_EP9307           1578
@@ -321,7 +321,6 @@ lb88rc8480          MACH_LB88RC8480         LB88RC8480              1769
 mx25_3ds               MACH_MX25_3DS           MX25_3DS                1771
 omap3530_lv_som                MACH_OMAP3530_LV_SOM    OMAP3530_LV_SOM         1773
 davinci_da830_evm      MACH_DAVINCI_DA830_EVM  DAVINCI_DA830_EVM       1781
-at572d940hfek          MACH_AT572D940HFEB      AT572D940HFEB           1783
 dove_db                        MACH_DOVE_DB            DOVE_DB                 1788
 overo                  MACH_OVERO              OVERO                   1798
 at2440evb              MACH_AT2440EVB          AT2440EVB               1799
@@ -459,7 +458,7 @@ guruplug            MACH_GURUPLUG           GURUPLUG                2659
 spear310               MACH_SPEAR310           SPEAR310                2660
 spear320               MACH_SPEAR320           SPEAR320                2661
 aquila                 MACH_AQUILA             AQUILA                  2676
-sheeva_esata           MACH_ESATA_SHEEVAPLUG   ESATA_SHEEVAPLUG        2678
+esata_sheevaplug       MACH_ESATA_SHEEVAPLUG   ESATA_SHEEVAPLUG        2678
 msm7x30_surf           MACH_MSM7X30_SURF       MSM7X30_SURF            2679
 ea2478devkit           MACH_EA2478DEVKIT       EA2478DEVKIT            2683
 terastation_wxl                MACH_TERASTATION_WXL    TERASTATION_WXL         2697
@@ -491,380 +490,53 @@ eukrea_cpuimx35sd        MACH_EUKREA_CPUIMX35SD  EUKREA_CPUIMX35SD       2821
 eukrea_cpuimx51sd      MACH_EUKREA_CPUIMX51SD  EUKREA_CPUIMX51SD       2822
 eukrea_cpuimx51                MACH_EUKREA_CPUIMX51    EUKREA_CPUIMX51         2823
 smdkc210               MACH_SMDKC210           SMDKC210                2838
-omap3_braillo          MACH_OMAP3_BRAILLO      OMAP3_BRAILLO           2839
-spyplug                        MACH_SPYPLUG            SPYPLUG                 2840
-ginger                 MACH_GINGER             GINGER                  2841
-tny_t3530              MACH_TNY_T3530          TNY_T3530               2842
 pca102                 MACH_PCA102             PCA102                  2843
-spade                  MACH_SPADE              SPADE                   2844
-mxc25_topaz            MACH_MXC25_TOPAZ        MXC25_TOPAZ             2845
 t5325                  MACH_T5325              T5325                   2846
-gw2361                 MACH_GW2361             GW2361                  2847
-elog                   MACH_ELOG               ELOG                    2848
 income                 MACH_INCOME             INCOME                  2849
-bcm589x                        MACH_BCM589X            BCM589X                 2850
-etna                   MACH_ETNA               ETNA                    2851
-hawks                  MACH_HAWKS              HAWKS                   2852
-meson                  MACH_MESON              MESON                   2853
-xsbase255              MACH_XSBASE255          XSBASE255               2854
-pvm2030                        MACH_PVM2030            PVM2030                 2855
-mioa502                        MACH_MIOA502            MIOA502                 2856
 vvbox_sdorig2          MACH_VVBOX_SDORIG2      VVBOX_SDORIG2           2857
 vvbox_sdlite2          MACH_VVBOX_SDLITE2      VVBOX_SDLITE2           2858
 vvbox_sdpro4           MACH_VVBOX_SDPRO4       VVBOX_SDPRO4            2859
-htc_spv_m700           MACH_HTC_SPV_M700       HTC_SPV_M700            2860
 mx257sx                        MACH_MX257SX            MX257SX                 2861
 goni                   MACH_GONI               GONI                    2862
-msm8x55_svlte_ffa      MACH_MSM8X55_SVLTE_FFA  MSM8X55_SVLTE_FFA       2863
-msm8x55_svlte_surf     MACH_MSM8X55_SVLTE_SURF MSM8X55_SVLTE_SURF      2864
-quickstep              MACH_QUICKSTEP          QUICKSTEP               2865
-dmw96                  MACH_DMW96              DMW96                   2866
-hammerhead             MACH_HAMMERHEAD         HAMMERHEAD              2867
-trident                        MACH_TRIDENT            TRIDENT                 2868
-lightning              MACH_LIGHTNING          LIGHTNING               2869
-iconnect               MACH_ICONNECT           ICONNECT                2870
-autobot                        MACH_AUTOBOT            AUTOBOT                 2871
-coconut                        MACH_COCONUT            COCONUT                 2872
-durian                 MACH_DURIAN             DURIAN                  2873
-cayenne                        MACH_CAYENNE            CAYENNE                 2874
-fuji                   MACH_FUJI               FUJI                    2875
-synology_6282          MACH_SYNOLOGY_6282      SYNOLOGY_6282           2876
-em1sy                  MACH_EM1SY              EM1SY                   2877
-m502                   MACH_M502               M502                    2878
-matrix518              MACH_MATRIX518          MATRIX518               2879
-tiny_gurnard           MACH_TINY_GURNARD       TINY_GURNARD            2880
-spear1310              MACH_SPEAR1310          SPEAR1310               2881
 bv07                   MACH_BV07               BV07                    2882
-mxt_td61               MACH_MXT_TD61           MXT_TD61                2883
 openrd_ultimate                MACH_OPENRD_ULTIMATE    OPENRD_ULTIMATE         2884
 devixp                 MACH_DEVIXP             DEVIXP                  2885
 miccpt                 MACH_MICCPT             MICCPT                  2886
 mic256                 MACH_MIC256             MIC256                  2887
-as1167                 MACH_AS1167             AS1167                  2888
-omap3_ibiza            MACH_OMAP3_IBIZA        OMAP3_IBIZA             2889
 u5500                  MACH_U5500              U5500                   2890
-davinci_picto          MACH_DAVINCI_PICTO      DAVINCI_PICTO           2891
-mecha                  MACH_MECHA              MECHA                   2892
-bubba3                 MACH_BUBBA3             BUBBA3                  2893
-pupitre                        MACH_PUPITRE            PUPITRE                 2894
-tegra_vogue            MACH_TEGRA_VOGUE        TEGRA_VOGUE             2896
-tegra_e1165            MACH_TEGRA_E1165        TEGRA_E1165             2897
-simplenet              MACH_SIMPLENET          SIMPLENET               2898
-ec4350tbm              MACH_EC4350TBM          EC4350TBM               2899
-pec_tc                 MACH_PEC_TC             PEC_TC                  2900
-pec_hc2                        MACH_PEC_HC2            PEC_HC2                 2901
-esl_mobilis_a          MACH_ESL_MOBILIS_A      ESL_MOBILIS_A           2902
-esl_mobilis_b          MACH_ESL_MOBILIS_B      ESL_MOBILIS_B           2903
-esl_wave_a             MACH_ESL_WAVE_A         ESL_WAVE_A              2904
-esl_wave_b             MACH_ESL_WAVE_B         ESL_WAVE_B              2905
-unisense_mmm           MACH_UNISENSE_MMM       UNISENSE_MMM            2906
-blueshark              MACH_BLUESHARK          BLUESHARK               2907
-e10                    MACH_E10                E10                     2908
-app3k_robin            MACH_APP3K_ROBIN        APP3K_ROBIN             2909
-pov15hd                        MACH_POV15HD            POV15HD                 2910
-stella                 MACH_STELLA             STELLA                  2911
 linkstation_lschl      MACH_LINKSTATION_LSCHL  LINKSTATION_LSCHL       2913
-netwalker              MACH_NETWALKER          NETWALKER               2914
-acsx106                        MACH_ACSX106            ACSX106                 2915
-atlas5_c1              MACH_ATLAS5_C1          ATLAS5_C1               2916
-nsb3ast                        MACH_NSB3AST            NSB3AST                 2917
-gnet_slc               MACH_GNET_SLC           GNET_SLC                2918
-af4000                 MACH_AF4000             AF4000                  2919
-ark9431                        MACH_ARK9431            ARK9431                 2920
-fs_s5pc100             MACH_FS_S5PC100         FS_S5PC100              2921
-omap3505nova8          MACH_OMAP3505NOVA8      OMAP3505NOVA8           2922
-omap3621_edp1          MACH_OMAP3621_EDP1      OMAP3621_EDP1           2923
-oratisaes              MACH_ORATISAES          ORATISAES               2924
 smdkv310               MACH_SMDKV310           SMDKV310                2925
-siemens_l0             MACH_SIEMENS_L0         SIEMENS_L0              2926
-ventana                        MACH_VENTANA            VENTANA                 2927
 wm8505_7in_netbook     MACH_WM8505_7IN_NETBOOK WM8505_7IN_NETBOOK      2928
-ec4350sdb              MACH_EC4350SDB          EC4350SDB               2929
-mimas                  MACH_MIMAS              MIMAS                   2930
-titan                  MACH_TITAN              TITAN                   2931
 craneboard             MACH_CRANEBOARD         CRANEBOARD              2932
-es2440                 MACH_ES2440             ES2440                  2933
-najay_a9263            MACH_NAJAY_A9263        NAJAY_A9263             2934
-htctornado             MACH_HTCTORNADO         HTCTORNADO              2935
-dimm_mx257             MACH_DIMM_MX257         DIMM_MX257              2936
-jigen301               MACH_JIGEN              JIGEN                   2937
 smdk6450               MACH_SMDK6450           SMDK6450                2938
-meno_qng               MACH_MENO_QNG           MENO_QNG                2939
-ns2416                 MACH_NS2416             NS2416                  2940
-rpc353                 MACH_RPC353             RPC353                  2941
-tq6410                 MACH_TQ6410             TQ6410                  2942
-sky6410                        MACH_SKY6410            SKY6410                 2943
-dynasty                        MACH_DYNASTY            DYNASTY                 2944
-vivo                   MACH_VIVO               VIVO                    2945
-bury_bl7582            MACH_BURY_BL7582        BURY_BL7582             2946
-bury_bps5270           MACH_BURY_BPS5270       BURY_BPS5270            2947
-basi                   MACH_BASI               BASI                    2948
-tn200                  MACH_TN200              TN200                   2949
-c2mmi                  MACH_C2MMI              C2MMI                   2950
-meson_6236m            MACH_MESON_6236M        MESON_6236M             2951
-meson_8626m            MACH_MESON_8626M        MESON_8626M             2952
-tube                   MACH_TUBE               TUBE                    2953
-messina                        MACH_MESSINA            MESSINA                 2954
-mx50_arm2              MACH_MX50_ARM2          MX50_ARM2               2955
-cetus9263              MACH_CETUS9263          CETUS9263               2956
 brownstone             MACH_BROWNSTONE         BROWNSTONE              2957
-vmx25                  MACH_VMX25              VMX25                   2958
-vmx51                  MACH_VMX51              VMX51                   2959
-abacus                 MACH_ABACUS             ABACUS                  2960
-cm4745                 MACH_CM4745             CM4745                  2961
-oratislink             MACH_ORATISLINK         ORATISLINK              2962
-davinci_dm365_dvr      MACH_DAVINCI_DM365_DVR  DAVINCI_DM365_DVR       2963
-netviz                 MACH_NETVIZ             NETVIZ                  2964
 flexibity              MACH_FLEXIBITY          FLEXIBITY               2965
-wlan_computer          MACH_WLAN_COMPUTER      WLAN_COMPUTER           2966
-lpc24xx                        MACH_LPC24XX            LPC24XX                 2967
-spica                  MACH_SPICA              SPICA                   2968
-gpsdisplay             MACH_GPSDISPLAY         GPSDISPLAY              2969
-bipnet                 MACH_BIPNET             BIPNET                  2970
-overo_ctu_inertial     MACH_OVERO_CTU_INERTIAL OVERO_CTU_INERTIAL      2971
-davinci_dm355_mmm      MACH_DAVINCI_DM355_MMM  DAVINCI_DM355_MMM       2972
-pc9260_v2              MACH_PC9260_V2          PC9260_V2               2973
-ptx7545                        MACH_PTX7545            PTX7545                 2974
-tm_efdc                        MACH_TM_EFDC            TM_EFDC                 2975
-omap3_waldo1           MACH_OMAP3_WALDO1       OMAP3_WALDO1            2977
-flyer                  MACH_FLYER              FLYER                   2978
-tornado3240            MACH_TORNADO3240        TORNADO3240             2979
-soli_01                        MACH_SOLI_01            SOLI_01                 2980
-omapl138_europalc      MACH_OMAPL138_EUROPALC  OMAPL138_EUROPALC       2981
-helios_v1              MACH_HELIOS_V1          HELIOS_V1               2982
-netspace_lite_v2       MACH_NETSPACE_LITE_V2   NETSPACE_LITE_V2        2983
-ssc                    MACH_SSC                SSC                     2984
-premierwave_en         MACH_PREMIERWAVE_EN     PREMIERWAVE_EN          2985
-wasabi                 MACH_WASABI             WASABI                  2986
 mx50_rdp               MACH_MX50_RDP           MX50_RDP                2988
 universal_c210         MACH_UNIVERSAL_C210     UNIVERSAL_C210          2989
 real6410               MACH_REAL6410           REAL6410                2990
-spx_sakura             MACH_SPX_SAKURA         SPX_SAKURA              2991
-ij3k_2440              MACH_IJ3K_2440          IJ3K_2440               2992
-omap3_bc10             MACH_OMAP3_BC10         OMAP3_BC10              2993
-thebe                  MACH_THEBE              THEBE                   2994
-rv082                  MACH_RV082              RV082                   2995
-armlguest              MACH_ARMLGUEST          ARMLGUEST               2996
-tjinc1000              MACH_TJINC1000          TJINC1000               2997
 dockstar               MACH_DOCKSTAR           DOCKSTAR                2998
-ax8008                 MACH_AX8008             AX8008                  2999
-gnet_sgce              MACH_GNET_SGCE          GNET_SGCE               3000
-pxwnas_500_1000                MACH_PXWNAS_500_1000    PXWNAS_500_1000         3001
-ea20                   MACH_EA20               EA20                    3002
-awm2                   MACH_AWM2               AWM2                    3003
 ti8148evm              MACH_TI8148EVM          TI8148EVM               3004
 seaboard               MACH_SEABOARD           SEABOARD                3005
-linkstation_chlv2      MACH_LINKSTATION_CHLV2  LINKSTATION_CHLV2       3006
-tera_pro2_rack         MACH_TERA_PRO2_RACK     TERA_PRO2_RACK          3007
-rubys                  MACH_RUBYS              RUBYS                   3008
-aquarius               MACH_AQUARIUS           AQUARIUS                3009
 mx53_ard               MACH_MX53_ARD           MX53_ARD                3010
 mx53_smd               MACH_MX53_SMD           MX53_SMD                3011
-lswxl                  MACH_LSWXL              LSWXL                   3012
-dove_avng_v3           MACH_DOVE_AVNG_V3       DOVE_AVNG_V3            3013
-sdi_ess_9263           MACH_SDI_ESS_9263       SDI_ESS_9263            3014
-jocpu550               MACH_JOCPU550           JOCPU550                3015
 msm8x60_rumi3          MACH_MSM8X60_RUMI3      MSM8X60_RUMI3           3016
 msm8x60_ffa            MACH_MSM8X60_FFA        MSM8X60_FFA             3017
-yanomami               MACH_YANOMAMI           YANOMAMI                3018
-gta04                  MACH_GTA04              GTA04                   3019
 cm_a510                        MACH_CM_A510            CM_A510                 3020
-omap3_rfs200           MACH_OMAP3_RFS200       OMAP3_RFS200            3021
-kx33xx                 MACH_KX33XX             KX33XX                  3022
-ptx7510                        MACH_PTX7510            PTX7510                 3023
-top9000                        MACH_TOP9000            TOP9000                 3024
-teenote                        MACH_TEENOTE            TEENOTE                 3025
-ts3                    MACH_TS3                TS3                     3026
-a0                     MACH_A0                 A0                      3027
-fsm9xxx_surf           MACH_FSM9XXX_SURF       FSM9XXX_SURF            3028
-fsm9xxx_ffa            MACH_FSM9XXX_FFA        FSM9XXX_FFA             3029
-frrhwcdma60w           MACH_FRRHWCDMA60W       FRRHWCDMA60W            3030
-remus                  MACH_REMUS              REMUS                   3031
-at91cap7xdk            MACH_AT91CAP7XDK        AT91CAP7XDK             3032
-at91cap7stk            MACH_AT91CAP7STK        AT91CAP7STK             3033
-kt_sbc_sam9_1          MACH_KT_SBC_SAM9_1      KT_SBC_SAM9_1           3034
-armada_xp_db           MACH_ARMADA_XP_DB       ARMADA_XP_DB            3036
-spdm                   MACH_SPDM               SPDM                    3037
-gtib                   MACH_GTIB               GTIB                    3038
-dgm3240                        MACH_DGM3240            DGM3240                 3039
-htcmega                        MACH_HTCMEGA            HTCMEGA                 3041
-tricorder              MACH_TRICORDER          TRICORDER               3042
 tx28                   MACH_TX28               TX28                    3043
-bstbrd                 MACH_BSTBRD             BSTBRD                  3044
-pwb3090                        MACH_PWB3090            PWB3090                 3045
-idea6410               MACH_IDEA6410           IDEA6410                3046
-qbc9263                        MACH_QBC9263            QBC9263                 3047
-borabora               MACH_BORABORA           BORABORA                3048
-valdez                 MACH_VALDEZ             VALDEZ                  3049
-ls9g20                 MACH_LS9G20             LS9G20                  3050
-mios_v1                        MACH_MIOS_V1            MIOS_V1                 3051
-s5pc110_crespo         MACH_S5PC110_CRESPO     S5PC110_CRESPO          3052
-controltek9g20         MACH_CONTROLTEK9G20     CONTROLTEK9G20          3053
-tin307                 MACH_TIN307             TIN307                  3054
-tin510                 MACH_TIN510             TIN510                  3055
-bluecheese             MACH_BLUECHEESE         BLUECHEESE              3057
-tem3x30                        MACH_TEM3X30            TEM3X30                 3058
-harvest_desoto         MACH_HARVEST_DESOTO     HARVEST_DESOTO          3059
-msm8x60_qrdc           MACH_MSM8X60_QRDC       MSM8X60_QRDC            3060
-spear900               MACH_SPEAR900           SPEAR900                3061
 pcontrol_g20           MACH_PCONTROL_G20       PCONTROL_G20            3062
-rdstor                 MACH_RDSTOR             RDSTOR                  3063
-usdloader              MACH_USDLOADER          USDLOADER               3064
-tsoploader             MACH_TSOPLOADER         TSOPLOADER              3065
-kronos                 MACH_KRONOS             KRONOS                  3066
-ffcore                 MACH_FFCORE             FFCORE                  3067
-mone                   MACH_MONE               MONE                    3068
-unit2s                 MACH_UNIT2S             UNIT2S                  3069
-acer_a5                        MACH_ACER_A5            ACER_A5                 3070
-etherpro_isp           MACH_ETHERPRO_ISP       ETHERPRO_ISP            3071
-stretchs7000           MACH_STRETCHS7000       STRETCHS7000            3072
-p87_smartsim           MACH_P87_SMARTSIM       P87_SMARTSIM            3073
-tulip                  MACH_TULIP              TULIP                   3074
-sunflower              MACH_SUNFLOWER          SUNFLOWER               3075
-rib                    MACH_RIB                RIB                     3076
-clod                   MACH_CLOD               CLOD                    3077
-rump                   MACH_RUMP               RUMP                    3078
-tenderloin             MACH_TENDERLOIN         TENDERLOIN              3079
-shortloin              MACH_SHORTLOIN          SHORTLOIN               3080
-antares                        MACH_ANTARES            ANTARES                 3082
-wb40n                  MACH_WB40N              WB40N                   3083
-herring                        MACH_HERRING            HERRING                 3084
-naxy400                        MACH_NAXY400            NAXY400                 3085
-naxy1200               MACH_NAXY1200           NAXY1200                3086
 vpr200                 MACH_VPR200             VPR200                  3087
-bug20                  MACH_BUG20              BUG20                   3088
-goflexnet              MACH_GOFLEXNET          GOFLEXNET               3089
 torbreck               MACH_TORBRECK           TORBRECK                3090
-saarb_mg1              MACH_SAARB_MG1          SAARB_MG1               3091
-callisto               MACH_CALLISTO           CALLISTO                3092
-multhsu                        MACH_MULTHSU            MULTHSU                 3093
-saluda                 MACH_SALUDA             SALUDA                  3094
-pemp_omap3_apollo      MACH_PEMP_OMAP3_APOLLO  PEMP_OMAP3_APOLLO       3095
-vc0718                 MACH_VC0718             VC0718                  3096
-mvblx                  MACH_MVBLX              MVBLX                   3097
-inhand_apeiron         MACH_INHAND_APEIRON     INHAND_APEIRON          3098
-inhand_fury            MACH_INHAND_FURY        INHAND_FURY             3099
-inhand_siren           MACH_INHAND_SIREN       INHAND_SIREN            3100
-hdnvp                  MACH_HDNVP              HDNVP                   3101
-softwinner             MACH_SOFTWINNER         SOFTWINNER              3102
 prima2_evb             MACH_PRIMA2_EVB         PRIMA2_EVB              3103
-nas6210                        MACH_NAS6210            NAS6210                 3104
-unisdev                        MACH_UNISDEV            UNISDEV                 3105
-sbca11                 MACH_SBCA11             SBCA11                  3106
-saga                   MACH_SAGA               SAGA                    3107
-ns_k330                        MACH_NS_K330            NS_K330                 3108
-tanna                  MACH_TANNA              TANNA                   3109
-imate8502              MACH_IMATE8502          IMATE8502               3110
-aspen                  MACH_ASPEN              ASPEN                   3111
-daintree_cwac          MACH_DAINTREE_CWAC      DAINTREE_CWAC           3112
-zmx25                  MACH_ZMX25              ZMX25                   3113
-maple1                 MACH_MAPLE1             MAPLE1                  3114
-qsd8x72_surf           MACH_QSD8X72_SURF       QSD8X72_SURF            3115
-qsd8x72_ffa            MACH_QSD8X72_FFA        QSD8X72_FFA             3116
-abilene                        MACH_ABILENE            ABILENE                 3117
-eigen_ttr              MACH_EIGEN_TTR          EIGEN_TTR               3118
-iomega_ix2_200         MACH_IOMEGA_IX2_200     IOMEGA_IX2_200          3119
-coretec_vcx7400                MACH_CORETEC_VCX7400    CORETEC_VCX7400         3120
-santiago               MACH_SANTIAGO           SANTIAGO                3121
-mx257sol               MACH_MX257SOL           MX257SOL                3122
-strasbourg             MACH_STRASBOURG         STRASBOURG              3123
-msm8x60_fluid          MACH_MSM8X60_FLUID      MSM8X60_FLUID           3124
-smartqv5               MACH_SMARTQV5           SMARTQV5                3125
-smartqv3               MACH_SMARTQV3           SMARTQV3                3126
-smartqv7               MACH_SMARTQV7           SMARTQV7                3127
 paz00                  MACH_PAZ00              PAZ00                   3128
 acmenetusfoxg20                MACH_ACMENETUSFOXG20    ACMENETUSFOXG20         3129
-fwbd_0404              MACH_FWBD_0404          FWBD_0404               3131
-hdgu                   MACH_HDGU               HDGU                    3132
-pyramid                        MACH_PYRAMID            PYRAMID                 3133
-epiphan                        MACH_EPIPHAN            EPIPHAN                 3134
-omap_bender            MACH_OMAP_BENDER        OMAP_BENDER             3135
-gurnard                        MACH_GURNARD            GURNARD                 3136
-gtl_it5100             MACH_GTL_IT5100         GTL_IT5100              3137
-bcm2708                        MACH_BCM2708            BCM2708                 3138
-mx51_ggc               MACH_MX51_GGC           MX51_GGC                3139
-sharespace             MACH_SHARESPACE         SHARESPACE              3140
-haba_knx_explorer      MACH_HABA_KNX_EXPLORER  HABA_KNX_EXPLORER       3141
-simtec_kirkmod         MACH_SIMTEC_KIRKMOD     SIMTEC_KIRKMOD          3142
-crux                   MACH_CRUX               CRUX                    3143
-mx51_bravo             MACH_MX51_BRAVO         MX51_BRAVO              3144
-charon                 MACH_CHARON             CHARON                  3145
-picocom3               MACH_PICOCOM3           PICOCOM3                3146
-picocom4               MACH_PICOCOM4           PICOCOM4                3147
-serrano                        MACH_SERRANO            SERRANO                 3148
-doubleshot             MACH_DOUBLESHOT         DOUBLESHOT              3149
-evsy                   MACH_EVSY               EVSY                    3150
-huashan                        MACH_HUASHAN            HUASHAN                 3151
-lausanne               MACH_LAUSANNE           LAUSANNE                3152
-emerald                        MACH_EMERALD            EMERALD                 3153
-tqma35                 MACH_TQMA35             TQMA35                  3154
-marvel                 MACH_MARVEL             MARVEL                  3155
-manuae                 MACH_MANUAE             MANUAE                  3156
-chacha                 MACH_CHACHA             CHACHA                  3157
-lemon                  MACH_LEMON              LEMON                   3158
-csc                    MACH_CSC                CSC                     3159
-gira_knxip_router      MACH_GIRA_KNXIP_ROUTER  GIRA_KNXIP_ROUTER       3160
-t20                    MACH_T20                T20                     3161
-hdmini                 MACH_HDMINI             HDMINI                  3162
-sciphone_g2            MACH_SCIPHONE_G2        SCIPHONE_G2             3163
-express                        MACH_EXPRESS            EXPRESS                 3164
-express_kt             MACH_EXPRESS_KT         EXPRESS_KT              3165
-maximasp               MACH_MAXIMASP           MAXIMASP                3166
-nitrogen_imx51         MACH_NITROGEN_IMX51     NITROGEN_IMX51          3167
-nitrogen_imx53         MACH_NITROGEN_IMX53     NITROGEN_IMX53          3168
-sunfire                        MACH_SUNFIRE            SUNFIRE                 3169
-arowana                        MACH_AROWANA            AROWANA                 3170
-tegra_daytona          MACH_TEGRA_DAYTONA      TEGRA_DAYTONA           3171
-tegra_swordfish                MACH_TEGRA_SWORDFISH    TEGRA_SWORDFISH         3172
-edison                 MACH_EDISON             EDISON                  3173
-svp8500v1              MACH_SVP8500V1          SVP8500V1               3174
-svp8500v2              MACH_SVP8500V2          SVP8500V2               3175
-svp5500                        MACH_SVP5500            SVP5500                 3176
-b5500                  MACH_B5500              B5500                   3177
-s5500                  MACH_S5500              S5500                   3178
-icon                   MACH_ICON               ICON                    3179
-elephant               MACH_ELEPHANT           ELEPHANT                3180
-shooter                        MACH_SHOOTER            SHOOTER                 3182
-spade_lte              MACH_SPADE_LTE          SPADE_LTE               3183
-philhwani              MACH_PHILHWANI          PHILHWANI               3184
-gsncomm                        MACH_GSNCOMM            GSNCOMM                 3185
-strasbourg_a2          MACH_STRASBOURG_A2      STRASBOURG_A2           3186
-mmm                    MACH_MMM                MMM                     3187
-davinci_dm365_bv       MACH_DAVINCI_DM365_BV   DAVINCI_DM365_BV        3188
 ag5evm                 MACH_AG5EVM             AG5EVM                  3189
-sc575plc               MACH_SC575PLC           SC575PLC                3190
-sc575hmi               MACH_SC575IPC           SC575IPC                3191
-omap3_tdm3730          MACH_OMAP3_TDM3730      OMAP3_TDM3730           3192
-top9000_eval           MACH_TOP9000_EVAL       TOP9000_EVAL            3194
-top9000_su             MACH_TOP9000_SU         TOP9000_SU              3195
-utm300                 MACH_UTM300             UTM300                  3196
 tsunagi                        MACH_TSUNAGI            TSUNAGI                 3197
-ts75xx                 MACH_TS75XX             TS75XX                  3198
-ts47xx                 MACH_TS47XX             TS47XX                  3200
-da850_k5               MACH_DA850_K5           DA850_K5                3201
-ax502                  MACH_AX502              AX502                   3202
-igep0032               MACH_IGEP0032           IGEP0032                3203
-antero                 MACH_ANTERO             ANTERO                  3204
-synergy                        MACH_SYNERGY            SYNERGY                 3205
 ics_if_voip            MACH_ICS_IF_VOIP        ICS_IF_VOIP             3206
 wlf_cragg_6410         MACH_WLF_CRAGG_6410     WLF_CRAGG_6410          3207
-punica                 MACH_PUNICA             PUNICA                  3208
 trimslice              MACH_TRIMSLICE          TRIMSLICE               3209
-mx27_wmultra           MACH_MX27_WMULTRA       MX27_WMULTRA            3210
 mackerel               MACH_MACKEREL           MACKEREL                3211
-fa9x27                 MACH_FA9X27             FA9X27                  3213
-ns2816tb               MACH_NS2816TB           NS2816TB                3214
-ns2816_ntpad           MACH_NS2816_NTPAD       NS2816_NTPAD            3215
-ns2816_ntnb            MACH_NS2816_NTNB        NS2816_NTNB             3216
 kaen                   MACH_KAEN               KAEN                    3217
-nv1000                 MACH_NV1000             NV1000                  3218
-nuc950ts               MACH_NUC950TS           NUC950TS                3219
 nokia_rm680            MACH_NOKIA_RM680        NOKIA_RM680             3220
-ast2200                        MACH_AST2200            AST2200                 3221
-lead                   MACH_LEAD               LEAD                    3222
-unino1                 MACH_UNINO1             UNINO1                  3223
-greeco                 MACH_GREECO             GREECO                  3224
-verdi                  MACH_VERDI              VERDI                   3225
 dm6446_adbox           MACH_DM6446_ADBOX       DM6446_ADBOX            3226
 quad_salsa             MACH_QUAD_SALSA         QUAD_SALSA              3227
 abb_gma_1_1            MACH_ABB_GMA_1_1        ABB_GMA_1_1             3228
@@ -949,13 +621,11 @@ koi                       MACH_KOI                KOI                     3312
 ts4800                 MACH_TS4800             TS4800                  3313
 tqma9263               MACH_TQMA9263           TQMA9263                3314
 holiday                        MACH_HOLIDAY            HOLIDAY                 3315
-dma_6410               MACH_DMA6410            DMA6410                 3316
 pcats_overlay          MACH_PCATS_OVERLAY      PCATS_OVERLAY           3317
 hwgw6410               MACH_HWGW6410           HWGW6410                3318
 shenzhou               MACH_SHENZHOU           SHENZHOU                3319
 cwme9210               MACH_CWME9210           CWME9210                3320
 cwme9210js             MACH_CWME9210JS         CWME9210JS              3321
-pgs_v1                 MACH_PGS_SITARA         PGS_SITARA              3322
 colibri_tegra2         MACH_COLIBRI_TEGRA2     COLIBRI_TEGRA2          3323
 w21                    MACH_W21                W21                     3324
 polysat1               MACH_POLYSAT1           POLYSAT1                3325
@@ -1021,13 +691,11 @@ viprinet         MACH_VIPRINET           VIPRINET                3385
 bockw                  MACH_BOCKW              BOCKW                   3386
 eva2000                        MACH_EVA2000            EVA2000                 3387
 steelyard              MACH_STEELYARD          STEELYARD               3388
-sdh001                 MACH_MACH_SDH001        MACH_SDH001             3390
 nsslsboard             MACH_NSSLSBOARD         NSSLSBOARD              3392
 geneva_b5              MACH_GENEVA_B5          GENEVA_B5               3393
 spear1340              MACH_SPEAR1340          SPEAR1340               3394
 rexmas                 MACH_REXMAS             REXMAS                  3395
 msm8960_cdp            MACH_MSM8960_CDP        MSM8960_CDP             3396
-msm8960_mdp            MACH_MSM8960_MDP        MSM8960_MDP             3397
 msm8960_fluid          MACH_MSM8960_FLUID      MSM8960_FLUID           3398
 msm8960_apq            MACH_MSM8960_APQ        MSM8960_APQ             3399
 helios_v2              MACH_HELIOS_V2          HELIOS_V2               3400
@@ -1123,6 +791,381 @@ blissc                   MACH_BLISSC             BLISSC                  3491
 thales_adc             MACH_THALES_ADC         THALES_ADC              3492
 ubisys_p9d_evp         MACH_UBISYS_P9D_EVP     UBISYS_P9D_EVP          3493
 atdgp318               MACH_ATDGP318           ATDGP318                3494
+dma210u                        MACH_DMA210U            DMA210U                 3495
+em_t3                  MACH_EM_T3              EM_T3                   3496
+htx3250                        MACH_HTX3250            HTX3250                 3497
+g50                    MACH_G50                G50                     3498
+eco5                   MACH_ECO5               ECO5                    3499
+wintergrasp            MACH_WINTERGRASP        WINTERGRASP             3500
+puro                   MACH_PURO               PURO                    3501
+shooter_k              MACH_SHOOTER_K          SHOOTER_K               3502
+nspire                 MACH_NSPIRE             NSPIRE                  3503
+mickxx                 MACH_MICKXX             MICKXX                  3504
+lxmb                   MACH_LXMB               LXMB                    3505
+adam                   MACH_ADAM               ADAM                    3507
+b1004                  MACH_B1004              B1004                   3508
+oboea                  MACH_OBOEA              OBOEA                   3509
+a1015                  MACH_A1015              A1015                   3510
+robin_vbdt30           MACH_ROBIN_VBDT30       ROBIN_VBDT30            3511
+tegra_enterprise       MACH_TEGRA_ENTERPRISE   TEGRA_ENTERPRISE        3512
+rfl108200_mk10         MACH_RFL108200_MK10     RFL108200_MK10          3513
+rfl108300_mk16         MACH_RFL108300_MK16     RFL108300_MK16          3514
+rover_v7               MACH_ROVER_V7           ROVER_V7                3515
+miphone                        MACH_MIPHONE            MIPHONE                 3516
+femtobts               MACH_FEMTOBTS           FEMTOBTS                3517
+monopoli               MACH_MONOPOLI           MONOPOLI                3518
+boss                   MACH_BOSS               BOSS                    3519
+davinci_dm368_vtam     MACH_DAVINCI_DM368_VTAM DAVINCI_DM368_VTAM      3520
+clcon                  MACH_CLCON              CLCON                   3521
+nokia_rm696            MACH_NOKIA_RM696        NOKIA_RM696             3522
+tahiti                 MACH_TAHITI             TAHITI                  3523
+fighter                        MACH_FIGHTER            FIGHTER                 3524
+sgh_i710               MACH_SGH_I710           SGH_I710                3525
+integreproscb          MACH_INTEGREPROSCB      INTEGREPROSCB           3526
+monza                  MACH_MONZA              MONZA                   3527
+calimain               MACH_CALIMAIN           CALIMAIN                3528
+mx6q_sabreauto         MACH_MX6Q_SABREAUTO     MX6Q_SABREAUTO          3529
+gma01x                 MACH_GMA01X             GMA01X                  3530
+sbc51                  MACH_SBC51              SBC51                   3531
+fit                    MACH_FIT                FIT                     3532
+steelhead              MACH_STEELHEAD          STEELHEAD               3533
+panther                        MACH_PANTHER            PANTHER                 3534
+msm8960_liquid         MACH_MSM8960_LIQUID     MSM8960_LIQUID          3535
+lexikonct              MACH_LEXIKONCT          LEXIKONCT               3536
+ns2816_stb             MACH_NS2816_STB         NS2816_STB              3537
+sei_mm2_lpc3250                MACH_SEI_MM2_LPC3250    SEI_MM2_LPC3250         3538
+cmimx53                        MACH_CMIMX53            CMIMX53                 3539
+sandwich               MACH_SANDWICH           SANDWICH                3540
+chief                  MACH_CHIEF              CHIEF                   3541
+pogo_e02               MACH_POGO_E02           POGO_E02                3542
+mikrap_x168            MACH_MIKRAP_X168        MIKRAP_X168             3543
+htcmozart              MACH_HTCMOZART          HTCMOZART               3544
+htcgold                        MACH_HTCGOLD            HTCGOLD                 3545
+mt72xx                 MACH_MT72XX             MT72XX                  3546
+mx51_ivy               MACH_MX51_IVY           MX51_IVY                3547
+mx51_lvd               MACH_MX51_LVD           MX51_LVD                3548
+omap3_wiser2           MACH_OMAP3_WISER2       OMAP3_WISER2            3549
+dreamplug              MACH_DREAMPLUG          DREAMPLUG               3550
+cobas_c_111            MACH_COBAS_C_111        COBAS_C_111             3551
+cobas_u_411            MACH_COBAS_U_411        COBAS_U_411             3552
+hssd                   MACH_HSSD               HSSD                    3553
+iom35x                 MACH_IOM35X             IOM35X                  3554
+psom_omap              MACH_PSOM_OMAP          PSOM_OMAP               3555
+iphone_2g              MACH_IPHONE_2G          IPHONE_2G               3556
+iphone_3g              MACH_IPHONE_3G          IPHONE_3G               3557
+ipod_touch_1g          MACH_IPOD_TOUCH_1G      IPOD_TOUCH_1G           3558
+pharos_tpc             MACH_PHAROS_TPC         PHAROS_TPC              3559
+mx53_hydra             MACH_MX53_HYDRA         MX53_HYDRA              3560
+ns2816_dev_board       MACH_NS2816_DEV_BOARD   NS2816_DEV_BOARD        3561
+iphone_3gs             MACH_IPHONE_3GS         IPHONE_3GS              3562
+iphone_4               MACH_IPHONE_4           IPHONE_4                3563
+ipod_touch_4g          MACH_IPOD_TOUCH_4G      IPOD_TOUCH_4G           3564
+dragon_e1100           MACH_DRAGON_E1100       DRAGON_E1100            3565
+topside                        MACH_TOPSIDE            TOPSIDE                 3566
+irisiii                        MACH_IRISIII            IRISIII                 3567
+deto_macarm9           MACH_DETO_MACARM9       DETO_MACARM9            3568
+eti_d1                 MACH_ETI_D1             ETI_D1                  3569
+som3530sdk             MACH_SOM3530SDK         SOM3530SDK              3570
+oc_engine              MACH_OC_ENGINE          OC_ENGINE               3571
+apq8064_sim            MACH_APQ8064_SIM        APQ8064_SIM             3572
+alps                   MACH_ALPS               ALPS                    3575
+tny_t3730              MACH_TNY_T3730          TNY_T3730               3576
+geryon_nfe             MACH_GERYON_NFE         GERYON_NFE              3577
+ns2816_ref_board       MACH_NS2816_REF_BOARD   NS2816_REF_BOARD        3578
+silverstone            MACH_SILVERSTONE        SILVERSTONE             3579
+mtt2440                        MACH_MTT2440            MTT2440                 3580
+ynicdb                 MACH_YNICDB             YNICDB                  3581
+bct                    MACH_BCT                BCT                     3582
+tuscan                 MACH_TUSCAN             TUSCAN                  3583
+xbt_sam9g45            MACH_XBT_SAM9G45        XBT_SAM9G45             3584
+enbw_cmc               MACH_ENBW_CMC           ENBW_CMC                3585
+ch104mx257             MACH_CH104MX257         CH104MX257              3587
+openpri                        MACH_OPENPRI            OPENPRI                 3588
+am335xevm              MACH_AM335XEVM          AM335XEVM               3589
+picodmb                        MACH_PICODMB            PICODMB                 3590
+waluigi                        MACH_WALUIGI            WALUIGI                 3591
+punicag7               MACH_PUNICAG7           PUNICAG7                3592
+ipad_1g                        MACH_IPAD_1G            IPAD_1G                 3593
+appletv_2g             MACH_APPLETV_2G         APPLETV_2G              3594
+mach_ecog45            MACH_MACH_ECOG45        MACH_ECOG45             3595
+ait_cam_enc_4xx                MACH_AIT_CAM_ENC_4XX    AIT_CAM_ENC_4XX         3596
+runnymede              MACH_RUNNYMEDE          RUNNYMEDE               3597
+play                   MACH_PLAY               PLAY                    3598
+hw90260                        MACH_HW90260            HW90260                 3599
+tagh                   MACH_TAGH               TAGH                    3600
+filbert                        MACH_FILBERT            FILBERT                 3601
+getinge_netcomv3       MACH_GETINGE_NETCOMV3   GETINGE_NETCOMV3        3602
+cw20                   MACH_CW20               CW20                    3603
+cinema                 MACH_CINEMA             CINEMA                  3604
+cinema_tea             MACH_CINEMA_TEA         CINEMA_TEA              3605
+cinema_coffee          MACH_CINEMA_COFFEE      CINEMA_COFFEE           3606
+cinema_juice           MACH_CINEMA_JUICE       CINEMA_JUICE            3607
+mx53_mirage2           MACH_MX53_MIRAGE2       MX53_MIRAGE2            3609
+mx53_efikasb           MACH_MX53_EFIKASB       MX53_EFIKASB            3610
+stm_b2000              MACH_STM_B2000          STM_B2000               3612
 m28evk                 MACH_M28EVK             M28EVK                  3613
+pda                    MACH_PDA                PDA                     3614
+meraki_mr58            MACH_MERAKI_MR58        MERAKI_MR58             3615
+kota2                  MACH_KOTA2              KOTA2                   3616
+letcool                        MACH_LETCOOL            LETCOOL                 3617
+mx27iat                        MACH_MX27IAT            MX27IAT                 3618
+apollo_td              MACH_APOLLO_TD          APOLLO_TD               3619
+arena                  MACH_ARENA              ARENA                   3620
+gsngateway             MACH_GSNGATEWAY         GSNGATEWAY              3621
+lf2000                 MACH_LF2000             LF2000                  3622
+bonito                 MACH_BONITO             BONITO                  3623
+asymptote              MACH_ASYMPTOTE          ASYMPTOTE               3624
+bst2brd                        MACH_BST2BRD            BST2BRD                 3625
+tx335s                 MACH_TX335S             TX335S                  3626
+pelco_tesla            MACH_PELCO_TESLA        PELCO_TESLA             3627
+rrhtestplat            MACH_RRHTESTPLAT        RRHTESTPLAT             3628
+vidtonic_pro           MACH_VIDTONIC_PRO       VIDTONIC_PRO            3629
+pl_apollo              MACH_PL_APOLLO          PL_APOLLO               3630
+pl_phoenix             MACH_PL_PHOENIX         PL_PHOENIX              3631
+m28cu3                 MACH_M28CU3             M28CU3                  3632
+vvbox_hd               MACH_VVBOX_HD           VVBOX_HD                3633
+coreware_sam9260_      MACH_COREWARE_SAM9260_  COREWARE_SAM9260_       3634
+marmaduke              MACH_MARMADUKE          MARMADUKE               3635
+amg_xlcore_camera      MACH_AMG_XLCORE_CAMERA  AMG_XLCORE_CAMERA       3636
+omap3_egf              MACH_OMAP3_EGF          OMAP3_EGF               3637
 smdk4212               MACH_SMDK4212           SMDK4212                3638
+dnp9200                        MACH_DNP9200            DNP9200                 3639
+tf101                  MACH_TF101              TF101                   3640
+omap3silvio            MACH_OMAP3SILVIO        OMAP3SILVIO             3641
+picasso2               MACH_PICASSO2           PICASSO2                3642
+vangogh2               MACH_VANGOGH2           VANGOGH2                3643
+olpc_xo_1_75           MACH_OLPC_XO_1_75       OLPC_XO_1_75            3644
+gx400                  MACH_GX400              GX400                   3645
+gs300                  MACH_GS300              GS300                   3646
+acer_a9                        MACH_ACER_A9            ACER_A9                 3647
+vivow_evm              MACH_VIVOW_EVM          VIVOW_EVM               3648
+veloce_cxq             MACH_VELOCE_CXQ         VELOCE_CXQ              3649
+veloce_cxm             MACH_VELOCE_CXM         VELOCE_CXM              3650
+p1852                  MACH_P1852              P1852                   3651
+naxy100                        MACH_NAXY100            NAXY100                 3652
+taishan                        MACH_TAISHAN            TAISHAN                 3653
+touchlink              MACH_TOUCHLINK          TOUCHLINK               3654
+stm32f103ze            MACH_STM32F103ZE        STM32F103ZE             3655
+mcx                    MACH_MCX                MCX                     3656
+stm_nmhdk_fli7610      MACH_STM_NMHDK_FLI7610  STM_NMHDK_FLI7610       3657
+top28x                 MACH_TOP28X             TOP28X                  3658
+okl4vp_microvisor      MACH_OKL4VP_MICROVISOR  OKL4VP_MICROVISOR       3659
+pop                    MACH_POP                POP                     3660
+layer                  MACH_LAYER              LAYER                   3661
+trondheim              MACH_TRONDHEIM          TRONDHEIM               3662
+eva                    MACH_EVA                EVA                     3663
+trust_taurus           MACH_TRUST_TAURUS       TRUST_TAURUS            3664
+ns2816_huashan         MACH_NS2816_HUASHAN     NS2816_HUASHAN          3665
+ns2816_yangcheng       MACH_NS2816_YANGCHENG   NS2816_YANGCHENG        3666
+p852                   MACH_P852               P852                    3667
+flea3                  MACH_FLEA3              FLEA3                   3668
+bowfin                 MACH_BOWFIN             BOWFIN                  3669
+mv88de3100             MACH_MV88DE3100         MV88DE3100              3670
+pia_am35x              MACH_PIA_AM35X          PIA_AM35X               3671
+cedar                  MACH_CEDAR              CEDAR                   3672
+picasso_e              MACH_PICASSO_E          PICASSO_E               3673
+samsung_e60            MACH_SAMSUNG_E60        SAMSUNG_E60             3674
+sdvr_mini              MACH_SDVR_MINI          SDVR_MINI               3676
+omap3_ij3k             MACH_OMAP3_IJ3K         OMAP3_IJ3K              3677
+modasmc1               MACH_MODASMC1           MODASMC1                3678
+apq8064_rumi3          MACH_APQ8064_RUMI3      APQ8064_RUMI3           3679
+matrix506              MACH_MATRIX506          MATRIX506               3680
+msm9615_mtp            MACH_MSM9615_MTP        MSM9615_MTP             3681
+dm36x_spawndc          MACH_DM36X_SPAWNDC      DM36X_SPAWNDC           3682
+sff792                 MACH_SFF792             SFF792                  3683
+am335xiaevm            MACH_AM335XIAEVM        AM335XIAEVM             3684
+g3c2440                        MACH_G3C2440            G3C2440                 3685
+tion270                        MACH_TION270            TION270                 3686
+w22q7arm02             MACH_W22Q7ARM02         W22Q7ARM02              3687
+omap_cat               MACH_OMAP_CAT           OMAP_CAT                3688
+at91sam9n12ek          MACH_AT91SAM9N12EK      AT91SAM9N12EK           3689
+morrison               MACH_MORRISON           MORRISON                3690
+svdu                   MACH_SVDU               SVDU                    3691
+lpp01                  MACH_LPP01              LPP01                   3692
+ubc283                 MACH_UBC283             UBC283                  3693
+zeppelin               MACH_ZEPPELIN           ZEPPELIN                3694
+motus                  MACH_MOTUS              MOTUS                   3695
+neomainboard           MACH_NEOMAINBOARD       NEOMAINBOARD            3696
+devkit3250             MACH_DEVKIT3250         DEVKIT3250              3697
+devkit7000             MACH_DEVKIT7000         DEVKIT7000              3698
+fmc_uic                        MACH_FMC_UIC            FMC_UIC                 3699
+fmc_dcm                        MACH_FMC_DCM            FMC_DCM                 3700
+batwm                  MACH_BATWM              BATWM                   3701
+atlas6cb               MACH_ATLAS6CB           ATLAS6CB                3702
+blue                   MACH_BLUE               BLUE                    3705
+colorado               MACH_COLORADO           COLORADO                3706
+popc                   MACH_POPC               POPC                    3707
+promwad_jade           MACH_PROMWAD_JADE       PROMWAD_JADE            3708
+amp                    MACH_AMP                AMP                     3709
+gnet_amp               MACH_GNET_AMP           GNET_AMP                3710
+toques                 MACH_TOQUES             TOQUES                  3711
+dct_storm              MACH_DCT_STORM          DCT_STORM               3713
+owl                    MACH_OWL                OWL                     3715
+cogent_csb1741         MACH_COGENT_CSB1741     COGENT_CSB1741          3716
+adillustra610          MACH_ADILLUSTRA610      ADILLUSTRA610           3718
+ecafe_na04             MACH_ECAFE_NA04         ECAFE_NA04              3719
+popct                  MACH_POPCT              POPCT                   3720
+omap3_helena           MACH_OMAP3_HELENA       OMAP3_HELENA            3721
+ach                    MACH_ACH                ACH                     3722
+module_dtb             MACH_MODULE_DTB         MODULE_DTB              3723
+oslo_elisabeth         MACH_OSLO_ELISABETH     OSLO_ELISABETH          3725
+tt01                   MACH_TT01               TT01                    3726
+msm8930_cdp            MACH_MSM8930_CDP        MSM8930_CDP             3727
+msm8930_mtp            MACH_MSM8930_MTP        MSM8930_MTP             3728
+msm8930_fluid          MACH_MSM8930_FLUID      MSM8930_FLUID           3729
+ltu11                  MACH_LTU11              LTU11                   3730
+am1808_spawnco         MACH_AM1808_SPAWNCO     AM1808_SPAWNCO          3731
+flx6410                        MACH_FLX6410            FLX6410                 3732
+mx6q_qsb               MACH_MX6Q_QSB           MX6Q_QSB                3733
+mx53_plt424            MACH_MX53_PLT424        MX53_PLT424             3734
+jasmine                        MACH_JASMINE            JASMINE                 3735
+l138_owlboard_plus     MACH_L138_OWLBOARD_PLUS L138_OWLBOARD_PLUS      3736
+wr21                   MACH_WR21               WR21                    3737
+peaboy                 MACH_PEABOY             PEABOY                  3739
+mx28_plato             MACH_MX28_PLATO         MX28_PLATO              3740
+kacom2                 MACH_KACOM2             KACOM2                  3741
+slco                   MACH_SLCO               SLCO                    3742
+imx51pico              MACH_IMX51PICO          IMX51PICO               3743
+glink1                 MACH_GLINK1             GLINK1                  3744
+diamond                        MACH_DIAMOND            DIAMOND                 3745
+d9000                  MACH_D9000              D9000                   3746
+w5300e01               MACH_W5300E01           W5300E01                3747
+im6000                 MACH_IM6000             IM6000                  3748
+mx51_fred51            MACH_MX51_FRED51        MX51_FRED51             3749
+stm32f2                        MACH_STM32F2            STM32F2                 3750
+ville                  MACH_VILLE              VILLE                   3751
+ptip_murnau            MACH_PTIP_MURNAU        PTIP_MURNAU             3752
+ptip_classic           MACH_PTIP_CLASSIC       PTIP_CLASSIC            3753
+mx53grb                        MACH_MX53GRB            MX53GRB                 3754
+gagarin                        MACH_GAGARIN            GAGARIN                 3755
+nas2big                        MACH_NAS2BIG            NAS2BIG                 3757
+superfemto             MACH_SUPERFEMTO         SUPERFEMTO              3758
+teufel                 MACH_TEUFEL             TEUFEL                  3759
+dinara                 MACH_DINARA             DINARA                  3760
+vanquish               MACH_VANQUISH           VANQUISH                3761
+zipabox1               MACH_ZIPABOX1           ZIPABOX1                3762
+u9540                  MACH_U9540              U9540                   3763
+jet                    MACH_JET                JET                     3764
 smdk4412               MACH_SMDK4412           SMDK4412                3765
+elite                  MACH_ELITE              ELITE                   3766
+spear320_hmi           MACH_SPEAR320_HMI       SPEAR320_HMI            3767
+ontario                        MACH_ONTARIO            ONTARIO                 3768
+mx6q_sabrelite         MACH_MX6Q_SABRELITE     MX6Q_SABRELITE          3769
+vc200                  MACH_VC200              VC200                   3770
+msm7625a_ffa           MACH_MSM7625A_FFA       MSM7625A_FFA            3771
+msm7625a_surf          MACH_MSM7625A_SURF      MSM7625A_SURF           3772
+benthossbp             MACH_BENTHOSSBP         BENTHOSSBP              3773
+smdk5210               MACH_SMDK5210           SMDK5210                3774
+empq2300               MACH_EMPQ2300           EMPQ2300                3775
+minipos                        MACH_MINIPOS            MINIPOS                 3776
+omap5_sevm             MACH_OMAP5_SEVM         OMAP5_SEVM              3777
+shelter                        MACH_SHELTER            SHELTER                 3778
+omap3_devkit8500       MACH_OMAP3_DEVKIT8500   OMAP3_DEVKIT8500        3779
+edgetd                 MACH_EDGETD             EDGETD                  3780
+copperyard             MACH_COPPERYARD         COPPERYARD              3781
+edge                   MACH_EDGE               EDGE                    3782
+edge_u                 MACH_EDGE_U             EDGE_U                  3783
+edge_td                        MACH_EDGE_TD            EDGE_TD                 3784
+wdss                   MACH_WDSS               WDSS                    3785
+dl_pb25                        MACH_DL_PB25            DL_PB25                 3786
+dss11                  MACH_DSS11              DSS11                   3787
+cpa                    MACH_CPA                CPA                     3788
+aptp2000               MACH_APTP2000           APTP2000                3789
+marzen                 MACH_MARZEN             MARZEN                  3790
+st_turbine             MACH_ST_TURBINE         ST_TURBINE              3791
+gtl_it3300             MACH_GTL_IT3300         GTL_IT3300              3792
+mx6_mule               MACH_MX6_MULE           MX6_MULE                3793
+v7pxa_dt               MACH_V7PXA_DT           V7PXA_DT                3794
+v7mmp_dt               MACH_V7MMP_DT           V7MMP_DT                3795
+dragon7                        MACH_DRAGON7            DRAGON7                 3796
+krome                  MACH_KROME              KROME                   3797
+oratisdante            MACH_ORATISDANTE        ORATISDANTE             3798
+fathom                 MACH_FATHOM             FATHOM                  3799
+dns325                 MACH_DNS325             DNS325                  3800
+sarnen                 MACH_SARNEN             SARNEN                  3801
+ubisys_g1              MACH_UBISYS_G1          UBISYS_G1               3802
+mx53_pf1               MACH_MX53_PF1           MX53_PF1                3803
+asanti                 MACH_ASANTI             ASANTI                  3804
+volta                  MACH_VOLTA              VOLTA                   3805
+knight                 MACH_KNIGHT             KNIGHT                  3807
+beaglebone             MACH_BEAGLEBONE         BEAGLEBONE              3808
+becker                 MACH_BECKER             BECKER                  3809
+fc360                  MACH_FC360              FC360                   3810
+pmi2_xls               MACH_PMI2_XLS           PMI2_XLS                3811
+taranto                        MACH_TARANTO            TARANTO                 3812
+plutux                 MACH_PLUTUX             PLUTUX                  3813
+ipmp_medcom            MACH_IPMP_MEDCOM        IPMP_MEDCOM             3814
+absolut                        MACH_ABSOLUT            ABSOLUT                 3815
+awpb3                  MACH_AWPB3              AWPB3                   3816
+nfp32xx_dt             MACH_NFP32XX_DT         NFP32XX_DT              3817
+dl_pb53                        MACH_DL_PB53            DL_PB53                 3818
+acu_ii                 MACH_ACU_II             ACU_II                  3819
+avalon                 MACH_AVALON             AVALON                  3820
+sphinx                 MACH_SPHINX             SPHINX                  3821
+titan_t                        MACH_TITAN_T            TITAN_T                 3822
+harvest_boris          MACH_HARVEST_BORIS      HARVEST_BORIS           3823
+mach_msm7x30_m3s       MACH_MACH_MSM7X30_M3S   MACH_MSM7X30_M3S        3824
+smdk5250               MACH_SMDK5250           SMDK5250                3825
+imxt_lite              MACH_IMXT_LITE          IMXT_LITE               3826
+imxt_std               MACH_IMXT_STD           IMXT_STD                3827
+imxt_log               MACH_IMXT_LOG           IMXT_LOG                3828
+imxt_nav               MACH_IMXT_NAV           IMXT_NAV                3829
+imxt_full              MACH_IMXT_FULL          IMXT_FULL               3830
+ag09015                        MACH_AG09015            AG09015                 3831
+am3517_mt_ventoux      MACH_AM3517_MT_VENTOUX  AM3517_MT_VENTOUX       3832
+dp1arm9                        MACH_DP1ARM9            DP1ARM9                 3833
+picasso_m              MACH_PICASSO_M          PICASSO_M               3834
+video_gadget           MACH_VIDEO_GADGET       VIDEO_GADGET            3835
+mtt_om3x               MACH_MTT_OM3X           MTT_OM3X                3836
+mx6q_arm2              MACH_MX6Q_ARM2          MX6Q_ARM2               3837
+picosam9g45            MACH_PICOSAM9G45        PICOSAM9G45             3838
+vpm_dm365              MACH_VPM_DM365          VPM_DM365               3839
+bonfire                        MACH_BONFIRE            BONFIRE                 3840
+mt2p2d                 MACH_MT2P2D             MT2P2D                  3841
+sigpda01               MACH_SIGPDA01           SIGPDA01                3842
+cn27                   MACH_CN27               CN27                    3843
+mx25_cwtap             MACH_MX25_CWTAP         MX25_CWTAP              3844
+apf28                  MACH_APF28              APF28                   3845
+pelco_maxwell          MACH_PELCO_MAXWELL      PELCO_MAXWELL           3846
+ge_phoenix             MACH_GE_PHOENIX         GE_PHOENIX              3847
+empc_a500              MACH_EMPC_A500          EMPC_A500               3848
+ims_arm9               MACH_IMS_ARM9           IMS_ARM9                3849
+mini2416               MACH_MINI2416           MINI2416                3850
+mini2450               MACH_MINI2450           MINI2450                3851
+mini310                        MACH_MINI310            MINI310                 3852
+spear_hurricane                MACH_SPEAR_HURRICANE    SPEAR_HURRICANE         3853
+mt7208                 MACH_MT7208             MT7208                  3854
+lpc178x                        MACH_LPC178X            LPC178X                 3855
+farleys                        MACH_FARLEYS            FARLEYS                 3856
+efm32gg_dk3750         MACH_EFM32GG_DK3750     EFM32GG_DK3750          3857
+zeus_board             MACH_ZEUS_BOARD         ZEUS_BOARD              3858
+cc51                   MACH_CC51               CC51                    3859
+fxi_c210               MACH_FXI_C210           FXI_C210                3860
+msm8627_cdp            MACH_MSM8627_CDP        MSM8627_CDP             3861
+msm8627_mtp            MACH_MSM8627_MTP        MSM8627_MTP             3862
+armadillo800eva                MACH_ARMADILLO800EVA    ARMADILLO800EVA         3863
+primou                 MACH_PRIMOU             PRIMOU                  3864
+primoc                 MACH_PRIMOC             PRIMOC                  3865
+primoct                        MACH_PRIMOCT            PRIMOCT                 3866
+a9500                  MACH_A9500              A9500                   3867
+pluto                  MACH_PLUTO              PLUTO                   3869
+acfx100                        MACH_ACFX100            ACFX100                 3870
+msm8625_rumi3          MACH_MSM8625_RUMI3      MSM8625_RUMI3           3871
+valente                        MACH_VALENTE            VALENTE                 3872
+crfs_rfeye             MACH_CRFS_RFEYE         CRFS_RFEYE              3873
+rfeye                  MACH_RFEYE              RFEYE                   3874
+phidget_sbc3           MACH_PHIDGET_SBC3       PHIDGET_SBC3            3875
+tcw_mika               MACH_TCW_MIKA           TCW_MIKA                3876
+imx28_egf              MACH_IMX28_EGF          IMX28_EGF               3877
+valente_wx             MACH_VALENTE_WX         VALENTE_WX              3878
+huangshans             MACH_HUANGSHANS         HUANGSHANS              3879
+bosphorus1             MACH_BOSPHORUS1         BOSPHORUS1              3880
+prima                  MACH_PRIMA              PRIMA                   3881
+evita_ulk              MACH_EVITA_ULK          EVITA_ULK               3884
+merisc600              MACH_MERISC600          MERISC600               3885
+dolak                  MACH_DOLAK              DOLAK                   3886
+sbc53                  MACH_SBC53              SBC53                   3887
+elite_ulk              MACH_ELITE_ULK          ELITE_ULK               3888
+pov2                   MACH_POV2               POV2                    3889
+ipod_touch_2g          MACH_IPOD_TOUCH_2G      IPOD_TOUCH_2G           3890
+da850_pqab             MACH_DA850_PQAB         DA850_PQAB              3891
index 6073b187528a26a8ac16826aa98cd7986c5724ed..5a274af31b2b82b8f1e561fe66d7c2f0ce58d96b 100644 (file)
@@ -60,6 +60,7 @@ typedef u64 cputime64_t;
  */
 #define cputime_to_usecs(__ct)         ((__ct) / NSEC_PER_USEC)
 #define usecs_to_cputime(__usecs)      ((__usecs) * NSEC_PER_USEC)
+#define usecs_to_cputime64(__usecs)    usecs_to_cputime(__usecs)
 
 /*
  * Convert cputime <-> seconds
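
The one-liner above (mirrored by the matching cputime.h hunks for the other architectures further down) simply aliases the new 64-bit helper to the existing microsecond conversion. A minimal sketch of a caller, assuming a hypothetical accounting function and accumulator that are not part of this commit:

#include <asm/cputime.h>

/* Hypothetical caller: fold an elapsed delta, measured in microseconds,
 * into a 64-bit cputime accumulator via the newly added alias.
 * cputime64_add() is the era's helper for summing cputime64_t values. */
static void account_usec_delta(cputime64_t *acc, unsigned long delta_us)
{
	*acc = cputime64_add(*acc, usecs_to_cputime64(delta_us));
}
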
index 43f984e93970b8acbabbfd6554920a4b4fb265f4..303192fc9260d50f44cdf6e7ef0371a2fc41d9da 100644 (file)
 #define __NR_clock_adjtime     342
 #define __NR_syncfs            343
 #define __NR_setns             344
+#define __NR_process_vm_readv  345
+#define __NR_process_vm_writev 346
 
 #ifdef __KERNEL__
 
-#define NR_syscalls            345
+#define NR_syscalls            347
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
index c468f2edaa85ec0cd2356e392a0f0e7efddb8218..ce827b376110a6a815b4ccece0a066e5ad66cc59 100644 (file)
@@ -365,4 +365,6 @@ ENTRY(sys_call_table)
        .long sys_clock_adjtime
        .long sys_syncfs
        .long sys_setns
+       .long sys_process_vm_readv      /* 345 */
+       .long sys_process_vm_writev
 
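
With the syscall numbers defined and the table slots filled in above, the new cross-memory calls become reachable from userspace. A self-contained usage sketch via the raw syscall interface (it only assumes that the installed kernel headers export __NR_process_vm_readv; newer C libraries also provide a process_vm_readv() wrapper):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

/* Copy 'len' bytes from 'remote_addr' in process 'pid' into 'buf' using
 * process_vm_readv(pid, local_iov, liovcnt, remote_iov, riovcnt, flags). */
static ssize_t read_remote(pid_t pid, void *remote_addr, void *buf, size_t len)
{
	struct iovec local  = { .iov_base = buf,         .iov_len = len };
	struct iovec remote = { .iov_base = remote_addr, .iov_len = len };

	return syscall(__NR_process_vm_readv, pid, &local, 1, &remote, 1, 0UL);
}

int main(void)
{
	char src[] = "read through the cross-memory path";
	char dst[sizeof(src)];

	memset(dst, 0, sizeof(dst));
	/* Reading from our own pid keeps the demo self-contained. */
	if (read_remote(getpid(), src, dst, sizeof(src)) < 0) {
		perror("process_vm_readv");
		return EXIT_FAILURE;
	}
	printf("%s\n", dst);
	return EXIT_SUCCESS;
}
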
index 4f2971bcf8e5464577885b5ecc64db11c388d295..315fc0b250f8fe3373684f12f5c0437fd3d0b7b1 100644 (file)
@@ -623,7 +623,7 @@ static int mipspmu_event_init(struct perf_event *event)
        if (!atomic_inc_not_zero(&active_events)) {
                if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
                        atomic_dec(&active_events);
-                       return -ENOSPC;
+                       return -EINVAL;
                }
 
                mutex_lock(&pmu_reserve_mutex);
@@ -732,15 +732,15 @@ static int validate_group(struct perf_event *event)
        memset(&fake_cpuc, 0, sizeof(fake_cpuc));
 
        if (!validate_event(&fake_cpuc, leader))
-               return -ENOSPC;
+               return -EINVAL;
 
        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!validate_event(&fake_cpuc, sibling))
-                       return -ENOSPC;
+                       return -EINVAL;
        }
 
        if (!validate_event(&fake_cpuc, event))
-               return -ENOSPC;
+               return -EINVAL;
 
        return 0;
 }
index 1cf20bdfbecaada5bb2b88d439d776c32af2558c..98b7c4b49c9d0586cce4d7132593ee359c3fd803 100644 (file)
@@ -150,6 +150,8 @@ static inline cputime_t usecs_to_cputime(const unsigned long us)
        return ct;
 }
 
+#define usecs_to_cputime64(us)         usecs_to_cputime(us)
+
 /*
  * Convert cputime <-> seconds
  */
index d4df013ad77964353fdf5a59c8ed6afde4ed1ce2..69c7377d2071aa821fd1ff9529cafd4e49d10081 100644 (file)
@@ -381,39 +381,6 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
 }
 #endif
 
-static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
-                                            unsigned long pte_index)
-{
-       unsigned long rb, va_low;
-
-       rb = (v & ~0x7fUL) << 16;               /* AVA field */
-       va_low = pte_index >> 3;
-       if (v & HPTE_V_SECONDARY)
-               va_low = ~va_low;
-       /* xor vsid from AVA */
-       if (!(v & HPTE_V_1TB_SEG))
-               va_low ^= v >> 12;
-       else
-               va_low ^= v >> 24;
-       va_low &= 0x7ff;
-       if (v & HPTE_V_LARGE) {
-               rb |= 1;                        /* L field */
-               if (cpu_has_feature(CPU_FTR_ARCH_206) &&
-                   (r & 0xff000)) {
-                       /* non-16MB large page, must be 64k */
-                       /* (masks depend on page size) */
-                       rb |= 0x1000;           /* page encoding in LP field */
-                       rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
-                       rb |= (va_low & 0xfe);  /* AVAL field (P7 doesn't seem to care) */
-               }
-       } else {
-               /* 4kB page */
-               rb |= (va_low & 0x7ff) << 12;   /* remaining 11b of VA */
-       }
-       rb |= (v >> 54) & 0x300;                /* B field */
-       return rb;
-}
-
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
 #define OSI_SC_MAGIC_R3                        0x113724FA
index e43fe42b9875308b49e13faa8a67ef91fe125200..d0ac94f98f9e3a2dcca21ff90631b841e4ab5060 100644 (file)
@@ -29,4 +29,37 @@ static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu)
 
 #define SPAPR_TCE_SHIFT                12
 
+static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
+                                            unsigned long pte_index)
+{
+       unsigned long rb, va_low;
+
+       rb = (v & ~0x7fUL) << 16;               /* AVA field */
+       va_low = pte_index >> 3;
+       if (v & HPTE_V_SECONDARY)
+               va_low = ~va_low;
+       /* xor vsid from AVA */
+       if (!(v & HPTE_V_1TB_SEG))
+               va_low ^= v >> 12;
+       else
+               va_low ^= v >> 24;
+       va_low &= 0x7ff;
+       if (v & HPTE_V_LARGE) {
+               rb |= 1;                        /* L field */
+               if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+                   (r & 0xff000)) {
+                       /* non-16MB large page, must be 64k */
+                       /* (masks depend on page size) */
+                       rb |= 0x1000;           /* page encoding in LP field */
+                       rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
+                       rb |= (va_low & 0xfe);  /* AVAL field (P7 doesn't seem to care) */
+               }
+       } else {
+               /* 4kB page */
+               rb |= (va_low & 0x7ff) << 12;   /* remaining 11b of VA */
+       }
+       rb |= (v >> 54) & 0x300;                /* B field */
+       return rb;
+}
+
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
index 0cb137a9b0381f1175d5962c67aac18f566af662..336983da9e726c1e144b9a29be03934308800958 100644 (file)
@@ -538,7 +538,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
        tpaca->kvm_hstate.napping = 0;
        vcpu->cpu = vc->pcpu;
        smp_wmb();
-#ifdef CONFIG_PPC_ICP_NATIVE
+#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
        if (vcpu->arch.ptid) {
                tpaca->cpu_start = 0x80;
                wmb();
index 3c791e1eb675299c96e7b428b499e7bec4e5f434..e2cfb9e1e20ebdea21c726e946e1f37aef1c8c6f 100644 (file)
@@ -658,10 +658,12 @@ program_interrupt:
                        ulong cmd = kvmppc_get_gpr(vcpu, 3);
                        int i;
 
+#ifdef CONFIG_KVM_BOOK3S_64_PR
                        if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
                                r = RESUME_GUEST;
                                break;
                        }
+#endif
 
                        run->papr_hcall.nr = cmd;
                        for (i = 0; i < 9; ++i) {
index 26d20903f2bc5c8cce1a94637f58ea9dea73a696..8c0d45a6faf7f49db9aef9c9a495c1ef20d155d8 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/kvm_host.h>
 #include <linux/slab.h>
 #include <linux/err.h>
+#include <linux/export.h>
 
 #include <asm/reg.h>
 #include <asm/cputable.h>
index 0814348782966ac99c571990c89975d06aedffe1..b9acaaa175d8af35a25ba42d8157f5f5bec299df 100644 (file)
@@ -87,6 +87,8 @@ usecs_to_cputime(const unsigned int m)
        return (cputime_t) m * 4096;
 }
 
+#define usecs_to_cputime64(m)          usecs_to_cputime(m)
+
 /*
  * Convert cputime to milliseconds and back.
  */
index 524d23b8610ceb65c42661a97e79226125dbadf3..4f289ff0b7fe27b7de54d8886b6aee0b8890066f 100644 (file)
@@ -599,10 +599,10 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
        skey = page_get_storage_key(address);
        bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
        /* Clear page changed & referenced bit in the storage key */
-       if (bits) {
-               skey ^= bits;
-               page_set_storage_key(address, skey, 1);
-       }
+       if (bits & _PAGE_CHANGED)
+               page_set_storage_key(address, skey ^ bits, 1);
+       else if (bits)
+               page_reset_referenced(address);
        /* Transfer page changed & referenced bit to guest bits in pgste */
        pgste_val(pgste) |= bits << 48;         /* RCP_GR_BIT & RCP_GC_BIT */
        /* Get host changed & referenced bits from pgste */
index 450931a45b684b2044106921a66a4702e1eb93f1..573bc29551ef471fee58b89d02df0ea0ff956503 100644 (file)
@@ -296,13 +296,6 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
                     ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
                        /* Invalid psw mask. */
                        return -EINVAL;
-               if (addr == (addr_t) &dummy->regs.psw.addr)
-                       /*
-                        * The debugger changed the instruction address,
-                        * reset system call restart, see signal.c:do_signal
-                        */
-                       task_thread_info(child)->system_call = 0;
-
                *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
 
        } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
@@ -614,11 +607,6 @@ static int __poke_user_compat(struct task_struct *child,
                        /* Transfer 31 bit amode bit to psw mask. */
                        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
                                (__u64)(tmp & PSW32_ADDR_AMODE);
-                       /*
-                        * The debugger changed the instruction address,
-                        * reset system call restart, see signal.c:do_signal
-                        */
-                       task_thread_info(child)->system_call = 0;
                } else {
                        /* gpr 0-15 */
                        *(__u32*)((addr_t) &regs->psw + addr*2 + 4) = tmp;
@@ -905,6 +893,14 @@ static int s390_last_break_get(struct task_struct *target,
        return 0;
 }
 
+static int s390_last_break_set(struct task_struct *target,
+                              const struct user_regset *regset,
+                              unsigned int pos, unsigned int count,
+                              const void *kbuf, const void __user *ubuf)
+{
+       return 0;
+}
+
 #endif
 
 static int s390_system_call_get(struct task_struct *target,
@@ -951,6 +947,7 @@ static const struct user_regset s390_regsets[] = {
                .size = sizeof(long),
                .align = sizeof(long),
                .get = s390_last_break_get,
+               .set = s390_last_break_set,
        },
 #endif
        [REGSET_SYSTEM_CALL] = {
@@ -1116,6 +1113,14 @@ static int s390_compat_last_break_get(struct task_struct *target,
        return 0;
 }
 
+static int s390_compat_last_break_set(struct task_struct *target,
+                                     const struct user_regset *regset,
+                                     unsigned int pos, unsigned int count,
+                                     const void *kbuf, const void __user *ubuf)
+{
+       return 0;
+}
+
 static const struct user_regset s390_compat_regsets[] = {
        [REGSET_GENERAL] = {
                .core_note_type = NT_PRSTATUS,
@@ -1139,6 +1144,7 @@ static const struct user_regset s390_compat_regsets[] = {
                .size = sizeof(long),
                .align = sizeof(long),
                .get = s390_compat_last_break_get,
+               .set = s390_compat_last_break_set,
        },
        [REGSET_SYSTEM_CALL] = {
                .core_note_type = NT_S390_SYSTEM_CALL,
index e58a462949b164ea90c5697696b9ccec47bb7ff4..e54c4ff8abaaa3d1a34efd34decd0d12c713e4ef 100644 (file)
@@ -579,7 +579,7 @@ static unsigned long __init find_crash_base(unsigned long crash_size,
                *msg = "first memory chunk must be at least crashkernel size";
                return 0;
        }
-       if (is_kdump_kernel() && (crash_size == OLDMEM_SIZE))
+       if (OLDMEM_BASE && crash_size == OLDMEM_SIZE)
                return OLDMEM_BASE;
 
        for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
index 05a85bc14c98a2556e86bf40cec7e76de2dc2423..7f6f9f35454518f091e4fb86e3d39e46c8aaf391 100644 (file)
@@ -460,9 +460,9 @@ void do_signal(struct pt_regs *regs)
                                                     regs->svc_code >> 16);
                                break;
                        }
-                       /* No longer in a system call */
-                       clear_thread_flag(TIF_SYSCALL);
                }
+               /* No longer in a system call */
+               clear_thread_flag(TIF_SYSCALL);
 
                if ((is_compat_task() ?
                     handle_signal32(signr, &ka, &info, oldset, regs) :
@@ -486,6 +486,7 @@ void do_signal(struct pt_regs *regs)
        }
 
        /* No handlers present - check for system call restart */
+       clear_thread_flag(TIF_SYSCALL);
        if (current_thread_info()->system_call) {
                regs->svc_code = current_thread_info()->system_call;
                switch (regs->gprs[2]) {
@@ -500,9 +501,6 @@ void do_signal(struct pt_regs *regs)
                        regs->gprs[2] = regs->orig_gpr2;
                        set_thread_flag(TIF_SYSCALL);
                        break;
-               default:
-                       clear_thread_flag(TIF_SYSCALL);
-                       break;
                }
        }
 
index 6efc18b5e60af4e3ce202edb5e381fe2ab45c03e..bd58b72454cf52c62b76bc049547931c74e020e6 100644 (file)
@@ -88,7 +88,7 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf,
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        if (oprofile_started)
index ec8c84c14b17219cd22c08a4d7e1dfdd9c3da0ec..895e337c79b60ed7477f754f968ca80a9a52d754 100644 (file)
@@ -50,9 +50,9 @@ static struct platform_device heartbeat_device = {
 #define GBECONT                0xffc10100
 #define GBECONT_RMII1  BIT(17)
 #define GBECONT_RMII0  BIT(16)
-static void sh7757_eth_set_mdio_gate(unsigned long addr)
+static void sh7757_eth_set_mdio_gate(void *addr)
 {
-       if ((addr & 0x00000fff) < 0x0800)
+       if (((unsigned long)addr & 0x00000fff) < 0x0800)
                writel(readl(GBECONT) | GBECONT_RMII0, GBECONT);
        else
                writel(readl(GBECONT) | GBECONT_RMII1, GBECONT);
@@ -116,9 +116,9 @@ static struct platform_device sh7757_eth1_device = {
        },
 };
 
-static void sh7757_eth_giga_set_mdio_gate(unsigned long addr)
+static void sh7757_eth_giga_set_mdio_gate(void *addr)
 {
-       if ((addr & 0x00000fff) < 0x0800) {
+       if (((unsigned long)addr & 0x00000fff) < 0x0800) {
                gpio_set_value(GPIO_PTT4, 1);
                writel(readl(GBECONT) & ~GBECONT_RMII0, GBECONT);
        } else {
@@ -210,8 +210,12 @@ static struct resource sh_mmcif_resources[] = {
 };
 
 static struct sh_mmcif_dma sh7757lcr_mmcif_dma = {
-       .chan_priv_tx   = SHDMA_SLAVE_MMCIF_TX,
-       .chan_priv_rx   = SHDMA_SLAVE_MMCIF_RX,
+       .chan_priv_tx   = {
+               .slave_id = SHDMA_SLAVE_MMCIF_TX,
+       },
+       .chan_priv_rx   = {
+               .slave_id = SHDMA_SLAVE_MMCIF_RX,
+       }
 };
 
 static struct sh_mmcif_plat_data sh_mmcif_plat = {
index b4c2d2b946ddc084bfe48b2d58128d47e2f724aa..e4dd5d5a111506889b5a69284355721fc23c80f7 100644 (file)
@@ -49,7 +49,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
        return oprofile_perf_init(ops);
 }
 
-void __exit oprofile_arch_exit(void)
+void oprofile_arch_exit(void)
 {
        oprofile_perf_exit();
        kfree(sh_pmu_op_name);
@@ -60,5 +60,5 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
        ops->backtrace = sh_backtrace;
        return -ENODEV;
 }
-void __exit oprofile_arch_exit(void) {}
+void oprofile_arch_exit(void) {}
 #endif /* CONFIG_HW_PERF_EVENTS */
index 7429b47c3acad8adb97ff177c4459fafcb1ae1e3..381edcd5bc2946471e2c47e4f73a607272d25c09 100644 (file)
@@ -1181,13 +1181,11 @@ static int __devinit ds_probe(struct vio_dev *vdev,
 
        dp->rcv_buf_len = 4096;
 
-       dp->ds_states = kzalloc(sizeof(ds_states_template),
-                               GFP_KERNEL);
+       dp->ds_states = kmemdup(ds_states_template,
+                               sizeof(ds_states_template), GFP_KERNEL);
        if (!dp->ds_states)
                goto out_free_rcv_buf;
 
-       memcpy(dp->ds_states, ds_states_template,
-              sizeof(ds_states_template));
        dp->num_ds_states = ARRAY_SIZE(ds_states_template);
 
        for (i = 0; i < dp->num_ds_states; i++)
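
The hunk above (like the matching one in the of_set_property() change below) folds an allocate-then-copy pair into a single kmemdup() call. A small illustrative helper, not taken from this commit, showing the equivalence:

#include <linux/slab.h>		/* kzalloc() */
#include <linux/string.h>	/* kmemdup(), memcpy() */

/* Illustrative only: duplicate 'len' bytes of 'src' into a fresh
 * GFP_KERNEL allocation, returning NULL on failure.  The body replaces
 * what the removed open-coded pair boiled down to:
 *
 *	dst = kzalloc(len, GFP_KERNEL);
 *	if (!dst)
 *		return NULL;
 *	memcpy(dst, src, len);
 *	return dst;
 */
static void *dup_blob(const void *src, size_t len)
{
	return kmemdup(src, len, GFP_KERNEL);
}
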
index b272cda35a0125904bc6f17eb1c5d14054dec24e..af5755d20fbe91eb43b682e99aa8f836bfafa6ad 100644 (file)
@@ -849,10 +849,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
        if (!irq)
                return -ENOMEM;
 
-       if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
-               return -EINVAL;
        if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
                return -EINVAL;
+       if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
+               return -EINVAL;
 
        return irq;
 }
index 46614807a57f45150f89dbb1cb5ee115beaad28c..741df916c124b10b12751da38bf7d3dac335c5dd 100644 (file)
@@ -58,12 +58,10 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
        void *new_val;
        int err;
 
-       new_val = kmalloc(len, GFP_KERNEL);
+       new_val = kmemdup(val, len, GFP_KERNEL);
        if (!new_val)
                return -ENOMEM;
 
-       memcpy(new_val, val, len);
-
        err = -ENODEV;
 
        mutex_lock(&of_set_property_mutex);
index 5175ac2f4820c603d55e5fba0c06b735080edf96..8a7f81743c126b1b8648584e20504dce6111dd48 100644 (file)
@@ -302,8 +302,7 @@ void __init btfixup(void)
                                case 'i':       /* INT */
                                        if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
                                                set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
-                                       else if ((insn & 0x80002000) == 0x80002000 &&
-                                                (insn & 0x01800000) != 0x01800000) /* %LO */
+                                       else if ((insn & 0x80002000) == 0x80002000) /* %LO */
                                                set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
                                        else {
                                                prom_printf(insn_i, p, addr, insn);
index 94e9a511de849c925bcf28841368ce278feade0c..f80f8ceabc67abd6ef57fce76cb1a224f53204c8 100644 (file)
@@ -74,16 +74,6 @@ enum {
  */
 void tile_irq_activate(unsigned int irq, int tile_irq_type);
 
-/*
- * For onboard, non-PCI (e.g. TILE_IRQ_PERCPU) devices, drivers know
- * how to use enable/disable_percpu_irq() to manage interrupts on each
- * core.  We can't use the generic enable/disable_irq() because they
- * use a single reference count per irq, rather than per cpu per irq.
- */
-void enable_percpu_irq(unsigned int irq);
-void disable_percpu_irq(unsigned int irq);
-
-
 void setup_irq_regs(void);
 
 #endif /* _ASM_TILE_IRQ_H */
index aa0134db2dd683e4f2bde38823bcabc931f7d8ef..02e62806501256ad63f322d26461bd34f8bcc4e9 100644 (file)
@@ -152,14 +152,13 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
  * Remove an irq from the disabled mask.  If we're in an interrupt
  * context, defer enabling the HW interrupt until we leave.
  */
-void enable_percpu_irq(unsigned int irq)
+static void tile_irq_chip_enable(struct irq_data *d)
 {
-       get_cpu_var(irq_disable_mask) &= ~(1UL << irq);
+       get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
        if (__get_cpu_var(irq_depth) == 0)
-               unmask_irqs(1UL << irq);
+               unmask_irqs(1UL << d->irq);
        put_cpu_var(irq_disable_mask);
 }
-EXPORT_SYMBOL(enable_percpu_irq);
 
 /*
  * Add an irq to the disabled mask.  We disable the HW interrupt
@@ -167,13 +166,12 @@ EXPORT_SYMBOL(enable_percpu_irq);
  * in an interrupt context, the return path is careful to avoid
  * unmasking a newly disabled interrupt.
  */
-void disable_percpu_irq(unsigned int irq)
+static void tile_irq_chip_disable(struct irq_data *d)
 {
-       get_cpu_var(irq_disable_mask) |= (1UL << irq);
-       mask_irqs(1UL << irq);
+       get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
+       mask_irqs(1UL << d->irq);
        put_cpu_var(irq_disable_mask);
 }
-EXPORT_SYMBOL(disable_percpu_irq);
 
 /* Mask an interrupt. */
 static void tile_irq_chip_mask(struct irq_data *d)
@@ -209,6 +207,8 @@ static void tile_irq_chip_eoi(struct irq_data *d)
 
 static struct irq_chip tile_irq_chip = {
        .name = "tile_irq_chip",
+       .irq_enable = tile_irq_chip_enable,
+       .irq_disable = tile_irq_chip_disable,
        .irq_ack = tile_irq_chip_ack,
        .irq_eoi = tile_irq_chip_eoi,
        .irq_mask = tile_irq_chip_mask,
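
The tile change folds the exported enable/disable_percpu_irq() helpers into the irq_chip's .irq_enable/.irq_disable callbacks, so the generic enable_irq()/disable_irq() paths reach them through irq_data. A hedged sketch of that shape (the demo_* names are placeholders, not the tile code):

```c
#include <linux/irq.h>

/* Placeholder hardware mask helpers, purely illustrative. */
static void demo_hw_mask(unsigned long bits) { }
static void demo_hw_unmask(unsigned long bits) { }

static void demo_chip_enable(struct irq_data *d)
{
	/* The irq number travels in irq_data, so no exported
	 * per-irq wrapper function is needed any more. */
	demo_hw_unmask(1UL << d->irq);
}

static void demo_chip_disable(struct irq_data *d)
{
	demo_hw_mask(1UL << d->irq);
}

static struct irq_chip demo_irq_chip = {
	.name        = "demo_irq_chip",
	.irq_enable  = demo_chip_enable,
	.irq_disable = demo_chip_disable,
};
```
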
index 658f2ce426a44ef5fefab5852cfbe05dc796953b..b3ed19f8779c4a9058ea818bc8fb31a6a75ef34b 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/vmalloc.h>
+#include <linux/export.h>
 #include <asm/tlbflush.h>
 #include <asm/homecache.h>
 
index 2a8014cb1ff52f0ef3e24a58a5e20ddcfeb3c8cc..9d610d3fb11e9ac93c6e17875cff26257fa89e46 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/uaccess.h>
+#include <linux/export.h>
 
 #include <asm/processor.h>
 #include <asm/sections.h>
index b671a86f45152155bf0170504bdf2511112cdfb0..602908268093cf53397df895e68e240cc601d552 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/cpu.h>
 #include <linux/slab.h>
 #include <linux/smp.h>
+#include <linux/stat.h>
 #include <hv/hypervisor.h>
 
 /* Return a string queried from the hypervisor, truncated to page size. */
index a87d2a859ba97de91db6e8769cb8ecb1bcd78e49..2a81d32de0da518989e5a5118ab91b77774e3e7f 100644 (file)
@@ -39,6 +39,9 @@ EXPORT_SYMBOL(finv_user_asm);
 EXPORT_SYMBOL(current_text_addr);
 EXPORT_SYMBOL(dump_stack);
 
+/* arch/tile/kernel/head.S */
+EXPORT_SYMBOL(empty_zero_page);
+
 /* arch/tile/lib/, various memcpy files */
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(__copy_to_user_inatomic);
index cbe6f4f9eca3c93c43039be31238fdeb56ee36dc..1cc6ae477c98b59711c29deb5674e50c63c96be3 100644 (file)
@@ -449,9 +449,12 @@ void homecache_free_pages(unsigned long addr, unsigned int order)
        VM_BUG_ON(!virt_addr_valid((void *)addr));
        page = virt_to_page((void *)addr);
        if (put_page_testzero(page)) {
-               int pages = (1 << order);
                homecache_change_page_home(page, order, initial_page_home());
-               while (pages--)
-                       __free_page(page++);
+               if (order == 0) {
+                       free_hot_cold_page(page, 0);
+               } else {
+                       init_page_count(page);
+                       __free_pages(page, order);
+               }
        }
 }
index cb9a1044a771be75563305f8909097a67cd21778..efb42949cc09349e37246baa07f018648186386f 100644 (file)
@@ -390,7 +390,7 @@ config X86_INTEL_CE
          This option compiles in support for the CE4100 SOC for settop
          boxes and media devices.
 
-config X86_INTEL_MID
+config X86_WANT_INTEL_MID
        bool "Intel MID platform support"
        depends on X86_32
        depends on X86_EXTENDED_PLATFORM
@@ -399,7 +399,10 @@ config X86_INTEL_MID
          systems which do not have the PCI legacy interfaces (Moorestown,
          Medfield). If you are building for a PC class system say N here.
 
-if X86_INTEL_MID
+if X86_WANT_INTEL_MID
+
+config X86_INTEL_MID
+       bool
 
 config X86_MRST
        bool "Moorestown MID platform"
@@ -411,6 +414,7 @@ config X86_MRST
        select SPI
        select INTEL_SCU_IPC
        select X86_PLATFORM_DEVICES
+       select X86_INTEL_MID
        ---help---
          Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin
          Internet Device(MID) platform. Moorestown consists of two chips:
index 4420993acc4734c962922d58ee63e0b700cf330e..925b605eb5c601fa9a6f6c24cf41e596b92d86f8 100644 (file)
@@ -3,11 +3,15 @@
 
 #include <linux/notifier.h>
 
-#define IPCMSG_VRTC    0xFA     /* Set vRTC device */
-
-/* Command id associated with message IPCMSG_VRTC */
-#define IPC_CMD_VRTC_SETTIME      1 /* Set time */
-#define IPC_CMD_VRTC_SETALARM     2 /* Set alarm */
+#define IPCMSG_WARM_RESET      0xF0
+#define IPCMSG_COLD_RESET      0xF1
+#define IPCMSG_SOFT_RESET      0xF2
+#define IPCMSG_COLD_BOOT       0xF3
+
+#define IPCMSG_VRTC            0xFA     /* Set vRTC device */
+       /* Command id associated with message IPCMSG_VRTC */
+       #define IPC_CMD_VRTC_SETTIME      1 /* Set time */
+       #define IPC_CMD_VRTC_SETALARM     2 /* Set alarm */
 
 /* Read single register */
 int intel_scu_ipc_ioread8(u16 addr, u8 *data);
index e6283129c821014eba1afcbbb1bdd042b6b73e04..93f79094c2243211eede22db91acdf33098a059a 100644 (file)
@@ -31,11 +31,20 @@ enum mrst_cpu_type {
 };
 
 extern enum mrst_cpu_type __mrst_cpu_chip;
+
+#ifdef CONFIG_X86_INTEL_MID
+
 static inline enum mrst_cpu_type mrst_identify_cpu(void)
 {
        return __mrst_cpu_chip;
 }
 
+#else /* !CONFIG_X86_INTEL_MID */
+
+#define mrst_identify_cpu()    (0)
+
+#endif /* !CONFIG_X86_INTEL_MID */
+
 enum mrst_timer_options {
        MRST_TIMER_DEFAULT,
        MRST_TIMER_APBT_ONLY,
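
With this hunk, mrst_identify_cpu() collapses to 0 when CONFIG_X86_INTEL_MID is off, which is what lets generic code (see the rtc.c hunk further down) test for the platform without its own #ifdefs. A generic sketch of the pattern, using hypothetical CONFIG_DEMO_FEATURE/demo_* names:

```c
/* In a header: callers can always write "if (demo_feature_present())";
 * the disabled configuration folds to a compile-time constant 0. */
#ifdef CONFIG_DEMO_FEATURE
extern int demo_feature_state;

static inline int demo_feature_present(void)
{
	return demo_feature_state;
}
#else
#define demo_feature_present()	(0)
#endif
```
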
index 084ef95274cd78ceb51b1ea7a208a7a5e486199a..95203d40ffdde69d014c986453905280b49ee9e5 100644 (file)
@@ -169,7 +169,14 @@ static inline int wrmsr_safe(unsigned msr, unsigned low, unsigned high)
        return native_write_msr_safe(msr, low, high);
 }
 
-/* rdmsr with exception handling */
+/*
+ * rdmsr with exception handling.
+ *
+ * Please note that the exception handling works only after we've
+ * switched to the "smart" #GP handler in trap_init() which knows about
+ * exception tables - using this macro earlier than that causes machine
+ * hangs on boxes which do not implement the @msr in the first argument.
+ */
 #define rdmsr_safe(msr, p1, p2)                                        \
 ({                                                             \
        int __err;                                              \
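
The new comment warns that rdmsr_safe() only becomes safe once trap_init() has installed the exception-table-aware #GP handler; the amd.c hunk below moves the microcode patch-level read into init_amd() for exactly that reason. A minimal hedged example of probing an MSR that may not exist:

```c
#include <linux/types.h>
#include <asm/msr.h>   /* rdmsr_safe(), MSR_AMD64_PATCH_LEVEL */

/* Returns 0 and fills *level on success, non-zero if the MSR access
 * faulted.  Only safe after trap_init(). */
static int demo_read_patch_level(u32 *level)
{
	u32 dummy;

	return rdmsr_safe(MSR_AMD64_PATCH_LEVEL, level, &dummy);
}
```
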
index c2ff2a1d845e402249e44a70e41459805c3faaa8..2d2f01ce6dcbf1a9c8b72ebef77a159e1b3a5b1a 100644 (file)
@@ -401,6 +401,7 @@ extern unsigned long arch_align_stack(unsigned long sp);
 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 
 void default_idle(void);
+bool set_pm_idle_to_default(void);
 
 void stop_this_cpu(void *dummy);
 
index fa7b9176b76cb33820034403fd8f4a50dc49709c..431793e5d4846f23bf5947f933fdaf9f5ae1987a 100644 (file)
@@ -32,6 +32,22 @@ extern int no_timer_check;
  *  (mathieu.desnoyers@polymtl.ca)
  *
  *                     -johnstul@us.ibm.com "math is hard, lets go shopping!"
+ *
+ * In:
+ *
+ * ns = cycles * cyc2ns_scale / SC
+ *
+ * Although we may still have enough bits to store the value of ns,
+ * in some cases, we may not have enough bits to store cycles * cyc2ns_scale,
+ * leading to an incorrect result.
+ *
+ * To avoid this, we can decompose 'cycles' into quotient and remainder
+ * of division by SC.  Then,
+ *
+ * ns = (quot * SC + rem) * cyc2ns_scale / SC
+ *    = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC
+ *
+ *                     - sqazi@google.com
  */
 
 DECLARE_PER_CPU(unsigned long, cyc2ns);
@@ -41,9 +57,14 @@ DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);
 
 static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
 {
+       unsigned long long quot;
+       unsigned long long rem;
        int cpu = smp_processor_id();
        unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
-       ns += cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR;
+       quot = (cyc >> CYC2NS_SCALE_FACTOR);
+       rem = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1);
+       ns += quot * per_cpu(cyc2ns, cpu) +
+               ((rem * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR);
        return ns;
 }
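
The decomposition described in the comment above can be checked in plain C: for large cycle counts the intermediate product cyc * cyc2ns_scale can exceed 64 bits even when the final nanosecond value fits, while the split form stays in range. A small self-contained demonstration (the CYC2NS_SCALE_FACTOR value is assumed for illustration):

```c
#include <stdio.h>
#include <stdint.h>

#define CYC2NS_SCALE_FACTOR 10	/* assumed here; SC = 2^10 */

/* Naive form: the intermediate product can overflow 64 bits. */
static uint64_t ns_naive(uint64_t cyc, uint64_t scale)
{
	return (cyc * scale) >> CYC2NS_SCALE_FACTOR;
}

/* Split form from the patch: the quotient contributes quot * scale
 * directly; only the small remainder needs the multiply-then-shift. */
static uint64_t ns_split(uint64_t cyc, uint64_t scale)
{
	uint64_t quot = cyc >> CYC2NS_SCALE_FACTOR;
	uint64_t rem  = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1);

	return quot * scale + ((rem * scale) >> CYC2NS_SCALE_FACTOR);
}

int main(void)
{
	uint64_t cyc = 1ULL << 60, scale = 1ULL << 12;

	/* The naive product wraps around; the split result is exact (2^62). */
	printf("naive: %llu\nsplit: %llu\n",
	       (unsigned long long)ns_naive(cyc, scale),
	       (unsigned long long)ns_split(cyc, scale));
	return 0;
}
```
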
 
index 10474fb1185df7e30f52101cea4e358a18e39458..cf1d73643f60723dc514b52e66969ed46a9c01a4 100644 (file)
@@ -57,6 +57,7 @@
 
 #define UV1_HUB_PART_NUMBER    0x88a5
 #define UV2_HUB_PART_NUMBER    0x8eb8
+#define UV2_HUB_PART_NUMBER_X  0x1111
 
 /* Compat: if this #define is present, UV headers support UV2 */
 #define UV2_HUB_IS_SUPPORTED   1
index 62ae3001ae02c4348d640dc01f67fd055718deb0..9d59bbacd4e3cb7a76474a59e2965c739140d475 100644 (file)
@@ -93,6 +93,8 @@ static int __init early_get_pnodeid(void)
 
        if (node_id.s.part_number == UV2_HUB_PART_NUMBER)
                uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
+       if (node_id.s.part_number == UV2_HUB_PART_NUMBER_X)
+               uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
 
        uv_hub_info->hub_revision = uv_min_hub_revision_id;
        pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
index c7e46cb353279080f2b1f67b34e283f0086a5e83..0bab2b18bb2099c4290f046bb210035b05083869 100644 (file)
@@ -442,8 +442,6 @@ static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
 
 static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 {
-       u32 dummy;
-
        early_init_amd_mc(c);
 
        /*
@@ -473,12 +471,12 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
                        set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
        }
 #endif
-
-       rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
 }
 
 static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 {
+       u32 dummy;
+
 #ifdef CONFIG_SMP
        unsigned long long value;
 
@@ -657,6 +655,8 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                        checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
                }
        }
+
+       rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
 }
 
 #ifdef CONFIG_X86_32
index a71efcdbb0925ffe7c2f97d58d0625e4179662fc..97b26356e9ee8b022b45ae1adc5ef7628a5e2539 100644 (file)
@@ -547,6 +547,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
 
                if (tmp != mask_lo) {
                        printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
+                       add_taint(TAINT_FIRMWARE_WORKAROUND);
                        mask_lo = tmp;
                }
        }
@@ -693,6 +694,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
 
        /* Disable MTRRs, and set the default type to uncached */
        mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
+       wbinvd();
 }
 
 static void post_set(void) __releases(set_atomicity_lock)
index 640891014b2ae3dccdef498db11f05a0942145e5..2bda212a0010ca561e8f34a9d56c2502b47ff70b 100644 (file)
@@ -312,12 +312,8 @@ int x86_setup_perfctr(struct perf_event *event)
                        return -EOPNOTSUPP;
        }
 
-       /*
-        * Do not allow config1 (extended registers) to propagate,
-        * there's no sane user-space generalization yet:
-        */
        if (attr->type == PERF_TYPE_RAW)
-               return 0;
+               return x86_pmu_extra_regs(event->attr.config, event);
 
        if (attr->type == PERF_TYPE_HW_CACHE)
                return set_ext_hw_attr(hwc, event);
@@ -588,7 +584,7 @@ done:
                                x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
                }
        }
-       return num ? -ENOSPC : 0;
+       return num ? -EINVAL : 0;
 }
 
 /*
@@ -607,7 +603,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
 
        if (is_x86_event(leader)) {
                if (n >= max_count)
-                       return -ENOSPC;
+                       return -EINVAL;
                cpuc->event_list[n] = leader;
                n++;
        }
@@ -620,7 +616,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
                        continue;
 
                if (n >= max_count)
-                       return -ENOSPC;
+                       return -EINVAL;
 
                cpuc->event_list[n] = event;
                n++;
@@ -1316,7 +1312,7 @@ static int validate_event(struct perf_event *event)
        c = x86_pmu.get_event_constraints(fake_cpuc, event);
 
        if (!c || !c->weight)
-               ret = -ENOSPC;
+               ret = -EINVAL;
 
        if (x86_pmu.put_event_constraints)
                x86_pmu.put_event_constraints(fake_cpuc, event);
@@ -1341,7 +1337,7 @@ static int validate_group(struct perf_event *event)
 {
        struct perf_event *leader = event->group_leader;
        struct cpu_hw_events *fake_cpuc;
-       int ret = -ENOSPC, n;
+       int ret = -EINVAL, n;
 
        fake_cpuc = allocate_fake_cpuc();
        if (IS_ERR(fake_cpuc))
index ab6343d21825d7d9328fae81fd6faef3bf092a13..3b8a2d30d14e8ebeb2c58406212fbbd3c79ba935 100644 (file)
@@ -199,8 +199,7 @@ static int force_ibs_eilvt_setup(void)
                goto out;
        }
 
-       pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
-       pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
+       pr_info("IBS: LVT offset %d assigned\n", offset);
 
        return 0;
 out:
@@ -265,19 +264,23 @@ perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *h
 static __init int amd_ibs_init(void)
 {
        u32 caps;
-       int ret;
+       int ret = -EINVAL;
 
        caps = __get_ibs_caps();
        if (!caps)
                return -ENODEV; /* ibs not supported by the cpu */
 
-       if (!ibs_eilvt_valid()) {
-               ret = force_ibs_eilvt_setup();
-               if (ret) {
-                       pr_err("Failed to setup IBS, %d\n", ret);
-                       return ret;
-               }
-       }
+       /*
+        * Force LVT offset assignment for family 10h: The offsets are
+        * not assigned by the BIOS for this family, so the OS is
+        * responsible for doing it. If the OS assignment fails, fall
+        * back to BIOS settings and try to setup this.
+        */
+       if (boot_cpu_data.x86 == 0x10)
+               force_ibs_eilvt_setup();
+
+       if (!ibs_eilvt_valid())
+               goto out;
 
        get_online_cpus();
        ibs_caps = caps;
@@ -287,7 +290,11 @@ static __init int amd_ibs_init(void)
        smp_call_function(setup_APIC_ibs, NULL, 1);
        put_online_cpus();
 
-       return perf_event_ibs_init();
+       ret = perf_event_ibs_init();
+out:
+       if (ret)
+               pr_err("Failed to setup IBS, %d\n", ret);
+       return ret;
 }
 
 /* Since we need the pci subsystem to init ibs we can't do this earlier: */
index 2be5ebe9987209d41e76aede9e28ee9271070a34..121f1be4da19430426c45d1c17fce6d8d2d7037e 100644 (file)
@@ -1169,7 +1169,7 @@ again:
                 */
                c = &unconstrained;
        } else if (intel_try_alt_er(event, orig_idx)) {
-               raw_spin_unlock(&era->lock);
+               raw_spin_unlock_irqrestore(&era->lock, flags);
                goto again;
        }
        raw_spin_unlock_irqrestore(&era->lock, flags);
@@ -1545,6 +1545,13 @@ static void intel_clovertown_quirks(void)
        x86_pmu.pebs_constraints = NULL;
 }
 
+static void intel_sandybridge_quirks(void)
+{
+       printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
+       x86_pmu.pebs = 0;
+       x86_pmu.pebs_constraints = NULL;
+}
+
 __init int intel_pmu_init(void)
 {
        union cpuid10_edx edx;
@@ -1694,6 +1701,7 @@ __init int intel_pmu_init(void)
                break;
 
        case 42: /* SandyBridge */
+               x86_pmu.quirks = intel_sandybridge_quirks;
        case 45: /* SandyBridge, "Romely-EP" */
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
index c0d238f49db843cbd98bea7a7bc68b700fc40d7f..73da6b64f5b788ccbb83eef3317c32161b755fca 100644 (file)
@@ -493,6 +493,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
        unsigned long from = cpuc->lbr_entries[0].from;
        unsigned long old_to, to = cpuc->lbr_entries[0].to;
        unsigned long ip = regs->ip;
+       int is_64bit = 0;
 
        /*
         * We don't need to fixup if the PEBS assist is fault like
@@ -544,7 +545,10 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
                } else
                        kaddr = (void *)to;
 
-               kernel_insn_init(&insn, kaddr);
+#ifdef CONFIG_X86_64
+               is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
+#endif
+               insn_init(&insn, kaddr, is_64bit);
                insn_get_length(&insn);
                to += insn.length;
        } while (to < ip);
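
Here the PEBS fixup switches from kernel_insn_init() to insn_init() so it can pass the correct bitness when the sample points into 32-bit user code. A hedged sketch of the decoder usage as it appears in this tree (three-argument insn_init(); later kernels add a buffer-length parameter):

```c
#include <asm/insn.h>

/* Decode one x86 instruction at 'kaddr' and return its length. */
static int demo_insn_len(const void *kaddr, int is_64bit)
{
	struct insn insn;

	insn_init(&insn, kaddr, is_64bit);
	insn_get_length(&insn);	/* fills insn.length */
	return insn.length;
}
```
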
index 492bf1358a7c388a9e252e8f031d6c6122e540d8..ef484d9d0a251b0128a164486096ce43c4ea8f4c 100644 (file)
@@ -1268,7 +1268,7 @@ reserve:
        }
 
 done:
-       return num ? -ENOSPC : 0;
+       return num ? -EINVAL : 0;
 }
 
 static __initconst const struct x86_pmu p4_pmu = {
index 3b97a80ce32948ffa995f7b310024f901811703b..c99f9ed013d59985850702a617467b0bf0b556db 100644 (file)
@@ -116,16 +116,16 @@ void show_registers(struct pt_regs *regs)
                for (i = 0; i < code_len; i++, ip++) {
                        if (ip < (u8 *)PAGE_OFFSET ||
                                        probe_kernel_address(ip, c)) {
-                               printk(" Bad EIP value.");
+                               printk(KERN_CONT " Bad EIP value.");
                                break;
                        }
                        if (ip == (u8 *)regs->ip)
-                               printk("<%02x> ", c);
+                               printk(KERN_CONT "<%02x> ", c);
                        else
-                               printk("%02x ", c);
+                               printk(KERN_CONT "%02x ", c);
                }
        }
-       printk("\n");
+       printk(KERN_CONT "\n");
 }
 
 int is_valid_bugaddr(unsigned long ip)
index 19853ad8afc5ff940cbf6806b6ae283695296cd7..6d728d9284bd0e3b94213206bd93af5afbfb7912 100644 (file)
@@ -284,16 +284,16 @@ void show_registers(struct pt_regs *regs)
                for (i = 0; i < code_len; i++, ip++) {
                        if (ip < (u8 *)PAGE_OFFSET ||
                                        probe_kernel_address(ip, c)) {
-                               printk(" Bad RIP value.");
+                               printk(KERN_CONT " Bad RIP value.");
                                break;
                        }
                        if (ip == (u8 *)regs->ip)
-                               printk("<%02x> ", c);
+                               printk(KERN_CONT "<%02x> ", c);
                        else
-                               printk("%02x ", c);
+                               printk(KERN_CONT "%02x ", c);
                }
        }
-       printk("\n");
+       printk(KERN_CONT "\n");
 }
 
 int is_valid_bugaddr(unsigned long ip)
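
Both show_registers() hunks add KERN_CONT so the byte-by-byte code dump is treated as a continuation of the current console line rather than a series of new log records. A hedged sketch of the idiom (demo_dump_code is illustrative, not from the patch):

```c
#include <linux/kernel.h>
#include <linux/types.h>

/* Build up a single log line from several printk() calls. */
static void demo_dump_code(const u8 *code, int len)
{
	int i;

	printk(KERN_DEBUG "Code:");
	for (i = 0; i < len; i++)
		printk(KERN_CONT " %02x", code[i]);
	printk(KERN_CONT "\n");
}
```
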
index b946a9eac7d9f29fb6d955e3541bf03295612367..1bb0bf4d92cd8edf9d639ea5aabd08bc7efad892 100644 (file)
@@ -1049,6 +1049,14 @@ int hpet_rtc_timer_init(void)
 }
 EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
 
+static void hpet_disable_rtc_channel(void)
+{
+       unsigned long cfg;
+       cfg = hpet_readl(HPET_T1_CFG);
+       cfg &= ~HPET_TN_ENABLE;
+       hpet_writel(cfg, HPET_T1_CFG);
+}
+
 /*
  * The functions below are called from rtc driver.
  * Return 0 if HPET is not being used.
@@ -1060,6 +1068,9 @@ int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
                return 0;
 
        hpet_rtc_flags &= ~bit_mask;
+       if (unlikely(!hpet_rtc_flags))
+               hpet_disable_rtc_channel();
+
        return 1;
 }
 EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
@@ -1125,15 +1136,11 @@ EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);
 
 static void hpet_rtc_timer_reinit(void)
 {
-       unsigned int cfg, delta;
+       unsigned int delta;
        int lost_ints = -1;
 
-       if (unlikely(!hpet_rtc_flags)) {
-               cfg = hpet_readl(HPET_T1_CFG);
-               cfg &= ~HPET_TN_ENABLE;
-               hpet_writel(cfg, HPET_T1_CFG);
-               return;
-       }
+       if (unlikely(!hpet_rtc_flags))
+               hpet_disable_rtc_channel();
 
        if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
                delta = hpet_default_delta;
index acf8fbf8fbda1960de6cbd18eaf47a0a049eed8b..69bca468c47a8ffc22ea5811cb3bdf63caf0f44b 100644 (file)
@@ -38,6 +38,9 @@ static inline void stack_overflow_check(struct pt_regs *regs)
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
        u64 curbase = (u64)task_stack_page(current);
 
+       if (user_mode_vm(regs))
+               return;
+
        WARN_ONCE(regs->sp >= curbase &&
                  regs->sp <= curbase + THREAD_SIZE &&
                  regs->sp <  curbase + sizeof(struct thread_info) +
index f2d2a664e7975acace35742dc7bf3fe3446c2aeb..9d46f5e43b51f0dd2b02ad0575c1e90470df862a 100644 (file)
@@ -256,7 +256,7 @@ static int __init microcode_dev_init(void)
        return 0;
 }
 
-static void microcode_dev_exit(void)
+static void __exit microcode_dev_exit(void)
 {
        misc_deregister(&microcode_dev);
 }
@@ -519,10 +519,8 @@ static int __init microcode_init(void)
 
        microcode_pdev = platform_device_register_simple("microcode", -1,
                                                         NULL, 0);
-       if (IS_ERR(microcode_pdev)) {
-               microcode_dev_exit();
+       if (IS_ERR(microcode_pdev))
                return PTR_ERR(microcode_pdev);
-       }
 
        get_online_cpus();
        mutex_lock(&microcode_mutex);
@@ -532,14 +530,12 @@ static int __init microcode_init(void)
        mutex_unlock(&microcode_mutex);
        put_online_cpus();
 
-       if (error) {
-               platform_device_unregister(microcode_pdev);
-               return error;
-       }
+       if (error)
+               goto out_pdev;
 
        error = microcode_dev_init();
        if (error)
-               return error;
+               goto out_sysdev_driver;
 
        register_syscore_ops(&mc_syscore_ops);
        register_hotcpu_notifier(&mc_cpu_notifier);
@@ -548,6 +544,20 @@ static int __init microcode_init(void)
                " <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n");
 
        return 0;
+
+out_sysdev_driver:
+       get_online_cpus();
+       mutex_lock(&microcode_mutex);
+
+       sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver);
+
+       mutex_unlock(&microcode_mutex);
+       put_online_cpus();
+
+out_pdev:
+       platform_device_unregister(microcode_pdev);
+       return error;
+
 }
 module_init(microcode_init);
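
microcode_init() is reworked into the usual goto-based unwinding: every setup step that can fail jumps to a label that tears down what was registered before it, in reverse order. A generic hedged sketch with placeholder demo_* steps:

```c
/* Placeholder setup/teardown steps, purely illustrative. */
static int demo_register_a(void) { return 0; }
static int demo_register_b(void) { return 0; }
static int demo_register_c(void) { return 0; }
static void demo_unregister_a(void) { }
static void demo_unregister_b(void) { }

static int demo_init(void)
{
	int err;

	err = demo_register_a();
	if (err)
		return err;

	err = demo_register_b();
	if (err)
		goto out_a;

	err = demo_register_c();
	if (err)
		goto out_b;

	return 0;

out_b:
	demo_unregister_b();
out_a:
	demo_unregister_a();
	return err;
}
```
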
 
index 9103b89c145a534215824a9b2a7d80aa9e112527..0741b062a3048a6e2b1b5bd0eb4edbbf3d5bb9cf 100644 (file)
@@ -95,8 +95,8 @@ static void __init MP_bus_info(struct mpc_bus *m)
        }
 #endif
 
+       set_bit(m->busid, mp_bus_not_pci);
        if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
-               set_bit(m->busid, mp_bus_not_pci);
 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
                mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
 #endif
index b9b3b1a51643931a0405a1c0f9cc5b62eec43b1f..ee5d4fbd53b4bac72d19e3aa3000e77467f0c25e 100644 (file)
@@ -403,6 +403,14 @@ void default_idle(void)
 EXPORT_SYMBOL(default_idle);
 #endif
 
+bool set_pm_idle_to_default(void)
+{
+       bool ret = !!pm_idle;
+
+       pm_idle = default_idle;
+
+       return ret;
+}
 void stop_this_cpu(void *dummy)
 {
        local_irq_disable();
index b78643d0f9a53d8b2050a14f7e8185b540ed8595..03920a15a632289605c6f0ba0b563d64065214a7 100644 (file)
@@ -553,4 +553,17 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
                        quirk_amd_nb_node);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
                        quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
+                       quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
+                       quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
+                       quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
+                       quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
+                       quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
+                       quirk_amd_nb_node);
+
 #endif
index e334be1182b9f0b3666464f8ff9f13b22d3ef1dc..37a458b521a6020598b69305c782a0f28bd75b27 100644 (file)
@@ -124,7 +124,7 @@ __setup("reboot=", reboot_setup);
  */
 
 /*
- * Some machines require the "reboot=b"  commandline option,
+ * Some machines require the "reboot=b" or "reboot=k"  commandline options,
  * this quirk makes that automatic.
  */
 static int __init set_bios_reboot(const struct dmi_system_id *d)
@@ -136,6 +136,15 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
        return 0;
 }
 
+static int __init set_kbd_reboot(const struct dmi_system_id *d)
+{
+       if (reboot_type != BOOT_KBD) {
+               reboot_type = BOOT_KBD;
+               printk(KERN_INFO "%s series board detected. Selecting KBD-method for reboot.\n", d->ident);
+       }
+       return 0;
+}
+
 static struct dmi_system_id __initdata reboot_dmi_table[] = {
        {       /* Handle problems with rebooting on Dell E520's */
                .callback = set_bios_reboot,
@@ -295,7 +304,7 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                },
        },
        { /* Handle reboot issue on Acer Aspire one */
-               .callback = set_bios_reboot,
+               .callback = set_kbd_reboot,
                .ident = "Acer Aspire One A110",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -443,6 +452,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
                },
        },
+       {       /* Handle problems with rebooting on the OptiPlex 990. */
+               .callback = set_pci_reboot,
+               .ident = "Dell OptiPlex 990",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
+               },
+       },
        { }
 };
 
index 348ce016a835c291deeae528fb327ea8f93ffbdb..af6db6ec5b2a20db3d13861bdb97b878f8371f1e 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/vsyscall.h>
 #include <asm/x86_init.h>
 #include <asm/time.h>
+#include <asm/mrst.h>
 
 #ifdef CONFIG_X86_32
 /*
@@ -242,6 +243,10 @@ static __init int add_rtc_cmos(void)
        if (of_have_populated_dt())
                return 0;
 
+       /* Intel MID platforms don't have ioport rtc */
+       if (mrst_identify_cpu())
+               return -ENODEV;
+
        platform_device_register(&rtc_device);
        dev_info(&rtc_device.dev,
                 "registered platform RTC device (no PNP device found)\n");
index 76e3f1cd03696997964db7814de98a578a9eb4a7..405f2620392f5e32d393166520e707b2d039b3bb 100644 (file)
@@ -338,11 +338,15 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
                return HRTIMER_NORESTART;
 }
 
-static void create_pit_timer(struct kvm_kpit_state *ps, u32 val, int is_period)
+static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
 {
+       struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
        struct kvm_timer *pt = &ps->pit_timer;
        s64 interval;
 
+       if (!irqchip_in_kernel(kvm))
+               return;
+
        interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
 
        pr_debug("create pit timer, interval is %llu nsec\n", interval);
@@ -394,13 +398,13 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
         /* FIXME: enhance mode 4 precision */
        case 4:
                if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) {
-                       create_pit_timer(ps, val, 0);
+                       create_pit_timer(kvm, val, 0);
                }
                break;
        case 2:
        case 3:
                if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){
-                       create_pit_timer(ps, val, 1);
+                       create_pit_timer(kvm, val, 1);
                }
                break;
        default:
index c38efd7b792eec9aab414892c23818446ad3fad0..4c938da2ba00b40dc430d5a7854b0ee62b6ce631 100644 (file)
@@ -602,7 +602,6 @@ static void update_cpuid(struct kvm_vcpu *vcpu)
 {
        struct kvm_cpuid_entry2 *best;
        struct kvm_lapic *apic = vcpu->arch.apic;
-       u32 timer_mode_mask;
 
        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        if (!best)
@@ -615,15 +614,12 @@ static void update_cpuid(struct kvm_vcpu *vcpu)
                        best->ecx |= bit(X86_FEATURE_OSXSAVE);
        }
 
-       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-               best->function == 0x1) {
-               best->ecx |= bit(X86_FEATURE_TSC_DEADLINE_TIMER);
-               timer_mode_mask = 3 << 17;
-       } else
-               timer_mode_mask = 1 << 17;
-
-       if (apic)
-               apic->lapic_timer.timer_mode_mask = timer_mode_mask;
+       if (apic) {
+               if (best->ecx & bit(X86_FEATURE_TSC_DEADLINE_TIMER))
+                       apic->lapic_timer.timer_mode_mask = 3 << 17;
+               else
+                       apic->lapic_timer.timer_mode_mask = 1 << 17;
+       }
 }
 
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -2135,6 +2131,9 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_TSC_CONTROL:
                r = kvm_has_tsc_control;
                break;
+       case KVM_CAP_TSC_DEADLINE_TIMER:
+               r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
+               break;
        default:
                r = 0;
                break;
index ea305856151cefc62fccd7f216519bc9d5f2945c..dd74e46828c0fc243740b61a18c2dea654fafb5e 100644 (file)
@@ -201,6 +201,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
+               if (PageTail(page))
+                       get_huge_page_tail(page);
                (*nr)++;
                page++;
                refs++;
index b49962662101a0cf7361f0035e1b017333efc22a..f4f29b19fac5f2cc7c46023ef86c02a66b137e8e 100644 (file)
@@ -45,6 +45,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        BUG_ON(!pte_none(*(kmap_pte-idx)));
        set_pte(kmap_pte-idx, mk_pte(page, prot));
+       arch_flush_lazy_mmu_mode();
 
        return (void *)vaddr;
 }
@@ -88,6 +89,7 @@ void __kunmap_atomic(void *kvaddr)
                 */
                kpte_clear_flush(kmap_pte-idx, vaddr);
                kmap_atomic_idx_pop();
+               arch_flush_lazy_mmu_mode();
        }
 #ifdef CONFIG_DEBUG_HIGHMEM
        else {
index bfab3fa10edc63e50b4184d00e75566678242a0d..7b65f752c5f8fd79af2c6b4afb342988bdd8d56c 100644 (file)
@@ -568,8 +568,8 @@ cond_branch:                        f_offset = addrs[i + filter[i].jf] - addrs[i];
                                        break;
                                }
                                if (filter[i].jt != 0) {
-                                       if (filter[i].jf)
-                                               t_offset += is_near(f_offset) ? 2 : 6;
+                                       if (filter[i].jf && f_offset)
+                                               t_offset += is_near(f_offset) ? 2 : 5;
                                        EMIT_COND_JMP(t_op, t_offset);
                                        if (filter[i].jf)
                                                EMIT_JMP(f_offset);
index cdfe4c54decac05e4943a00e27803f78898b6419..f148cf65267836d66e1fa666d612dca5669950c3 100644 (file)
@@ -21,6 +21,7 @@ extern int op_nmi_timer_init(struct oprofile_operations *ops);
 extern void op_nmi_exit(void);
 extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
 
+static int nmi_timer;
 
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
@@ -31,8 +32,9 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 #ifdef CONFIG_X86_LOCAL_APIC
        ret = op_nmi_init(ops);
 #endif
+       nmi_timer = (ret != 0);
 #ifdef CONFIG_X86_IO_APIC
-       if (ret < 0)
+       if (nmi_timer)
                ret = op_nmi_timer_init(ops);
 #endif
        ops->backtrace = x86_backtrace;
@@ -44,6 +46,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 void oprofile_arch_exit(void)
 {
 #ifdef CONFIG_X86_LOCAL_APIC
-       op_nmi_exit();
+       if (!nmi_timer)
+               op_nmi_exit();
 #endif
 }
index e36bf714cb7727352c237d6bffa0d838d2c1c08e..40e446941dd7eceb587b2b4572c96279631e5f56 100644 (file)
  */
 
 static unsigned long efi_rt_eflags;
-static pgd_t efi_bak_pg_dir_pointer[2];
 
 void efi_call_phys_prelog(void)
 {
-       unsigned long cr4;
-       unsigned long temp;
        struct desc_ptr gdt_descr;
 
        local_irq_save(efi_rt_eflags);
 
-       /*
-        * If I don't have PAE, I should just duplicate two entries in page
-        * directory. If I have PAE, I just need to duplicate one entry in
-        * page directory.
-        */
-       cr4 = read_cr4_safe();
-
-       if (cr4 & X86_CR4_PAE) {
-               efi_bak_pg_dir_pointer[0].pgd =
-                   swapper_pg_dir[pgd_index(0)].pgd;
-               swapper_pg_dir[0].pgd =
-                   swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
-       } else {
-               efi_bak_pg_dir_pointer[0].pgd =
-                   swapper_pg_dir[pgd_index(0)].pgd;
-               efi_bak_pg_dir_pointer[1].pgd =
-                   swapper_pg_dir[pgd_index(0x400000)].pgd;
-               swapper_pg_dir[pgd_index(0)].pgd =
-                   swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
-               temp = PAGE_OFFSET + 0x400000;
-               swapper_pg_dir[pgd_index(0x400000)].pgd =
-                   swapper_pg_dir[pgd_index(temp)].pgd;
-       }
-
-       /*
-        * After the lock is released, the original page table is restored.
-        */
+       load_cr3(initial_page_table);
        __flush_tlb_all();
 
        gdt_descr.address = __pa(get_cpu_gdt_table(0));
@@ -85,28 +56,13 @@ void efi_call_phys_prelog(void)
 
 void efi_call_phys_epilog(void)
 {
-       unsigned long cr4;
        struct desc_ptr gdt_descr;
 
        gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
        gdt_descr.size = GDT_SIZE - 1;
        load_gdt(&gdt_descr);
 
-       cr4 = read_cr4_safe();
-
-       if (cr4 & X86_CR4_PAE) {
-               swapper_pg_dir[pgd_index(0)].pgd =
-                   efi_bak_pg_dir_pointer[0].pgd;
-       } else {
-               swapper_pg_dir[pgd_index(0)].pgd =
-                   efi_bak_pg_dir_pointer[0].pgd;
-               swapper_pg_dir[pgd_index(0x400000)].pgd =
-                   efi_bak_pg_dir_pointer[1].pgd;
-       }
-
-       /*
-        * After the lock is released, the original page table is restored.
-        */
+       load_cr3(swapper_pg_dir);
        __flush_tlb_all();
 
        local_irq_restore(efi_rt_eflags);
index b1489a06a49dbc5ac0f4acd5eeaddc6136f05c1a..ad4ec1cb097ecfae17e4a99aed37cf985b03cb78 100644 (file)
@@ -76,6 +76,20 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
 int sfi_mrtc_num;
 
+static void mrst_power_off(void)
+{
+       if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
+               intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
+}
+
+static void mrst_reboot(void)
+{
+       if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
+               intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
+       else
+               intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
+}
+
 /* parse all the mtimer info to a static mtimer array */
 static int __init sfi_parse_mtmr(struct sfi_table_header *table)
 {
@@ -265,17 +279,6 @@ static int mrst_i8042_detect(void)
        return 0;
 }
 
-/* Reboot and power off are handled by the SCU on a MID device */
-static void mrst_power_off(void)
-{
-       intel_scu_ipc_simple_command(0xf1, 1);
-}
-
-static void mrst_reboot(void)
-{
-       intel_scu_ipc_simple_command(0xf1, 0);
-}
-
 /*
  * Moorestown does not have external NMI source nor port 0x61 to report
  * NMI status. The possible NMI sources are from pmu as a result of NMI
@@ -484,6 +487,46 @@ static void __init *max7315_platform_data(void *info)
        return max7315;
 }
 
+static void *tca6416_platform_data(void *info)
+{
+       static struct pca953x_platform_data tca6416;
+       struct i2c_board_info *i2c_info = info;
+       int gpio_base, intr;
+       char base_pin_name[SFI_NAME_LEN + 1];
+       char intr_pin_name[SFI_NAME_LEN + 1];
+
+       strcpy(i2c_info->type, "tca6416");
+       strcpy(base_pin_name, "tca6416_base");
+       strcpy(intr_pin_name, "tca6416_int");
+
+       gpio_base = get_gpio_by_name(base_pin_name);
+       intr = get_gpio_by_name(intr_pin_name);
+
+       if (gpio_base == -1)
+               return NULL;
+       tca6416.gpio_base = gpio_base;
+       if (intr != -1) {
+               i2c_info->irq = intr + MRST_IRQ_OFFSET;
+               tca6416.irq_base = gpio_base + MRST_IRQ_OFFSET;
+       } else {
+               i2c_info->irq = -1;
+               tca6416.irq_base = -1;
+       }
+       return &tca6416;
+}
+
+static void *mpu3050_platform_data(void *info)
+{
+       struct i2c_board_info *i2c_info = info;
+       int intr = get_gpio_by_name("mpu3050_int");
+
+       if (intr == -1)
+               return NULL;
+
+       i2c_info->irq = intr + MRST_IRQ_OFFSET;
+       return NULL;
+}
+
 static void __init *emc1403_platform_data(void *info)
 {
        static short intr2nd_pdata;
@@ -646,12 +689,15 @@ static void *msic_ocd_platform_data(void *info)
 static const struct devs_id __initconst device_ids[] = {
        {"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data},
        {"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data},
+       {"pmic_gpio", SFI_DEV_TYPE_IPC, 1, &pmic_gpio_platform_data},
        {"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data},
        {"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
        {"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
+       {"tca6416", SFI_DEV_TYPE_I2C, 1, &tca6416_platform_data},
        {"emc1403", SFI_DEV_TYPE_I2C, 1, &emc1403_platform_data},
        {"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data},
        {"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data},
+       {"mpu3050", SFI_DEV_TYPE_I2C, 1, &mpu3050_platform_data},
 
        /* MSIC subdevices */
        {"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data},
index 38d0af4fefec19f52d5e724c8f08102d391dc2e6..b2c7179fa26343d5cedfb7846d2f1b92ceb97390 100644 (file)
@@ -173,9 +173,21 @@ static unsigned long __init xen_get_max_pages(void)
        domid_t domid = DOMID_SELF;
        int ret;
 
-       ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
-       if (ret > 0)
-               max_pages = ret;
+       /*
+        * For the initial domain we use the maximum reservation as
+        * the maximum page.
+        *
+        * For guest domains the current maximum reservation reflects
+        * the current maximum rather than the static maximum. In this
+        * case the e820 map provided to us will cover the static
+        * maximum region.
+        */
+       if (xen_initial_domain()) {
+               ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
+               if (ret > 0)
+                       max_pages = ret;
+       }
+
        return min(max_pages, MAX_DOMAIN_PAGES);
 }
 
@@ -410,6 +422,6 @@ void __init xen_arch_setup(void)
 #endif
        disable_cpuidle();
        boot_option_idle_override = IDLE_HALT;
-
+       WARN_ON(set_pm_idle_to_default());
        fiddle_vdso();
 }
index ea70e6c80cd34f0c2b563ceabcc7189027fc5182..15de223c7f9371a9da852825ea8857789d94ae70 100644 (file)
@@ -366,7 +366,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
                if (drain_all)
                        blk_throtl_drain(q);
 
-               __blk_run_queue(q);
+               /*
+                * This function might be called on a queue which failed
+                * driver init after queue creation.  Some drivers
+                * (e.g. fd) get unhappy in such cases.  Kick queue iff
+                * dispatch queue has something on it.
+                */
+               if (!list_empty(&q->queue_head))
+                       __blk_run_queue(q);
 
                if (drain_all)
                        nr_rqs = q->rq.count[0] + q->rq.count[1];
@@ -467,6 +474,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
        q->backing_dev_info.state = 0;
        q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
        q->backing_dev_info.name = "block";
+       q->node = node_id;
 
        err = bdi_init(&q->backing_dev_info);
        if (err) {
@@ -551,7 +559,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
        if (!uninit_q)
                return NULL;
 
-       q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
+       q = blk_init_allocated_queue(uninit_q, rfn, lock);
        if (!q)
                blk_cleanup_queue(uninit_q);
 
@@ -562,19 +570,10 @@ EXPORT_SYMBOL(blk_init_queue_node);
 struct request_queue *
 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
                         spinlock_t *lock)
-{
-       return blk_init_allocated_queue_node(q, rfn, lock, -1);
-}
-EXPORT_SYMBOL(blk_init_allocated_queue);
-
-struct request_queue *
-blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
-                             spinlock_t *lock, int node_id)
 {
        if (!q)
                return NULL;
 
-       q->node = node_id;
        if (blk_init_free_list(q))
                return NULL;
 
@@ -604,7 +603,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 
        return NULL;
 }
-EXPORT_SYMBOL(blk_init_allocated_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
 
 int blk_get_queue(struct request_queue *q)
 {
index 164cd0059706214e53c01b780b353fca98d9b829..623e1cd4cffe997e71fbb54577bd42f220af64b3 100644 (file)
@@ -311,7 +311,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
        if (IS_ERR(bio))
                return PTR_ERR(bio);
 
-       if (rq_data_dir(rq) == WRITE)
+       if (!reading)
                bio->bi_rw |= REQ_WRITE;
 
        if (do_copy)
index e74d6d13838f3ae9f717911a38d21885623029c7..4af6f5cc1167a65494dc52b39cd3f1dfd1271087 100644 (file)
@@ -282,18 +282,9 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
 void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 {
        struct blk_queue_tag *bqt = q->queue_tags;
-       int tag = rq->tag;
+       unsigned tag = rq->tag; /* negative tags invalid */
 
-       BUG_ON(tag == -1);
-
-       if (unlikely(tag >= bqt->max_depth)) {
-               /*
-                * This can happen after tag depth has been reduced.
-                * But tag shouldn't be larger than real_max_depth.
-                */
-               WARN_ON(tag >= bqt->real_max_depth);
-               return;
-       }
+       BUG_ON(tag >= bqt->real_max_depth);
 
        list_del_init(&rq->queuelist);
        rq->cmd_flags &= ~REQ_QUEUED;
index 16ace89613bc6e4ce343cdd2f1da85218b7dc5e7..3548705b04e482a4405097217011256105c4bdca 100644 (file)
@@ -1655,6 +1655,8 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
                    struct request *next)
 {
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
+       struct cfq_data *cfqd = q->elevator->elevator_data;
+
        /*
         * reposition in fifo if next is older than rq
         */
@@ -1669,6 +1671,16 @@ cfq_merged_requests(struct request_queue *q, struct request *rq,
        cfq_remove_request(next);
        cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
                                        rq_data_dir(next), rq_is_sync(next));
+
+       cfqq = RQ_CFQQ(next);
+       /*
+        * all requests of this queue are merged to other queues, delete it
+        * from the service tree. If it's the active_queue,
+        * cfq_dispatch_requests() will choose to expire it or do idle
+        */
+       if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
+           cfqq != cfqd->active_queue)
+               cfq_del_cfqq_rr(cfqd, cfqq);
 }
 
 static int cfq_allow_merge(struct request_queue *q, struct request *rq,
@@ -3184,7 +3196,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
                }
        }
 
-       if (ret)
+       if (ret && ret != -EEXIST)
                printk(KERN_ERR "cfq: cic link failed!\n");
 
        return ret;
@@ -3200,6 +3212,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 {
        struct io_context *ioc = NULL;
        struct cfq_io_context *cic;
+       int ret;
 
        might_sleep_if(gfp_mask & __GFP_WAIT);
 
@@ -3207,6 +3220,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
        if (!ioc)
                return NULL;
 
+retry:
        cic = cfq_cic_lookup(cfqd, ioc);
        if (cic)
                goto out;
@@ -3215,7 +3229,12 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
        if (cic == NULL)
                goto err;
 
-       if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
+       ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask);
+       if (ret == -EEXIST) {
+               /* someone has linked cic to ioc already */
+               cfq_cic_free(cic);
+               goto retry;
+       } else if (ret)
                goto err_free;
 
 out:
@@ -4036,6 +4055,11 @@ static void *cfq_init_queue(struct request_queue *q)
 
        if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
                kfree(cfqg);
+
+               spin_lock(&cic_index_lock);
+               ida_remove(&cic_index_ida, cfqd->cic_index);
+               spin_unlock(&cic_index_lock);
+
                kfree(cfqd);
                return NULL;
        }
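
The cfq_get_io_context() change handles two tasks racing to link a cfq_io_context: the loser gets -EEXIST back, frees its own copy and retries the lookup, which now finds the winner's object. A generic hedged sketch of that lookup-or-insert loop (demo_* names are placeholders, not the cfq code):

```c
#include <linux/errno.h>
#include <linux/slab.h>

struct demo_obj { int dummy; };

/* Placeholder primitives, purely illustrative. */
static struct demo_obj *demo_lookup(void) { return NULL; }
static int demo_link(struct demo_obj *obj) { return 0; } /* -EEXIST on a lost race */

static struct demo_obj *demo_get(void)
{
	struct demo_obj *obj;
	int ret;

retry:
	obj = demo_lookup();
	if (obj)
		return obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	ret = demo_link(obj);
	if (ret == -EEXIST) {
		/* Someone linked theirs first; drop ours and look it up. */
		kfree(obj);
		goto retry;
	}
	if (ret) {
		kfree(obj);
		return NULL;
	}
	return obj;
}
```
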
index 6bdedd7cca2cd3cd630370732b71b8b530992ca9..cf047c406d92797777d6255122f23b5a8297c58a 100644 (file)
@@ -820,7 +820,7 @@ config PATA_PLATFORM
 
 config PATA_OF_PLATFORM
        tristate "OpenFirmware platform device PATA support"
-       depends on PATA_PLATFORM && OF
+       depends on PATA_PLATFORM && OF && OF_IRQ
        help
          This option enables support for generic directly connected ATA
          devices commonly found on embedded systems with OpenFirmware
index d8b3d89db043e7e9ead44cb8b4a00946f4980fa6..919daa7cd5b1db3443c65863cc07c20c830f680b 100644 (file)
@@ -1743,8 +1743,10 @@ void device_shutdown(void)
                 */
                list_del_init(&dev->kobj.entry);
                spin_unlock(&devices_kset->list_lock);
-               /* Disable all device's runtime power management */
-               pm_runtime_disable(dev);
+
+               /* Don't allow any more runtime suspends */
+               pm_runtime_get_noresume(dev);
+               pm_runtime_barrier(dev);
 
                if (dev->bus && dev->bus->shutdown) {
                        dev_dbg(dev, "shutdown\n");
index 8004ac30a7a8634bc7f963be8db2c6c1eac61ce4..587cce57adae23fcddb22270de69ae9ce2bff76b 100644 (file)
@@ -2601,6 +2601,8 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
                        c->Request.Timeout = 0;
                        c->Request.CDB[0] = BMIC_WRITE;
                        c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+                       c->Request.CDB[7] = (size >> 8) & 0xFF;
+                       c->Request.CDB[8] = size & 0xFF;
                        break;
                case TEST_UNIT_READY:
                        c->Request.CDBLen = 6;
@@ -4880,7 +4882,7 @@ static int cciss_request_irq(ctlr_info_t *h,
 {
        if (h->msix_vector || h->msi_vector) {
                if (!request_irq(h->intr[h->intr_mode], msixhandler,
-                               IRQF_DISABLED, h->devname, h))
+                               0, h->devname, h))
                        return 0;
                dev_err(&h->pdev->dev, "Unable to get msi irq %d"
                        " for %s\n", h->intr[h->intr_mode],
@@ -4889,7 +4891,7 @@ static int cciss_request_irq(ctlr_info_t *h,
        }
 
        if (!request_irq(h->intr[h->intr_mode], intxhandler,
-                       IRQF_DISABLED, h->devname, h))
+                       IRQF_SHARED, h->devname, h))
                return 0;
        dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
                h->intr[h->intr_mode], h->devname);
index 68b205a9338f631adbd98df74836b98aa95d27ac..1e888c9e85b3dd10a42238a6588dd4f0b2f7c9e1 100644 (file)
@@ -422,7 +422,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 
                /*
                 * We use punch hole to reclaim the free space used by the
-                * image a.k.a. discard. However we do support discard if
+                * image a.k.a. discard. However we do not support discard if
                 * encryption is enabled, because it may give an attacker
                 * useful information.
                 */
@@ -797,7 +797,7 @@ static void loop_config_discard(struct loop_device *lo)
        }
 
        q->limits.discard_granularity = inode->i_sb->s_blocksize;
-       q->limits.discard_alignment = inode->i_sb->s_blocksize;
+       q->limits.discard_alignment = 0;
        q->limits.max_discard_sectors = UINT_MAX >> 9;
        q->limits.discard_zeroes_data = 1;
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
index 65cc424359b05e735703b1b9a74277cb1f1359a9..148ab944378d57bdaec596e96bed6dc131777a28 100644 (file)
@@ -183,10 +183,6 @@ static LIST_HEAD(rbd_client_list);      /* clients */
 
 static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
 static void rbd_dev_release(struct device *dev);
-static ssize_t rbd_snap_rollback(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf,
-                                size_t size);
 static ssize_t rbd_snap_add(struct device *dev,
                            struct device_attribute *attr,
                            const char *buf,
@@ -461,6 +457,10 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
        u32 snap_count = le32_to_cpu(ondisk->snap_count);
        int ret = -ENOMEM;
 
+       if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) {
+               return -ENXIO;
+       }
+
        init_rwsem(&header->snap_rwsem);
        header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
        header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
@@ -1355,32 +1355,6 @@ fail:
        return ret;
 }
 
-/*
- * Request sync osd rollback
- */
-static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
-                                    u64 snapid,
-                                    const char *obj)
-{
-       struct ceph_osd_req_op *ops;
-       int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
-       if (ret < 0)
-               return ret;
-
-       ops[0].snap.snapid = snapid;
-
-       ret = rbd_req_sync_op(dev, NULL,
-                              CEPH_NOSNAP,
-                              0,
-                              CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
-                              ops,
-                              1, obj, 0, 0, NULL, NULL, NULL);
-
-       rbd_destroy_ops(ops);
-
-       return ret;
-}
-
 /*
  * Request sync osd read
  */
@@ -1610,8 +1584,13 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
                        goto out_dh;
 
                rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
-               if (rc < 0)
+               if (rc < 0) {
+                       if (rc == -ENXIO) {
+                               pr_warning("unrecognized header format"
+                                          " for image %s", rbd_dev->obj);
+                       }
                        goto out_dh;
+               }
 
                if (snap_count != header->total_snaps) {
                        snap_count = header->total_snaps;
@@ -1882,7 +1861,6 @@ static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
 static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
-static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
 
 static struct attribute *rbd_attrs[] = {
        &dev_attr_size.attr,
@@ -1893,7 +1871,6 @@ static struct attribute *rbd_attrs[] = {
        &dev_attr_current_snap.attr,
        &dev_attr_refresh.attr,
        &dev_attr_create_snap.attr,
-       &dev_attr_rollback_snap.attr,
        NULL
 };
 
@@ -2424,64 +2401,6 @@ err_unlock:
        return ret;
 }
 
-static ssize_t rbd_snap_rollback(struct device *dev,
-                                struct device_attribute *attr,
-                                const char *buf,
-                                size_t count)
-{
-       struct rbd_device *rbd_dev = dev_to_rbd(dev);
-       int ret;
-       u64 snapid;
-       u64 cur_ofs;
-       char *seg_name = NULL;
-       char *snap_name = kmalloc(count + 1, GFP_KERNEL);
-       ret = -ENOMEM;
-       if (!snap_name)
-               return ret;
-
-       /* parse snaps add command */
-       snprintf(snap_name, count, "%s", buf);
-       seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
-       if (!seg_name)
-               goto done;
-
-       mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
-       ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
-       if (ret < 0)
-               goto done_unlock;
-
-       dout("snapid=%lld\n", snapid);
-
-       cur_ofs = 0;
-       while (cur_ofs < rbd_dev->header.image_size) {
-               cur_ofs += rbd_get_segment(&rbd_dev->header,
-                                          rbd_dev->obj,
-                                          cur_ofs, (u64)-1,
-                                          seg_name, NULL);
-               dout("seg_name=%s\n", seg_name);
-
-               ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
-               if (ret < 0)
-                       pr_warning("could not roll back obj %s err=%d\n",
-                                  seg_name, ret);
-       }
-
-       ret = __rbd_update_snaps(rbd_dev);
-       if (ret < 0)
-               goto done_unlock;
-
-       ret = count;
-
-done_unlock:
-       mutex_unlock(&ctl_mutex);
-done:
-       kfree(seg_name);
-       kfree(snap_name);
-
-       return ret;
-}
-
 static struct bus_attribute rbd_bus_attrs[] = {
        __ATTR(add, S_IWUSR, NULL, rbd_add),
        __ATTR(remove, S_IWUSR, NULL, rbd_remove),
index ae3e167e17adc3f18bc4b14fe3b1635d299bfb3e..89ddab127e33df525924b73f23501fc9fcab1006 100644 (file)
@@ -16,6 +16,8 @@
  * handle GCR disks
  */
 
+#undef DEBUG
+
 #include <linux/stddef.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 
-static DEFINE_MUTEX(swim3_mutex);
-static struct request_queue *swim3_queue;
-static struct gendisk *disks[2];
-static struct request *fd_req;
-
 #define MAX_FLOPPIES   2
 
+static DEFINE_MUTEX(swim3_mutex);
+static struct gendisk *disks[MAX_FLOPPIES];
+
 enum swim_state {
        idle,
        locating,
@@ -177,7 +177,6 @@ struct swim3 {
 
 struct floppy_state {
        enum swim_state state;
-       spinlock_t lock;
        struct swim3 __iomem *swim3;    /* hardware registers */
        struct dbdma_regs __iomem *dma; /* DMA controller registers */
        int     swim3_intr;     /* interrupt number for SWIM3 */
@@ -204,8 +203,20 @@ struct floppy_state {
        int     wanted;
        struct macio_dev *mdev;
        char    dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
+       int     index;
+       struct request *cur_req;
 };
 
+#define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_warn(fmt, arg...)        dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_info(fmt, arg...)        dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+
+#ifdef DEBUG
+#define swim3_dbg(fmt, arg...) dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#else
+#define swim3_dbg(fmt, arg...) do { } while(0)
+#endif
+
 static struct floppy_state floppy_states[MAX_FLOPPIES];
 static int floppy_count = 0;
 static DEFINE_SPINLOCK(swim3_lock);
@@ -224,17 +235,8 @@ static unsigned short write_postamble[] = {
        0, 0, 0, 0, 0, 0
 };
 
-static void swim3_select(struct floppy_state *fs, int sel);
-static void swim3_action(struct floppy_state *fs, int action);
-static int swim3_readbit(struct floppy_state *fs, int bit);
-static void do_fd_request(struct request_queue * q);
-static void start_request(struct floppy_state *fs);
-static void set_timeout(struct floppy_state *fs, int nticks,
-                       void (*proc)(unsigned long));
-static void scan_track(struct floppy_state *fs);
 static void seek_track(struct floppy_state *fs, int n);
 static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
-static void setup_transfer(struct floppy_state *fs);
 static void act(struct floppy_state *fs);
 static void scan_timeout(unsigned long data);
 static void seek_timeout(unsigned long data);
@@ -254,18 +256,21 @@ static unsigned int floppy_check_events(struct gendisk *disk,
                                        unsigned int clearing);
 static int floppy_revalidate(struct gendisk *disk);
 
-static bool swim3_end_request(int err, unsigned int nr_bytes)
+static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
 {
-       if (__blk_end_request(fd_req, err, nr_bytes))
-               return true;
+       struct request *req = fs->cur_req;
+       int rc;
 
-       fd_req = NULL;
-       return false;
-}
+       swim3_dbg("  end request, err=%d nr_bytes=%d, cur_req=%p\n",
+                 err, nr_bytes, req);
 
-static bool swim3_end_request_cur(int err)
-{
-       return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
+       if (err)
+               nr_bytes = blk_rq_cur_bytes(req);
+       rc = __blk_end_request(req, err, nr_bytes);
+       if (rc)
+               return true;
+       fs->cur_req = NULL;
+       return false;
 }
 
 static void swim3_select(struct floppy_state *fs, int sel)
@@ -303,50 +308,53 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
        return (stat & DATA) == 0;
 }
 
-static void do_fd_request(struct request_queue * q)
-{
-       int i;
-
-       for(i=0; i<floppy_count; i++) {
-               struct floppy_state *fs = &floppy_states[i];
-               if (fs->mdev->media_bay &&
-                   check_media_bay(fs->mdev->media_bay) != MB_FD)
-                       continue;
-               start_request(fs);
-       }
-}
-
 static void start_request(struct floppy_state *fs)
 {
        struct request *req;
        unsigned long x;
 
+       swim3_dbg("start request, initial state=%d\n", fs->state);
+
        if (fs->state == idle && fs->wanted) {
                fs->state = available;
                wake_up(&fs->wait);
                return;
        }
        while (fs->state == idle) {
-               if (!fd_req) {
-                       fd_req = blk_fetch_request(swim3_queue);
-                       if (!fd_req)
+               swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
+               if (!fs->cur_req) {
+                       fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
+                       swim3_dbg("  fetched request %p\n", fs->cur_req);
+                       if (!fs->cur_req)
                                break;
                }
-               req = fd_req;
-#if 0
-               printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
-                      req->rq_disk->disk_name, req->cmd,
-                      (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
-               printk("           errors=%d current_nr_sectors=%u\n",
-                      req->errors, blk_rq_cur_sectors(req));
+               req = fs->cur_req;
+
+               if (fs->mdev->media_bay &&
+                   check_media_bay(fs->mdev->media_bay) != MB_FD) {
+                       swim3_dbg("%s", "  media bay absent, dropping req\n");
+                       swim3_end_request(fs, -ENODEV, 0);
+                       continue;
+               }
+
+#if 0 /* This is really too verbose */
+               swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
+                         req->rq_disk->disk_name, req->cmd,
+                         (long)blk_rq_pos(req), blk_rq_sectors(req),
+                         req->buffer);
+               swim3_dbg("           errors=%d current_nr_sectors=%u\n",
+                         req->errors, blk_rq_cur_sectors(req));
 #endif
 
                if (blk_rq_pos(req) >= fs->total_secs) {
-                       swim3_end_request_cur(-EIO);
+                       swim3_dbg("  pos out of bounds (%ld, max is %ld)\n",
+                                 (long)blk_rq_pos(req), (long)fs->total_secs);
+                       swim3_end_request(fs, -EIO, 0);
                        continue;
                }
                if (fs->ejected) {
-                       swim3_end_request_cur(-EIO);
+                       swim3_dbg("%s", "  disk ejected\n");
+                       swim3_end_request(fs, -EIO, 0);
                        continue;
                }
 
@@ -354,7 +362,8 @@ static void start_request(struct floppy_state *fs)
                        if (fs->write_prot < 0)
                                fs->write_prot = swim3_readbit(fs, WRITE_PROT);
                        if (fs->write_prot) {
-                               swim3_end_request_cur(-EIO);
+                               swim3_dbg("%s", "  try to write, disk write protected\n");
+                               swim3_end_request(fs, -EIO, 0);
                                continue;
                        }
                }
@@ -369,7 +378,6 @@ static void start_request(struct floppy_state *fs)
                x = ((long)blk_rq_pos(req)) % fs->secpercyl;
                fs->head = x / fs->secpertrack;
                fs->req_sector = x % fs->secpertrack + 1;
-               fd_req = req;
                fs->state = do_transfer;
                fs->retries = 0;
 
@@ -377,12 +385,14 @@ static void start_request(struct floppy_state *fs)
        }
 }
 
+static void do_fd_request(struct request_queue * q)
+{
+       start_request(q->queuedata);
+}
+
 static void set_timeout(struct floppy_state *fs, int nticks,
                        void (*proc)(unsigned long))
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&fs->lock, flags);
        if (fs->timeout_pending)
                del_timer(&fs->timeout);
        fs->timeout.expires = jiffies + nticks;
@@ -390,7 +400,6 @@ static void set_timeout(struct floppy_state *fs, int nticks,
        fs->timeout.data = (unsigned long) fs;
        add_timer(&fs->timeout);
        fs->timeout_pending = 1;
-       spin_unlock_irqrestore(&fs->lock, flags);
 }
 
 static inline void scan_track(struct floppy_state *fs)
@@ -442,40 +451,45 @@ static inline void setup_transfer(struct floppy_state *fs)
        struct swim3 __iomem *sw = fs->swim3;
        struct dbdma_cmd *cp = fs->dma_cmd;
        struct dbdma_regs __iomem *dr = fs->dma;
+       struct request *req = fs->cur_req;
 
-       if (blk_rq_cur_sectors(fd_req) <= 0) {
-               printk(KERN_ERR "swim3: transfer 0 sectors?\n");
+       if (blk_rq_cur_sectors(req) <= 0) {
+               swim3_warn("%s", "Transfer 0 sectors ?\n");
                return;
        }
-       if (rq_data_dir(fd_req) == WRITE)
+       if (rq_data_dir(req) == WRITE)
                n = 1;
        else {
                n = fs->secpertrack - fs->req_sector + 1;
-               if (n > blk_rq_cur_sectors(fd_req))
-                       n = blk_rq_cur_sectors(fd_req);
+               if (n > blk_rq_cur_sectors(req))
+                       n = blk_rq_cur_sectors(req);
        }
+
+       swim3_dbg("  setup xfer at sect %d (of %d) head %d for %d\n",
+                 fs->req_sector, fs->secpertrack, fs->head, n);
+
        fs->scount = n;
        swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
        out_8(&sw->sector, fs->req_sector);
        out_8(&sw->nsect, n);
        out_8(&sw->gap3, 0);
        out_le32(&dr->cmdptr, virt_to_bus(cp));
-       if (rq_data_dir(fd_req) == WRITE) {
+       if (rq_data_dir(req) == WRITE) {
                /* Set up 3 dma commands: write preamble, data, postamble */
                init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
                ++cp;
-               init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512);
+               init_dma(cp, OUTPUT_MORE, req->buffer, 512);
                ++cp;
                init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
        } else {
-               init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512);
+               init_dma(cp, INPUT_LAST, req->buffer, n * 512);
        }
        ++cp;
        out_le16(&cp->command, DBDMA_STOP);
        out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
        in_8(&sw->error);
        out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
-       if (rq_data_dir(fd_req) == WRITE)
+       if (rq_data_dir(req) == WRITE)
                out_8(&sw->control_bis, WRITE_SECTORS);
        in_8(&sw->intr);
        out_le32(&dr->control, (RUN << 16) | RUN);
@@ -488,12 +502,16 @@ static inline void setup_transfer(struct floppy_state *fs)
 static void act(struct floppy_state *fs)
 {
        for (;;) {
+               swim3_dbg("  act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
+                         fs->state, fs->req_cyl, fs->cur_cyl);
+
                switch (fs->state) {
                case idle:
                        return;         /* XXX shouldn't get here */
 
                case locating:
                        if (swim3_readbit(fs, TRACK_ZERO)) {
+                               swim3_dbg("%s", "    locate track 0\n");
                                fs->cur_cyl = 0;
                                if (fs->req_cyl == 0)
                                        fs->state = do_transfer;
@@ -511,7 +529,7 @@ static void act(struct floppy_state *fs)
                                break;
                        }
                        if (fs->req_cyl == fs->cur_cyl) {
-                               printk("whoops, seeking 0\n");
+                               swim3_warn("%s", "Whoops, seeking 0\n");
                                fs->state = do_transfer;
                                break;
                        }
@@ -527,7 +545,9 @@ static void act(struct floppy_state *fs)
                case do_transfer:
                        if (fs->cur_cyl != fs->req_cyl) {
                                if (fs->retries > 5) {
-                                       swim3_end_request_cur(-EIO);
+                                       swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
+                                                 fs->req_cyl, fs->cur_cyl);
+                                       swim3_end_request(fs, -EIO, 0);
                                        fs->state = idle;
                                        return;
                                }
@@ -542,7 +562,7 @@ static void act(struct floppy_state *fs)
                        return;
 
                default:
-                       printk(KERN_ERR"swim3: unknown state %d\n", fs->state);
+                       swim3_err("Unknown state %d\n", fs->state);
                        return;
                }
        }
@@ -552,59 +572,75 @@ static void scan_timeout(unsigned long data)
 {
        struct floppy_state *fs = (struct floppy_state *) data;
        struct swim3 __iomem *sw = fs->swim3;
+       unsigned long flags;
+
+       swim3_dbg("* scan timeout, state=%d\n", fs->state);
 
+       spin_lock_irqsave(&swim3_lock, flags);
        fs->timeout_pending = 0;
        out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
        out_8(&sw->select, RELAX);
        out_8(&sw->intr_enable, 0);
        fs->cur_cyl = -1;
        if (fs->retries > 5) {
-               swim3_end_request_cur(-EIO);
+               swim3_end_request(fs, -EIO, 0);
                fs->state = idle;
                start_request(fs);
        } else {
                fs->state = jogging;
                act(fs);
        }
+       spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static void seek_timeout(unsigned long data)
 {
        struct floppy_state *fs = (struct floppy_state *) data;
        struct swim3 __iomem *sw = fs->swim3;
+       unsigned long flags;
+
+       swim3_dbg("* seek timeout, state=%d\n", fs->state);
 
+       spin_lock_irqsave(&swim3_lock, flags);
        fs->timeout_pending = 0;
        out_8(&sw->control_bic, DO_SEEK);
        out_8(&sw->select, RELAX);
        out_8(&sw->intr_enable, 0);
-       printk(KERN_ERR "swim3: seek timeout\n");
-       swim3_end_request_cur(-EIO);
+       swim3_err("%s", "Seek timeout\n");
+       swim3_end_request(fs, -EIO, 0);
        fs->state = idle;
        start_request(fs);
+       spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static void settle_timeout(unsigned long data)
 {
        struct floppy_state *fs = (struct floppy_state *) data;
        struct swim3 __iomem *sw = fs->swim3;
+       unsigned long flags;
+
+       swim3_dbg("* settle timeout, state=%d\n", fs->state);
 
+       spin_lock_irqsave(&swim3_lock, flags);
        fs->timeout_pending = 0;
        if (swim3_readbit(fs, SEEK_COMPLETE)) {
                out_8(&sw->select, RELAX);
                fs->state = locating;
                act(fs);
-               return;
+               goto unlock;
        }
        out_8(&sw->select, RELAX);
        if (fs->settle_time < 2*HZ) {
                ++fs->settle_time;
                set_timeout(fs, 1, settle_timeout);
-               return;
+               goto unlock;
        }
-       printk(KERN_ERR "swim3: seek settle timeout\n");
-       swim3_end_request_cur(-EIO);
+       swim3_err("%s", "Seek settle timeout\n");
+       swim3_end_request(fs, -EIO, 0);
        fs->state = idle;
        start_request(fs);
+ unlock:
+       spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static void xfer_timeout(unsigned long data)
@@ -612,8 +648,12 @@ static void xfer_timeout(unsigned long data)
        struct floppy_state *fs = (struct floppy_state *) data;
        struct swim3 __iomem *sw = fs->swim3;
        struct dbdma_regs __iomem *dr = fs->dma;
+       unsigned long flags;
        int n;
 
+       swim3_dbg("* xfer timeout, state=%d\n", fs->state);
+
+       spin_lock_irqsave(&swim3_lock, flags);
        fs->timeout_pending = 0;
        out_le32(&dr->control, RUN << 16);
        /* We must wait a bit for dbdma to stop */
@@ -622,12 +662,13 @@ static void xfer_timeout(unsigned long data)
        out_8(&sw->intr_enable, 0);
        out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
        out_8(&sw->select, RELAX);
-       printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
-              (rq_data_dir(fd_req)==WRITE? "writ": "read"),
-              (long)blk_rq_pos(fd_req));
-       swim3_end_request_cur(-EIO);
+       swim3_err("Timeout %sing sector %ld\n",
+              (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
+              (long)blk_rq_pos(fs->cur_req));
+       swim3_end_request(fs, -EIO, 0);
        fs->state = idle;
        start_request(fs);
+       spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static irqreturn_t swim3_interrupt(int irq, void *dev_id)
@@ -638,12 +679,17 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
        int stat, resid;
        struct dbdma_regs __iomem *dr;
        struct dbdma_cmd *cp;
+       unsigned long flags;
+       struct request *req = fs->cur_req;
+
+       swim3_dbg("* interrupt, state=%d\n", fs->state);
 
+       spin_lock_irqsave(&swim3_lock, flags);
        intr = in_8(&sw->intr);
        err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
        if ((intr & ERROR_INTR) && fs->state != do_transfer)
-               printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
-                      fs->state, rq_data_dir(fd_req), intr, err);
+               swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
+                         fs->state, rq_data_dir(req), intr, err);
        switch (fs->state) {
        case locating:
                if (intr & SEEN_SECTOR) {
@@ -653,10 +699,10 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                        del_timer(&fs->timeout);
                        fs->timeout_pending = 0;
                        if (sw->ctrack == 0xff) {
-                               printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
+                               swim3_err("%s", "Seen sector but cyl=ff?\n");
                                fs->cur_cyl = -1;
                                if (fs->retries > 5) {
-                                       swim3_end_request_cur(-EIO);
+                                       swim3_end_request(fs, -EIO, 0);
                                        fs->state = idle;
                                        start_request(fs);
                                } else {
@@ -668,8 +714,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                        fs->cur_cyl = sw->ctrack;
                        fs->cur_sector = sw->csect;
                        if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
-                               printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
-                                      fs->expect_cyl, fs->cur_cyl);
+                               swim3_err("Expected cyl %d, got %d\n",
+                                         fs->expect_cyl, fs->cur_cyl);
                        fs->state = do_transfer;
                        act(fs);
                }
@@ -704,7 +750,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                fs->timeout_pending = 0;
                dr = fs->dma;
                cp = fs->dma_cmd;
-               if (rq_data_dir(fd_req) == WRITE)
+               if (rq_data_dir(req) == WRITE)
                        ++cp;
                /*
                 * Check that the main data transfer has finished.
@@ -729,31 +775,32 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                if (intr & ERROR_INTR) {
                        n = fs->scount - 1 - resid / 512;
                        if (n > 0) {
-                               blk_update_request(fd_req, 0, n << 9);
+                               blk_update_request(req, 0, n << 9);
                                fs->req_sector += n;
                        }
                        if (fs->retries < 5) {
                                ++fs->retries;
                                act(fs);
                        } else {
-                               printk("swim3: error %sing block %ld (err=%x)\n",
-                                      rq_data_dir(fd_req) == WRITE? "writ": "read",
-                                      (long)blk_rq_pos(fd_req), err);
-                               swim3_end_request_cur(-EIO);
+                               swim3_err("Error %sing block %ld (err=%x)\n",
+                                      rq_data_dir(req) == WRITE? "writ": "read",
+                                      (long)blk_rq_pos(req), err);
+                               swim3_end_request(fs, -EIO, 0);
                                fs->state = idle;
                        }
                } else {
                        if ((stat & ACTIVE) == 0 || resid != 0) {
                                /* musta been an error */
-                               printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
-                               printk(KERN_ERR "  state=%d, dir=%x, intr=%x, err=%x\n",
-                                      fs->state, rq_data_dir(fd_req), intr, err);
-                               swim3_end_request_cur(-EIO);
+                               swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
+                               swim3_err("  state=%d, dir=%x, intr=%x, err=%x\n",
+                                         fs->state, rq_data_dir(req), intr, err);
+                               swim3_end_request(fs, -EIO, 0);
                                fs->state = idle;
                                start_request(fs);
                                break;
                        }
-                       if (swim3_end_request(0, fs->scount << 9)) {
+                       fs->retries = 0;
+                       if (swim3_end_request(fs, 0, fs->scount << 9)) {
                                fs->req_sector += fs->scount;
                                if (fs->req_sector > fs->secpertrack) {
                                        fs->req_sector -= fs->secpertrack;
@@ -770,8 +817,9 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
                        start_request(fs);
                break;
        default:
-               printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
+               swim3_err("Don't know what to do in state %d\n", fs->state);
        }
+       spin_unlock_irqrestore(&swim3_lock, flags);
        return IRQ_HANDLED;
 }
 
@@ -781,26 +829,31 @@ static void fd_dma_interrupt(int irq, void *dev_id)
 }
 */
 
+/* Called under the mutex to grab exclusive access to a drive */
 static int grab_drive(struct floppy_state *fs, enum swim_state state,
                      int interruptible)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&fs->lock, flags);
-       if (fs->state != idle) {
+       swim3_dbg("%s", "-> grab drive\n");
+
+       spin_lock_irqsave(&swim3_lock, flags);
+       if (fs->state != idle && fs->state != available) {
                ++fs->wanted;
                while (fs->state != available) {
+                       spin_unlock_irqrestore(&swim3_lock, flags);
                        if (interruptible && signal_pending(current)) {
                                --fs->wanted;
-                               spin_unlock_irqrestore(&fs->lock, flags);
                                return -EINTR;
                        }
                        interruptible_sleep_on(&fs->wait);
+                       spin_lock_irqsave(&swim3_lock, flags);
                }
                --fs->wanted;
        }
        fs->state = state;
-       spin_unlock_irqrestore(&fs->lock, flags);
+       spin_unlock_irqrestore(&swim3_lock, flags);
+
        return 0;
 }
 
@@ -808,10 +861,12 @@ static void release_drive(struct floppy_state *fs)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&fs->lock, flags);
+       swim3_dbg("%s", "-> release drive\n");
+
+       spin_lock_irqsave(&swim3_lock, flags);
        fs->state = idle;
        start_request(fs);
-       spin_unlock_irqrestore(&fs->lock, flags);
+       spin_unlock_irqrestore(&swim3_lock, flags);
 }
 
 static int fd_eject(struct floppy_state *fs)
@@ -966,6 +1021,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
 {
        struct floppy_state *fs = disk->private_data;
        struct swim3 __iomem *sw = fs->swim3;
+
        mutex_lock(&swim3_mutex);
        if (fs->ref_count > 0 && --fs->ref_count == 0) {
                swim3_action(fs, MOTOR_OFF);
@@ -1031,30 +1087,48 @@ static const struct block_device_operations floppy_fops = {
        .revalidate_disk= floppy_revalidate,
 };
 
+static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
+{
+       struct floppy_state *fs = macio_get_drvdata(mdev);
+       struct swim3 __iomem *sw = fs->swim3;
+
+       if (!fs)
+               return;
+       if (mb_state != MB_FD)
+               return;
+
+       /* Clear state */
+       out_8(&sw->intr_enable, 0);
+       in_8(&sw->intr);
+       in_8(&sw->error);
+}
+
 static int swim3_add_device(struct macio_dev *mdev, int index)
 {
        struct device_node *swim = mdev->ofdev.dev.of_node;
        struct floppy_state *fs = &floppy_states[index];
        int rc = -EBUSY;
 
+       /* Do this first for message macros */
+       memset(fs, 0, sizeof(*fs));
+       fs->mdev = mdev;
+       fs->index = index;
+
        /* Check & Request resources */
        if (macio_resource_count(mdev) < 2) {
-               printk(KERN_WARNING "ifd%d: no address for %s\n",
-                      index, swim->full_name);
+               swim3_err("%s", "No address in device-tree\n");
                return -ENXIO;
        }
-       if (macio_irq_count(mdev) < 2) {
-               printk(KERN_WARNING "fd%d: no intrs for device %s\n",
-                       index, swim->full_name);
+       if (macio_irq_count(mdev) < 1) {
+               swim3_err("%s", "No interrupt in device-tree\n");
+               return -ENXIO;
        }
        if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
-               printk(KERN_ERR "fd%d: can't request mmio resource for %s\n",
-                      index, swim->full_name);
+               swim3_err("%s", "Can't request mmio resource\n");
                return -EBUSY;
        }
        if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
-               printk(KERN_ERR "fd%d: can't request dma resource for %s\n",
-                      index, swim->full_name);
+               swim3_err("%s", "Can't request dma resource\n");
                macio_release_resource(mdev, 0);
                return -EBUSY;
        }
@@ -1063,22 +1137,18 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
        if (mdev->media_bay == NULL)
                pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
        
-       memset(fs, 0, sizeof(*fs));
-       spin_lock_init(&fs->lock);
        fs->state = idle;
        fs->swim3 = (struct swim3 __iomem *)
                ioremap(macio_resource_start(mdev, 0), 0x200);
        if (fs->swim3 == NULL) {
-               printk("fd%d: couldn't map registers for %s\n",
-                      index, swim->full_name);
+               swim3_err("%s", "Couldn't map mmio registers\n");
                rc = -ENOMEM;
                goto out_release;
        }
        fs->dma = (struct dbdma_regs __iomem *)
                ioremap(macio_resource_start(mdev, 1), 0x200);
        if (fs->dma == NULL) {
-               printk("fd%d: couldn't map DMA for %s\n",
-                      index, swim->full_name);
+               swim3_err("%s", "Couldn't map dma registers\n");
                iounmap(fs->swim3);
                rc = -ENOMEM;
                goto out_release;
@@ -1090,31 +1160,25 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
        fs->secpercyl = 36;
        fs->secpertrack = 18;
        fs->total_secs = 2880;
-       fs->mdev = mdev;
        init_waitqueue_head(&fs->wait);
 
        fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
        memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
        st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
 
+       if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
+               swim3_mb_event(mdev, MB_FD);
+
        if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
-               printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n",
-                      index, fs->swim3_intr, swim->full_name);
+               swim3_err("%s", "Couldn't request interrupt\n");
                pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
                goto out_unmap;
                return -EBUSY;
        }
-/*
-       if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
-               printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
-                      fs->dma_intr);
-               return -EBUSY;
-       }
-*/
 
        init_timer(&fs->timeout);
 
-       printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
+       swim3_info("SWIM3 floppy controller %s\n",
                mdev->media_bay ? "in media bay" : "");
 
        return 0;
@@ -1132,41 +1196,42 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
 
 static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
 {
-       int i, rc;
        struct gendisk *disk;
+       int index, rc;
+
+       index = floppy_count++;
+       if (index >= MAX_FLOPPIES)
+               return -ENXIO;
 
        /* Add the drive */
-       rc = swim3_add_device(mdev, floppy_count);
+       rc = swim3_add_device(mdev, index);
        if (rc)
                return rc;
+       /* Now register that disk. Same comment about failure handling */
+       disk = disks[index] = alloc_disk(1);
+       if (disk == NULL)
+               return -ENOMEM;
+       disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
+       if (disk->queue == NULL) {
+               put_disk(disk);
+               return -ENOMEM;
+       }
+       disk->queue->queuedata = &floppy_states[index];
 
-       /* Now create the queue if not there yet */
-       if (swim3_queue == NULL) {
+       if (index == 0) {
                /* If we failed, there isn't much we can do as the driver is still
                 * too dumb to remove the device, just bail out
                 */
                if (register_blkdev(FLOPPY_MAJOR, "fd"))
                        return 0;
-               swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
-               if (swim3_queue == NULL) {
-                       unregister_blkdev(FLOPPY_MAJOR, "fd");
-                       return 0;
-               }
        }
 
-       /* Now register that disk. Same comment about failure handling */
-       i = floppy_count++;
-       disk = disks[i] = alloc_disk(1);
-       if (disk == NULL)
-               return 0;
-
        disk->major = FLOPPY_MAJOR;
-       disk->first_minor = i;
+       disk->first_minor = index;
        disk->fops = &floppy_fops;
-       disk->private_data = &floppy_states[i];
-       disk->queue = swim3_queue;
+       disk->private_data = &floppy_states[index];
        disk->flags |= GENHD_FL_REMOVABLE;
-       sprintf(disk->disk_name, "fd%d", i);
+       sprintf(disk->disk_name, "fd%d", index);
        set_capacity(disk, 2880);
        add_disk(disk);
 
@@ -1194,6 +1259,9 @@ static struct macio_driver swim3_driver =
                .of_match_table = swim3_match,
        },
        .probe          = swim3_attach,
+#ifdef CONFIG_PMAC_MEDIABAY
+       .mediabay_event = swim3_mb_event,
+#endif
 #if 0
        .suspend        = swim3_suspend,
        .resume         = swim3_resume,
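
The swim3 hunks above (drivers/block/swim3.c) replace the driver-wide request queue and the global fd_req pointer with one queue per floppy, using queue->queuedata to hand the owning struct floppy_state back to the request function. A minimal sketch of that wiring against the pre-blk-mq blk_init_queue() API used here; my_dev, my_request_fn and the shared lock are hypothetical stand-ins, not the driver's names:

    #include <linux/blkdev.h>
    #include <linux/errno.h>
    #include <linux/genhd.h>
    #include <linux/kernel.h>
    #include <linux/spinlock.h>

    struct my_dev {
            int index;                      /* illustrative per-device state */
    };

    static DEFINE_SPINLOCK(my_lock);        /* one lock shared by all queues, as swim3 does */

    static void my_request_fn(struct request_queue *q)
    {
            struct my_dev *dev = q->queuedata;      /* recover the owning device */
            struct request *req;

            while ((req = blk_fetch_request(q)) != NULL) {
                    pr_debug("request for fd%d\n", dev->index);
                    /* ... program the hardware for 'dev', then complete ... */
                    __blk_end_request_all(req, 0);
            }
    }

    static int my_register_disk(struct my_dev *dev, int index)
    {
            struct gendisk *disk = alloc_disk(1);

            if (!disk)
                    return -ENOMEM;
            disk->queue = blk_init_queue(my_request_fn, &my_lock);
            if (!disk->queue) {
                    put_disk(disk);
                    return -ENOMEM;
            }
            disk->queue->queuedata = dev;   /* the key step of the conversion */
            disk->private_data = dev;
            disk->first_minor = index;
            /* major, fops, disk_name, capacity ... then add_disk(disk) */
            return 0;
    }
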
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 11b41fd40c275b0f61aa77120dfab5b14219564a..5ccf142ef0b8139f47a95b75b13bdb5086b034a7 100644 (file)
@@ -188,7 +188,7 @@ config BT_MRVL
          The core driver to support Marvell Bluetooth devices.
 
          This driver is required if you want to support
-         Marvell Bluetooth devices, such as 8688/8787.
+         Marvell Bluetooth devices, such as 8688/8787/8797.
 
          Say Y here to compile Marvell Bluetooth driver
          into the kernel or say M to compile it as module.
@@ -201,8 +201,8 @@ config BT_MRVL_SDIO
          The driver for Marvell Bluetooth chipsets with SDIO interface.
 
          This driver is required if you want to use Marvell Bluetooth
-         devices with SDIO interface. Currently SD8688/SD8787 chipsets are
-         supported.
+         devices with SDIO interface. Currently SD8688/SD8787/SD8797
+         chipsets are supported.
 
          Say Y here to compile support for Marvell BT-over-SDIO driver
          into the kernel or say M to compile it as module.
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 9ef48167e2cf8ba64c790d4846a2d297e84c6fd0..27b74b0d547b540043fd318917e2cb5d6ebe0591 100644 (file)
@@ -65,7 +65,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = {
        .io_port_1 = 0x01,
        .io_port_2 = 0x02,
 };
-static const struct btmrvl_sdio_card_reg btmrvl_reg_8787 = {
+static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
        .cfg = 0x00,
        .host_int_mask = 0x02,
        .host_intstatus = 0x03,
@@ -92,7 +92,14 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
 static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
        .helper         = NULL,
        .firmware       = "mrvl/sd8787_uapsta.bin",
-       .reg            = &btmrvl_reg_8787,
+       .reg            = &btmrvl_reg_87xx,
+       .sd_blksz_fw_dl = 256,
+};
+
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
+       .helper         = NULL,
+       .firmware       = "mrvl/sd8797_uapsta.bin",
+       .reg            = &btmrvl_reg_87xx,
        .sd_blksz_fw_dl = 256,
 };
 
@@ -103,6 +110,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
        /* Marvell SD8787 Bluetooth device */
        { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
                        .driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
+       /* Marvell SD8797 Bluetooth device */
+       { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
+                       .driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
 
        { }     /* Terminating entry */
 };
@@ -1076,3 +1086,4 @@ MODULE_LICENSE("GPL v2");
 MODULE_FIRMWARE("sd8688_helper.bin");
 MODULE_FIRMWARE("sd8688.bin");
 MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index fe4ebc375b3dafd274da41c803cbb9ac37754146..eabc437ce5002a507b3fcf17973abc1b4baffc59 100644 (file)
@@ -777,9 +777,8 @@ skip_waking:
                usb_mark_last_busy(data->udev);
        }
 
-       usb_free_urb(urb);
-
 done:
+       usb_free_urb(urb);
        return err;
 }
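
The one-line movement above puts usb_free_urb() under the done: label, so the locally held URB reference is also dropped on the early goto done path instead of only on the fall-through path. The same single-exit cleanup shape, reduced to a generic sketch with hypothetical helpers:

    #include <linux/errno.h>
    #include <linux/slab.h>

    /* Hypothetical work steps, stubbed so the sketch stands alone. */
    static int step_one(void *buf) { return 0; }
    static int step_two(void *buf) { return 0; }

    static int my_operation(void)
    {
            void *buf;
            int err;

            buf = kmalloc(64, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            err = step_one(buf);
            if (err)
                    goto done;      /* the cleanup below still runs */

            err = step_two(buf);

    done:
            kfree(buf);             /* every path that allocated 'buf' reaches this */
            return err;
    }
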
 
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index c2917ffad2c2a311beeeaed7f8171196601d78ff..34767a6d7f42a354edca655d901c298e3883609a 100644 (file)
 #define IPMI_WDOG_SET_TIMER            0x24
 #define IPMI_WDOG_GET_TIMER            0x25
 
+#define IPMI_WDOG_TIMER_NOT_INIT_RESP  0x80
+
 /* These are here until the real ones get into the watchdog.h interface. */
 #ifndef WDIOC_GETTIMEOUT
 #define        WDIOC_GETTIMEOUT        _IOW(WATCHDOG_IOCTL_BASE, 20, int)
@@ -596,6 +598,7 @@ static int ipmi_heartbeat(void)
        struct kernel_ipmi_msg            msg;
        int                               rv;
        struct ipmi_system_interface_addr addr;
+       int                               timeout_retries = 0;
 
        if (ipmi_ignore_heartbeat)
                return 0;
@@ -616,6 +619,7 @@ static int ipmi_heartbeat(void)
 
        mutex_lock(&heartbeat_lock);
 
+restart:
        atomic_set(&heartbeat_tofree, 2);
 
        /*
@@ -653,7 +657,33 @@ static int ipmi_heartbeat(void)
        /* Wait for the heartbeat to be sent. */
        wait_for_completion(&heartbeat_wait);
 
-       if (heartbeat_recv_msg.msg.data[0] != 0) {
+       if (heartbeat_recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)  {
+               timeout_retries++;
+               if (timeout_retries > 3) {
+                       printk(KERN_ERR PFX ": Unable to restore the IPMI"
+                              " watchdog's settings, giving up.\n");
+                       rv = -EIO;
+                       goto out_unlock;
+               }
+
+               /*
+                * The timer was not initialized, that means the BMC was
+                * probably reset and lost the watchdog information.  Attempt
+                * to restore the timer's info.  Note that we still hold
+                * the heartbeat lock, to keep a heartbeat from happening
+                * in this process, so must say no heartbeat to avoid a
+                * deadlock on this mutex.
+                */
+               rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+               if (rv) {
+                       printk(KERN_ERR PFX ": Unable to send the command to"
+                              " set the watchdog's settings, giving up.\n");
+                       goto out_unlock;
+               }
+
+               /* We might need a new heartbeat, so do it now */
+               goto restart;
+       } else if (heartbeat_recv_msg.msg.data[0] != 0) {
                /*
                 * Got an error in the heartbeat response.  It was already
                 * reported in ipmi_wdog_msg_handler, but we should return
@@ -662,6 +692,7 @@ static int ipmi_heartbeat(void)
                rv = -EINVAL;
        }
 
+out_unlock:
        mutex_unlock(&heartbeat_lock);
 
        return rv;
@@ -922,11 +953,15 @@ static struct miscdevice ipmi_wdog_miscdev = {
 static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
                                  void                 *handler_data)
 {
-       if (msg->msg.data[0] != 0) {
+       if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER &&
+                       msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)
+               printk(KERN_INFO PFX "response: The IPMI controller appears"
+                      " to have been reset, will attempt to reinitialize"
+                      " the watchdog timer\n");
+       else if (msg->msg.data[0] != 0)
                printk(KERN_ERR PFX "response: Error %x on cmd %x\n",
                       msg->msg.data[0],
                       msg->msg.cmd);
-       }
 
        ipmi_free_recv_msg(msg);
 }
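
The heartbeat change above adds a bounded retry: when the BMC answers with the "timer not initialized" completion code, the driver reprograms the watchdog (with heartbeats suppressed, since the heartbeat mutex is still held) and then sends the heartbeat again. Stripped of the IPMI plumbing, the control flow is roughly the sketch below; both helpers are hypothetical stand-ins:

    #include <linux/errno.h>

    #define MY_TIMER_NOT_INIT_RESP  0x80    /* completion code used in this sketch */
    #define MY_MAX_RETRIES          3

    /* Hypothetical helpers standing in for the real message exchange. */
    static int my_send_heartbeat_and_wait(void) { return 0; }
    static int my_reprogram_watchdog(void)      { return 0; }

    static int my_heartbeat(void)
    {
            int retries = 0;
            int rc;

    restart:
            rc = my_send_heartbeat_and_wait();
            if (rc == MY_TIMER_NOT_INIT_RESP) {
                    if (++retries > MY_MAX_RETRIES)
                            return -EIO;            /* give up, as the driver does */

                    rc = my_reprogram_watchdog();   /* restore the lost settings */
                    if (rc)
                            return rc;

                    goto restart;                   /* new settings need a fresh heartbeat */
            }

            return rc ? -EINVAL : 0;
    }
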
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ab8f469f5cf8154a244a59759cbadac01279b1d9..5a99bb3f255ae7c34fedc540949ab0163a15d5a5 100644 (file)
@@ -124,7 +124,7 @@ config MV_XOR
 
 config MX3_IPU
        bool "MX3x Image Processing Unit support"
-       depends on ARCH_MX3
+       depends on SOC_IMX31 || SOC_IMX35
        select DMA_ENGINE
        default y
        help
@@ -216,7 +216,7 @@ config PCH_DMA
 
 config IMX_SDMA
        tristate "i.MX SDMA support"
-       depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+       depends on ARCH_MX25 || SOC_IMX31 || SOC_IMX35 || ARCH_MX5
        select DMA_ENGINE
        help
          Support the i.MX SDMA engine. This engine is integrated into
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index c811cb107904f64377a5c77f8dad8dbb5abc1bd2..2cce44a1d7d09c4af8ad76bb13a632779542d295 100644 (file)
@@ -746,6 +746,37 @@ static void __exit ibft_exit(void)
        ibft_cleanup();
 }
 
+#ifdef CONFIG_ACPI
+static const struct {
+       char *sign;
+} ibft_signs[] = {
+       /*
+        * One spec says "IBFT", the other says "iBFT". We have to check
+        * for both.
+        */
+       { ACPI_SIG_IBFT },
+       { "iBFT" },
+};
+
+static void __init acpi_find_ibft_region(void)
+{
+       int i;
+       struct acpi_table_header *table = NULL;
+
+       if (acpi_disabled)
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
+               acpi_get_table(ibft_signs[i].sign, 0, &table);
+               ibft_addr = (struct acpi_table_ibft *)table;
+       }
+}
+#else
+static void __init acpi_find_ibft_region(void)
+{
+}
+#endif
+
 /*
  * ibft_init() - creates sysfs tree entries for the iBFT data.
  */
@@ -753,9 +784,16 @@ static int __init ibft_init(void)
 {
        int rc = 0;
 
+       /*
+          As on UEFI systems the setup_arch()/find_ibft_region()
+          is called before ACPI tables are parsed and it only does
+          legacy finding.
+       */
+       if (!ibft_addr)
+               acpi_find_ibft_region();
+
        if (ibft_addr) {
-               printk(KERN_INFO "iBFT detected at 0x%llx.\n",
-                      (u64)isa_virt_to_bus(ibft_addr));
+               pr_info("iBFT detected.\n");
 
                rc = ibft_check_device();
                if (rc)
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index bfe723266fd89bb84726d88cf15c206c020ee707..4da4eb9ae92604c35349ebaeb39b4a612bac3ed6 100644 (file)
@@ -45,13 +45,6 @@ EXPORT_SYMBOL_GPL(ibft_addr);
 static const struct {
        char *sign;
 } ibft_signs[] = {
-#ifdef CONFIG_ACPI
-       /*
-        * One spec says "IBFT", the other says "iBFT". We have to check
-        * for both.
-        */
-       { ACPI_SIG_IBFT },
-#endif
        { "iBFT" },
        { "BIFT" },     /* Broadcom iSCSI Offload */
 };
@@ -62,14 +55,6 @@ static const struct {
 #define VGA_MEM 0xA0000 /* VGA buffer */
 #define VGA_SIZE 0x20000 /* 128kB */
 
-#ifdef CONFIG_ACPI
-static int __init acpi_find_ibft(struct acpi_table_header *header)
-{
-       ibft_addr = (struct acpi_table_ibft *)header;
-       return 0;
-}
-#endif /* CONFIG_ACPI */
-
 static int __init find_ibft_in_mem(void)
 {
        unsigned long pos;
@@ -94,6 +79,7 @@ static int __init find_ibft_in_mem(void)
                                 * the table cannot be valid. */
                                if (pos + len <= (IBFT_END-1)) {
                                        ibft_addr = (struct acpi_table_ibft *)virt;
+                                       pr_info("iBFT found at 0x%lx.\n", pos);
                                        goto done;
                                }
                        }
@@ -108,20 +94,12 @@ done:
  */
 unsigned long __init find_ibft_region(unsigned long *sizep)
 {
-#ifdef CONFIG_ACPI
-       int i;
-#endif
        ibft_addr = NULL;
 
-#ifdef CONFIG_ACPI
-       for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++)
-               acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft);
-#endif /* CONFIG_ACPI */
-
        /* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
         * only use ACPI for this */
 
-       if (!ibft_addr && !efi_enabled)
+       if (!efi_enabled)
                find_ibft_in_mem();
 
        if (ibft_addr) {
diff --git a/drivers/firmware/sigma.c b/drivers/firmware/sigma.c
index f10fc521951b17491348f0cd0fbb1f3013e31eae..1eedb6f7fdabe46efa082039818bef6f31fe1591 100644 (file)
 #include <linux/module.h>
 #include <linux/sigma.h>
 
-/* Return: 0==OK, <0==error, =1 ==no more actions */
+static size_t sigma_action_size(struct sigma_action *sa)
+{
+       size_t payload = 0;
+
+       switch (sa->instr) {
+       case SIGMA_ACTION_WRITEXBYTES:
+       case SIGMA_ACTION_WRITESINGLE:
+       case SIGMA_ACTION_WRITESAFELOAD:
+               payload = sigma_action_len(sa);
+               break;
+       default:
+               break;
+       }
+
+       payload = ALIGN(payload, 2);
+
+       return payload + sizeof(struct sigma_action);
+}
+
+/*
+ * Returns a negative error value in case of an error, 0 if processing of
+ * the firmware should be stopped after this action, 1 otherwise.
+ */
 static int
-process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
+process_sigma_action(struct i2c_client *client, struct sigma_action *sa)
 {
-       struct sigma_action *sa = (void *)(ssfw->fw->data + ssfw->pos);
        size_t len = sigma_action_len(sa);
-       int ret = 0;
+       int ret;
 
        pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__,
                sa->instr, sa->addr, len);
@@ -29,44 +50,50 @@ process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
        case SIGMA_ACTION_WRITEXBYTES:
        case SIGMA_ACTION_WRITESINGLE:
        case SIGMA_ACTION_WRITESAFELOAD:
-               if (ssfw->fw->size < ssfw->pos + len)
-                       return -EINVAL;
                ret = i2c_master_send(client, (void *)&sa->addr, len);
                if (ret < 0)
                        return -EINVAL;
                break;
-
        case SIGMA_ACTION_DELAY:
-               ret = 0;
                udelay(len);
                len = 0;
                break;
-
        case SIGMA_ACTION_END:
-               return 1;
-
+               return 0;
        default:
                return -EINVAL;
        }
 
-       /* when arrive here ret=0 or sent data */
-       ssfw->pos += sigma_action_size(sa, len);
-       return ssfw->pos == ssfw->fw->size;
+       return 1;
 }
 
 static int
 process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw)
 {
-       pr_debug("%s: processing %p\n", __func__, ssfw);
+       struct sigma_action *sa;
+       size_t size;
+       int ret;
+
+       while (ssfw->pos + sizeof(*sa) <= ssfw->fw->size) {
+               sa = (struct sigma_action *)(ssfw->fw->data + ssfw->pos);
+
+               size = sigma_action_size(sa);
+               ssfw->pos += size;
+               if (ssfw->pos > ssfw->fw->size || size == 0)
+                       break;
+
+               ret = process_sigma_action(client, sa);
 
-       while (1) {
-               int ret = process_sigma_action(client, ssfw);
                pr_debug("%s: action returned %i\n", __func__, ret);
-               if (ret == 1)
-                       return 0;
-               else if (ret)
+
+               if (ret <= 0)
                        return ret;
        }
+
+       if (ssfw->pos != ssfw->fw->size)
+               return -EINVAL;
+
+       return 0;
 }
 
 int process_sigma_firmware(struct i2c_client *client, const char *name)
@@ -89,16 +116,24 @@ int process_sigma_firmware(struct i2c_client *client, const char *name)
 
        /* then verify the header */
        ret = -EINVAL;
-       if (fw->size < sizeof(*ssfw_head))
+
+       /*
+        * Reject too small or unreasonable large files. The upper limit has been
+        * chosen a bit arbitrarily, but it should be enough for all practical
+        * purposes and having the limit makes it easier to avoid integer
+        * overflows later in the loading process.
+        */
+       if (fw->size < sizeof(*ssfw_head) || fw->size >= 0x4000000)
                goto done;
 
        ssfw_head = (void *)fw->data;
        if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic)))
                goto done;
 
-       crc = crc32(0, fw->data, fw->size);
+       crc = crc32(0, fw->data + sizeof(*ssfw_head),
+                       fw->size - sizeof(*ssfw_head));
        pr_debug("%s: crc=%x\n", __func__, crc);
-       if (crc != ssfw_head->crc)
+       if (crc != le32_to_cpu(ssfw_head->crc))
                goto done;
 
        ssfw.pos = sizeof(*ssfw_head);
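
The rework of the SigmaDSP firmware loader above makes every step bounds-checked: the file size is sanity-limited, the CRC covers only the payload after the header, and the action walker advances by a size derived from each action header and refuses to step past the end of the blob. The same walk pattern in a generic form, with an illustrative record layout rather than the real struct sigma_action:

    #include <linux/errno.h>
    #include <linux/kernel.h>       /* ALIGN() */
    #include <linux/types.h>

    struct my_record {
            u8 op;
            u8 len;                 /* payload bytes that follow the header */
            u8 payload[];
    };

    static int my_walk(const u8 *data, size_t size)
    {
            size_t pos = 0;

            while (pos + sizeof(struct my_record) <= size) {
                    const struct my_record *rec = (const void *)(data + pos);
                    size_t rec_size = sizeof(*rec) + ALIGN(rec->len, 2);

                    /* Advance first, then verify we did not run off the end. */
                    pos += rec_size;
                    if (pos > size)
                            return -EINVAL; /* truncated or lying record */

                    /* ... act on rec->op / rec->payload here ... */
            }

            return pos == size ? 0 : -EINVAL;
    }
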
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index dbcb0bcfd8dadf49ed156311594e704f81a9b9df..4e018d6a763996127cd370a6a3908273024f37a8 100644 (file)
@@ -18,7 +18,7 @@ obj-$(CONFIG_ARCH_DAVINCI)    += gpio-davinci.o
 obj-$(CONFIG_GPIO_EP93XX)      += gpio-ep93xx.o
 obj-$(CONFIG_GPIO_IT8761E)     += gpio-it8761e.o
 obj-$(CONFIG_GPIO_JANZ_TTL)    += gpio-janz-ttl.o
-obj-$(CONFIG_MACH_KS8695)      += gpio-ks8695.o
+obj-$(CONFIG_ARCH_KS8695)      += gpio-ks8695.o
 obj-$(CONFIG_GPIO_LANGWELL)    += gpio-langwell.o
 obj-$(CONFIG_ARCH_LPC32XX)     += gpio-lpc32xx.o
 obj-$(CONFIG_GPIO_MAX730X)     += gpio-max730x.o
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index 038f5eb8b13d0ce2ff508fb31facd4619f64dcb1..f8ce29ef9f883a047b1e551f29aac5ead956e65a 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/mfd/da9052/da9052.h>
 #include <linux/mfd/da9052/reg.h>
 #include <linux/mfd/da9052/pdata.h>
-#include <linux/mfd/da9052/gpio.h>
 
 #define DA9052_INPUT                           1
 #define DA9052_OUTPUT_OPENDRAIN                2
@@ -43,6 +42,9 @@
 #define DA9052_GPIO_MASK_UPPER_NIBBLE          0xF0
 #define DA9052_GPIO_MASK_LOWER_NIBBLE          0x0F
 #define DA9052_GPIO_NIBBLE_SHIFT               4
+#define DA9052_IRQ_GPI0                        16
+#define DA9052_GPIO_ODD_SHIFT                  7
+#define DA9052_GPIO_EVEN_SHIFT                 3
 
 struct da9052_gpio {
        struct da9052 *da9052;
@@ -104,33 +106,26 @@ static int da9052_gpio_get(struct gpio_chip *gc, unsigned offset)
 static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
 {
        struct da9052_gpio *gpio = to_da9052_gpio(gc);
-       unsigned char register_value = 0;
        int ret;
 
        if (da9052_gpio_port_odd(offset)) {
-               if (value) {
-                       register_value = DA9052_GPIO_ODD_PORT_MODE;
                        ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
                                                DA9052_GPIO_0_1_REG,
                                                DA9052_GPIO_ODD_PORT_MODE,
-                                               register_value);
+                                               value << DA9052_GPIO_ODD_SHIFT);
                        if (ret != 0)
                                dev_err(gpio->da9052->dev,
                                        "Failed to updated gpio odd reg,%d",
                                        ret);
-               }
        } else {
-               if (value) {
-                       register_value = DA9052_GPIO_EVEN_PORT_MODE;
                        ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
                                                DA9052_GPIO_0_1_REG,
                                                DA9052_GPIO_EVEN_PORT_MODE,
-                                               register_value);
+                                               value << DA9052_GPIO_EVEN_SHIFT);
                        if (ret != 0)
                                dev_err(gpio->da9052->dev,
                                        "Failed to updated gpio even reg,%d",
                                        ret);
-               }
        }
 }
 
@@ -201,9 +196,9 @@ static struct gpio_chip reference_gp __devinitdata = {
        .direction_input = da9052_gpio_direction_input,
        .direction_output = da9052_gpio_direction_output,
        .to_irq = da9052_gpio_to_irq,
-       .can_sleep = 1;
-       .ngpio = 16;
-       .base = -1;
+       .can_sleep = 1,
+       .ngpio = 16,
+       .base = -1,
 };
 
 static int __devinit da9052_gpio_probe(struct platform_device *pdev)
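
The da9052 hunk above fixes da9052_gpio_set() so the register update is always issued, passing value << shift as the new field contents under a fixed mask; previously the register was only written for a non-zero value, so clearing the output had no effect. The underlying masked read-modify-write idea, as a generic helper with hypothetical names (not the da9052 API):

    #include <linux/types.h>

    /* Replace the bits selected by 'mask' with the corresponding bits of 'val'. */
    static u8 masked_update(u8 old, u8 mask, u8 val)
    {
            return (old & ~mask) | (val & mask);
    }

    /*
     * Example with an output bit at position 7:
     *   masked_update(old, 0x80, 1 << 7)  -> bit set
     *   masked_update(old, 0x80, 0 << 7)  -> bit cleared
     * Passing the shifted value unconditionally is what lets the zero case
     * actually clear the bit instead of leaving the register untouched.
     */
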
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index ea8e73869250c24be2fd3e1fc35439fb3271f917..461958fc2264e91321a5bb80f7ba9cbed0a1a5bc 100644 (file)
@@ -332,6 +332,34 @@ static void ioh_irq_mask(struct irq_data *d)
                  &chip->reg->regs[chip->ch].imask);
 }
 
+static void ioh_irq_disable(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct ioh_gpio *chip = gc->private;
+       unsigned long flags;
+       u32 ien;
+
+       spin_lock_irqsave(&chip->spinlock, flags);
+       ien = ioread32(&chip->reg->regs[chip->ch].ien);
+       ien &= ~(1 << (d->irq - chip->irq_base));
+       iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+       spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
+static void ioh_irq_enable(struct irq_data *d)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+       struct ioh_gpio *chip = gc->private;
+       unsigned long flags;
+       u32 ien;
+
+       spin_lock_irqsave(&chip->spinlock, flags);
+       ien = ioread32(&chip->reg->regs[chip->ch].ien);
+       ien |= 1 << (d->irq - chip->irq_base);
+       iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+       spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
 static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
 {
        struct ioh_gpio *chip = dev_id;
@@ -339,7 +367,7 @@ static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
        int i, j;
        int ret = IRQ_NONE;
 
-       for (i = 0; i < 8; i++) {
+       for (i = 0; i < 8; i++, chip++) {
                reg_val = ioread32(&chip->reg->regs[i].istatus);
                for (j = 0; j < num_ports[i]; j++) {
                        if (reg_val & BIT(j)) {
@@ -370,6 +398,8 @@ static __devinit void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
        ct->chip.irq_mask = ioh_irq_mask;
        ct->chip.irq_unmask = ioh_irq_unmask;
        ct->chip.irq_set_type = ioh_irq_type;
+       ct->chip.irq_disable = ioh_irq_disable;
+       ct->chip.irq_enable = ioh_irq_enable;
 
        irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
                               IRQ_NOREQUEST | IRQ_NOPROBE, 0);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index ec3fcf0a7e125ce19b3604959717a00e0b981cc0..5cd04b65c55610f1e015f25738f7c9f3b2649df7 100644 (file)
@@ -132,6 +132,15 @@ static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val
        return 0;
 }
 
+static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+       /* GPIO 28..31 are input only on MPC5121 */
+       if (gpio >= 28)
+               return -EINVAL;
+
+       return mpc8xxx_gpio_dir_out(gc, gpio, val);
+}
+
 static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
 {
        struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@@ -340,11 +349,10 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
        mm_gc->save_regs = mpc8xxx_gpio_save_regs;
        gc->ngpio = MPC8XXX_GPIO_PINS;
        gc->direction_input = mpc8xxx_gpio_dir_in;
-       gc->direction_output = mpc8xxx_gpio_dir_out;
-       if (of_device_is_compatible(np, "fsl,mpc8572-gpio"))
-               gc->get = mpc8572_gpio_get;
-       else
-               gc->get = mpc8xxx_gpio_get;
+       gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
+               mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
+       gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
+               mpc8572_gpio_get : mpc8xxx_gpio_get;
        gc->set = mpc8xxx_gpio_set;
        gc->to_irq = mpc8xxx_gpio_to_irq;
 
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 093c90bd3c1d1b1d888ce2465831c95d125e0036..4102f63230fdd2950396e5bcf1013e1f169de42c 100644 (file)
@@ -238,10 +238,6 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
        int ret, irq, i;
        static DECLARE_BITMAP(init_irq, NR_IRQS);
 
-       pdata = dev->dev.platform_data;
-       if (pdata == NULL)
-               return -ENODEV;
-
        chip = kzalloc(sizeof(*chip), GFP_KERNEL);
        if (chip == NULL)
                return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 3969f7553fe75bf5fa877020e3ccea53f150dfd8..d2619d72ceceb16d7b88a33b09708219483019b4 100644 (file)
@@ -456,6 +456,30 @@ done:
 EXPORT_SYMBOL(drm_crtc_helper_set_mode);
 
 
+static int
+drm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+
+       /* Decouple all encoders and their attached connectors from this crtc */
+       list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+               if (encoder->crtc != crtc)
+                       continue;
+
+               list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+                       if (connector->encoder != encoder)
+                               continue;
+
+                       connector->encoder = NULL;
+               }
+       }
+
+       drm_helper_disable_unused_functions(dev);
+       return 0;
+}
+
 /**
  * drm_crtc_helper_set_config - set a new config from userspace
  * @crtc: CRTC to setup
@@ -510,8 +534,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                                (int)set->num_connectors, set->x, set->y);
        } else {
                DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
-               set->mode = NULL;
-               set->num_connectors = 0;
+               return drm_crtc_helper_disable(set->crtc);
        }
 
        dev = set->crtc->dev;
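
With drm_crtc_helper_disable() above, a mode-set request that carries no framebuffer (the "[NOFB]" branch) now detaches the CRTC's encoders and connectors and lets drm_helper_disable_unused_functions() switch the unused hardware off, rather than just clearing the fields of the request. From userspace such a request is typically issued through libdrm roughly as below; the device node path is an assumption and error handling is trimmed:

    #include <fcntl.h>
    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    /* Ask the kernel to switch the given CRTC off: no fb, no connectors, no mode. */
    int my_disable_crtc(uint32_t crtc_id)
    {
            int fd = open("/dev/dri/card0", O_RDWR);        /* assumed DRM node */
            int ret;

            if (fd < 0)
                    return -1;

            ret = drmModeSetCrtc(fd, crtc_id, 0 /* fb_id */, 0, 0, NULL, 0, NULL);

            drmClose(fd);
            return ret;
    }
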
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d09a6e02dc95374caf456923cba25efa94aafa94..004b048c5192979ce0e180d14824220eb271c9ab 100644 (file)
@@ -62,6 +62,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
        const struct intel_device_info *info = INTEL_INFO(dev);
 
        seq_printf(m, "gen: %d\n", info->gen);
+       seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
 #define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
        B(is_mobile);
        B(is_i85x);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a9533c54c93c766a73acc453d38377a9e0ef5a64..a9ae374861e788fe7ced3b6cf8f1f129c6e84c86 100644 (file)
@@ -1454,6 +1454,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
 
        diff1 = now - dev_priv->last_time1;
 
+       /* Prevent division-by-zero if we are asking too fast.
+        * Also, we don't get interesting results if we are polling
+        * faster than once in 10ms, so just return the saved value
+        * in such cases.
+        */
+       if (diff1 <= 10)
+               return dev_priv->chipset_power;
+
        count1 = I915_READ(DMIEC);
        count2 = I915_READ(DDREC);
        count3 = I915_READ(CSIEC);
@@ -1484,6 +1492,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
        dev_priv->last_count1 = total_count;
        dev_priv->last_time1 = now;
 
+       dev_priv->chipset_power = ret;
+
        return ret;
 }
 
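The i915_chipset_val() hunk above caches the computed value in dev_priv->chipset_power and hands it back when the function is polled again within 10 ms, which both avoids dividing by a near-zero time delta and filters out meaningless samples. A standalone sketch of that rate-limited read, assuming hypothetical names:

/* Rate-limited read: if polled faster than the sampling interval,
 * return the last computed value instead of recomputing. */
#include <stdio.h>
#include <time.h>

static long last_ms;
static double cached_value;

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static double read_power(double raw_counter)
{
	long now = now_ms();
	long diff = now - last_ms;

	if (diff <= 10)				/* polled too fast: reuse cache */
		return cached_value;

	cached_value = raw_counter / diff;	/* safe: diff > 10 */
	last_ms = now;
	return cached_value;
}

int main(void)
{
	printf("%f\n", read_power(1000.0));
	printf("%f\n", read_power(1000.0));	/* likely returns the cached value */
	return 0;
}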
index 15bfa9145d2b7f007b21f54ecee19b8b502648f2..a1103fc6597dde91958658d4d6faaabaec38e803 100644 (file)
@@ -58,15 +58,15 @@ module_param_named(powersave, i915_powersave, int, 0600);
 MODULE_PARM_DESC(powersave,
                "Enable powersavings, fbc, downclocking, etc. (default: true)");
 
-unsigned int i915_semaphores __read_mostly = 0;
+int i915_semaphores __read_mostly = -1;
 module_param_named(semaphores, i915_semaphores, int, 0600);
 MODULE_PARM_DESC(semaphores,
-               "Use semaphores for inter-ring sync (default: false)");
+               "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
 
-unsigned int i915_enable_rc6 __read_mostly = 0;
+int i915_enable_rc6 __read_mostly = -1;
 module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
 MODULE_PARM_DESC(i915_enable_rc6,
-               "Enable power-saving render C-state 6 (default: true)");
+               "Enable power-saving render C-state 6 (default: -1 (use per-chip default))");
 
 int i915_enable_fbc __read_mostly = -1;
 module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
@@ -328,7 +328,7 @@ void intel_detect_pch(struct drm_device *dev)
        }
 }
 
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
        int count;
 
@@ -344,6 +344,22 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
                udelay(10);
 }
 
+void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+       int count;
+
+       count = 0;
+       while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
+               udelay(10);
+
+       I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+       POSTING_READ(FORCEWAKE_MT);
+
+       count = 0;
+       while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
+               udelay(10);
+}
+
 /*
  * Generally this is called implicitly by the register read function. However,
  * if some sequence requires the GT to not power down then this function should
@@ -356,15 +372,21 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 
        /* Forcewake is atomic in case we get in here without the lock */
        if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
-               __gen6_gt_force_wake_get(dev_priv);
+               dev_priv->display.force_wake_get(dev_priv);
 }
 
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
        I915_WRITE_NOTRACE(FORCEWAKE, 0);
        POSTING_READ(FORCEWAKE);
 }
 
+void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+       I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+       POSTING_READ(FORCEWAKE_MT);
+}
+
 /*
  * see gen6_gt_force_wake_get()
  */
@@ -373,7 +395,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
        WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
        if (atomic_dec_and_test(&dev_priv->forcewake_count))
-               __gen6_gt_force_wake_put(dev_priv);
+               dev_priv->display.force_wake_put(dev_priv);
 }
 
 void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -903,8 +925,9 @@ MODULE_LICENSE("GPL and additional rights");
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
        (((dev_priv)->info->gen >= 6) && \
-       ((reg) < 0x40000) && \
-       ((reg) != FORCEWAKE))
+        ((reg) < 0x40000) &&            \
+        ((reg) != FORCEWAKE) &&         \
+        ((reg) != ECOBUS))
 
 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
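The i915_drv.c changes above add a multi-threaded (MT) variant of the forcewake handshake and route gen6_gt_force_wake_get/put through function pointers in dev_priv->display, with an atomic refcount so only the first get and the last put touch the hardware. A rough userspace model of that dispatch, not the i915 API:

/* Refcounted get/put dispatching to one of two backends; the backends
 * stand in for the legacy and MT register protocols. Illustrative only. */
#include <stdio.h>
#include <stdatomic.h>

struct gt {
	atomic_int forcewake_count;
	void (*force_wake_get)(void);
	void (*force_wake_put)(void);
};

static void legacy_get(void) { puts("write FORCEWAKE=1, wait for ACK"); }
static void legacy_put(void) { puts("write FORCEWAKE=0"); }
static void mt_get(void)     { puts("set FORCEWAKE_MT bit, wait for MT ACK"); }
static void mt_put(void)     { puts("clear FORCEWAKE_MT bit"); }

static void gt_force_wake_get(struct gt *gt)
{
	if (atomic_fetch_add(&gt->forcewake_count, 1) == 0)
		gt->force_wake_get();		/* first user powers the GT up */
}

static void gt_force_wake_put(struct gt *gt)
{
	if (atomic_fetch_sub(&gt->forcewake_count, 1) == 1)
		gt->force_wake_put();		/* last user lets it power down */
}

int main(void)
{
	struct gt gt = { 0, mt_get, mt_put };	/* e.g. chosen after probing ECOBUS */

	gt_force_wake_get(&gt);
	gt_force_wake_get(&gt);			/* nested: no extra hardware write */
	gt_force_wake_put(&gt);
	gt_force_wake_put(&gt);
	return 0;
}

(The unused legacy_get/legacy_put pair shows the alternative assignment.)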
index 4a9c1b97980493b47c1f4e03a7e299f800a7f4e0..554bef7a3b9c8db39c5963fcc72ba1d411f31125 100644 (file)
@@ -107,6 +107,7 @@ struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
 struct opregion_asle;
+struct drm_i915_private;
 
 struct intel_opregion {
        struct opregion_header *header;
@@ -221,6 +222,8 @@ struct drm_i915_display_funcs {
                          struct drm_i915_gem_object *obj);
        int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                            int x, int y);
+       void (*force_wake_get)(struct drm_i915_private *dev_priv);
+       void (*force_wake_put)(struct drm_i915_private *dev_priv);
        /* clock updates for mode set */
        /* cursor updates */
        /* render clock increase/decrease */
@@ -710,6 +713,7 @@ typedef struct drm_i915_private {
 
        u64 last_count1;
        unsigned long last_time1;
+       unsigned long chipset_power;
        u64 last_count2;
        struct timespec last_time2;
        unsigned long gfx_power;
@@ -998,11 +1002,11 @@ extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc __always_unused;
 extern int i915_panel_ignore_lid __read_mostly;
 extern unsigned int i915_powersave __read_mostly;
-extern unsigned int i915_semaphores __read_mostly;
+extern int i915_semaphores __read_mostly;
 extern unsigned int i915_lvds_downclock __read_mostly;
 extern int i915_panel_use_ssc __read_mostly;
 extern int i915_vbt_sdvo_panel_type __read_mostly;
-extern unsigned int i915_enable_rc6 __read_mostly;
+extern int i915_enable_rc6 __read_mostly;
 extern int i915_enable_fbc __read_mostly;
 extern bool i915_enable_hangcheck __read_mostly;
 
@@ -1308,6 +1312,11 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val);
 extern void intel_detect_pch(struct drm_device *dev);
 extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
 
+extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+
 /* overlay */
 #ifdef CONFIG_DEBUG_FS
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -1352,8 +1361,9 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 /* We give fast paths for the really cool registers */
 #define NEEDS_FORCE_WAKE(dev_priv, reg) \
        (((dev_priv)->info->gen >= 6) && \
-       ((reg) < 0x40000) && \
-       ((reg) != FORCEWAKE))
+        ((reg) < 0x40000) &&            \
+        ((reg) != FORCEWAKE) &&         \
+        ((reg) != ECOBUS))
 
 #define __i915_read(x, y) \
        u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
index 3693e83a97f325f95276b4e9c009ac48569174ed..b9da8900ae4eaef10c12f2d68da79a226f9f3260 100644 (file)
@@ -32,6 +32,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include <linux/dma_remapping.h>
 
 struct change_domains {
        uint32_t invalidate_domains;
@@ -746,6 +747,22 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
        return 0;
 }
 
+static bool
+intel_enable_semaphores(struct drm_device *dev)
+{
+       if (INTEL_INFO(dev)->gen < 6)
+               return 0;
+
+       if (i915_semaphores >= 0)
+               return i915_semaphores;
+
+       /* Disable semaphores on SNB */
+       if (INTEL_INFO(dev)->gen == 6)
+               return 0;
+
+       return 1;
+}
+
 static int
 i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
                               struct intel_ring_buffer *to)
@@ -758,7 +775,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
                return 0;
 
        /* XXX gpu semaphores are implicated in various hard hangs on SNB */
-       if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
+       if (!intel_enable_semaphores(obj->base.dev))
                return i915_gem_object_wait_rendering(obj);
 
        idx = intel_ring_sync_index(from, to);
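intel_enable_semaphores() above resolves the new tri-state module parameter: gen < 6 hardware never uses semaphores, an explicit 0 or 1 from the user wins, and -1 falls back to a per-chip default that keeps them off on Sandybridge. The same convention as a standalone sketch:

/* -1/0/1 parameter resolution: non-negative means explicit override,
 * -1 means "use the per-chip default". Hypothetical standalone code. */
#include <stdio.h>

static int param_semaphores = -1;	/* module parameter default */

static int enable_semaphores(int gen)
{
	if (gen < 6)
		return 0;			/* no ring semaphores at all */
	if (param_semaphores >= 0)
		return param_semaphores;	/* explicit user choice wins */
	if (gen == 6)
		return 0;			/* known-problematic: default off */
	return 1;
}

int main(void)
{
	printf("gen6 default: %d\n", enable_semaphores(6));
	param_semaphores = 1;
	printf("gen6 forced on: %d\n", enable_semaphores(6));
	return 0;
}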
index b080cc82400153a68aed7cca1f3319de361a1f93..a26d5b0a36908c3d7ddd5c9c7407921fd2826db9 100644 (file)
 /* or SDVOB */
 #define HDMIB   0xe1140
 #define  PORT_ENABLE    (1 << 31)
-#define  TRANSCODER_A   (0)
-#define  TRANSCODER_B   (1 << 30)
-#define  TRANSCODER(pipe)      ((pipe) << 30)
-#define  TRANSCODER_MASK   (1 << 30)
+#define  TRANSCODER(pipe)       ((pipe) << 30)
+#define  TRANSCODER_CPT(pipe)   ((pipe) << 29)
+#define  TRANSCODER_MASK        (1 << 30)
+#define  TRANSCODER_MASK_CPT    (3 << 29)
 #define  COLOR_FORMAT_8bpc      (0)
 #define  COLOR_FORMAT_12bpc     (3 << 26)
 #define  SDVOB_HOTPLUG_ENABLE   (1 << 23)
 #define  EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B   (0x38<<22)
 #define  EDP_LINK_TRAIN_VOL_EMP_MASK_SNB       (0x3f<<22)
 
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB           (0x24 <<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB         (0x2a <<22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB           (0x2f <<22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB           (0x30 <<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB         (0x36 <<22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB           (0x38 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB         (0x33 <<22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB           (0x00 <<22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB          (0x20 <<22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB         (0x02 <<22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB                (0x22 <<22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB          (0x23 <<22)
+
+#define  EDP_LINK_TRAIN_VOL_EMP_MASK_IVB       (0x3f<<22)
+
 #define  FORCEWAKE                             0xA18C
 #define  FORCEWAKE_ACK                         0x130090
+#define  FORCEWAKE_MT                          0xa188 /* multi-threaded */
+#define  FORCEWAKE_MT_ACK                      0x130040
+#define  ECOBUS                                        0xa180
+#define    FORCEWAKE_MT_ENABLE                 (1<<5)
 
 #define  GT_FIFO_FREE_ENTRIES                  0x120008
 #define    GT_FIFO_NUM_RESERVED_ENTRIES                20
index e77a863a3833f63511a98991508b2f5c1a6ee0d1..daa5743ccbd63ad87a45697bf49b13fb5538d9e1 100644 (file)
@@ -38,8 +38,8 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "drm_dp_helper.h"
-
 #include "drm_crtc_helper.h"
+#include <linux/dma_remapping.h>
 
 #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
 
@@ -4670,6 +4670,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
 /**
  * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
  * @crtc: CRTC structure
+ * @mode: requested mode
  *
  * A pipe may be connected to one or more outputs.  Based on the depth of the
  * attached framebuffer, choose a good color depth to use on the pipe.
@@ -4681,13 +4682,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
  *    HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
  *    Displays may support a restricted set as well, check EDID and clamp as
  *      appropriate.
+ *    DP may want to dither down to 6bpc to fit larger modes
  *
  * RETURNS:
  * Dithering requirement (i.e. false if display bpc and pipe bpc match,
  * true if they don't match).
  */
 static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
-                                        unsigned int *pipe_bpp)
+                                        unsigned int *pipe_bpp,
+                                        struct drm_display_mode *mode)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4758,6 +4761,11 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
                }
        }
 
+       if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+               DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
+               display_bpc = 6;
+       }
+
        /*
         * We could just drive the pipe at the highest bpc all the time and
         * enable dithering as needed, but that costs bandwidth.  So choose
@@ -5019,6 +5027,16 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
                        pipeconf &= ~PIPECONF_DOUBLE_WIDE;
        }
 
+       /* default to 8bpc */
+       pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+       if (is_dp) {
+               if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+                       pipeconf |= PIPECONF_BPP_6 |
+                                   PIPECONF_DITHER_EN |
+                                   PIPECONF_DITHER_TYPE_SP;
+               }
+       }
+
        dpll |= DPLL_VCO_ENABLE;
 
        DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
@@ -5480,7 +5498,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        /* determine panel color depth */
        temp = I915_READ(PIPECONF(pipe));
        temp &= ~PIPE_BPC_MASK;
-       dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
+       dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
        switch (pipe_bpp) {
        case 18:
                temp |= PIPE_6BPC;
@@ -7189,11 +7207,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        work->old_fb_obj = intel_fb->obj;
        INIT_WORK(&work->work, intel_unpin_work_fn);
 
+       ret = drm_vblank_get(dev, intel_crtc->pipe);
+       if (ret)
+               goto free_work;
+
        /* We borrow the event spin lock for protecting unpin_work */
        spin_lock_irqsave(&dev->event_lock, flags);
        if (intel_crtc->unpin_work) {
                spin_unlock_irqrestore(&dev->event_lock, flags);
                kfree(work);
+               drm_vblank_put(dev, intel_crtc->pipe);
 
                DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
                return -EBUSY;
@@ -7212,10 +7235,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
        crtc->fb = fb;
 
-       ret = drm_vblank_get(dev, intel_crtc->pipe);
-       if (ret)
-               goto cleanup_objs;
-
        work->pending_flip_obj = obj;
 
        work->enable_stall_check = true;
@@ -7238,7 +7257,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 cleanup_pending:
        atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
-cleanup_objs:
        drm_gem_object_unreference(&work->old_fb_obj->base);
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);
@@ -7247,6 +7265,8 @@ cleanup_objs:
        intel_crtc->unpin_work = NULL;
        spin_unlock_irqrestore(&dev->event_lock, flags);
 
+       drm_vblank_put(dev, intel_crtc->pipe);
+free_work:
        kfree(work);
 
        return ret;
@@ -7887,6 +7907,31 @@ void intel_init_emon(struct drm_device *dev)
        dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
+static bool intel_enable_rc6(struct drm_device *dev)
+{
+       /*
+        * Respect the kernel parameter if it is set
+        */
+       if (i915_enable_rc6 >= 0)
+               return i915_enable_rc6;
+
+       /*
+        * Disable RC6 on Ironlake
+        */
+       if (INTEL_INFO(dev)->gen == 5)
+               return 0;
+
+       /*
+        * Disable rc6 on Sandybridge
+        */
+       if (INTEL_INFO(dev)->gen == 6) {
+               DRM_DEBUG_DRIVER("Sandybridge: RC6 disabled\n");
+               return 0;
+       }
+       DRM_DEBUG_DRIVER("RC6 enabled\n");
+       return 1;
+}
+
 void gen6_enable_rps(struct drm_i915_private *dev_priv)
 {
        u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@@ -7923,7 +7968,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
        I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
        I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
-       if (i915_enable_rc6)
+       if (intel_enable_rc6(dev_priv->dev))
                rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
                        GEN6_RC_CTL_RC6_ENABLE;
 
@@ -8372,7 +8417,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
        /* rc6 disabled by default due to repeated reports of hanging during
         * boot and resume.
         */
-       if (!i915_enable_rc6)
+       if (!intel_enable_rc6(dev))
                return;
 
        mutex_lock(&dev->struct_mutex);
@@ -8491,6 +8536,28 @@ static void intel_init_display(struct drm_device *dev)
 
        /* For FIFO watermark updates */
        if (HAS_PCH_SPLIT(dev)) {
+               dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
+               dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
+
+               /* IVB configs may use multi-threaded forcewake */
+               if (IS_IVYBRIDGE(dev)) {
+                       u32     ecobus;
+
+                       mutex_lock(&dev->struct_mutex);
+                       __gen6_gt_force_wake_mt_get(dev_priv);
+                       ecobus = I915_READ(ECOBUS);
+                       __gen6_gt_force_wake_mt_put(dev_priv);
+                       mutex_unlock(&dev->struct_mutex);
+
+                       if (ecobus & FORCEWAKE_MT_ENABLE) {
+                               DRM_DEBUG_KMS("Using MT version of forcewake\n");
+                               dev_priv->display.force_wake_get =
+                                       __gen6_gt_force_wake_mt_get;
+                               dev_priv->display.force_wake_put =
+                                       __gen6_gt_force_wake_mt_put;
+                       }
+               }
+
                if (HAS_PCH_IBX(dev))
                        dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
                else if (HAS_PCH_CPT(dev))
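Among the intel_display.c hunks above, the page-flip change takes the vblank reference before the unpin_work check and adds a matching drm_vblank_put() to each failure path. The overall unwind shape, as a standalone sketch with stand-in helpers rather than the i915 code:

/* Acquire resources in order, release them in reverse on failure. */
#include <stdio.h>
#include <stdlib.h>

static int get_vblank(void)  { puts("vblank ref++"); return 0; }
static void put_vblank(void) { puts("vblank ref--"); }

static int queue_flip(int already_busy)
{
	void *work = malloc(16);
	int ret;

	if (!work)
		return -12;			/* -ENOMEM */

	ret = get_vblank();
	if (ret)
		goto free_work;

	if (already_busy) {			/* a flip is still pending */
		ret = -16;			/* -EBUSY */
		goto put_vblank_ref;
	}

	puts("flip queued");
	return 0;				/* work and the ref are released later */

put_vblank_ref:
	put_vblank();
free_work:
	free(work);
	return ret;
}

int main(void)
{
	queue_flip(1);
	return 0;
}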
index 4d0358fad93795c31dd9d4229daa3c7c2ecedcc5..92b041b66e491cbd7775e58458f03fe9b7eee303 100644 (file)
@@ -208,13 +208,15 @@ intel_dp_link_clock(uint8_t link_bw)
  */
 
 static int
-intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock)
+intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
 {
        struct drm_crtc *crtc = intel_dp->base.base.crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int bpp = 24;
 
-       if (intel_crtc)
+       if (check_bpp)
+               bpp = check_bpp;
+       else if (intel_crtc)
                bpp = intel_crtc->bpp;
 
        return (pixel_clock * bpp + 9) / 10;
@@ -233,6 +235,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
        int max_lanes = intel_dp_max_lane_count(intel_dp);
+       int max_rate, mode_rate;
 
        if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
                if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -242,9 +245,17 @@ intel_dp_mode_valid(struct drm_connector *connector,
                        return MODE_PANEL;
        }
 
-       if (intel_dp_link_required(intel_dp, mode->clock)
-           > intel_dp_max_data_rate(max_link_clock, max_lanes))
-               return MODE_CLOCK_HIGH;
+       mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
+       max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
+       if (mode_rate > max_rate) {
+                       mode_rate = intel_dp_link_required(intel_dp,
+                                                          mode->clock, 18);
+                       if (mode_rate > max_rate)
+                               return MODE_CLOCK_HIGH;
+                       else
+                               mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
+       }
 
        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;
@@ -362,8 +373,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
         * clock divider.
         */
        if (is_cpu_edp(intel_dp)) {
-               if (IS_GEN6(dev))
-                       aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
+               if (IS_GEN6(dev) || IS_GEN7(dev))
+                       aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
                else
                        aux_clock_divider = 225; /* eDP input clock at 450Mhz */
        } else if (HAS_PCH_SPLIT(dev))
@@ -672,6 +683,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
        int lane_count, clock;
        int max_lane_count = intel_dp_max_lane_count(intel_dp);
        int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+       int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
        static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
        if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -689,7 +701,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
                for (clock = 0; clock <= max_clock; clock++) {
                        int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
 
-                       if (intel_dp_link_required(intel_dp, mode->clock)
+                       if (intel_dp_link_required(intel_dp, mode->clock, bpp)
                                        <= link_avail) {
                                intel_dp->link_bw = bws[clock];
                                intel_dp->lane_count = lane_count;
@@ -817,10 +829,11 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
        }
 
        /*
-        * There are three kinds of DP registers:
+        * There are four kinds of DP registers:
         *
         *      IBX PCH
-        *      CPU
+        *      SNB CPU
+        *      IVB CPU
         *      CPT PCH
         *
         * IBX PCH and CPU are the same for almost everything,
@@ -873,7 +886,25 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 
        /* Split out the IBX/CPU vs CPT settings */
 
-       if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+       if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+               if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+                       intel_dp->DP |= DP_SYNC_HS_HIGH;
+               if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+                       intel_dp->DP |= DP_SYNC_VS_HIGH;
+               intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+               if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+                       intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+               intel_dp->DP |= intel_crtc->pipe << 29;
+
+               /* don't miss out required setting for eDP */
+               intel_dp->DP |= DP_PLL_ENABLE;
+               if (adjusted_mode->clock < 200000)
+                       intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+               else
+                       intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+       } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
                intel_dp->DP |= intel_dp->color_range;
 
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1375,34 +1406,59 @@ static char     *link_train_names[] = {
  * These are source-specific values; current Intel hardware supports
  * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
  */
-#define I830_DP_VOLTAGE_MAX        DP_TRAIN_VOLTAGE_SWING_800
-#define I830_DP_VOLTAGE_MAX_CPT            DP_TRAIN_VOLTAGE_SWING_1200
 
 static uint8_t
-intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+intel_dp_voltage_max(struct intel_dp *intel_dp)
 {
-       switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
-       case DP_TRAIN_VOLTAGE_SWING_400:
-               return DP_TRAIN_PRE_EMPHASIS_6;
-       case DP_TRAIN_VOLTAGE_SWING_600:
-               return DP_TRAIN_PRE_EMPHASIS_6;
-       case DP_TRAIN_VOLTAGE_SWING_800:
-               return DP_TRAIN_PRE_EMPHASIS_3_5;
-       case DP_TRAIN_VOLTAGE_SWING_1200:
-       default:
-               return DP_TRAIN_PRE_EMPHASIS_0;
+       struct drm_device *dev = intel_dp->base.base.dev;
+
+       if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
+               return DP_TRAIN_VOLTAGE_SWING_800;
+       else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+               return DP_TRAIN_VOLTAGE_SWING_1200;
+       else
+               return DP_TRAIN_VOLTAGE_SWING_800;
+}
+
+static uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+{
+       struct drm_device *dev = intel_dp->base.base.dev;
+
+       if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+               switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+               case DP_TRAIN_VOLTAGE_SWING_400:
+                       return DP_TRAIN_PRE_EMPHASIS_6;
+               case DP_TRAIN_VOLTAGE_SWING_600:
+               case DP_TRAIN_VOLTAGE_SWING_800:
+                       return DP_TRAIN_PRE_EMPHASIS_3_5;
+               default:
+                       return DP_TRAIN_PRE_EMPHASIS_0;
+               }
+       } else {
+               switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+               case DP_TRAIN_VOLTAGE_SWING_400:
+                       return DP_TRAIN_PRE_EMPHASIS_6;
+               case DP_TRAIN_VOLTAGE_SWING_600:
+                       return DP_TRAIN_PRE_EMPHASIS_6;
+               case DP_TRAIN_VOLTAGE_SWING_800:
+                       return DP_TRAIN_PRE_EMPHASIS_3_5;
+               case DP_TRAIN_VOLTAGE_SWING_1200:
+               default:
+                       return DP_TRAIN_PRE_EMPHASIS_0;
+               }
        }
 }
 
 static void
 intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
-       struct drm_device *dev = intel_dp->base.base.dev;
        uint8_t v = 0;
        uint8_t p = 0;
        int lane;
        uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
-       int voltage_max;
+       uint8_t voltage_max;
+       uint8_t preemph_max;
 
        for (lane = 0; lane < intel_dp->lane_count; lane++) {
                uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
@@ -1414,15 +1470,13 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
                        p = this_p;
        }
 
-       if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
-               voltage_max = I830_DP_VOLTAGE_MAX_CPT;
-       else
-               voltage_max = I830_DP_VOLTAGE_MAX;
+       voltage_max = intel_dp_voltage_max(intel_dp);
        if (v >= voltage_max)
                v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
 
-       if (p >= intel_dp_pre_emphasis_max(v))
-               p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+       preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+       if (p >= preemph_max)
+               p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 
        for (lane = 0; lane < 4; lane++)
                intel_dp->train_set[lane] = v | p;
@@ -1494,6 +1548,37 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
        }
 }
 
+/* Gen7's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen7_edp_signal_levels(uint8_t train_set)
+{
+       int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+                                        DP_TRAIN_PRE_EMPHASIS_MASK);
+       switch (signal_levels) {
+       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+               return EDP_LINK_TRAIN_400MV_0DB_IVB;
+       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+               return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+       case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+               return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+               return EDP_LINK_TRAIN_600MV_0DB_IVB;
+       case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+               return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+               return EDP_LINK_TRAIN_800MV_0DB_IVB;
+       case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+               return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+       default:
+               DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+                             "0x%x\n", signal_levels);
+               return EDP_LINK_TRAIN_500MV_0DB_IVB;
+       }
+}
+
 static uint8_t
 intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
                      int lane)
@@ -1599,7 +1684,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
                                  DP_LINK_CONFIGURATION_SIZE);
 
        DP |= DP_PORT_EN;
-       if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+
+       if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
                DP &= ~DP_LINK_TRAIN_MASK_CPT;
        else
                DP &= ~DP_LINK_TRAIN_MASK;
@@ -1613,7 +1699,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
                uint8_t     link_status[DP_LINK_STATUS_SIZE];
                uint32_t    signal_levels;
 
-               if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+
+               if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+                       signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+                       DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+               } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
                        signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
                } else {
@@ -1622,7 +1712,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
                        DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
                }
 
-               if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+               if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
                        reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
                else
                        reg = DP | DP_LINK_TRAIN_PAT_1;
@@ -1703,7 +1793,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                        break;
                }
 
-               if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+               if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+                       signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+                       DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+               } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
                        signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
                        DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
                } else {
@@ -1711,7 +1804,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                        DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
                }
 
-               if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+               if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
                        reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
                else
                        reg = DP | DP_LINK_TRAIN_PAT_2;
@@ -1752,7 +1845,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
                ++tries;
        }
 
-       if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+       if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
                reg = DP | DP_LINK_TRAIN_OFF_CPT;
        else
                reg = DP | DP_LINK_TRAIN_OFF;
@@ -1782,7 +1875,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
                udelay(100);
        }
 
-       if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) {
+       if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
                DP &= ~DP_LINK_TRAIN_MASK_CPT;
                I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
        } else {
@@ -1794,7 +1887,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
        msleep(17);
 
        if (is_edp(intel_dp)) {
-               if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+               if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
                        DP |= DP_LINK_TRAIN_OFF_CPT;
                else
                        DP |= DP_LINK_TRAIN_OFF;
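intel_dp_mode_valid() above now retries its bandwidth check at 18 bpp (6 bpc) when the mode does not fit at 24 bpp, and tags the mode with INTEL_MODE_DP_FORCE_6BPC if that is the only way it fits; intel_dp_link_required() is (pixel_clock * bpp + 9) / 10 per the hunk. A standalone version of that decision, where the max-rate side is only a plausible stand-in and the numbers are illustrative:

#include <stdio.h>

static int link_required(int pixel_clock_khz, int bpp)
{
	return (pixel_clock_khz * bpp + 9) / 10;	/* round up, as in the hunk */
}

static int max_data_rate(int link_clock_khz, int lanes)
{
	return (link_clock_khz * lanes * 8) / 10;	/* stand-in: 8b/10b coding */
}

int main(void)
{
	int mode_clock = 138500;			/* pixel clock in kHz */
	int max_rate = max_data_rate(162000, 2);	/* 1.62 GHz link, 2 lanes */
	int force_6bpc = 0;

	int mode_rate = link_required(mode_clock, 24);
	if (mode_rate > max_rate) {
		mode_rate = link_required(mode_clock, 18);
		if (mode_rate > max_rate) {
			puts("MODE_CLOCK_HIGH");
			return 0;
		}
		force_6bpc = 1;				/* fits only when dithered down */
	}
	printf("mode ok, force_6bpc=%d\n", force_6bpc);
	return 0;
}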
index bd9a604b73da2f07aefdfae712135a5c256f18f7..a1b4343814e8faac807384057ba51a541746b6bd 100644 (file)
 /* drm_display_mode->private_flags */
 #define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
 #define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+#define INTEL_MODE_DP_FORCE_6BPC (0x10)
 
 static inline void
 intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
index 42f165a520de440880b574c70f1ca4aa4b7b5282..e44191132ac4e97307029e44a50d65e7a96be53a 100644 (file)
@@ -715,6 +715,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
                },
        },
+       {
+               .callback = intel_no_lvds_dmi_callback,
+               .ident = "Asus AT5NM10T-I",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+               },
+       },
 
        { }     /* terminating entry */
 };
index 21f60b7d69a30819d13f7e0fd942e3ebe4fe7bbd..04d79fd1dc9d9e1477ee2592ac0fb2212883e180 100644 (file)
@@ -178,13 +178,10 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
        if (HAS_PCH_SPLIT(dev)) {
                max >>= 16;
        } else {
-               if (IS_PINEVIEW(dev)) {
+               if (INTEL_INFO(dev)->gen < 4)
                        max >>= 17;
-               } else {
+               else
                        max >>= 16;
-                       if (INTEL_INFO(dev)->gen < 4)
-                               max &= ~1;
-               }
 
                if (is_backlight_combination_mode(dev))
                        max *= 0xff;
@@ -203,13 +200,12 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
                val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
        } else {
                val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-               if (IS_PINEVIEW(dev))
+               if (INTEL_INFO(dev)->gen < 4)
                        val >>= 1;
 
                if (is_backlight_combination_mode(dev)) {
                        u8 lbpc;
 
-                       val &= ~1;
                        pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
                        val *= lbpc;
                }
@@ -246,11 +242,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
        }
 
        tmp = I915_READ(BLC_PWM_CTL);
-       if (IS_PINEVIEW(dev)) {
-               tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+       if (INTEL_INFO(dev)->gen < 4)
                level <<= 1;
-       } else
-               tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+       tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
        I915_WRITE(BLC_PWM_CTL, tmp | level);
 }
 
index 3003fb25aefde4ac697a5d2694d825faec76fe0d..f7b9268df2666831795835c1f378a93cf1340379 100644 (file)
@@ -50,6 +50,7 @@
 #define IS_TMDS(c)     (c->output_flag & SDVO_TMDS_MASK)
 #define IS_LVDS(c)     (c->output_flag & SDVO_LVDS_MASK)
 #define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
 
 
 static const char *tv_format_names[] = {
@@ -1086,8 +1087,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
                }
                sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
        }
-       if (intel_crtc->pipe == 1)
-               sdvox |= SDVO_PIPE_B_SELECT;
+
+       if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+               sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
+       else
+               sdvox |= TRANSCODER(intel_crtc->pipe);
+
        if (intel_sdvo->has_hdmi_audio)
                sdvox |= SDVO_AUDIO_ENABLE;
 
@@ -1314,6 +1319,18 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
        return status;
 }
 
+static bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+                                 struct edid *edid)
+{
+       bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+       bool connector_is_digital = !!IS_DIGITAL(sdvo);
+
+       DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+                     connector_is_digital, monitor_is_digital);
+       return connector_is_digital == monitor_is_digital;
+}
+
 static enum drm_connector_status
 intel_sdvo_detect(struct drm_connector *connector, bool force)
 {
@@ -1358,10 +1375,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
                if (edid == NULL)
                        edid = intel_sdvo_get_analog_edid(connector);
                if (edid != NULL) {
-                       if (edid->input & DRM_EDID_INPUT_DIGITAL)
-                               ret = connector_status_disconnected;
-                       else
+                       if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+                                                             edid))
                                ret = connector_status_connected;
+                       else
+                               ret = connector_status_disconnected;
+
                        connector->display_info.raw_edid = NULL;
                        kfree(edid);
                } else
@@ -1402,11 +1421,8 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
                edid = intel_sdvo_get_analog_edid(connector);
 
        if (edid != NULL) {
-               struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
-               bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
-               bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
-
-               if (connector_is_digital == monitor_is_digital) {
+               if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+                                                     edid)) {
                        drm_mode_connector_update_edid_property(connector, edid);
                        drm_add_edid_modes(connector, edid);
                }
index ddbabefb4273ffa0fe071d49422dd448d21875d9..b12fd2c80812d002e0840787e8ff47481a3691a2 100644 (file)
@@ -369,3 +369,48 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
        spin_unlock_irqrestore(&dev->event_lock, flags);
        return 0;
 }
+
+int
+nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+                           struct drm_mode_create_dumb *args)
+{
+       struct nouveau_bo *bo;
+       int ret;
+
+       args->pitch = roundup(args->width * (args->bpp / 8), 256);
+       args->size = args->pitch * args->height;
+       args->size = roundup(args->size, PAGE_SIZE);
+
+       ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
+       if (ret)
+               return ret;
+
+       ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
+       drm_gem_object_unreference_unlocked(bo->gem);
+       return ret;
+}
+
+int
+nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+                            uint32_t handle)
+{
+       return drm_gem_handle_delete(file_priv, handle);
+}
+
+int
+nouveau_display_dumb_map_offset(struct drm_file *file_priv,
+                               struct drm_device *dev,
+                               uint32_t handle, uint64_t *poffset)
+{
+       struct drm_gem_object *gem;
+
+       gem = drm_gem_object_lookup(dev, file_priv, handle);
+       if (gem) {
+               struct nouveau_bo *bo = gem->driver_private;
+               *poffset = bo->bo.addr_space_offset;
+               drm_gem_object_unreference_unlocked(gem);
+               return 0;
+       }
+
+       return -ENOENT;
+}
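nouveau_display_dumb_create() above computes the pitch as width times bytes per pixel rounded up to 256, and the allocation size as pitch times height rounded up to the page size. The arithmetic in isolation, with made-up dimensions:

#include <stdio.h>

static unsigned long long roundup_to(unsigned long long x, unsigned long long align)
{
	return (x + align - 1) / align * align;
}

int main(void)
{
	unsigned long long width = 1366, height = 768, bpp = 32, page = 4096;

	unsigned long long pitch = roundup_to(width * (bpp / 8), 256);	/* 5632 */
	unsigned long long size  = roundup_to(pitch * height, page);	/* 4325376 */

	printf("pitch=%llu size=%llu\n", pitch, size);
	return 0;
}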
index 9f7bb12952623b51bed52e21148ec04dcca902af..9791d13c9e3b8d9d223bd5bb1cae182de4bc89ee 100644 (file)
@@ -433,6 +433,10 @@ static struct drm_driver driver = {
        .gem_open_object = nouveau_gem_object_open,
        .gem_close_object = nouveau_gem_object_close,
 
+       .dumb_create = nouveau_display_dumb_create,
+       .dumb_map_offset = nouveau_display_dumb_map_offset,
+       .dumb_destroy = nouveau_display_dumb_destroy,
+
        .name = DRIVER_NAME,
        .desc = DRIVER_DESC,
 #ifdef GIT_REVISION
index 29837da1098b3a85a50bbc9ee4ccc2b0a4901dab..4c0be3a4ed882f5430ea628ba4d80fef1f1d3690 100644 (file)
@@ -1418,6 +1418,12 @@ int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                           struct drm_pending_vblank_event *event);
 int nouveau_finish_page_flip(struct nouveau_channel *,
                             struct nouveau_page_flip_state *);
+int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
+                               struct drm_mode_create_dumb *args);
+int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
+                                   uint32_t handle, uint64_t *offset);
+int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
+                                uint32_t handle);
 
 /* nv10_gpio.c */
 int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
index 02222c540aee1a38f963dd13eeda7f0f7c7e8844..960c0ae0c0c3de650dac30beaec208b9bfd3558d 100644 (file)
@@ -680,7 +680,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
                return ret;
        }
 
-       ret = drm_mm_init(&chan->ramin_heap, base, size);
+       ret = drm_mm_init(&chan->ramin_heap, base, size - base);
        if (ret) {
                NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
                nouveau_gpuobj_ref(NULL, &chan->ramin);
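The drm_mm_init() fix above passes size - base because the third argument is a length, not an end offset; seeding the heap at a non-zero base with the full size would let it hand out offsets past the end of the backing object. A tiny numeric illustration with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned base = 0x200, size = 0x1000;	/* object ends at offset 0x1000 */

	printf("wrong: heap spans 0x%x..0x%x\n", base, base + size);          /* 0x1200: overruns */
	printf("right: heap spans 0x%x..0x%x\n", base, base + (size - base)); /* 0x1000 */
	return 0;
}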
index b75258a9fe44d544521431133ecd64ec0136637d..c8a463b76c89f03f96f68e48107d9993d2a2bda1 100644 (file)
@@ -67,7 +67,10 @@ nouveau_sgdma_clear(struct ttm_backend *be)
                        pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
                                       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
                }
+               nvbe->unmap_pages = false;
        }
+
+       nvbe->pages = NULL;
 }
 
 static void
index d23ca00e7d627c65e3814891c0a8aa0136f58395..06de250fe617df89ad4e05a34d3e05be8907f126 100644 (file)
@@ -616,7 +616,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_display *disp = nv50_display(dev);
        u32 unk30 = nv_rd32(dev, 0x610030), mc;
-       int i, crtc, or, type = OUTPUT_ANY;
+       int i, crtc, or = 0, type = OUTPUT_ANY;
 
        NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
        disp->irq.dcb = NULL;
@@ -708,7 +708,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
        struct nv50_display *disp = nv50_display(dev);
        u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
        struct dcb_entry *dcb;
-       int i, crtc, or, type = OUTPUT_ANY;
+       int i, crtc, or = 0, type = OUTPUT_ANY;
 
        NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
        dcb = disp->irq.dcb;
index a74e501afd25b44b7315d497f8ca30908dcb3995..ecfafd70cf0ed2b6f9ee0b51c74031caa337de27 100644 (file)
@@ -381,6 +381,8 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
        u8  tpnr[GPC_MAX];
        int i, gpc, tpc;
 
+       nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
+
        /*
         *      TP      ROP UNKVAL(magic_not_rop_nr)
         * 450: 4/0/0/0 2        3
index 23d63b4b3d77078ce4a351f82b8d7cba16d280b7..cb006a718e700f2c72cf06dfdac0c7f22e8cbdac 100644 (file)
@@ -780,7 +780,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
                        continue;
 
                if (nv_partner != nv_encoder &&
-                   nv_partner->dcb->or == nv_encoder->or) {
+                   nv_partner->dcb->or == nv_encoder->dcb->or) {
                        if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
                                return;
                        break;
index 87631fede1f8ed2a750419200688c151f552e7e6..2b97262e3ab14af5af32b4d85711828563450b09 100644 (file)
@@ -1107,9 +1107,40 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
                return -EINVAL;
        }
 
-       if (tiling_flags & RADEON_TILING_MACRO)
+       if (tiling_flags & RADEON_TILING_MACRO) {
+               if (rdev->family >= CHIP_CAYMAN)
+                       tmp = rdev->config.cayman.tile_config;
+               else
+                       tmp = rdev->config.evergreen.tile_config;
+
+               switch ((tmp & 0xf0) >> 4) {
+               case 0: /* 4 banks */
+                       fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+                       break;
+               case 1: /* 8 banks */
+               default:
+                       fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+                       break;
+               case 2: /* 16 banks */
+                       fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+                       break;
+               }
+
+               switch ((tmp & 0xf000) >> 12) {
+               case 0: /* 1KB rows */
+               default:
+                       fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB);
+                       break;
+               case 1: /* 2KB rows */
+                       fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB);
+                       break;
+               case 2: /* 4KB rows */
+                       fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB);
+                       break;
+               }
+
                fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
-       else if (tiling_flags & RADEON_TILING_MICRO)
+       } else if (tiling_flags & RADEON_TILING_MICRO)
                fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
 
        switch (radeon_crtc->crtc_id) {
index 1d603a3335db65b4bf425228a8f98221d01fcd48..92c9628c572daa2495a685b0a7898273957ffc73 100644 (file)
@@ -82,6 +82,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+       int i;
 
        /* Lock the graphics update lock */
        tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
@@ -99,7 +100,11 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
               (u32)crtc_base);
 
        /* Wait for update_pending to go high. */
-       while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
+                       break;
+               udelay(1);
+       }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
        /* Unlock the lock, so double-buffering can take place inside vblank */
@@ -3271,6 +3276,18 @@ int evergreen_init(struct radeon_device *rdev)
                        rdev->accel_working = false;
                }
        }
+
+       /* Don't start up if the MC ucode is missing on BTC parts.
+        * The default clocks and voltages before the MC ucode
+        * is loaded are not sufficient for advanced operations.
+        */
+       if (ASIC_IS_DCE5(rdev)) {
+               if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
+                       DRM_ERROR("radeon: MC ucode required for NI+.\n");
+                       return -EINVAL;
+               }
+       }
+
        return 0;
 }
 
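evergreen_page_flip() above replaces an unbounded busy-wait on GRPH_SURFACE_UPDATE_PENDING with a loop bounded by rdev->usec_timeout, sleeping one microsecond per attempt so a wedged GPU cannot hang the CPU. The bounded-poll pattern as a standalone sketch with hypothetical names:

#include <stdio.h>
#include <unistd.h>

static int update_pending(int attempt)
{
	return attempt >= 5;		/* stand-in for reading the status register */
}

static int wait_update_pending(int usec_timeout)
{
	int i;

	for (i = 0; i < usec_timeout; i++) {
		if (update_pending(i))
			return 0;	/* bit went high */
		usleep(1);
	}
	return -1;			/* timed out instead of spinning forever */
}

int main(void)
{
	printf("wait: %d\n", wait_update_pending(100000));
	return 0;
}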
index 38e1bda73d33be343058b1f63f9349ca7f80e3f1..cd4590aae154154d62906d901a63b8b368fea861 100644 (file)
@@ -38,6 +38,7 @@ struct evergreen_cs_track {
        u32                     group_size;
        u32                     nbanks;
        u32                     npipes;
+       u32                     row_size;
        /* value we track */
        u32                     nsamples;
        u32                     cb_color_base_last[12];
@@ -77,6 +78,44 @@ struct evergreen_cs_track {
        struct radeon_bo        *db_s_write_bo;
 };
 
+static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
+{
+       if (tiling_flags & RADEON_TILING_MACRO)
+               return ARRAY_2D_TILED_THIN1;
+       else if (tiling_flags & RADEON_TILING_MICRO)
+               return ARRAY_1D_TILED_THIN1;
+       else
+               return ARRAY_LINEAR_GENERAL;
+}
+
+static u32 evergreen_cs_get_num_banks(u32 nbanks)
+{
+       switch (nbanks) {
+       case 2:
+               return ADDR_SURF_2_BANK;
+       case 4:
+               return ADDR_SURF_4_BANK;
+       case 8:
+       default:
+               return ADDR_SURF_8_BANK;
+       case 16:
+               return ADDR_SURF_16_BANK;
+       }
+}
+
+static u32 evergreen_cs_get_tile_split(u32 row_size)
+{
+       switch (row_size) {
+       case 1:
+       default:
+               return ADDR_SURF_TILE_SPLIT_1KB;
+       case 2:
+               return ADDR_SURF_TILE_SPLIT_2KB;
+       case 4:
+               return ADDR_SURF_TILE_SPLIT_4KB;
+       }
+}
+
 static void evergreen_cs_track_init(struct evergreen_cs_track *track)
 {
        int i;
@@ -490,12 +529,11 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                        }
                        ib[idx] &= ~Z_ARRAY_MODE(0xf);
                        track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+                       ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+                       track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
                        if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
-                               ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                               track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                       } else {
-                               ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-                               track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+                               ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+                               ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
                        }
                }
                break;
@@ -618,13 +656,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                                                "0x%04X\n", reg);
                                return -EINVAL;
                        }
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
-                               ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                               track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                       } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
-                               ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-                               track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-                       }
+                       ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+                       track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
                }
                break;
        case CB_COLOR8_INFO:
@@ -640,13 +673,8 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
                                                "0x%04X\n", reg);
                                return -EINVAL;
                        }
-                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
-                               ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                               track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                       } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
-                               ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-                               track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-                       }
+                       ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+                       track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
                }
                break;
        case CB_COLOR0_PITCH:
@@ -701,6 +729,16 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
        case CB_COLOR9_ATTRIB:
        case CB_COLOR10_ATTRIB:
        case CB_COLOR11_ATTRIB:
+               r = evergreen_cs_packet_next_reloc(p, &reloc);
+               if (r) {
+                       dev_warn(p->dev, "bad SET_CONTEXT_REG "
+                                       "0x%04X\n", reg);
+                       return -EINVAL;
+               }
+               if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+                       ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+                       ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+               }
                break;
        case CB_COLOR0_DIM:
        case CB_COLOR1_DIM:
@@ -1318,10 +1356,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
                                }
                                ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
                                if (!p->keep_tiling_flags) {
-                                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
-                                               ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-                                       else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
-                                               ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+                                       ib[idx+1+(i*8)+1] |=
+                                               TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
+                                       if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+                                               ib[idx+1+(i*8)+6] |=
+                                                       TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+                                               ib[idx+1+(i*8)+7] |=
+                                                       TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+                                       }
                                }
                                texture = reloc->robj;
                                /* tex mip base */
@@ -1422,6 +1464,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
 {
        struct radeon_cs_packet pkt;
        struct evergreen_cs_track *track;
+       u32 tmp;
        int r;
 
        if (p->track == NULL) {
@@ -1430,9 +1473,63 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
                if (track == NULL)
                        return -ENOMEM;
                evergreen_cs_track_init(track);
-               track->npipes = p->rdev->config.evergreen.tiling_npipes;
-               track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
-               track->group_size = p->rdev->config.evergreen.tiling_group_size;
+               if (p->rdev->family >= CHIP_CAYMAN)
+                       tmp = p->rdev->config.cayman.tile_config;
+               else
+                       tmp = p->rdev->config.evergreen.tile_config;
+
+               switch (tmp & 0xf) {
+               case 0:
+                       track->npipes = 1;
+                       break;
+               case 1:
+               default:
+                       track->npipes = 2;
+                       break;
+               case 2:
+                       track->npipes = 4;
+                       break;
+               case 3:
+                       track->npipes = 8;
+                       break;
+               }
+
+               switch ((tmp & 0xf0) >> 4) {
+               case 0:
+                       track->nbanks = 4;
+                       break;
+               case 1:
+               default:
+                       track->nbanks = 8;
+                       break;
+               case 2:
+                       track->nbanks = 16;
+                       break;
+               }
+
+               switch ((tmp & 0xf00) >> 8) {
+               case 0:
+                       track->group_size = 256;
+                       break;
+               case 1:
+               default:
+                       track->group_size = 512;
+                       break;
+               }
+
+               switch ((tmp & 0xf000) >> 12) {
+               case 0:
+                       track->row_size = 1;
+                       break;
+               case 1:
+               default:
+                       track->row_size = 2;
+                       break;
+               case 2:
+                       track->row_size = 4;
+                       break;
+               }
+
                p->track = track;
        }
        do {
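The hunk above stops trusting the per-ASIC npipes/nbanks/group_size fields and instead derives all four tiling parameters from the tile_config word. As a rough user-space sketch of the same decode (struct tile_params and decode_tile_config are made-up names; the field positions and defaults are taken only from the switch cases above, not from hardware documentation):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the tiling fields of evergreen_cs_track. */
struct tile_params {
	uint32_t npipes, nbanks, group_size, row_size;
};

/* Mirrors the switch statements above: nibble 0 selects pipes, nibble 1
 * banks, nibble 2 group size, nibble 3 row size; out-of-range values fall
 * back to the same defaults as the 'default' cases. */
static void decode_tile_config(uint32_t tmp, struct tile_params *t)
{
	static const uint32_t pipes[]  = { 1, 2, 4, 8 };
	static const uint32_t banks[]  = { 4, 8, 16 };
	static const uint32_t groups[] = { 256, 512 };
	static const uint32_t rows[]   = { 1, 2, 4 };
	uint32_t f;

	f = tmp & 0xf;         t->npipes     = f < 4 ? pipes[f]  : 2;
	f = (tmp >> 4) & 0xf;  t->nbanks     = f < 3 ? banks[f]  : 8;
	f = (tmp >> 8) & 0xf;  t->group_size = f < 2 ? groups[f] : 512;
	f = (tmp >> 12) & 0xf; t->row_size   = f < 3 ? rows[f]   : 2;
}

int main(void)
{
	struct tile_params t;

	decode_tile_config(0x2121, &t);
	printf("pipes=%u banks=%u group=%u row=%u\n",
	       t.npipes, t.nbanks, t.group_size, t.row_size);
	return 0;	/* prints: pipes=2 banks=16 group=512 row=4 */
}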
index c781c92c3451285a14360f57bf18a53802dca1ab..7d7f2155e34c305729f8487c55fc8c2c741ccc44 100644 (file)
 #       define EVERGREEN_GRPH_DEPTH_8BPP                0
 #       define EVERGREEN_GRPH_DEPTH_16BPP               1
 #       define EVERGREEN_GRPH_DEPTH_32BPP               2
+#       define EVERGREEN_GRPH_NUM_BANKS(x)              (((x) & 0x3) << 2)
+#       define EVERGREEN_ADDR_SURF_2_BANK               0
+#       define EVERGREEN_ADDR_SURF_4_BANK               1
+#       define EVERGREEN_ADDR_SURF_8_BANK               2
+#       define EVERGREEN_ADDR_SURF_16_BANK              3
+#       define EVERGREEN_GRPH_Z(x)                      (((x) & 0x3) << 4)
+#       define EVERGREEN_GRPH_BANK_WIDTH(x)             (((x) & 0x3) << 6)
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_1         0
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_2         1
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_4         2
+#       define EVERGREEN_ADDR_SURF_BANK_WIDTH_8         3
 #       define EVERGREEN_GRPH_FORMAT(x)                 (((x) & 0x7) << 8)
 /* 8 BPP */
 #       define EVERGREEN_GRPH_FORMAT_INDEXED            0
 #       define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102     5
 #       define EVERGREEN_GRPH_FORMAT_RGB111110          6
 #       define EVERGREEN_GRPH_FORMAT_BGR101111          7
+#       define EVERGREEN_GRPH_BANK_HEIGHT(x)            (((x) & 0x3) << 11)
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1        0
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2        1
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4        2
+#       define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8        3
+#       define EVERGREEN_GRPH_TILE_SPLIT(x)             (((x) & 0x7) << 13)
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B       0
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B      1
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B      2
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B      3
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB       4
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB       5
+#       define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB       6
+#       define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x)      (((x) & 0x3) << 18)
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1  0
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2  1
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4  2
+#       define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8  3
 #       define EVERGREEN_GRPH_ARRAY_MODE(x)             (((x) & 0x7) << 20)
 #       define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL      0
 #       define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED      1
index b937c49054d9df9d63fea1279d6a1e123fd899e0..e00039e59a75b2bceb45ff69f63b740fd3193f40 100644 (file)
 #define DB_HTILE_DATA_BASE                             0x28014
 #define DB_Z_INFO                                      0x28040
 #       define Z_ARRAY_MODE(x)                          ((x) << 4)
+#       define DB_TILE_SPLIT(x)                         (((x) & 0x7) << 8)
+#       define DB_NUM_BANKS(x)                          (((x) & 0x3) << 12)
+#       define DB_BANK_WIDTH(x)                         (((x) & 0x3) << 16)
+#       define DB_BANK_HEIGHT(x)                        (((x) & 0x3) << 20)
 #define DB_STENCIL_INFO                                        0x28044
 #define DB_Z_READ_BASE                                 0x28048
 #define DB_STENCIL_READ_BASE                           0x2804c
 #      define CB_SF_EXPORT_FULL                        0
 #      define CB_SF_EXPORT_NORM                        1
 #define        CB_COLOR0_ATTRIB                                0x28c74
+#       define CB_TILE_SPLIT(x)                         (((x) & 0x7) << 5)
+#       define ADDR_SURF_TILE_SPLIT_64B                 0
+#       define ADDR_SURF_TILE_SPLIT_128B                1
+#       define ADDR_SURF_TILE_SPLIT_256B                2
+#       define ADDR_SURF_TILE_SPLIT_512B                3
+#       define ADDR_SURF_TILE_SPLIT_1KB                 4
+#       define ADDR_SURF_TILE_SPLIT_2KB                 5
+#       define ADDR_SURF_TILE_SPLIT_4KB                 6
+#       define CB_NUM_BANKS(x)                          (((x) & 0x3) << 10)
+#       define ADDR_SURF_2_BANK                         0
+#       define ADDR_SURF_4_BANK                         1
+#       define ADDR_SURF_8_BANK                         2
+#       define ADDR_SURF_16_BANK                        3
+#       define CB_BANK_WIDTH(x)                         (((x) & 0x3) << 13)
+#       define ADDR_SURF_BANK_WIDTH_1                   0
+#       define ADDR_SURF_BANK_WIDTH_2                   1
+#       define ADDR_SURF_BANK_WIDTH_4                   2
+#       define ADDR_SURF_BANK_WIDTH_8                   3
+#       define CB_BANK_HEIGHT(x)                        (((x) & 0x3) << 16)
+#       define ADDR_SURF_BANK_HEIGHT_1                  0
+#       define ADDR_SURF_BANK_HEIGHT_2                  1
+#       define ADDR_SURF_BANK_HEIGHT_4                  2
+#       define ADDR_SURF_BANK_HEIGHT_8                  3
 #define        CB_COLOR0_DIM                                   0x28c78
 /* only CB0-7 blocks have these regs */
 #define        CB_COLOR0_CMASK                                 0x28c7c
 #      define SQ_SEL_1                                 5
 #define SQ_TEX_RESOURCE_WORD5_0                         0x30014
 #define SQ_TEX_RESOURCE_WORD6_0                         0x30018
+#       define TEX_TILE_SPLIT(x)                        (((x) & 0x7) << 29)
 #define SQ_TEX_RESOURCE_WORD7_0                         0x3001c
+#       define TEX_BANK_WIDTH(x)                        (((x) & 0x3) << 8)
+#       define TEX_BANK_HEIGHT(x)                       (((x) & 0x3) << 10)
+#       define TEX_NUM_BANKS(x)                         (((x) & 0x3) << 16)
 
 #define SQ_VTX_CONSTANT_WORD0_0                                0x30000
 #define SQ_VTX_CONSTANT_WORD1_0                                0x30004
index ad158ea499015e1ceb8ca956c611d9f8944a133d..bfc08f6320f83b83569bec08a2968014c9e90a4f 100644 (file)
@@ -187,13 +187,18 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+       int i;
 
        /* Lock the graphics update lock */
        /* update the scanout addresses */
        WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
 
        /* Wait for update_pending to go high. */
-       while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
+                       break;
+               udelay(1);
+       }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
        /* Unlock the lock, so double-buffering can take place inside vblank */
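The same bounded-poll shape is applied to rs600_page_flip() and rv770_page_flip() further down: instead of spinning forever on GUI_TRIG_OFFSET / SURFACE_UPDATE_PENDING, the loop gives up after rdev->usec_timeout one-microsecond waits. A minimal user-space sketch of the pattern (poll_until() and usleep() stand in for the driver loop and udelay(); nothing here is radeon API):

#include <stdbool.h>
#include <unistd.h>	/* usleep() stands in for the kernel's udelay() */

/* Poll a condition for at most timeout_us microseconds instead of spinning
 * forever; returns true if the condition came up in time. The page-flip
 * hunks keep going even on timeout and only log via DRM_DEBUG, so the
 * return value there is best-effort. */
static bool poll_until(bool (*cond)(void *ctx), void *ctx,
		       unsigned int timeout_us)
{
	unsigned int i;

	for (i = 0; i < timeout_us; i++) {
		if (cond(ctx))
			return true;
		usleep(1);
	}
	return false;
}

static bool counter_hit(void *ctx)
{
	return --*(int *)ctx == 0;	/* pretend the flip completes after N polls */
}

int main(void)
{
	int polls = 5;

	return poll_until(counter_hit, &polls, 1000) ? 0 : 1;
}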
index 3f6636bb2d7f874abf0208cff67010f82c2f4e75..3516a6081dcfcc3acb35d4d64286ea0f321da01e 100644 (file)
@@ -35,7 +35,8 @@ static int radeon_atif_call(acpi_handle handle)
 
        /* Fail only if calling the method fails and ATIF is supported */
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
-               printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status));
+               DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
+                                acpi_format_exception(status));
                kfree(buffer.pointer);
                return 1;
        }
@@ -50,13 +51,13 @@ int radeon_acpi_init(struct radeon_device *rdev)
        acpi_handle handle;
        int ret;
 
-       /* No need to proceed if we're sure that ATIF is not supported */
-       if (!ASIC_IS_AVIVO(rdev) || !rdev->bios)
-               return 0;
-
        /* Get the device handle */
        handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
 
+       /* No need to proceed if we're sure that ATIF is not supported */
+       if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
+               return 0;
+
        /* Call the ATIF method */
        ret = radeon_atif_call(handle);
        if (ret)
index 06e413e6a920207850734185f1c459989616f625..4b27efa4405b94b63011b2e8948d678c35ccfd62 100644 (file)
@@ -233,13 +233,12 @@ u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
                switch (radeon_encoder->encoder_id) {
                case ENCODER_OBJECT_ID_TRAVIS:
                case ENCODER_OBJECT_ID_NUTMEG:
-                       return true;
+                       return radeon_encoder->encoder_id;
                default:
-                       return false;
+                       return ENCODER_OBJECT_ID_NONE;
                }
        }
-
-       return false;
+       return ENCODER_OBJECT_ID_NONE;
 }
 
 void radeon_panel_mode_fixup(struct drm_encoder *encoder,
index 481b99e89f6542d661c4f0697d413a6eff40f821..b1053d64042313df931b9cde933e121bfa814517 100644 (file)
@@ -62,6 +62,7 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+       int i;
 
        /* Lock the graphics update lock */
        tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -74,7 +75,11 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
               (u32)crtc_base);
 
        /* Wait for update_pending to go high. */
-       while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+                       break;
+               udelay(1);
+       }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
        /* Unlock the lock, so double-buffering can take place inside vblank */
index a983f410ab89d7d549530d7b7c2f42287a6a7969..23ae1c60ab3d97b8a1576c567054b736fd70eb93 100644 (file)
@@ -47,6 +47,7 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
        u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+       int i;
 
        /* Lock the graphics update lock */
        tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -66,7 +67,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
               (u32)crtc_base);
 
        /* Wait for update_pending to go high. */
-       while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+       for (i = 0; i < rdev->usec_timeout; i++) {
+               if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+                       break;
+               udelay(1);
+       }
        DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
        /* Unlock the lock, so double-buffering can take place inside vblank */
index 8cca91a93bde092af78c6c7bdcfb0ffceb3f04a6..dc279706ca7027d100761be6a45c54d9001534ac 100644 (file)
@@ -390,6 +390,11 @@ extern int vmw_context_check(struct vmw_private *dev_priv,
                             struct ttm_object_file *tfile,
                             int id,
                             struct vmw_resource **p_res);
+extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+                                 struct ttm_object_file *tfile,
+                                 uint32_t handle,
+                                 struct vmw_surface **out_surf,
+                                 struct vmw_dma_buffer **out_buf);
 extern void vmw_surface_res_free(struct vmw_resource *res);
 extern int vmw_surface_init(struct vmw_private *dev_priv,
                            struct vmw_surface *srf,
index 03bbc2a6f9a738439a8d8f59a027b87ec86c72db..a0c2f12b1e1b8fffb8e4422c275a91b0eb6a4eaf 100644 (file)
@@ -33,6 +33,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 {
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t fifo_min, hwversion;
+       const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
        if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
                return false;
@@ -41,7 +42,12 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
        if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
                return false;
 
-       hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+       hwversion = ioread32(fifo_mem +
+                            ((fifo->capabilities &
+                              SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+                             SVGA_FIFO_3D_HWVERSION_REVISED :
+                             SVGA_FIFO_3D_HWVERSION));
+
        if (hwversion == 0)
                return false;
 
index 3f6343502d1f96aea110dccdeb59a39cb9fe0065..66917c6c3813f6ff6cee579786864421c9dda0d4 100644 (file)
@@ -58,8 +58,14 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
        case DRM_VMW_PARAM_FIFO_HW_VERSION:
        {
                __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
-
-               param->value = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
+               const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+
+               param->value =
+                       ioread32(fifo_mem +
+                                ((fifo->capabilities &
+                                  SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
+                                 SVGA_FIFO_3D_HWVERSION_REVISED :
+                                 SVGA_FIFO_3D_HWVERSION));
                break;
        }
        default:
@@ -140,7 +146,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
                goto out_clips;
        }
 
-       clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+       clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
        if (clips == NULL) {
                DRM_ERROR("Failed to allocate clip rect list.\n");
                ret = -ENOMEM;
@@ -166,13 +172,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
                ret = -EINVAL;
                goto out_no_fb;
        }
-
        vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
-       if (!vfb->dmabuf) {
-               DRM_ERROR("Framebuffer not dmabuf backed.\n");
-               ret = -EINVAL;
-               goto out_no_fb;
-       }
 
        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
@@ -232,7 +232,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
                goto out_clips;
        }
 
-       clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+       clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
        if (clips == NULL) {
                DRM_ERROR("Failed to allocate clip rect list.\n");
                ret = -ENOMEM;
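The clip-list allocations in this file now go through kcalloc() rather than kzalloc(num * size). A small user-space sketch of why that matters when num_clips comes straight from an ioctl (zalloc_array() is a hypothetical helper, not a kernel function):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* What switching kzalloc(n * size) to kcalloc(n, size) buys in the hunks
 * above: the multiplication is checked, so a huge n from user space cannot
 * wrap into a small allocation that later gets overrun. */
static void *zalloc_array(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;		/* n * size would overflow */
	return calloc(n, size);		/* calloc() zeroes, like kzalloc() */
}

int main(void)
{
	void *ok  = zalloc_array(16, sizeof(int));
	void *bad = zalloc_array(SIZE_MAX / 2, 4);

	printf("%s %s\n", ok ? "ok" : "NULL", bad ? "ok" : "NULL"); /* ok NULL */
	free(ok);
	return 0;
}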
index 880e285d7578afa3ebeae75de4dad77443d754ce..f94b33ae221546a9d9170f3e0eb6b8292776721b 100644 (file)
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
+
+struct vmw_clip_rect {
+       int x1, x2, y1, y2;
+};
+
+/**
+ * Clip @num_rects rects in @rects against @clip, storing the
+ * results in @out_rects and the number of passing rects in @out_num.
+ */
+void vmw_clip_cliprects(struct drm_clip_rect *rects,
+                       int num_rects,
+                       struct vmw_clip_rect clip,
+                       SVGASignedRect *out_rects,
+                       int *out_num)
+{
+       int i, k;
+
+       for (i = 0, k = 0; i < num_rects; i++) {
+               int x1 = max_t(int, clip.x1, rects[i].x1);
+               int y1 = max_t(int, clip.y1, rects[i].y1);
+               int x2 = min_t(int, clip.x2, rects[i].x2);
+               int y2 = min_t(int, clip.y2, rects[i].y2);
+
+               if (x1 >= x2)
+                       continue;
+               if (y1 >= y2)
+                       continue;
+
+               out_rects[k].left   = x1;
+               out_rects[k].top    = y1;
+               out_rects[k].right  = x2;
+               out_rects[k].bottom = y2;
+               k++;
+       }
+
+       *out_num = k;
+}
+
 void vmw_display_unit_cleanup(struct vmw_display_unit *du)
 {
        if (du->cursor_surface)
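vmw_clip_cliprects(), added in the hunk above, is what lets the later surface-dirty and present paths clamp their blits to each screen object instead of relying on the host to tolerate out-of-bounds rects. A stand-alone sketch of the same intersection, with SVGASignedRect replaced by a plain struct for illustration:

#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

/* Same idea as vmw_clip_cliprects(): intersect rects[] with clip,
 * keep only non-empty results, return how many survived. */
static int clip_rects(const struct rect *rects, int n, struct rect clip,
		      struct rect *out)
{
	int i, k = 0;

	for (i = 0; i < n; i++) {
		struct rect r = {
			.x1 = rects[i].x1 > clip.x1 ? rects[i].x1 : clip.x1,
			.y1 = rects[i].y1 > clip.y1 ? rects[i].y1 : clip.y1,
			.x2 = rects[i].x2 < clip.x2 ? rects[i].x2 : clip.x2,
			.y2 = rects[i].y2 < clip.y2 ? rects[i].y2 : clip.y2,
		};
		if (r.x1 < r.x2 && r.y1 < r.y2)
			out[k++] = r;
	}
	return k;
}

int main(void)
{
	struct rect dirty[] = { { -10, -10, 20, 20 }, { 50, 50, 60, 60 } };
	struct rect clip = { 0, 0, 40, 40 };
	struct rect out[2];
	int n = clip_rects(dirty, 2, clip, out);

	/* prints: 1 rect(s) left, first is 0,0-20,20 */
	printf("%d rect(s) left, first is %d,%d-%d,%d\n",
	       n, out[0].x1, out[0].y1, out[0].x2, out[0].y2);
	return 0;
}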
@@ -82,6 +120,43 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
        return 0;
 }
 
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+                            struct vmw_dma_buffer *dmabuf,
+                            u32 width, u32 height,
+                            u32 hotspotX, u32 hotspotY)
+{
+       struct ttm_bo_kmap_obj map;
+       unsigned long kmap_offset;
+       unsigned long kmap_num;
+       void *virtual;
+       bool dummy;
+       int ret;
+
+       kmap_offset = 0;
+       kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+       ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("reserve failed\n");
+               return -EINVAL;
+       }
+
+       ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
+       if (unlikely(ret != 0))
+               goto err_unreserve;
+
+       virtual = ttm_kmap_obj_virtual(&map, &dummy);
+       ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
+                                     hotspotX, hotspotY);
+
+       ttm_bo_kunmap(&map);
+err_unreserve:
+       ttm_bo_unreserve(&dmabuf->base);
+
+       return ret;
+}
+
+
 void vmw_cursor_update_position(struct vmw_private *dev_priv,
                                bool show, int x, int y)
 {
@@ -110,24 +185,21 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
                return -EINVAL;
 
        if (handle) {
-               ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
-                                                    handle, &surface);
-               if (!ret) {
-                       if (!surface->snooper.image) {
-                               DRM_ERROR("surface not suitable for cursor\n");
-                               vmw_surface_unreference(&surface);
-                               return -EINVAL;
-                       }
-               } else {
-                       ret = vmw_user_dmabuf_lookup(tfile,
-                                                    handle, &dmabuf);
-                       if (ret) {
-                               DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
-                               return -EINVAL;
-                       }
+               ret = vmw_user_lookup_handle(dev_priv, tfile,
+                                            handle, &surface, &dmabuf);
+               if (ret) {
+                       DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
+                       return -EINVAL;
                }
        }
 
+       /* need to do this before taking down old image */
+       if (surface && !surface->snooper.image) {
+               DRM_ERROR("surface not suitable for cursor\n");
+               vmw_surface_unreference(&surface);
+               return -EINVAL;
+       }
+
        /* takedown old cursor */
        if (du->cursor_surface) {
                du->cursor_surface->snooper.crtc = NULL;
@@ -146,36 +218,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
                vmw_cursor_update_image(dev_priv, surface->snooper.image,
                                        64, 64, du->hotspot_x, du->hotspot_y);
        } else if (dmabuf) {
-               struct ttm_bo_kmap_obj map;
-               unsigned long kmap_offset;
-               unsigned long kmap_num;
-               void *virtual;
-               bool dummy;
-
                /* vmw_user_surface_lookup takes one reference */
                du->cursor_dmabuf = dmabuf;
 
-               kmap_offset = 0;
-               kmap_num = (64*64*4) >> PAGE_SHIFT;
-
-               ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
-               if (unlikely(ret != 0)) {
-                       DRM_ERROR("reserve failed\n");
-                       return -EINVAL;
-               }
-
-               ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
-               if (unlikely(ret != 0))
-                       goto err_unreserve;
-
-               virtual = ttm_kmap_obj_virtual(&map, &dummy);
-               vmw_cursor_update_image(dev_priv, virtual, 64, 64,
-                                       du->hotspot_x, du->hotspot_y);
-
-               ttm_bo_kunmap(&map);
-err_unreserve:
-               ttm_bo_unreserve(&dmabuf->base);
-
+               ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
+                                              du->hotspot_x, du->hotspot_y);
        } else {
                vmw_cursor_update_position(dev_priv, false, 0, 0);
                return 0;
@@ -377,8 +424,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
                                struct drm_clip_rect *clips,
                                unsigned num_clips, int inc)
 {
-       struct drm_clip_rect *clips_ptr;
        struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+       struct drm_clip_rect *clips_ptr;
+       struct drm_clip_rect *tmp;
        struct drm_crtc *crtc;
        size_t fifo_size;
        int i, num_units;
@@ -391,7 +439,6 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
        } *cmd;
        SVGASignedRect *blits;
 
-
        num_units = 0;
        list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
                            head) {
@@ -402,13 +449,24 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
 
        BUG_ON(!clips || !num_clips);
 
+       tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+       if (unlikely(tmp == NULL)) {
+               DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+               return -ENOMEM;
+       }
+
        fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
        cmd = kzalloc(fifo_size, GFP_KERNEL);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Temporary fifo memory alloc failed.\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_free_tmp;
        }
 
+       /* setup blits pointer */
+       blits = (SVGASignedRect *)&cmd[1];
+
+       /* initial clip region */
        left = clips->x1;
        right = clips->x2;
        top = clips->y1;
@@ -434,45 +492,60 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
        cmd->body.srcRect.bottom = bottom;
 
        clips_ptr = clips;
-       blits = (SVGASignedRect *)&cmd[1];
        for (i = 0; i < num_clips; i++, clips_ptr += inc) {
-               blits[i].left   = clips_ptr->x1 - left;
-               blits[i].right  = clips_ptr->x2 - left;
-               blits[i].top    = clips_ptr->y1 - top;
-               blits[i].bottom = clips_ptr->y2 - top;
+               tmp[i].x1 = clips_ptr->x1 - left;
+               tmp[i].x2 = clips_ptr->x2 - left;
+               tmp[i].y1 = clips_ptr->y1 - top;
+               tmp[i].y2 = clips_ptr->y2 - top;
        }
 
        /* do per unit writing, reuse fifo for each */
        for (i = 0; i < num_units; i++) {
                struct vmw_display_unit *unit = units[i];
-               int clip_x1 = left - unit->crtc.x;
-               int clip_y1 = top - unit->crtc.y;
-               int clip_x2 = right - unit->crtc.x;
-               int clip_y2 = bottom - unit->crtc.y;
+               struct vmw_clip_rect clip;
+               int num;
+
+               clip.x1 = left - unit->crtc.x;
+               clip.y1 = top - unit->crtc.y;
+               clip.x2 = right - unit->crtc.x;
+               clip.y2 = bottom - unit->crtc.y;
 
                /* skip any crtcs that misses the clip region */
-               if (clip_x1 >= unit->crtc.mode.hdisplay ||
-                   clip_y1 >= unit->crtc.mode.vdisplay ||
-                   clip_x2 <= 0 || clip_y2 <= 0)
+               if (clip.x1 >= unit->crtc.mode.hdisplay ||
+                   clip.y1 >= unit->crtc.mode.vdisplay ||
+                   clip.x2 <= 0 || clip.y2 <= 0)
                        continue;
 
+               /*
+                * In order for the clip rects to be correctly scaled,
+                * the src and dest rects need to be the same size.
+                */
+               cmd->body.destRect.left = clip.x1;
+               cmd->body.destRect.right = clip.x2;
+               cmd->body.destRect.top = clip.y1;
+               cmd->body.destRect.bottom = clip.y2;
+
+               /* create a clip rect of the crtc in dest coords */
+               clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+               clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+               clip.x1 = 0 - clip.x1;
+               clip.y1 = 0 - clip.y1;
+
                /* need to reset sid as it is changed by execbuf */
                cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
-
                cmd->body.destScreenId = unit->unit;
 
-               /*
-                * The blit command is a lot more resilient then the
-                * readback command when it comes to clip rects. So its
-                * okay to go out of bounds.
-                */
+               /* clip and write blits to cmd stream */
+               vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
 
-               cmd->body.destRect.left = clip_x1;
-               cmd->body.destRect.right = clip_x2;
-               cmd->body.destRect.top = clip_y1;
-               cmd->body.destRect.bottom = clip_y2;
+               /* if no cliprects hit skip this */
+               if (num == 0)
+                       continue;
 
 
+               /* recalculate package length */
+               fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+               cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
                ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
                                          fifo_size, 0, NULL);
 
@@ -480,7 +553,10 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
                        break;
        }
 
+
        kfree(cmd);
+out_free_tmp:
+       kfree(tmp);
 
        return ret;
 }
@@ -556,6 +632,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
         * Sanity checks.
         */
 
+       /* Surface must be marked as a scanout. */
+       if (unlikely(!surface->scanout))
+               return -EINVAL;
+
        if (unlikely(surface->mip_levels[0] != 1 ||
                     surface->num_sizes != 1 ||
                     surface->sizes[0].width < mode_cmd->width ||
@@ -782,6 +862,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
                        int clip_y1 = clips_ptr->y1 - unit->crtc.y;
                        int clip_x2 = clips_ptr->x2 - unit->crtc.x;
                        int clip_y2 = clips_ptr->y2 - unit->crtc.y;
+                       int move_x, move_y;
 
                        /* skip any crtcs that misses the clip region */
                        if (clip_x1 >= unit->crtc.mode.hdisplay ||
@@ -789,12 +870,21 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
                            clip_x2 <= 0 || clip_y2 <= 0)
                                continue;
 
+                       /* clip size to crtc size */
+                       clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
+                       clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
+
+                       /* translate both src and dest to bring clip into screen */
+                       move_x = min_t(int, clip_x1, 0);
+                       move_y = min_t(int, clip_y1, 0);
+
+                       /* actual translate done here */
                        blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
                        blits[hit_num].body.destScreenId = unit->unit;
-                       blits[hit_num].body.srcOrigin.x = clips_ptr->x1;
-                       blits[hit_num].body.srcOrigin.y = clips_ptr->y1;
-                       blits[hit_num].body.destRect.left = clip_x1;
-                       blits[hit_num].body.destRect.top = clip_y1;
+                       blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
+                       blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
+                       blits[hit_num].body.destRect.left = clip_x1 - move_x;
+                       blits[hit_num].body.destRect.top = clip_y1 - move_y;
                        blits[hit_num].body.destRect.right = clip_x2;
                        blits[hit_num].body.destRect.bottom = clip_y2;
                        hit_num++;
@@ -1003,7 +1093,6 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
        struct vmw_surface *surface = NULL;
        struct vmw_dma_buffer *bo = NULL;
        struct ttm_base_object *user_obj;
-       u64 required_size;
        int ret;
 
        /**
@@ -1012,8 +1101,9 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
         * requested framebuffer.
         */
 
-       required_size = mode_cmd->pitch * mode_cmd->height;
-       if (unlikely(required_size > (u64) dev_priv->vram_size)) {
+       if (!vmw_kms_validate_mode_vram(dev_priv,
+                                       mode_cmd->pitch,
+                                       mode_cmd->height)) {
                DRM_ERROR("VRAM size is too small for requested mode.\n");
                return ERR_PTR(-ENOMEM);
        }
@@ -1033,46 +1123,29 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
                return ERR_PTR(-ENOENT);
        }
 
-       /**
-        * End conditioned code.
-        */
-
-       ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
-                                            mode_cmd->handle, &surface);
+       /* returns either a dmabuf or surface */
+       ret = vmw_user_lookup_handle(dev_priv, tfile,
+                                    mode_cmd->handle,
+                                    &surface, &bo);
        if (ret)
-               goto try_dmabuf;
-
-       if (!surface->scanout)
-               goto err_not_scanout;
-
-       ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
-                                             &vfb, mode_cmd);
-
-       /* vmw_user_surface_lookup takes one ref so does new_fb */
-       vmw_surface_unreference(&surface);
-
-       if (ret) {
-               DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
-               ttm_base_object_unref(&user_obj);
-               return ERR_PTR(ret);
-       } else
-               vfb->user_obj = user_obj;
-       return &vfb->base;
-
-try_dmabuf:
-       DRM_INFO("%s: trying buffer\n", __func__);
-
-       ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
-       if (ret) {
-               DRM_ERROR("failed to find buffer: %i\n", ret);
-               return ERR_PTR(-ENOENT);
-       }
-
-       ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
-                                            mode_cmd);
+               goto err_out;
+
+       /* Create the new framebuffer depending on what we got back */
+       if (bo)
+               ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
+                                                    mode_cmd);
+       else if (surface)
+               ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
+                                                     surface, &vfb, mode_cmd);
+       else
+               BUG();
 
-       /* vmw_user_dmabuf_lookup takes one ref so does new_fb */
-       vmw_dmabuf_unreference(&bo);
+err_out:
+       /* vmw_user_lookup_handle takes one ref; so does new_fb */
+       if (bo)
+               vmw_dmabuf_unreference(&bo);
+       if (surface)
+               vmw_surface_unreference(&surface);
 
        if (ret) {
                DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
@@ -1082,14 +1155,6 @@ try_dmabuf:
                vfb->user_obj = user_obj;
 
        return &vfb->base;
-
-err_not_scanout:
-       DRM_ERROR("surface not marked as scanout\n");
-       /* vmw_user_surface_lookup takes one ref */
-       vmw_surface_unreference(&surface);
-       ttm_base_object_unref(&user_obj);
-
-       return ERR_PTR(-EINVAL);
 }
 
 static struct drm_mode_config_funcs vmw_kms_funcs = {
@@ -1106,10 +1171,12 @@ int vmw_kms_present(struct vmw_private *dev_priv,
                    uint32_t num_clips)
 {
        struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+       struct drm_clip_rect *tmp;
        struct drm_crtc *crtc;
        size_t fifo_size;
        int i, k, num_units;
        int ret = 0; /* silence warning */
+       int left, right, top, bottom;
 
        struct {
                SVGA3dCmdHeader header;
@@ -1127,60 +1194,95 @@ int vmw_kms_present(struct vmw_private *dev_priv,
        BUG_ON(surface == NULL);
        BUG_ON(!clips || !num_clips);
 
+       tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
+       if (unlikely(tmp == NULL)) {
+               DRM_ERROR("Temporary cliprect memory alloc failed.\n");
+               return -ENOMEM;
+       }
+
        fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
        cmd = kmalloc(fifo_size, GFP_KERNEL);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed to allocate temporary fifo memory.\n");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto out_free_tmp;
+       }
+
+       left = clips->x;
+       right = clips->x + clips->w;
+       top = clips->y;
+       bottom = clips->y + clips->h;
+
+       for (i = 1; i < num_clips; i++) {
+               left = min_t(int, left, (int)clips[i].x);
+               right = max_t(int, right, (int)clips[i].x + clips[i].w);
+               top = min_t(int, top, (int)clips[i].y);
+               bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
        }
 
        /* only need to do this once */
        memset(cmd, 0, fifo_size);
        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
-       cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
-
-       cmd->body.srcRect.left = 0;
-       cmd->body.srcRect.right = surface->sizes[0].width;
-       cmd->body.srcRect.top = 0;
-       cmd->body.srcRect.bottom = surface->sizes[0].height;
 
        blits = (SVGASignedRect *)&cmd[1];
+
+       cmd->body.srcRect.left = left;
+       cmd->body.srcRect.right = right;
+       cmd->body.srcRect.top = top;
+       cmd->body.srcRect.bottom = bottom;
+
        for (i = 0; i < num_clips; i++) {
-               blits[i].left   = clips[i].x;
-               blits[i].right  = clips[i].x + clips[i].w;
-               blits[i].top    = clips[i].y;
-               blits[i].bottom = clips[i].y + clips[i].h;
+               tmp[i].x1 = clips[i].x - left;
+               tmp[i].x2 = clips[i].x + clips[i].w - left;
+               tmp[i].y1 = clips[i].y - top;
+               tmp[i].y2 = clips[i].y + clips[i].h - top;
        }
 
        for (k = 0; k < num_units; k++) {
                struct vmw_display_unit *unit = units[k];
-               int clip_x1 = destX - unit->crtc.x;
-               int clip_y1 = destY - unit->crtc.y;
-               int clip_x2 = clip_x1 + surface->sizes[0].width;
-               int clip_y2 = clip_y1 + surface->sizes[0].height;
+               struct vmw_clip_rect clip;
+               int num;
+
+               clip.x1 = left + destX - unit->crtc.x;
+               clip.y1 = top + destY - unit->crtc.y;
+               clip.x2 = right + destX - unit->crtc.x;
+               clip.y2 = bottom + destY - unit->crtc.y;
 
                /* skip any crtcs that misses the clip region */
-               if (clip_x1 >= unit->crtc.mode.hdisplay ||
-                   clip_y1 >= unit->crtc.mode.vdisplay ||
-                   clip_x2 <= 0 || clip_y2 <= 0)
+               if (clip.x1 >= unit->crtc.mode.hdisplay ||
+                   clip.y1 >= unit->crtc.mode.vdisplay ||
+                   clip.x2 <= 0 || clip.y2 <= 0)
                        continue;
 
+               /*
+                * In order for the clip rects to be correctly scaled,
+                * the src and dest rects need to be the same size.
+                */
+               cmd->body.destRect.left = clip.x1;
+               cmd->body.destRect.right = clip.x2;
+               cmd->body.destRect.top = clip.y1;
+               cmd->body.destRect.bottom = clip.y2;
+
+               /* create a clip rect of the crtc in dest coords */
+               clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
+               clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
+               clip.x1 = 0 - clip.x1;
+               clip.y1 = 0 - clip.y1;
+
                /* need to reset sid as it is changed by execbuf */
                cmd->body.srcImage.sid = sid;
-
                cmd->body.destScreenId = unit->unit;
 
-               /*
-                * The blit command is a lot more resilient then the
-                * readback command when it comes to clip rects. So its
-                * okay to go out of bounds.
-                */
+               /* clip and write blits to cmd stream */
+               vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
 
-               cmd->body.destRect.left = clip_x1;
-               cmd->body.destRect.right = clip_x2;
-               cmd->body.destRect.top = clip_y1;
-               cmd->body.destRect.bottom = clip_y2;
+               /* if no cliprects hit skip this */
+               if (num == 0)
+                       continue;
 
+               /* recalculate package length */
+               fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
+               cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
                ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
                                          fifo_size, 0, NULL);
 
@@ -1189,6 +1291,8 @@ int vmw_kms_present(struct vmw_private *dev_priv,
        }
 
        kfree(cmd);
+out_free_tmp:
+       kfree(tmp);
 
        return ret;
 }
@@ -1809,7 +1913,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
        }
 
        rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
-       rects = kzalloc(rects_size, GFP_KERNEL);
+       rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
+                       GFP_KERNEL);
        if (unlikely(!rects)) {
                ret = -ENOMEM;
                goto out_unlock;
@@ -1824,10 +1929,10 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
        }
 
        for (i = 0; i < arg->num_outputs; ++i) {
-               if (rects->x < 0 ||
-                   rects->y < 0 ||
-                   rects->x + rects->w > mode_config->max_width ||
-                   rects->y + rects->h > mode_config->max_height) {
+               if (rects[i].x < 0 ||
+                   rects[i].y < 0 ||
+                   rects[i].x + rects[i].w > mode_config->max_width ||
+                   rects[i].y + rects[i].h > mode_config->max_height) {
                        DRM_ERROR("Invalid GUI layout.\n");
                        ret = -EINVAL;
                        goto out_free;
index af8e6e5bd964a2dca37f7ba17165256d44a2f8bf..e1cb8556355fc625946783c30286ba3fb9388762 100644 (file)
@@ -62,9 +62,14 @@ struct vmw_framebuffer {
 int vmw_cursor_update_image(struct vmw_private *dev_priv,
                            u32 *image, u32 width, u32 height,
                            u32 hotspotX, u32 hotspotY);
+int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
+                            struct vmw_dma_buffer *dmabuf,
+                            u32 width, u32 height,
+                            u32 hotspotX, u32 hotspotY);
 void vmw_cursor_update_position(struct vmw_private *dev_priv,
                                bool show, int x, int y);
 
+
 /**
  * Base class display unit.
  *
index 90c5e39284913353f68c6e4611d59391d557228c..8f8dbd43c33d0116a43fb1206d93a4512aca2229 100644 (file)
@@ -74,9 +74,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
 {
        struct vmw_legacy_display *lds = dev_priv->ldu_priv;
        struct vmw_legacy_display_unit *entry;
+       struct vmw_display_unit *du = NULL;
        struct drm_framebuffer *fb = NULL;
        struct drm_crtc *crtc = NULL;
-       int i = 0;
+       int i = 0, ret;
 
        /* If there is no display topology the host just assumes
         * that the guest will set the same layout as the host.
@@ -129,6 +130,25 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
 
        lds->last_num_active = lds->num_active;
 
+
+       /* Find the first du with a cursor. */
+       list_for_each_entry(entry, &lds->active, active) {
+               du = &entry->base;
+
+               if (!du->cursor_dmabuf)
+                       continue;
+
+               ret = vmw_cursor_update_dmabuf(dev_priv,
+                                              du->cursor_dmabuf,
+                                              64, 64,
+                                              du->hotspot_x,
+                                              du->hotspot_y);
+               if (ret == 0)
+                       break;
+
+               DRM_ERROR("Could not update cursor image\n");
+       }
+
        return 0;
 }
 
index 86c5e4cceb31ef83beb568e3b912de62f73a2648..1c7f09e268190a5466fdac896e2d617382c8f2d1 100644 (file)
@@ -1190,6 +1190,29 @@ void vmw_resource_unreserve(struct list_head *list)
                write_unlock(lock);
 }
 
+/**
+ * Helper function that looks up either a surface or a dmabuf.
+ *
+ * The pointers pointed to by out_surf and out_buf need to be NULL.
+ */
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+                          struct ttm_object_file *tfile,
+                          uint32_t handle,
+                          struct vmw_surface **out_surf,
+                          struct vmw_dma_buffer **out_buf)
+{
+       int ret;
+
+       BUG_ON(*out_surf || *out_buf);
+
+       ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
+       if (!ret)
+               return 0;
+
+       ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+       return ret;
+}
+
 
 int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
                                   struct ttm_object_file *tfile,
index 848a56c0279c8ac61687340c732521094428e541..af353842f75feaceadeedcc547eeb880519f86df 100644 (file)
@@ -1771,8 +1771,8 @@ static const struct hid_device_id hid_ignore_list[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
        { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
-       { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) },
        { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) },
index 06ce996b8b6504f65c9216c3173a38cf5a51cb34..4a441a6f996748a923204fbcb53c4850eb3af061 100644 (file)
 #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
 
 #define USB_VENDOR_ID_GENERAL_TOUCH    0x0dfc
-#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
 
 #define USB_VENDOR_ID_GLAB             0x06c2
 #define USB_DEVICE_ID_4_PHIDGETSERVO_30        0x0038
index 7a48b1eb423334780116f1cb6e16edcec8f19a49..5253d23361d91a4e93eefb00021bcd3ae4f9915f 100644 (file)
@@ -59,7 +59,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
 {
        struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
        struct completion *completion = &hwmon->read_completion;
-       unsigned long t;
+       long t;
        unsigned long val;
        int ret;
 
@@ -203,7 +203,7 @@ static int __devexit jz4740_hwmon_remove(struct platform_device *pdev)
        return 0;
 }
 
-struct platform_driver jz4740_hwmon_driver = {
+static struct platform_driver jz4740_hwmon_driver = {
        .probe  = jz4740_hwmon_probe,
        .remove = __devexit_p(jz4740_hwmon_remove),
        .driver = {
index 8cebef49aeaca2b81a94b862387fb0be46dcdaa5..18936ac9d51cd14af2221546fd062c3494ea18ec 100644 (file)
@@ -893,6 +893,13 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
        /* Set the number of I2C channel instance */
        adap_info->ch_num = id->driver_data;
 
+       ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
+                 KBUILD_MODNAME, adap_info);
+       if (ret) {
+               pch_pci_err(pdev, "request_irq FAILED\n");
+               goto err_request_irq;
+       }
+
        for (i = 0; i < adap_info->ch_num; i++) {
                pch_adap = &adap_info->pch_data[i].pch_adapter;
                adap_info->pch_i2c_suspended = false;
@@ -910,28 +917,23 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
 
                pch_adap->dev.parent = &pdev->dev;
 
+               pch_i2c_init(&adap_info->pch_data[i]);
                ret = i2c_add_adapter(pch_adap);
                if (ret) {
                        pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
-                       goto err_i2c_add_adapter;
+                       goto err_add_adapter;
                }
-
-               pch_i2c_init(&adap_info->pch_data[i]);
-       }
-       ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
-                 KBUILD_MODNAME, adap_info);
-       if (ret) {
-               pch_pci_err(pdev, "request_irq FAILED\n");
-               goto err_i2c_add_adapter;
        }
 
        pci_set_drvdata(pdev, adap_info);
        pch_pci_dbg(pdev, "returns %d.\n", ret);
        return 0;
 
-err_i2c_add_adapter:
+err_add_adapter:
        for (j = 0; j < i; j++)
                i2c_del_adapter(&adap_info->pch_data[j].pch_adapter);
+       free_irq(pdev->irq, adap_info);
+err_request_irq:
        pci_iounmap(pdev, base_addr);
 err_pci_iomap:
        pci_release_regions(pdev);
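The reordering above requests the shared IRQ before any adapter is registered and makes the error path unwind in strict reverse order: delete the adapters added so far, then free the IRQ, then fall through to the existing iounmap/release steps. A compact sketch of that acquire-in-order / release-in-reverse goto ladder, with stub functions standing in for the PCH calls:

#include <stdio.h>

/* Stubs in place of request_irq()/i2c_add_adapter(); each either succeeds
 * (0) or fails (negative) and has a matching release function. */
static int  grab_irq(void)      { return 0; }
static void release_irq(void)   { puts("irq released"); }
static int  add_adapter(int i)  { return i < 2 ? 0 : -1; }  /* 3rd one fails */
static void del_adapter(int i)  { printf("adapter %d deleted\n", i); }

/* Acquire resources in order; on failure unwind only what was already
 * acquired, in reverse order, exactly as the hunk above now does. */
static int probe(void)
{
	int i, j, ret;

	ret = grab_irq();
	if (ret)
		goto err_request_irq;

	for (i = 0; i < 3; i++) {
		ret = add_adapter(i);
		if (ret)
			goto err_add_adapter;
	}
	return 0;

err_add_adapter:
	for (j = 0; j < i; j++)
		del_adapter(j);
	release_irq();
err_request_irq:
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}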
index a43d0023446a873c18586f0de99ac733ee1e6cfc..fa23faa20f0e34435881e24abc6d5e4c9545a259 100644 (file)
@@ -1047,13 +1047,14 @@ omap_i2c_probe(struct platform_device *pdev)
                 * size. This is to ensure that we can handle the status on int
                 * call back latencies.
                 */
-               if (dev->rev >= OMAP_I2C_REV_ON_3530_4430) {
-                       dev->fifo_size = 0;
+
+               dev->fifo_size = (dev->fifo_size / 2);
+
+               if (dev->rev >= OMAP_I2C_REV_ON_3530_4430)
                        dev->b_hw = 0; /* Disable hardware fixes */
-               } else {
-                       dev->fifo_size = (dev->fifo_size / 2);
+               else
                        dev->b_hw = 1; /* Enable hardware fixes */
-               }
+
                /* calculate wakeup latency constraint for MPU */
                if (dev->set_mpu_wkup_lat != NULL)
                        dev->latency = (1000000 * dev->fifo_size) /
index 2754cef86a06f882d337a28ef8cc320f1da4727b..4c17180816853a339ddb7a3dcb22b47425786173 100644 (file)
@@ -534,6 +534,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
 
        /* first, try busy waiting briefly */
        do {
+               cpu_relax();
                iicstat = readl(i2c->regs + S3C2410_IICSTAT);
        } while ((iicstat & S3C2410_IICSTAT_START) && --spins);
 
@@ -786,7 +787,7 @@ static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
 #else
 static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
 {
-       return -EINVAL;
+       return 0;
 }
 
 static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
index 75ff821c0af07caaef3a0e48c8503489838bd75d..d0d4aa9f480245097b2fab1bd05380c3fe6a01d6 100644 (file)
@@ -2513,6 +2513,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 
        req.private_data_len = sizeof(struct cma_hdr) +
                               conn_param->private_data_len;
+       if (req.private_data_len < conn_param->private_data_len)
+               return -EINVAL;
+
        req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
        if (!req.private_data)
                return -ENOMEM;
@@ -2562,6 +2565,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
        memset(&req, 0, sizeof req);
        offset = cma_user_data_offset(id_priv->id.ps);
        req.private_data_len = offset + conn_param->private_data_len;
+       if (req.private_data_len < conn_param->private_data_len)
+               return -EINVAL;
+
        private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
        if (!private_data)
                return -ENOMEM;
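Both new checks catch req.private_data_len wrapping: the length fields in the IB CM request structures are narrower than the values being summed (u8, as far as the CM headers go), so header size plus a user-supplied length can wrap to a small number and later produce an undersized allocation followed by an out-of-bounds copy. A minimal sketch of the same detect-after-add test on an 8-bit sum (checked_add_u8() is illustrative only):

#include <errno.h>
#include <stdio.h>

/* Same overflow test as the hunks above: for unsigned arithmetic,
 * the sum wrapped exactly when it ends up smaller than one operand. */
static int checked_add_u8(unsigned char a, unsigned char b,
			  unsigned char *sum)
{
	*sum = a + b;		/* may wrap modulo 256 */
	if (*sum < b)
		return -EINVAL;	/* overflow detected after the fact */
	return 0;
}

int main(void)
{
	unsigned char s;

	printf("200+100 -> %d\n", checked_add_u8(200, 100, &s)); /* -EINVAL */
	printf("20+100  -> %d\n", checked_add_u8(20, 100, &s));  /* 0 */
	return 0;
}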
index 77f3dbc0aaa1629783bfadb84d0ac5ad754e8207..18836cdf1e1075e6a17895de44c67528a3389169 100644 (file)
@@ -1244,7 +1244,8 @@ err_reg:
 
 err_counter:
        for (; i; --i)
-               mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
+               if (ibdev->counters[i - 1] != -1)
+                       mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
 
 err_map:
        iounmap(ibdev->uar_map);
@@ -1275,7 +1276,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
        }
        iounmap(ibdev->uar_map);
        for (p = 0; p < ibdev->num_ports; ++p)
-               mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
+               if (ibdev->counters[p] != -1)
+                       mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
        mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
                mlx4_CLOSE_PORT(dev, p);
 
index 574600ef5b428e4766d8cd431632a0e131c176a9..a7403248d83dee7c14ce9578618872c745fdffcd 100644 (file)
@@ -1285,7 +1285,7 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
        strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
        ctxt_fp(fp) = rcd;
        qib_stats.sps_ctxts++;
-       dd->freectxts++;
+       dd->freectxts--;
        ret = 0;
        goto bail;
 
@@ -1794,7 +1794,7 @@ static int qib_close(struct inode *in, struct file *fp)
                if (dd->pageshadow)
                        unlock_expected_tids(rcd);
                qib_stats.sps_ctxts--;
-               dd->freectxts--;
+               dd->freectxts++;
        }
 
        mutex_unlock(&qib_mutex);
index 80793f1608eb0d1fd2eaff655cba48ffcc037709..06517e60e50c1e64742083eb1371fb7a328bd929 100644 (file)
@@ -115,8 +115,8 @@ static void decode_mg(struct cma3000_accl_data *data, int *datax,
 static irqreturn_t cma3000_thread_irq(int irq, void *dev_id)
 {
        struct cma3000_accl_data *data = dev_id;
-       int datax, datay, dataz;
-       u8 ctrl, mode, range, intr_status;
+       int datax, datay, dataz, intr_status;
+       u8 ctrl, mode, range;
 
        intr_status = CMA3000_READ(data, CMA3000_INTSTATUS, "interrupt status");
        if (intr_status < 0)
index c5b12d2e955a5cdb8c968c3e31d86bcb99e416b3..86d6f39178b0d556364df951ca33575598bfa416 100644 (file)
@@ -2,7 +2,7 @@
  * Finger Sensing Pad PS/2 mouse driver.
  *
  * Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
- * Copyright (C) 2005-2010 Tai-hwa Liang, Sentelic Corporation.
+ * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation.
  *
  *   This program is free software; you can redistribute it and/or
  *   modify it under the terms of the GNU General Public License
@@ -162,7 +162,7 @@ static int fsp_reg_write(struct psmouse *psmouse, int reg_addr, int reg_val)
        ps2_sendbyte(ps2dev, v, FSP_CMD_TIMEOUT2);
 
        if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0)
-               return -1;
+               goto out;
 
        if ((v = fsp_test_invert_cmd(reg_val)) != reg_val) {
                /* inversion is required */
@@ -261,7 +261,7 @@ static int fsp_page_reg_write(struct psmouse *psmouse, int reg_val)
        ps2_sendbyte(ps2dev, 0x88, FSP_CMD_TIMEOUT2);
 
        if (ps2_sendbyte(ps2dev, 0xf3, FSP_CMD_TIMEOUT) < 0)
-               return -1;
+               goto out;
 
        if ((v = fsp_test_invert_cmd(reg_val)) != reg_val) {
                ps2_sendbyte(ps2dev, 0x47, FSP_CMD_TIMEOUT2);
@@ -309,7 +309,7 @@ static int fsp_get_buttons(struct psmouse *psmouse, int *btn)
        };
        int val;
 
-       if (fsp_reg_read(psmouse, FSP_REG_TMOD_STATUS1, &val) == -1)
+       if (fsp_reg_read(psmouse, FSP_REG_TMOD_STATUS, &val) == -1)
                return -EIO;
 
        *btn = buttons[(val & 0x30) >> 4];
index ed1395ac7b8b3e11960ae51c7c92c7f51b615fc9..2e4af24f8c1586b6ecfb3c7f8325cbeb34eb4ae5 100644 (file)
@@ -2,7 +2,7 @@
  * Finger Sensing Pad PS/2 mouse driver.
  *
  * Copyright (C) 2005-2007 Asia Vital Components Co., Ltd.
- * Copyright (C) 2005-2009 Tai-hwa Liang, Sentelic Corporation.
+ * Copyright (C) 2005-2011 Tai-hwa Liang, Sentelic Corporation.
  *
  *   This program is free software; you can redistribute it and/or
  *   modify it under the terms of the GNU General Public License
@@ -33,6 +33,7 @@
 /* Finger-sensing Pad control registers */
 #define        FSP_REG_SYSCTL1         0x10
 #define        FSP_BIT_EN_REG_CLK      BIT(5)
+#define        FSP_REG_TMOD_STATUS     0x20
 #define        FSP_REG_OPC_QDOWN       0x31
 #define        FSP_BIT_EN_OPC_TAG      BIT(7)
 #define        FSP_REG_OPTZ_XLO        0x34
index c080b828e5dc5e2f69c8cf859deebe3d6abbae61..a6dcd18e9adf93b5b97cd00e04b6419a6788e1e5 100644 (file)
@@ -24,6 +24,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/delay.h>
 #include <linux/dmi.h>
 #include <linux/input/mt.h>
 #include <linux/serio.h>
@@ -1220,6 +1221,16 @@ static int synaptics_reconnect(struct psmouse *psmouse)
 
        do {
                psmouse_reset(psmouse);
+               if (retry) {
+                       /*
+                        * On some boxes, right after resuming, the touchpad
+                        * needs some time to finish initializing (I assume
+                        * it needs time to calibrate) and start responding
+                        * to Synaptics-specific queries, so let's wait a
+                        * bit.
+                        */
+                       ssleep(1);
+               }
                error = synaptics_detect(psmouse, 0);
        } while (error && ++retry < 3);
 
index da0d8761e778cfd8f79e64b26a0ce3acd8cf60c5..2ee47d01a3b4ecde112b07ac95ccd798f053e4f4 100644 (file)
@@ -1470,6 +1470,9 @@ static const struct wacom_features wacom_features_0xE3 =
 static const struct wacom_features wacom_features_0xE6 =
        { "Wacom ISDv4 E6",       WACOM_PKGLEN_TPC2FG,    27760, 15694,  255,
          0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xEC =
+       { "Wacom ISDv4 EC",       WACOM_PKGLEN_GRAPHIRE,  25710, 14500,  255,
+         0, TABLETPC,    WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0x47 =
        { "Wacom Intuos2 6x8",    WACOM_PKGLEN_INTUOS,    20320, 16240, 1023,
          31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1611,6 +1614,7 @@ const struct usb_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0xE2) },
        { USB_DEVICE_WACOM(0xE3) },
        { USB_DEVICE_WACOM(0xE6) },
+       { USB_DEVICE_WACOM(0xEC) },
        { USB_DEVICE_WACOM(0x47) },
        { USB_DEVICE_LENOVO(0x6004) },
        { }
index c0c7820d4c46b406465e0d2d8e059a80ce819476..bdc447fd4766fbba47f46f49301a3a4758639289 100644 (file)
@@ -405,6 +405,9 @@ int dmar_disabled = 0;
 int dmar_disabled = 1;
 #endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
 
+int intel_iommu_enabled = 0;
+EXPORT_SYMBOL_GPL(intel_iommu_enabled);
+
 static int dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
@@ -3524,7 +3527,7 @@ found:
        return 0;
 }
 
-int dmar_parse_rmrr_atsr_dev(void)
+int __init dmar_parse_rmrr_atsr_dev(void)
 {
        struct dmar_rmrr_unit *rmrr, *rmrr_n;
        struct dmar_atsr_unit *atsr, *atsr_n;
@@ -3647,6 +3650,8 @@ int __init intel_iommu_init(void)
 
        bus_register_notifier(&pci_bus_type, &device_nb);
 
+       intel_iommu_enabled = 1;
+
        return 0;
 }
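
The new intel_iommu_enabled flag gives other kernel code a runtime answer to "did the Intel IOMMU actually initialize?", which a compile-time Kconfig check cannot provide. A minimal consumer sketch follows; only the exported flag itself comes from the patch above, the surrounding names are made up for illustration.

/* Hypothetical consumer: take a DMA-remapping-dependent path only when
 * the Intel IOMMU really came up.  intel_iommu_enabled is the flag
 * exported above; my_driver_can_use_iommu() is illustrative only. */
#include <linux/types.h>

extern int intel_iommu_enabled;	/* set to 1 at the end of intel_iommu_init() */

static bool my_driver_can_use_iommu(void)
{
	return intel_iommu_enabled != 0;
}
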
 
index 07c9f189f3143250e5ea2d0ea20fcdaa2b1674df..6777ca049471728d445ec323e3f051bdc19126f9 100644 (file)
@@ -773,7 +773,7 @@ int __init parse_ioapics_under_ir(void)
        return ir_supported;
 }
 
-int ir_dev_scope_init(void)
+int __init ir_dev_scope_init(void)
 {
        if (!intr_remapping_enabled)
                return 0;
index 2fb2963df55376a3a8efbf09490457e08b28b836..5b5fa5cdaa3108da74b7358ae187dd4ee8a00181 100644 (file)
@@ -90,7 +90,7 @@ struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
        if (bus == NULL || bus->iommu_ops == NULL)
                return NULL;
 
-       domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+       domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;
 
index 33ec9e4677727800d5439e16300be820c92090b8..9021182c4b766e02454365f1e2b6d22822358f8a 100644 (file)
@@ -242,6 +242,12 @@ static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg)
                case IIOCDOCFINT:
                        if (!divert_if.drv_to_name(dioctl.cf_ctrl.drvid))
                                return (-EINVAL);       /* invalid driver */
+                       if (strnlen(dioctl.cf_ctrl.msn, sizeof(dioctl.cf_ctrl.msn)) ==
+                                       sizeof(dioctl.cf_ctrl.msn))
+                               return -EINVAL;
+                       if (strnlen(dioctl.cf_ctrl.fwd_nr, sizeof(dioctl.cf_ctrl.fwd_nr)) ==
+                                       sizeof(dioctl.cf_ctrl.fwd_nr))
+                               return -EINVAL;
                        if ((i = cf_command(dioctl.cf_ctrl.drvid,
                                            (cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 0 : 2,
                                            dioctl.cf_ctrl.cfproc,
index 1f73d7f7e0242e4e73d55582935f4ff0a11f6646..2339d7396b9ea305dd845e592d227bd726d3d43b 100644 (file)
@@ -2756,6 +2756,9 @@ isdn_net_setcfg(isdn_net_ioctl_cfg * cfg)
                        char *c,
                        *e;
 
+                       if (strnlen(cfg->drvid, sizeof(cfg->drvid)) ==
+                                       sizeof(cfg->drvid))
+                               return -EINVAL;
                        drvidx = -1;
                        chidx = -1;
                        strcpy(drvid, cfg->drvid);
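
Both hunks above apply the same guard: a fixed-size, user-supplied character array is only treated as a C string if strnlen() finds a terminator inside the buffer; otherwise the ioctl is rejected before any strcpy()-style copy can overrun. A stand-alone user-space sketch of the check, with an illustrative struct and field size rather than the real ISDN layouts:

/* Reject a fixed-size buffer that is not NUL-terminated before using it
 * as a C string.  struct cfg and drvid[25] are made-up stand-ins. */
#include <stdio.h>
#include <string.h>

struct cfg { char drvid[25]; };

static int validate(const struct cfg *c)
{
	/* strnlen() == sizeof() means no terminator within the buffer. */
	if (strnlen(c->drvid, sizeof(c->drvid)) == sizeof(c->drvid))
		return -1;	/* a later strcpy() would overrun */
	return 0;
}

int main(void)
{
	struct cfg good = { "isdn0" };
	struct cfg bad;

	memset(bad.drvid, 'A', sizeof(bad.drvid));	/* no NUL anywhere */
	printf("good: %d, bad: %d\n", validate(&good), validate(&bad));
	return 0;
}
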
index 7878712721bf431a1315f44db1c3b2dcc9486245..6d03774b176ec8236d9cfb72654bedf600f052d2 100644 (file)
@@ -1106,10 +1106,12 @@ void bitmap_write_all(struct bitmap *bitmap)
         */
        int i;
 
+       spin_lock_irq(&bitmap->lock);
        for (i = 0; i < bitmap->file_pages; i++)
                set_page_attr(bitmap, bitmap->filemap[i],
                              BITMAP_PAGE_NEEDWRITE);
        bitmap->allclean = 0;
+       spin_unlock_irq(&bitmap->lock);
 }
 
 static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
@@ -1391,9 +1393,6 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
                         atomic_read(&bitmap->behind_writes),
                         bitmap->mddev->bitmap_info.max_write_behind);
        }
-       if (bitmap->mddev->degraded)
-               /* Never clear bits or update events_cleared when degraded */
-               success = 0;
 
        while (sectors) {
                sector_t blocks;
@@ -1407,7 +1406,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
                        return;
                }
 
-               if (success &&
+               if (success && !bitmap->mddev->degraded &&
                    bitmap->events_cleared < bitmap->mddev->events) {
                        bitmap->events_cleared = bitmap->mddev->events;
                        bitmap->need_sync = 1;
@@ -1605,7 +1604,9 @@ void bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
        for (chunk = s; chunk <= e; chunk++) {
                sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
                bitmap_set_memory_bits(bitmap, sec, 1);
+               spin_lock_irq(&bitmap->lock);
                bitmap_file_set_bit(bitmap, sec);
+               spin_unlock_irq(&bitmap->lock);
                if (sec < bitmap->mddev->recovery_cp)
                        /* We are asserting that the array is dirty,
                         * so move the recovery_cp address back so
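
The two added lock/unlock pairs encode a rule rather than patch a single site: any walk of the bitmap's filemap pages now happens under bitmap->lock with interrupts disabled, matching how the rest of the driver takes that lock. A simplified stand-in showing the shape of that rule; only spin_lock_irq()/spin_unlock_irq() are the real kernel API, the structure and callback are illustrative.

/* Simplified stand-in for the locking rule above, not the real md code. */
#include <linux/spinlock.h>

struct page;

struct demo_bitmap {
	spinlock_t lock;
	int file_pages;
	struct page **filemap;
};

static void demo_mark_all_pages(struct demo_bitmap *bitmap,
				void (*mark)(struct page *page))
{
	int i;

	spin_lock_irq(&bitmap->lock);
	for (i = 0; i < bitmap->file_pages; i++)
		mark(bitmap->filemap[i]);	/* e.g. flag the page NEEDWRITE */
	spin_unlock_irq(&bitmap->lock);
}
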
index c3273efd08cb6dce7a19cd8d193e98ff23600bb7..627456542fb3d0d1f1d18780db24a5531b29ce32 100644 (file)
@@ -230,6 +230,7 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
                return -EINVAL;
 
        rdev->raid_disk = rdev->saved_raid_disk;
+       rdev->saved_raid_disk = -1;
 
        newconf = linear_conf(mddev,mddev->raid_disks+1);
 
index 84acfe7d10e48e33ea581924d4648948d005d90c..f47f1f8ac44bc16677b212f35b398dae093dab87 100644 (file)
@@ -570,7 +570,7 @@ static void mddev_put(struct mddev *mddev)
            mddev->ctime == 0 && !mddev->hold_active) {
                /* Array is not configured at all, and not held active,
                 * so destroy it */
-               list_del(&mddev->all_mddevs);
+               list_del_init(&mddev->all_mddevs);
                bs = mddev->bio_set;
                mddev->bio_set = NULL;
                if (mddev->gendisk) {
@@ -2546,7 +2546,8 @@ state_show(struct md_rdev *rdev, char *page)
                sep = ",";
        }
        if (test_bit(Blocked, &rdev->flags) ||
-           rdev->badblocks.unacked_exist) {
+           (rdev->badblocks.unacked_exist
+            && !test_bit(Faulty, &rdev->flags))) {
                len += sprintf(page+len, "%sblocked", sep);
                sep = ",";
        }
@@ -3788,6 +3789,8 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
        if (err)
                return err;
        else {
+               if (mddev->hold_active == UNTIL_IOCTL)
+                       mddev->hold_active = 0;
                sysfs_notify_dirent_safe(mddev->sysfs_state);
                return len;
        }
@@ -4487,11 +4490,20 @@ md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 
        if (!entry->show)
                return -EIO;
+       spin_lock(&all_mddevs_lock);
+       if (list_empty(&mddev->all_mddevs)) {
+               spin_unlock(&all_mddevs_lock);
+               return -EBUSY;
+       }
+       mddev_get(mddev);
+       spin_unlock(&all_mddevs_lock);
+
        rv = mddev_lock(mddev);
        if (!rv) {
                rv = entry->show(mddev, page);
                mddev_unlock(mddev);
        }
+       mddev_put(mddev);
        return rv;
 }
 
@@ -4507,13 +4519,19 @@ md_attr_store(struct kobject *kobj, struct attribute *attr,
                return -EIO;
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
+       spin_lock(&all_mddevs_lock);
+       if (list_empty(&mddev->all_mddevs)) {
+               spin_unlock(&all_mddevs_lock);
+               return -EBUSY;
+       }
+       mddev_get(mddev);
+       spin_unlock(&all_mddevs_lock);
        rv = mddev_lock(mddev);
-       if (mddev->hold_active == UNTIL_IOCTL)
-               mddev->hold_active = 0;
        if (!rv) {
                rv = entry->store(mddev, page, length);
                mddev_unlock(mddev);
        }
+       mddev_put(mddev);
        return rv;
 }
 
@@ -7342,8 +7360,7 @@ static int remove_and_add_spares(struct mddev *mddev)
                                        spares++;
                                        md_new_event(mddev);
                                        set_bit(MD_CHANGE_DEVS, &mddev->flags);
-                               } else
-                                       break;
+                               }
                        }
                }
        }
@@ -7840,6 +7857,7 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
                                  s + rdev->data_offset, sectors, acknowledged);
        if (rv) {
                /* Make sure they get written out promptly */
+               sysfs_notify_dirent_safe(rdev->sysfs_state);
                set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
                md_wakeup_thread(rdev->mddev->thread);
        }
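
The md_attr_show()/md_attr_store() changes above follow a common teardown-safety pattern: before touching an mddev reached through sysfs, check under all_mddevs_lock that it is still linked into the global list (the switch to list_del_init() in mddev_put() is what makes that test meaningful), take a reference while the lock is held, and drop it when done. A generic sketch of the pattern with made-up names; the list and spinlock helpers are the real kernel API.

/* "Check liveness and take a reference under the list lock" sketch;
 * demo_dev, all_devs and demo_get_live() are illustrative names. */
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_dev {
	struct list_head node;	/* emptied with list_del_init() on teardown */
	int refs;		/* illustrative; md keeps its own active count */
};

static LIST_HEAD(all_devs);
static DEFINE_SPINLOCK(all_devs_lock);

/* Returns 0 with a reference held, or -EBUSY if teardown already started. */
static int demo_get_live(struct demo_dev *dev)
{
	spin_lock(&all_devs_lock);
	if (list_empty(&dev->node)) {
		spin_unlock(&all_devs_lock);
		return -EBUSY;
	}
	dev->refs++;
	spin_unlock(&all_devs_lock);
	return 0;
}
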
index 297e260921787f490b63ddf88a9ea5adbdfd82c4..858fdbb7eb07a24ceb18a4d6214218c9205089e7 100644 (file)
@@ -3036,6 +3036,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                if (dev->written)
                        s->written++;
                rdev = rcu_dereference(conf->disks[i].rdev);
+               if (rdev && test_bit(Faulty, &rdev->flags))
+                       rdev = NULL;
                if (rdev) {
                        is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
                                             &first_bad, &bad_sectors);
@@ -3063,12 +3065,18 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                        }
                } else if (test_bit(In_sync, &rdev->flags))
                        set_bit(R5_Insync, &dev->flags);
-               else if (!test_bit(Faulty, &rdev->flags)) {
+               else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
                        /* in sync if before recovery_offset */
-                       if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
-                               set_bit(R5_Insync, &dev->flags);
-               }
-               if (test_bit(R5_WriteError, &dev->flags)) {
+                       set_bit(R5_Insync, &dev->flags);
+               else if (test_bit(R5_UPTODATE, &dev->flags) &&
+                        test_bit(R5_Expanded, &dev->flags))
+                       /* If we've reshaped into here, we assume it is Insync.
+                        * We will shortly update recovery_offset to make
+                        * it official.
+                        */
+                       set_bit(R5_Insync, &dev->flags);
+
+               if (rdev && test_bit(R5_WriteError, &dev->flags)) {
                        clear_bit(R5_Insync, &dev->flags);
                        if (!test_bit(Faulty, &rdev->flags)) {
                                s->handle_bad_blocks = 1;
@@ -3076,7 +3084,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
                        } else
                                clear_bit(R5_WriteError, &dev->flags);
                }
-               if (test_bit(R5_MadeGood, &dev->flags)) {
+               if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
                        if (!test_bit(Faulty, &rdev->flags)) {
                                s->handle_bad_blocks = 1;
                                atomic_inc(&rdev->nr_pending);
index 7eb1bf75cd072ae634b816a8a496ccfd8a559127..5d02221e99dd973ad674e56567bab4b369fae7ac 100644 (file)
@@ -488,9 +488,10 @@ static int mxl5007t_write_regs(struct mxl5007t_state *state,
 
 static int mxl5007t_read_reg(struct mxl5007t_state *state, u8 reg, u8 *val)
 {
+       u8 buf[2] = { 0xfb, reg };
        struct i2c_msg msg[] = {
                { .addr = state->i2c_props.addr, .flags = 0,
-                 .buf = &reg, .len = 1 },
+                 .buf = buf, .len = 2 },
                { .addr = state->i2c_props.addr, .flags = I2C_M_RD,
                  .buf = val, .len = 1 },
        };
index aacfe2387e28393d7eaf1f436cbc06afd0d29182..4fc29730a12ccf2305739c34efa625deef0a1f95 100644 (file)
@@ -141,7 +141,7 @@ static int tda18218_set_params(struct dvb_frontend *fe,
        switch (params->u.ofdm.bandwidth) {
        case BANDWIDTH_6_MHZ:
                LP_Fc = 0;
-               LO_Frac = params->frequency + 4000000;
+               LO_Frac = params->frequency + 3000000;
                break;
        case BANDWIDTH_7_MHZ:
                LP_Fc = 1;
index 303f22ea04c075792cd2953628230c82d9e01160..01bb8daf4b09f43c8b153c7a099e85d851e1b249 100644 (file)
@@ -189,7 +189,7 @@ struct ati_remote {
        dma_addr_t inbuf_dma;
        dma_addr_t outbuf_dma;
 
-       unsigned char old_data[2];  /* Detect duplicate events */
+       unsigned char old_data;     /* Detect duplicate events */
        unsigned long old_jiffies;
        unsigned long acc_jiffies;  /* handle acceleration */
        unsigned long first_jiffies;
@@ -221,35 +221,35 @@ struct ati_remote {
 /* Translation table from hardware messages to input events. */
 static const struct {
        short kind;
-       unsigned char data1, data2;
+       unsigned char data;
        int type;
        unsigned int code;
        int value;
 }  ati_remote_tbl[] = {
        /* Directional control pad axes */
-       {KIND_ACCEL,   0x35, 0x70, EV_REL, REL_X, -1},   /* left */
-       {KIND_ACCEL,   0x36, 0x71, EV_REL, REL_X, 1},    /* right */
-       {KIND_ACCEL,   0x37, 0x72, EV_REL, REL_Y, -1},   /* up */
-       {KIND_ACCEL,   0x38, 0x73, EV_REL, REL_Y, 1},    /* down */
+       {KIND_ACCEL,   0x70, EV_REL, REL_X, -1},   /* left */
+       {KIND_ACCEL,   0x71, EV_REL, REL_X, 1},    /* right */
+       {KIND_ACCEL,   0x72, EV_REL, REL_Y, -1},   /* up */
+       {KIND_ACCEL,   0x73, EV_REL, REL_Y, 1},    /* down */
        /* Directional control pad diagonals */
-       {KIND_LU,      0x39, 0x74, EV_REL, 0, 0},        /* left up */
-       {KIND_RU,      0x3a, 0x75, EV_REL, 0, 0},        /* right up */
-       {KIND_LD,      0x3c, 0x77, EV_REL, 0, 0},        /* left down */
-       {KIND_RD,      0x3b, 0x76, EV_REL, 0, 0},        /* right down */
+       {KIND_LU,      0x74, EV_REL, 0, 0},        /* left up */
+       {KIND_RU,      0x75, EV_REL, 0, 0},        /* right up */
+       {KIND_LD,      0x77, EV_REL, 0, 0},        /* left down */
+       {KIND_RD,      0x76, EV_REL, 0, 0},        /* right down */
 
        /* "Mouse button" buttons */
-       {KIND_LITERAL, 0x3d, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
-       {KIND_LITERAL, 0x3e, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
-       {KIND_LITERAL, 0x41, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
-       {KIND_LITERAL, 0x42, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
+       {KIND_LITERAL, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
+       {KIND_LITERAL, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
+       {KIND_LITERAL, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
+       {KIND_LITERAL, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
 
        /* Artificial "doubleclick" events are generated by the hardware.
         * They are mapped to the "side" and "extra" mouse buttons here. */
-       {KIND_FILTERED, 0x3f, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
-       {KIND_FILTERED, 0x43, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
+       {KIND_FILTERED, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
+       {KIND_FILTERED, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
 
        /* Non-mouse events are handled by rc-core */
-       {KIND_END, 0x00, 0x00, EV_MAX + 1, 0, 0}
+       {KIND_END, 0x00, EV_MAX + 1, 0, 0}
 };
 
 /* Local function prototypes */
@@ -396,25 +396,6 @@ static int ati_remote_sendpacket(struct ati_remote *ati_remote, u16 cmd, unsigne
        return retval;
 }
 
-/*
- *     ati_remote_event_lookup
- */
-static int ati_remote_event_lookup(int rem, unsigned char d1, unsigned char d2)
-{
-       int i;
-
-       for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
-               /*
-                * Decide if the table entry matches the remote input.
-                */
-               if (ati_remote_tbl[i].data1 == d1 &&
-                   ati_remote_tbl[i].data2 == d2)
-                       return i;
-
-       }
-       return -1;
-}
-
 /*
  *     ati_remote_compute_accel
  *
@@ -463,7 +444,15 @@ static void ati_remote_input_report(struct urb *urb)
        int index = -1;
        int acc;
        int remote_num;
-       unsigned char scancode[2];
+       unsigned char scancode;
+       int i;
+
+       /*
+        * data[0] = 0x14
+        * data[1] = data[2] + data[3] + 0xd5 (a checksum byte)
+        * data[2] = the key code (with toggle bit in MSB with some models)
+        * data[3] = channel << 4 (the low 4 bits must be zero)
+        */
 
        /* Deal with strange looking inputs */
        if ( (urb->actual_length != 4) || (data[0] != 0x14) ||
@@ -472,6 +461,13 @@ static void ati_remote_input_report(struct urb *urb)
                return;
        }
 
+       if (data[1] != ((data[2] + data[3] + 0xd5) & 0xff)) {
+               dbginfo(&ati_remote->interface->dev,
+                       "wrong checksum in input: %02x %02x %02x %02x\n",
+                       data[0], data[1], data[2], data[3]);
+               return;
+       }
+
        /* Mask unwanted remote channels.  */
        /* note: remote_num is 0-based, channel 1 on remote == 0 here */
        remote_num = (data[3] >> 4) & 0x0f;
@@ -482,31 +478,30 @@ static void ati_remote_input_report(struct urb *urb)
                return;
        }
 
-       scancode[0] = (((data[1] - ((remote_num + 1) << 4)) & 0xf0) | (data[1] & 0x0f));
-
        /*
-        * Some devices (e.g. SnapStream Firefly) use 8080 as toggle code,
-        * so we have to clear them. The first bit is a bit tricky as the
-        * "non-toggled" state depends on remote_num, so we xor it with the
-        * second bit which is only used for toggle.
+        * MSB is a toggle code, though only used by some devices
+        * (e.g. SnapStream Firefly)
         */
-       scancode[0] ^= (data[2] & 0x80);
-
-       scancode[1] = data[2] & ~0x80;
+       scancode = data[2] & 0x7f;
 
-       /* Look up event code index in mouse translation table. */
-       index = ati_remote_event_lookup(remote_num, scancode[0], scancode[1]);
+       /* Look up event code index in the mouse translation table. */
+       for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
+               if (scancode == ati_remote_tbl[i].data) {
+                       index = i;
+                       break;
+               }
+       }
 
        if (index >= 0) {
                dbginfo(&ati_remote->interface->dev,
-                       "channel 0x%02x; mouse data %02x,%02x; index %d; keycode %d\n",
-                       remote_num, data[1], data[2], index, ati_remote_tbl[index].code);
+                       "channel 0x%02x; mouse data %02x; index %d; keycode %d\n",
+                       remote_num, data[2], index, ati_remote_tbl[index].code);
                if (!dev)
                        return; /* no mouse device */
        } else
                dbginfo(&ati_remote->interface->dev,
-                       "channel 0x%02x; key data %02x,%02x, scancode %02x,%02x\n",
-                       remote_num, data[1], data[2], scancode[0], scancode[1]);
+                       "channel 0x%02x; key data %02x, scancode %02x\n",
+                       remote_num, data[2], scancode);
 
 
        if (index >= 0 && ati_remote_tbl[index].kind == KIND_LITERAL) {
@@ -523,8 +518,7 @@ static void ati_remote_input_report(struct urb *urb)
                unsigned long now = jiffies;
 
                /* Filter duplicate events which happen "too close" together. */
-               if (ati_remote->old_data[0] == data[1] &&
-                   ati_remote->old_data[1] == data[2] &&
+               if (ati_remote->old_data == data[2] &&
                    time_before(now, ati_remote->old_jiffies +
                                     msecs_to_jiffies(repeat_filter))) {
                        ati_remote->repeat_count++;
@@ -533,8 +527,7 @@ static void ati_remote_input_report(struct urb *urb)
                        ati_remote->first_jiffies = now;
                }
 
-               ati_remote->old_data[0] = data[1];
-               ati_remote->old_data[1] = data[2];
+               ati_remote->old_data = data[2];
                ati_remote->old_jiffies = now;
 
                /* Ensure we skip at least the 4 first duplicate events (generated
@@ -549,14 +542,13 @@ static void ati_remote_input_report(struct urb *urb)
 
                if (index < 0) {
                        /* Not a mouse event, hand it to rc-core. */
-                       u32 rc_code = (scancode[0] << 8) | scancode[1];
 
                        /*
                         * We don't use the rc-core repeat handling yet as
                         * it would cause ghost repeats which would be a
                         * regression for this driver.
                         */
-                       rc_keydown_notimeout(ati_remote->rdev, rc_code,
+                       rc_keydown_notimeout(ati_remote->rdev, scancode,
                                             data[2]);
                        rc_keyup(ati_remote->rdev);
                        return;
@@ -607,8 +599,7 @@ static void ati_remote_input_report(struct urb *urb)
                input_sync(dev);
 
                ati_remote->old_jiffies = jiffies;
-               ati_remote->old_data[0] = data[1];
-               ati_remote->old_data[1] = data[2];
+               ati_remote->old_data = data[2];
        }
 }
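
With the checksum test and the single-byte scancode in place, a packet is accepted only when data[1] equals (data[2] + data[3] + 0xd5) & 0xff, and the key code is data[2] with the toggle bit masked off. The self-contained user-space sketch below exercises that arithmetic on a made-up packet; only the layout documented in the new comment is taken from the patch.

#include <stdio.h>

/* Packet layout from the comment added above:
 *   data[0] = 0x14
 *   data[1] = data[2] + data[3] + 0xd5   (checksum)
 *   data[2] = key code (toggle bit in the MSB on some models)
 *   data[3] = channel << 4               (low 4 bits zero)
 * The test packet in main() is invented for illustration. */
static int ati_packet_ok(const unsigned char data[4])
{
	if (data[0] != 0x14)
		return 0;
	if ((data[3] & 0x0f) != 0x00)
		return 0;
	if (data[1] != ((data[2] + data[3] + 0xd5) & 0xff))
		return 0;			/* wrong checksum: drop it */
	return 1;
}

int main(void)
{
	unsigned char pkt[4] = { 0x14, 0x00, 0x78, 0x10 };

	pkt[1] = (pkt[2] + pkt[3] + 0xd5) & 0xff;	/* forge a valid checksum */
	printf("valid=%d scancode=0x%02x\n",
	       ati_packet_ok(pkt), pkt[2] & 0x7f);
	return 0;
}
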
 
index e1b8b2605c48b6dd1a65a604a65f40c706a5a8ce..81506440eded45d7b9db84462e9b0874eb38ca61 100644 (file)
 #include <media/rc-map.h>
 
 static struct rc_map_table ati_x10[] = {
-       { 0xd20d, KEY_1 },
-       { 0xd30e, KEY_2 },
-       { 0xd40f, KEY_3 },
-       { 0xd510, KEY_4 },
-       { 0xd611, KEY_5 },
-       { 0xd712, KEY_6 },
-       { 0xd813, KEY_7 },
-       { 0xd914, KEY_8 },
-       { 0xda15, KEY_9 },
-       { 0xdc17, KEY_0 },
-       { 0xc500, KEY_A },
-       { 0xc601, KEY_B },
-       { 0xde19, KEY_C },
-       { 0xe01b, KEY_D },
-       { 0xe621, KEY_E },
-       { 0xe823, KEY_F },
+       { 0x0d, KEY_1 },
+       { 0x0e, KEY_2 },
+       { 0x0f, KEY_3 },
+       { 0x10, KEY_4 },
+       { 0x11, KEY_5 },
+       { 0x12, KEY_6 },
+       { 0x13, KEY_7 },
+       { 0x14, KEY_8 },
+       { 0x15, KEY_9 },
+       { 0x17, KEY_0 },
+       { 0x00, KEY_A },
+       { 0x01, KEY_B },
+       { 0x19, KEY_C },
+       { 0x1b, KEY_D },
+       { 0x21, KEY_E },
+       { 0x23, KEY_F },
 
-       { 0xdd18, KEY_KPENTER },    /* "check" */
-       { 0xdb16, KEY_MENU },       /* "menu" */
-       { 0xc702, KEY_POWER },      /* Power */
-       { 0xc803, KEY_TV },         /* TV */
-       { 0xc904, KEY_DVD },        /* DVD */
-       { 0xca05, KEY_WWW },        /* WEB */
-       { 0xcb06, KEY_BOOKMARKS },  /* "book" */
-       { 0xcc07, KEY_EDIT },       /* "hand" */
-       { 0xe11c, KEY_COFFEE },     /* "timer" */
-       { 0xe520, KEY_FRONT },      /* "max" */
-       { 0xe21d, KEY_LEFT },       /* left */
-       { 0xe41f, KEY_RIGHT },      /* right */
-       { 0xe722, KEY_DOWN },       /* down */
-       { 0xdf1a, KEY_UP },         /* up */
-       { 0xe31e, KEY_OK },         /* "OK" */
-       { 0xce09, KEY_VOLUMEDOWN }, /* VOL + */
-       { 0xcd08, KEY_VOLUMEUP },   /* VOL - */
-       { 0xcf0a, KEY_MUTE },       /* MUTE  */
-       { 0xd00b, KEY_CHANNELUP },  /* CH + */
-       { 0xd10c, KEY_CHANNELDOWN },/* CH - */
-       { 0xec27, KEY_RECORD },     /* ( o) red */
-       { 0xea25, KEY_PLAY },       /* ( >) */
-       { 0xe924, KEY_REWIND },     /* (<<) */
-       { 0xeb26, KEY_FORWARD },    /* (>>) */
-       { 0xed28, KEY_STOP },       /* ([]) */
-       { 0xee29, KEY_PAUSE },      /* ('') */
-       { 0xf02b, KEY_PREVIOUS },   /* (<-) */
-       { 0xef2a, KEY_NEXT },       /* (>+) */
-       { 0xf22d, KEY_INFO },       /* PLAYING */
-       { 0xf32e, KEY_HOME },       /* TOP */
-       { 0xf42f, KEY_END },        /* END */
-       { 0xf530, KEY_SELECT },     /* SELECT */
+       { 0x18, KEY_KPENTER },    /* "check" */
+       { 0x16, KEY_MENU },       /* "menu" */
+       { 0x02, KEY_POWER },      /* Power */
+       { 0x03, KEY_TV },         /* TV */
+       { 0x04, KEY_DVD },        /* DVD */
+       { 0x05, KEY_WWW },        /* WEB */
+       { 0x06, KEY_BOOKMARKS },  /* "book" */
+       { 0x07, KEY_EDIT },       /* "hand" */
+       { 0x1c, KEY_COFFEE },     /* "timer" */
+       { 0x20, KEY_FRONT },      /* "max" */
+       { 0x1d, KEY_LEFT },       /* left */
+       { 0x1f, KEY_RIGHT },      /* right */
+       { 0x22, KEY_DOWN },       /* down */
+       { 0x1a, KEY_UP },         /* up */
+       { 0x1e, KEY_OK },         /* "OK" */
+       { 0x09, KEY_VOLUMEDOWN }, /* VOL + */
+       { 0x08, KEY_VOLUMEUP },   /* VOL - */
+       { 0x0a, KEY_MUTE },       /* MUTE  */
+       { 0x0b, KEY_CHANNELUP },  /* CH + */
+       { 0x0c, KEY_CHANNELDOWN },/* CH - */
+       { 0x27, KEY_RECORD },     /* ( o) red */
+       { 0x25, KEY_PLAY },       /* ( >) */
+       { 0x24, KEY_REWIND },     /* (<<) */
+       { 0x26, KEY_FORWARD },    /* (>>) */
+       { 0x28, KEY_STOP },       /* ([]) */
+       { 0x29, KEY_PAUSE },      /* ('') */
+       { 0x2b, KEY_PREVIOUS },   /* (<-) */
+       { 0x2a, KEY_NEXT },       /* (>+) */
+       { 0x2d, KEY_INFO },       /* PLAYING */
+       { 0x2e, KEY_HOME },       /* TOP */
+       { 0x2f, KEY_END },        /* END */
+       { 0x30, KEY_SELECT },     /* SELECT */
 };
 
 static struct rc_map_list ati_x10_map = {
index 09e2cc01d1102801d063e33dbc256e0a5a8d8f70..479cdb8978104b1c2510852d736c7f1fbcfbd6aa 100644 (file)
 #include <media/rc-map.h>
 
 static struct rc_map_table medion_x10[] = {
-       { 0xf12c, KEY_TV },    /* TV */
-       { 0xf22d, KEY_VCR },   /* VCR */
-       { 0xc904, KEY_DVD },   /* DVD */
-       { 0xcb06, KEY_AUDIO }, /* MUSIC */
-
-       { 0xf32e, KEY_RADIO },     /* RADIO */
-       { 0xca05, KEY_DIRECTORY }, /* PHOTO */
-       { 0xf42f, KEY_INFO },      /* TV-PREVIEW */
-       { 0xf530, KEY_LIST },      /* CHANNEL-LST */
-
-       { 0xe01b, KEY_SETUP }, /* SETUP */
-       { 0xf631, KEY_VIDEO }, /* VIDEO DESKTOP */
-
-       { 0xcd08, KEY_VOLUMEDOWN },  /* VOL - */
-       { 0xce09, KEY_VOLUMEUP },    /* VOL + */
-       { 0xd00b, KEY_CHANNELUP },   /* CHAN + */
-       { 0xd10c, KEY_CHANNELDOWN }, /* CHAN - */
-       { 0xc500, KEY_MUTE },        /* MUTE */
-
-       { 0xf732, KEY_RED }, /* red */
-       { 0xf833, KEY_GREEN }, /* green */
-       { 0xf934, KEY_YELLOW }, /* yellow */
-       { 0xfa35, KEY_BLUE }, /* blue */
-       { 0xdb16, KEY_TEXT }, /* TXT */
-
-       { 0xd20d, KEY_1 },
-       { 0xd30e, KEY_2 },
-       { 0xd40f, KEY_3 },
-       { 0xd510, KEY_4 },
-       { 0xd611, KEY_5 },
-       { 0xd712, KEY_6 },
-       { 0xd813, KEY_7 },
-       { 0xd914, KEY_8 },
-       { 0xda15, KEY_9 },
-       { 0xdc17, KEY_0 },
-       { 0xe11c, KEY_SEARCH }, /* TV/RAD, CH SRC */
-       { 0xe520, KEY_DELETE }, /* DELETE */
-
-       { 0xfb36, KEY_KEYBOARD }, /* RENAME */
-       { 0xdd18, KEY_SCREEN },   /* SNAPSHOT */
-
-       { 0xdf1a, KEY_UP },    /* up */
-       { 0xe722, KEY_DOWN },  /* down */
-       { 0xe21d, KEY_LEFT },  /* left */
-       { 0xe41f, KEY_RIGHT }, /* right */
-       { 0xe31e, KEY_OK },    /* OK */
-
-       { 0xfc37, KEY_SELECT }, /* ACQUIRE IMAGE */
-       { 0xfd38, KEY_EDIT },   /* EDIT IMAGE */
-
-       { 0xe924, KEY_REWIND },   /* rewind  (<<) */
-       { 0xea25, KEY_PLAY },     /* play    ( >) */
-       { 0xeb26, KEY_FORWARD },  /* forward (>>) */
-       { 0xec27, KEY_RECORD },   /* record  ( o) */
-       { 0xed28, KEY_STOP },     /* stop    ([]) */
-       { 0xee29, KEY_PAUSE },    /* pause   ('') */
-
-       { 0xe621, KEY_PREVIOUS },        /* prev */
-       { 0xfe39, KEY_SWITCHVIDEOMODE }, /* F SCR */
-       { 0xe823, KEY_NEXT },            /* next */
-       { 0xde19, KEY_MENU },            /* MENU */
-       { 0xff3a, KEY_LANGUAGE },        /* AUDIO */
-
-       { 0xc702, KEY_POWER }, /* POWER */
+       { 0x2c, KEY_TV },    /* TV */
+       { 0x2d, KEY_VCR },   /* VCR */
+       { 0x04, KEY_DVD },   /* DVD */
+       { 0x06, KEY_AUDIO }, /* MUSIC */
+
+       { 0x2e, KEY_RADIO },     /* RADIO */
+       { 0x05, KEY_DIRECTORY }, /* PHOTO */
+       { 0x2f, KEY_INFO },      /* TV-PREVIEW */
+       { 0x30, KEY_LIST },      /* CHANNEL-LST */
+
+       { 0x1b, KEY_SETUP }, /* SETUP */
+       { 0x31, KEY_VIDEO }, /* VIDEO DESKTOP */
+
+       { 0x08, KEY_VOLUMEDOWN },  /* VOL - */
+       { 0x09, KEY_VOLUMEUP },    /* VOL + */
+       { 0x0b, KEY_CHANNELUP },   /* CHAN + */
+       { 0x0c, KEY_CHANNELDOWN }, /* CHAN - */
+       { 0x00, KEY_MUTE },        /* MUTE */
+
+       { 0x32, KEY_RED }, /* red */
+       { 0x33, KEY_GREEN }, /* green */
+       { 0x34, KEY_YELLOW }, /* yellow */
+       { 0x35, KEY_BLUE }, /* blue */
+       { 0x16, KEY_TEXT }, /* TXT */
+
+       { 0x0d, KEY_1 },
+       { 0x0e, KEY_2 },
+       { 0x0f, KEY_3 },
+       { 0x10, KEY_4 },
+       { 0x11, KEY_5 },
+       { 0x12, KEY_6 },
+       { 0x13, KEY_7 },
+       { 0x14, KEY_8 },
+       { 0x15, KEY_9 },
+       { 0x17, KEY_0 },
+       { 0x1c, KEY_SEARCH }, /* TV/RAD, CH SRC */
+       { 0x20, KEY_DELETE }, /* DELETE */
+
+       { 0x36, KEY_KEYBOARD }, /* RENAME */
+       { 0x18, KEY_SCREEN },   /* SNAPSHOT */
+
+       { 0x1a, KEY_UP },    /* up */
+       { 0x22, KEY_DOWN },  /* down */
+       { 0x1d, KEY_LEFT },  /* left */
+       { 0x1f, KEY_RIGHT }, /* right */
+       { 0x1e, KEY_OK },    /* OK */
+
+       { 0x37, KEY_SELECT }, /* ACQUIRE IMAGE */
+       { 0x38, KEY_EDIT },   /* EDIT IMAGE */
+
+       { 0x24, KEY_REWIND },   /* rewind  (<<) */
+       { 0x25, KEY_PLAY },     /* play    ( >) */
+       { 0x26, KEY_FORWARD },  /* forward (>>) */
+       { 0x27, KEY_RECORD },   /* record  ( o) */
+       { 0x28, KEY_STOP },     /* stop    ([]) */
+       { 0x29, KEY_PAUSE },    /* pause   ('') */
+
+       { 0x21, KEY_PREVIOUS },        /* prev */
+       { 0x39, KEY_SWITCHVIDEOMODE }, /* F SCR */
+       { 0x23, KEY_NEXT },            /* next */
+       { 0x19, KEY_MENU },            /* MENU */
+       { 0x3a, KEY_LANGUAGE },        /* AUDIO */
+
+       { 0x02, KEY_POWER }, /* POWER */
 };
 
 static struct rc_map_list medion_x10_map = {
index ef146520931c40fe19da588d41be3d6a3f00b990..c7f33ec719b49f0cb6ae9460606d3a54c1542610 100644 (file)
 #include <media/rc-map.h>
 
 static struct rc_map_table snapstream_firefly[] = {
-       { 0xf12c, KEY_ZOOM },       /* Maximize */
-       { 0xc702, KEY_CLOSE },
-
-       { 0xd20d, KEY_1 },
-       { 0xd30e, KEY_2 },
-       { 0xd40f, KEY_3 },
-       { 0xd510, KEY_4 },
-       { 0xd611, KEY_5 },
-       { 0xd712, KEY_6 },
-       { 0xd813, KEY_7 },
-       { 0xd914, KEY_8 },
-       { 0xda15, KEY_9 },
-       { 0xdc17, KEY_0 },
-       { 0xdb16, KEY_BACK },
-       { 0xdd18, KEY_KPENTER },    /* ent */
-
-       { 0xce09, KEY_VOLUMEUP },
-       { 0xcd08, KEY_VOLUMEDOWN },
-       { 0xcf0a, KEY_MUTE },
-       { 0xd00b, KEY_CHANNELUP },
-       { 0xd10c, KEY_CHANNELDOWN },
-       { 0xc500, KEY_VENDOR },     /* firefly */
-
-       { 0xf32e, KEY_INFO },
-       { 0xf42f, KEY_OPTION },
-
-       { 0xe21d, KEY_LEFT },
-       { 0xe41f, KEY_RIGHT },
-       { 0xe722, KEY_DOWN },
-       { 0xdf1a, KEY_UP },
-       { 0xe31e, KEY_OK },
-
-       { 0xe11c, KEY_MENU },
-       { 0xe520, KEY_EXIT },
-
-       { 0xec27, KEY_RECORD },
-       { 0xea25, KEY_PLAY },
-       { 0xed28, KEY_STOP },
-       { 0xe924, KEY_REWIND },
-       { 0xeb26, KEY_FORWARD },
-       { 0xee29, KEY_PAUSE },
-       { 0xf02b, KEY_PREVIOUS },
-       { 0xef2a, KEY_NEXT },
-
-       { 0xcb06, KEY_AUDIO },      /* Music */
-       { 0xca05, KEY_IMAGES },     /* Photos */
-       { 0xc904, KEY_DVD },
-       { 0xc803, KEY_TV },
-       { 0xcc07, KEY_VIDEO },
-
-       { 0xc601, KEY_HELP },
-       { 0xf22d, KEY_MODE },       /* Mouse */
-
-       { 0xde19, KEY_A },
-       { 0xe01b, KEY_B },
-       { 0xe621, KEY_C },
-       { 0xe823, KEY_D },
+       { 0x2c, KEY_ZOOM },       /* Maximize */
+       { 0x02, KEY_CLOSE },
+
+       { 0x0d, KEY_1 },
+       { 0x0e, KEY_2 },
+       { 0x0f, KEY_3 },
+       { 0x10, KEY_4 },
+       { 0x11, KEY_5 },
+       { 0x12, KEY_6 },
+       { 0x13, KEY_7 },
+       { 0x14, KEY_8 },
+       { 0x15, KEY_9 },
+       { 0x17, KEY_0 },
+       { 0x16, KEY_BACK },
+       { 0x18, KEY_KPENTER },    /* ent */
+
+       { 0x09, KEY_VOLUMEUP },
+       { 0x08, KEY_VOLUMEDOWN },
+       { 0x0a, KEY_MUTE },
+       { 0x0b, KEY_CHANNELUP },
+       { 0x0c, KEY_CHANNELDOWN },
+       { 0x00, KEY_VENDOR },     /* firefly */
+
+       { 0x2e, KEY_INFO },
+       { 0x2f, KEY_OPTION },
+
+       { 0x1d, KEY_LEFT },
+       { 0x1f, KEY_RIGHT },
+       { 0x22, KEY_DOWN },
+       { 0x1a, KEY_UP },
+       { 0x1e, KEY_OK },
+
+       { 0x1c, KEY_MENU },
+       { 0x20, KEY_EXIT },
+
+       { 0x27, KEY_RECORD },
+       { 0x25, KEY_PLAY },
+       { 0x28, KEY_STOP },
+       { 0x24, KEY_REWIND },
+       { 0x26, KEY_FORWARD },
+       { 0x29, KEY_PAUSE },
+       { 0x2b, KEY_PREVIOUS },
+       { 0x2a, KEY_NEXT },
+
+       { 0x06, KEY_AUDIO },      /* Music */
+       { 0x05, KEY_IMAGES },     /* Photos */
+       { 0x04, KEY_DVD },
+       { 0x03, KEY_TV },
+       { 0x07, KEY_VIDEO },
+
+       { 0x01, KEY_HELP },
+       { 0x2d, KEY_MODE },       /* Mouse */
+
+       { 0x19, KEY_A },
+       { 0x1b, KEY_B },
+       { 0x21, KEY_C },
+       { 0x23, KEY_D },
 };
 
 static struct rc_map_list snapstream_firefly_map = {
index 39fc923fc46bc74b512d24243cd7387c5cf0f250..1c6015a04f964df6f03e2be376661fca5d84dba4 100644 (file)
@@ -162,11 +162,14 @@ static void hauppauge_eeprom(struct au0828_dev *dev, u8 *eeprom_data)
        switch (tv.model) {
        case 72000: /* WinTV-HVR950q (Retail, IR, ATSC/QAM */
        case 72001: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+       case 72101: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+       case 72201: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
        case 72211: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
        case 72221: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
        case 72231: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
        case 72241: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM and analog video */
        case 72251: /* WinTV-HVR950q (Retail, IR, ATSC/QAM and analog video */
+       case 72261: /* WinTV-HVR950q (OEM, IR, ATSC/QAM and analog video */
        case 72301: /* WinTV-HVR850 (Retail, IR, ATSC and analog video */
        case 72500: /* WinTV-HVR950q (OEM, No IR, ATSC/QAM */
                break;
@@ -324,6 +327,10 @@ struct usb_device_id au0828_usb_id_table[] = {
                .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL },
        { USB_DEVICE(0x2040, 0x8200),
                .driver_info = AU0828_BOARD_HAUPPAUGE_WOODBURY },
+       { USB_DEVICE(0x2040, 0x7260),
+               .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
+       { USB_DEVICE(0x2040, 0x7213),
+               .driver_info = AU0828_BOARD_HAUPPAUGE_HVR950Q },
        { },
 };
 
index 881e04c7ffe6dd91f5a96f12aaebe236a7bfba5c..2ca10dfec91fd2347532d35e22ea44fd4ee01298 100644 (file)
@@ -838,13 +838,13 @@ static int gspca_init_transfer(struct gspca_dev *gspca_dev)
        gspca_dev->usb_err = 0;
 
        /* do the specific subdriver stuff before endpoint selection */
-       gspca_dev->alt = 0;
+       intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
+       gspca_dev->alt = gspca_dev->cam.bulk ? intf->num_altsetting : 0;
        if (gspca_dev->sd_desc->isoc_init) {
                ret = gspca_dev->sd_desc->isoc_init(gspca_dev);
                if (ret < 0)
                        goto unlock;
        }
-       intf = usb_ifnum_to_if(gspca_dev->dev, gspca_dev->iface);
        xfer = gspca_dev->cam.bulk ? USB_ENDPOINT_XFER_BULK
                                   : USB_ENDPOINT_XFER_ISOC;
 
@@ -957,7 +957,7 @@ retry:
                                ret = -EIO;
                                goto out;
                        }
-                       alt = ep_tb[--alt_idx].alt;
+                       gspca_dev->alt = ep_tb[--alt_idx].alt;
                }
        }
 out:
index 89d09a8914f8ea5099d6031d59b5f9117c214b78..82c8817bd32dcddcc74a98593919bd64502de237 100644 (file)
@@ -162,7 +162,6 @@ struct m5mols_version {
  * @pad: media pad
  * @ffmt: current fmt according to resolution type
  * @res_type: current resolution type
- * @code: current code
  * @irq_waitq: waitqueue for the capture
  * @work_irq: workqueue for the IRQ
  * @flags: state variable for the interrupt handler
@@ -192,7 +191,6 @@ struct m5mols_info {
        struct media_pad pad;
        struct v4l2_mbus_framefmt ffmt[M5MOLS_RESTYPE_MAX];
        int res_type;
-       enum v4l2_mbus_pixelcode code;
        wait_queue_head_t irq_waitq;
        struct work_struct work_irq;
        unsigned long flags;
index 05ab3700647e286acf9b01ee994a07cd56eb8014..e0f09e531800d193c1cb0250cb89d820d7fa1236 100644 (file)
@@ -334,7 +334,7 @@ int m5mols_mode(struct m5mols_info *info, u8 mode)
        int ret = -EINVAL;
        u8 reg;
 
-       if (mode < REG_PARAMETER && mode > REG_CAPTURE)
+       if (mode < REG_PARAMETER || mode > REG_CAPTURE)
                return ret;
 
        ret = m5mols_read_u8(sd, SYSTEM_SYSMODE, &reg);
@@ -511,9 +511,6 @@ static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
        struct m5mols_info *info = to_m5mols(sd);
        struct v4l2_mbus_framefmt *format;
 
-       if (fmt->pad != 0)
-               return -EINVAL;
-
        format = __find_format(info, fh, fmt->which, info->res_type);
        if (!format)
                return -EINVAL;
@@ -532,9 +529,6 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
        u32 resolution = 0;
        int ret;
 
-       if (fmt->pad != 0)
-               return -EINVAL;
-
        ret = __find_resolution(sd, format, &type, &resolution);
        if (ret < 0)
                return ret;
@@ -543,13 +537,14 @@ static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
        if (!sfmt)
                return 0;
 
-       *sfmt           = m5mols_default_ffmt[type];
-       sfmt->width     = format->width;
-       sfmt->height    = format->height;
+
+       format->code = m5mols_default_ffmt[type].code;
+       format->colorspace = V4L2_COLORSPACE_JPEG;
+       format->field = V4L2_FIELD_NONE;
 
        if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+               *sfmt = *format;
                info->resolution = resolution;
-               info->code = format->code;
                info->res_type = type;
        }
 
@@ -626,13 +621,14 @@ static int m5mols_start_monitor(struct m5mols_info *info)
 static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
 {
        struct m5mols_info *info = to_m5mols(sd);
+       u32 code = info->ffmt[info->res_type].code;
 
        if (enable) {
                int ret = -EINVAL;
 
-               if (is_code(info->code, M5MOLS_RESTYPE_MONITOR))
+               if (is_code(code, M5MOLS_RESTYPE_MONITOR))
                        ret = m5mols_start_monitor(info);
-               if (is_code(info->code, M5MOLS_RESTYPE_CAPTURE))
+               if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
                        ret = m5mols_start_capture(info);
 
                return ret;
index cf2c0fb95f2f47d96121e55a4e4d87a07c076bcf..398f96ffd35e88d212a8d39c8585decfc1d426d0 100644 (file)
@@ -955,6 +955,7 @@ static int mt9m111_probe(struct i2c_client *client,
        mt9m111->rect.height    = MT9M111_MAX_HEIGHT;
        mt9m111->fmt            = &mt9m111_colour_fmts[0];
        mt9m111->lastpage       = -1;
+       mutex_init(&mt9m111->power_lock);
 
        ret = mt9m111_video_probe(client);
        if (ret) {
index 32114a3c0ca78daa889853893d5fb066f8476cf9..7b34b11daf24d02b5ad0a3eba08c2e437cc60b39 100644 (file)
@@ -1083,8 +1083,10 @@ static int mt9t112_probe(struct i2c_client *client,
        v4l2_i2c_subdev_init(&priv->subdev, client, &mt9t112_subdev_ops);
 
        ret = mt9t112_camera_probe(client);
-       if (ret)
+       if (ret) {
                kfree(priv);
+               return ret;
+       }
 
        /* Cannot fail: using the default supported pixel code */
        mt9t112_set_params(priv, &rect, V4L2_MBUS_FMT_UYVY8_2X8);
index 9c5c19f142de598067de4fec22b229eecb4a7862..ee0d0b39cd170e1700b659a7af2e1085c580b9fa 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/irq.h>
 #include <linux/videodev2.h>
 #include <linux/dma-mapping.h>
+#include <linux/slab.h>
 
 #include <media/videobuf-dma-contig.h>
 #include <media/v4l2-device.h>
@@ -2169,6 +2170,14 @@ static int __init omap_vout_probe(struct platform_device *pdev)
        vid_dev->num_displays = 0;
        for_each_dss_dev(dssdev) {
                omap_dss_get_device(dssdev);
+
+               if (!dssdev->driver) {
+                       dev_warn(&pdev->dev, "no driver for display: %s\n",
+                                       dssdev->name);
+                       omap_dss_put_device(dssdev);
+                       continue;
+               }
+
                vid_dev->displays[vid_dev->num_displays++] = dssdev;
        }
 
index e87ae2f634b26e523c406427f22b4b2e362cf5e2..6a6cf388bae446d70a043eac83cb09e2b6a0e4e8 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
index 1d54b86c936bba11252877c0db65bc5a99e70b47..3ea38a8def8e1523121c8142acc769d06436539a 100644 (file)
@@ -506,7 +506,7 @@ int omap24xxcam_sgdma_queue(struct omap24xxcam_sgdma *sgdma,
        unsigned long flags;
        struct sgdma_state *sg_state;
 
-       if ((sglen < 0) || ((sglen > 0) & !sglist))
+       if ((sglen < 0) || ((sglen > 0) && !sglist))
                return -EINVAL;
 
        spin_lock_irqsave(&sgdma->lock, flags);
index b0b0fa5a3572fb834a87c45dba668b798524dfad..54a4a3f22e2e4187c5651aaa42cf73dcf9141059 100644 (file)
@@ -1408,7 +1408,7 @@ static void ccdc_hs_vs_isr(struct isp_ccdc_device *ccdc)
 {
        struct isp_pipeline *pipe =
                to_isp_pipeline(&ccdc->video_out.video.entity);
-       struct video_device *vdev = &ccdc->subdev.devnode;
+       struct video_device *vdev = ccdc->subdev.devnode;
        struct v4l2_event event;
 
        memset(&event, 0, sizeof(event));
index 68d539456c552aa0c5ee16bd999ed09f41dee712..bc0b2c7349b97894d62c6fcdb876eff8613634f7 100644 (file)
@@ -496,7 +496,7 @@ static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
 
 static void isp_stat_queue_event(struct ispstat *stat, int err)
 {
-       struct video_device *vdev = &stat->subdev.devnode;
+       struct video_device *vdev = stat->subdev.devnode;
        struct v4l2_event event;
        struct omap3isp_stat_event_status *status = (void *)event.u.data;
 
index d1000723c5ae0040f45d2f283a0df45de4ee8a01..f2290578448c416bb809abe4d11d4146da6ead91 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/cacheflush.h>
 #include <linux/clk.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/pagemap.h>
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
index 9f2d26b1d4cb8e2af6a48ccdc9c3f66c4bc87715..6806345ec2f0f6a9a26accb862b8eb4aa6d5c565 100644 (file)
@@ -540,7 +540,7 @@ static u8 to_clkrc(struct v4l2_fract *timeperframe,
 static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
 {
        struct i2c_client *client = v4l2_get_subdevdata(sd);
-       struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+       struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
        struct soc_camera_sense *sense = icd->sense;
        struct ov6650 *priv = to_ov6650(client);
        bool half_scale = !is_unscaled_ok(mf->width, mf->height, &priv->rect);
index c8d91b0cd9bdd043e54e2ed47391a5736bda61ad..2cc3b9166724f2eaf47ea1c05f5a12e5e7208ea7 100644 (file)
@@ -98,6 +98,10 @@ static int fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend)
                        vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
        }
        set_bit(ST_CAPT_SUSPENDED, &fimc->state);
+
+       fimc_hw_reset(fimc);
+       cap->buf_index = 0;
+
        spin_unlock_irqrestore(&fimc->slock, flags);
 
        if (streaming)
@@ -137,7 +141,7 @@ int fimc_capture_config_update(struct fimc_ctx *ctx)
        struct fimc_dev *fimc = ctx->fimc_dev;
        int ret;
 
-       if (test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
+       if (!test_bit(ST_CAPT_APPLY_CFG, &fimc->state))
                return 0;
 
        spin_lock(&ctx->slock);
@@ -150,7 +154,7 @@ int fimc_capture_config_update(struct fimc_ctx *ctx)
                fimc_hw_set_rotation(ctx);
                fimc_prepare_dma_offset(ctx, &ctx->d_frame);
                fimc_hw_set_out_dma(ctx);
-               set_bit(ST_CAPT_APPLY_CFG, &fimc->state);
+               clear_bit(ST_CAPT_APPLY_CFG, &fimc->state);
        }
        spin_unlock(&ctx->slock);
        return ret;
@@ -164,7 +168,6 @@ static int start_streaming(struct vb2_queue *q, unsigned int count)
        int min_bufs;
        int ret;
 
-       fimc_hw_reset(fimc);
        vid_cap->frame_count = 0;
 
        ret = fimc_init_capture(fimc);
@@ -523,7 +526,7 @@ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx,
        max_w = rotation ? pl->out_rot_en_w : pl->out_rot_dis_w;
        min_w = ctx->state & FIMC_DST_CROP ? dst->width : var->min_out_pixsize;
        min_h = ctx->state & FIMC_DST_CROP ? dst->height : var->min_out_pixsize;
-       if (fimc->id == 1 && var->pix_hoff)
+       if (var->min_vsize_align == 1 && !rotation)
                align_h = fimc_fmt_is_rgb(ffmt->color) ? 0 : 1;
 
        depth = fimc_get_format_depth(ffmt);
@@ -1239,6 +1242,7 @@ static int fimc_subdev_set_fmt(struct v4l2_subdev *sd,
 
        mutex_lock(&fimc->lock);
        set_frame_bounds(ff, mf->width, mf->height);
+       fimc->vid_cap.mf = *mf;
        ff->fmt = ffmt;
 
        /* Reset the crop rectangle if required. */
@@ -1375,7 +1379,7 @@ static void fimc_destroy_capture_subdev(struct fimc_dev *fimc)
        media_entity_cleanup(&sd->entity);
        v4l2_device_unregister_subdev(sd);
        kfree(sd);
-       sd = NULL;
+       fimc->vid_cap.subdev = NULL;
 }
 
 /* Set default format at the sensor and host interface */
index 19ca6db38b2f87f7951fe423cfe6741d3733acac..07c6254faee32b1ef3ee75964faa2b63b1f7615d 100644 (file)
@@ -37,7 +37,7 @@ static char *fimc_clocks[MAX_FIMC_CLOCKS] = {
 static struct fimc_fmt fimc_formats[] = {
        {
                .name           = "RGB565",
-               .fourcc         = V4L2_PIX_FMT_RGB565X,
+               .fourcc         = V4L2_PIX_FMT_RGB565,
                .depth          = { 16 },
                .color          = S5P_FIMC_RGB565,
                .memplanes      = 1,
@@ -1038,12 +1038,11 @@ static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f)
                mod_x = 6; /* 64 x 32 pixels tile */
                mod_y = 5;
        } else {
-               if (fimc->id == 1 && variant->pix_hoff)
+               if (variant->min_vsize_align == 1)
                        mod_y = fimc_fmt_is_rgb(fmt->color) ? 0 : 1;
                else
-                       mod_y = mod_x;
+                       mod_y = ffs(variant->min_vsize_align) - 1;
        }
-       dbg("mod_x: %d, mod_y: %d, max_w: %d", mod_x, mod_y, max_w);
 
        v4l_bound_align_image(&pix->width, 16, max_w, mod_x,
                &pix->height, 8, variant->pix_limit->scaler_dis_w, mod_y, 0);
@@ -1226,10 +1225,10 @@ static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
                fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
 
        /* Get pixel alignment constraints. */
-       if (fimc->id == 1 && fimc->variant->pix_hoff)
+       if (fimc->variant->min_vsize_align == 1)
                halign = fimc_fmt_is_rgb(f->fmt->color) ? 0 : 1;
        else
-               halign = ffs(min_size) - 1;
+               halign = ffs(fimc->variant->min_vsize_align) - 1;
 
        for (i = 0; i < f->fmt->colplanes; i++)
                depth += f->fmt->depth[i];
@@ -1615,7 +1614,6 @@ static int fimc_probe(struct platform_device *pdev)
        pdata = pdev->dev.platform_data;
        fimc->pdata = pdata;
 
-       set_bit(ST_LPM, &fimc->state);
 
        init_waitqueue_head(&fimc->irq_queue);
        spin_lock_init(&fimc->slock);
@@ -1707,8 +1705,6 @@ static int fimc_runtime_resume(struct device *dev)
        /* Enable clocks and perform basic initalization */
        clk_enable(fimc->clock[CLK_GATE]);
        fimc_hw_reset(fimc);
-       if (fimc->variant->out_buf_count > 4)
-               fimc_hw_set_dma_seq(fimc, 0xF);
 
        /* Resume the capture or mem-to-mem device */
        if (fimc_capture_busy(fimc))
@@ -1750,8 +1746,6 @@ static int fimc_resume(struct device *dev)
                return 0;
        }
        fimc_hw_reset(fimc);
-       if (fimc->variant->out_buf_count > 4)
-               fimc_hw_set_dma_seq(fimc, 0xF);
        spin_unlock_irqrestore(&fimc->slock, flags);
 
        if (fimc_capture_busy(fimc))
@@ -1780,7 +1774,6 @@ static int __devexit fimc_remove(struct platform_device *pdev)
        struct fimc_dev *fimc = platform_get_drvdata(pdev);
 
        pm_runtime_disable(&pdev->dev);
-       fimc_runtime_suspend(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
 
        vb2_dma_contig_cleanup_ctx(fimc->alloc_ctx);
@@ -1840,6 +1833,7 @@ static struct samsung_fimc_variant fimc0_variant_s5p = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 8,
+       .min_vsize_align = 16,
        .out_buf_count   = 4,
        .pix_limit       = &s5p_pix_limit[0],
 };
@@ -1849,6 +1843,7 @@ static struct samsung_fimc_variant fimc2_variant_s5p = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 8,
+       .min_vsize_align = 16,
        .out_buf_count   = 4,
        .pix_limit = &s5p_pix_limit[1],
 };
@@ -1861,6 +1856,7 @@ static struct samsung_fimc_variant fimc0_variant_s5pv210 = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 8,
+       .min_vsize_align = 16,
        .out_buf_count   = 4,
        .pix_limit       = &s5p_pix_limit[1],
 };
@@ -1874,6 +1870,7 @@ static struct samsung_fimc_variant fimc1_variant_s5pv210 = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 1,
+       .min_vsize_align = 1,
        .out_buf_count   = 4,
        .pix_limit       = &s5p_pix_limit[2],
 };
@@ -1884,6 +1881,7 @@ static struct samsung_fimc_variant fimc2_variant_s5pv210 = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 8,
+       .min_vsize_align = 16,
        .out_buf_count   = 4,
        .pix_limit       = &s5p_pix_limit[2],
 };
@@ -1898,6 +1896,7 @@ static struct samsung_fimc_variant fimc0_variant_exynos4 = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 2,
+       .min_vsize_align = 1,
        .out_buf_count   = 32,
        .pix_limit       = &s5p_pix_limit[1],
 };
@@ -1910,6 +1909,7 @@ static struct samsung_fimc_variant fimc3_variant_exynos4 = {
        .min_inp_pixsize = 16,
        .min_out_pixsize = 16,
        .hor_offs_align  = 2,
+       .min_vsize_align = 1,
        .out_buf_count   = 32,
        .pix_limit       = &s5p_pix_limit[3],
 };
index a6936dad5b1025b196ef6c1acd5e40131a44d7c3..c7f01c47b20fe354e101bbf4ab904cab18e3ec55 100644 (file)
@@ -377,6 +377,7 @@ struct fimc_pix_limit {
  * @min_inp_pixsize: minimum input pixel size
  * @min_out_pixsize: minimum output pixel size
  * @hor_offs_align: horizontal pixel offset aligment
+ * @min_vsize_align: minimum vertical pixel size alignment
  * @out_buf_count: the number of buffers in output DMA sequence
  */
 struct samsung_fimc_variant {
@@ -390,6 +391,7 @@ struct samsung_fimc_variant {
        u16             min_inp_pixsize;
        u16             min_out_pixsize;
        u16             hor_offs_align;
+       u16             min_vsize_align;
        u16             out_buf_count;
 };
 
index cc337b1de91392ffbcd00eebe33095c037b1b97c..615c862f0360ef20061e28701d1a99f460d0a7e1 100644 (file)
@@ -220,6 +220,7 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
        sd = v4l2_i2c_new_subdev_board(&fmd->v4l2_dev, adapter,
                                       s_info->pdata->board_info, NULL);
        if (IS_ERR_OR_NULL(sd)) {
+               i2c_put_adapter(adapter);
                v4l2_err(&fmd->v4l2_dev, "Failed to acquire subdev\n");
                return NULL;
        }
@@ -234,12 +235,15 @@ static struct v4l2_subdev *fimc_md_register_sensor(struct fimc_md *fmd,
 static void fimc_md_unregister_sensor(struct v4l2_subdev *sd)
 {
        struct i2c_client *client = v4l2_get_subdevdata(sd);
+       struct i2c_adapter *adapter;
 
        if (!client)
                return;
        v4l2_device_unregister_subdev(sd);
+       adapter = client->adapter;
        i2c_unregister_device(client);
-       i2c_put_adapter(client->adapter);
+       if (adapter)
+               i2c_put_adapter(adapter);
 }
 
 static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
@@ -381,20 +385,28 @@ static void fimc_md_unregister_entities(struct fimc_md *fmd)
 
 static int fimc_md_register_video_nodes(struct fimc_md *fmd)
 {
+       struct video_device *vdev;
        int i, ret = 0;
 
        for (i = 0; i < FIMC_MAX_DEVS && !ret; i++) {
                if (!fmd->fimc[i])
                        continue;
 
-               if (fmd->fimc[i]->m2m.vfd)
-                       ret = video_register_device(fmd->fimc[i]->m2m.vfd,
-                                                   VFL_TYPE_GRABBER, -1);
-               if (ret)
-                       break;
-               if (fmd->fimc[i]->vid_cap.vfd)
-                       ret = video_register_device(fmd->fimc[i]->vid_cap.vfd,
-                                                   VFL_TYPE_GRABBER, -1);
+               vdev = fmd->fimc[i]->m2m.vfd;
+               if (vdev) {
+                       ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+                       if (ret)
+                               break;
+                       v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
+                                 vdev->name, video_device_node_name(vdev));
+               }
+
+               vdev = fmd->fimc[i]->vid_cap.vfd;
+               if (vdev == NULL)
+                       continue;
+               ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+               v4l2_info(&fmd->v4l2_dev, "Registered %s as /dev/%s\n",
+                         vdev->name, video_device_node_name(vdev));
        }
 
        return ret;
@@ -502,7 +514,7 @@ static int fimc_md_create_links(struct fimc_md *fmd)
                        if (WARN(csis == NULL,
                                 "MIPI-CSI interface specified "
                                 "but s5p-csis module is not loaded!\n"))
-                               continue;
+                               return -EINVAL;
 
                        ret = media_entity_create_link(&sensor->entity, 0,
                                              &csis->entity, CSIS_PAD_SINK,
@@ -742,9 +754,6 @@ static int __devinit fimc_md_probe(struct platform_device *pdev)
        struct fimc_md *fmd;
        int ret;
 
-       if (WARN(!pdev->dev.platform_data, "Platform data not specified!\n"))
-               return -EINVAL;
-
        fmd = kzalloc(sizeof(struct fimc_md), GFP_KERNEL);
        if (!fmd)
                return -ENOMEM;
@@ -782,9 +791,11 @@ static int __devinit fimc_md_probe(struct platform_device *pdev)
        if (ret)
                goto err3;
 
-       ret = fimc_md_register_sensor_entities(fmd);
-       if (ret)
-               goto err3;
+       if (pdev->dev.platform_data) {
+               ret = fimc_md_register_sensor_entities(fmd);
+               if (ret)
+                       goto err3;
+       }
        ret = fimc_md_create_links(fmd);
        if (ret)
                goto err3;
index 20e664e341632df52877c78ef10ce96f6bc57301..44f5c2d1920bb50dd0dfe8e436eec1df32aa8c3f 100644 (file)
@@ -35,6 +35,9 @@ void fimc_hw_reset(struct fimc_dev *dev)
        cfg = readl(dev->regs + S5P_CIGCTRL);
        cfg &= ~S5P_CIGCTRL_SWRST;
        writel(cfg, dev->regs + S5P_CIGCTRL);
+
+       if (dev->variant->out_buf_count > 4)
+               fimc_hw_set_dma_seq(dev, 0xF);
 }
 
 static u32 fimc_hw_get_in_flip(struct fimc_ctx *ctx)
@@ -251,7 +254,14 @@ static void fimc_hw_set_scaler(struct fimc_ctx *ctx)
        struct fimc_scaler *sc = &ctx->scaler;
        struct fimc_frame *src_frame = &ctx->s_frame;
        struct fimc_frame *dst_frame = &ctx->d_frame;
-       u32 cfg = 0;
+
+       u32 cfg = readl(dev->regs + S5P_CISCCTRL);
+
+       cfg &= ~(S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE |
+                S5P_CISCCTRL_SCALEUP_H | S5P_CISCCTRL_SCALEUP_V |
+                S5P_CISCCTRL_SCALERBYPASS | S5P_CISCCTRL_ONE2ONE |
+                S5P_CISCCTRL_INRGB_FMT_MASK | S5P_CISCCTRL_OUTRGB_FMT_MASK |
+                S5P_CISCCTRL_INTERLACE | S5P_CISCCTRL_RGB_EXT);
 
        if (!(ctx->flags & FIMC_COLOR_RANGE_NARROW))
                cfg |= (S5P_CISCCTRL_CSCR2Y_WIDE | S5P_CISCCTRL_CSCY2R_WIDE);
@@ -308,9 +318,9 @@ void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
        fimc_hw_set_scaler(ctx);
 
        cfg = readl(dev->regs + S5P_CISCCTRL);
+       cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
 
        if (variant->has_mainscaler_ext) {
-               cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
                cfg |= S5P_CISCCTRL_MHRATIO_EXT(sc->main_hratio);
                cfg |= S5P_CISCCTRL_MVRATIO_EXT(sc->main_vratio);
                writel(cfg, dev->regs + S5P_CISCCTRL);
@@ -323,7 +333,6 @@ void fimc_hw_set_mainscaler(struct fimc_ctx *ctx)
                cfg |= S5P_CIEXTEN_MVRATIO_EXT(sc->main_vratio);
                writel(cfg, dev->regs + S5P_CIEXTEN);
        } else {
-               cfg &= ~(S5P_CISCCTRL_MHRATIO_MASK | S5P_CISCCTRL_MVRATIO_MASK);
                cfg |= S5P_CISCCTRL_MHRATIO(sc->main_hratio);
                cfg |= S5P_CISCCTRL_MVRATIO(sc->main_vratio);
                writel(cfg, dev->regs + S5P_CISCCTRL);
index 1e8cdb77d4b8540a991bb55d98ef35225596c98f..dff9dc79879566356df6ed580d28119793ae3b09 100644 (file)
@@ -61,7 +61,7 @@ static struct s5p_mfc_fmt formats[] = {
                .num_planes = 1,
        },
        {
-               .name = "H264 Encoded Stream",
+               .name = "H263 Encoded Stream",
                .fourcc = V4L2_PIX_FMT_H263,
                .codec_mode = S5P_FIMV_CODEC_H263_ENC,
                .type = MFC_FMT_ENC,
index e16d3a4bc1dcc0a0c4522e252b61a53d0e8e78d6..b47d0c06ecf5ab3ee66c911f1d035df1948b31e7 100644 (file)
@@ -16,6 +16,7 @@
 #include <media/v4l2-ioctl.h>
 #include <linux/videodev2.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/version.h>
 #include <linux/timer.h>
 #include <media/videobuf2-dma-contig.h>
index f390682629cf8e6ee832275e47e2ddc0ef1af433..c51decfcae197cedb83f6d68169cc0a6251f621d 100644 (file)
@@ -566,8 +566,10 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
        ret = sh_mobile_ceu_soft_reset(pcdev);
 
        csi2_sd = find_csi2(pcdev);
-       if (csi2_sd)
-               csi2_sd->grp_id = (long)icd;
+       if (csi2_sd) {
+               csi2_sd->grp_id = soc_camera_grp_id(icd);
+               v4l2_set_subdev_hostdata(csi2_sd, icd);
+       }
 
        ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
        if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) {
@@ -768,7 +770,7 @@ static struct v4l2_subdev *find_bus_subdev(struct sh_mobile_ceu_dev *pcdev,
 {
        if (pcdev->csi2_pdev) {
                struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
-               if (csi2_sd && csi2_sd->grp_id == (u32)icd)
+               if (csi2_sd && csi2_sd->grp_id == soc_camera_grp_id(icd))
                        return csi2_sd;
        }
 
@@ -1089,8 +1091,9 @@ static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int
                        /* Try 2560x1920, 1280x960, 640x480, 320x240 */
                        mf.width        = 2560 >> shift;
                        mf.height       = 1920 >> shift;
-                       ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
-                                                        s_mbus_fmt, &mf);
+                       ret = v4l2_device_call_until_err(sd->v4l2_dev,
+                                       soc_camera_grp_id(icd), video,
+                                       s_mbus_fmt, &mf);
                        if (ret < 0)
                                return ret;
                        shift++;
@@ -1389,7 +1392,8 @@ static int client_s_fmt(struct soc_camera_device *icd,
        bool ceu_1to1;
        int ret;
 
-       ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
+       ret = v4l2_device_call_until_err(sd->v4l2_dev,
+                                        soc_camera_grp_id(icd), video,
                                         s_mbus_fmt, mf);
        if (ret < 0)
                return ret;
@@ -1426,8 +1430,9 @@ static int client_s_fmt(struct soc_camera_device *icd,
                tmp_h = min(2 * tmp_h, max_height);
                mf->width = tmp_w;
                mf->height = tmp_h;
-               ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
-                                                s_mbus_fmt, mf);
+               ret = v4l2_device_call_until_err(sd->v4l2_dev,
+                                       soc_camera_grp_id(icd), video,
+                                       s_mbus_fmt, mf);
                dev_geo(dev, "Camera scaled to %ux%u\n",
                        mf->width, mf->height);
                if (ret < 0) {
@@ -1580,8 +1585,9 @@ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
        }
 
        if (interm_width < icd->user_width || interm_height < icd->user_height) {
-               ret = v4l2_device_call_until_err(sd->v4l2_dev, (int)icd, video,
-                                                s_mbus_fmt, &mf);
+               ret = v4l2_device_call_until_err(sd->v4l2_dev,
+                                       soc_camera_grp_id(icd), video,
+                                       s_mbus_fmt, &mf);
                if (ret < 0)
                        return ret;
 
@@ -1867,7 +1873,8 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
        mf.code         = xlate->code;
        mf.colorspace   = pix->colorspace;
 
-       ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, try_mbus_fmt, &mf);
+       ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
+                                        video, try_mbus_fmt, &mf);
        if (ret < 0)
                return ret;
 
@@ -1891,8 +1898,9 @@ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
                         */
                        mf.width = 2560;
                        mf.height = 1920;
-                       ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video,
-                                                        try_mbus_fmt, &mf);
+                       ret = v4l2_device_call_until_err(sd->v4l2_dev,
+                                       soc_camera_grp_id(icd), video,
+                                       try_mbus_fmt, &mf);
                        if (ret < 0) {
                                /* Shouldn't actually happen... */
                                dev_err(icd->parent,
index ea4f0473ed3be5c35c076c78e8f370e224175821..8a652b53ff7e9a8a1a4ad0f00bf7a874bf8f3bf6 100644 (file)
@@ -143,7 +143,7 @@ static int sh_csi2_s_mbus_config(struct v4l2_subdev *sd,
                                 const struct v4l2_mbus_config *cfg)
 {
        struct sh_csi2 *priv = container_of(sd, struct sh_csi2, subdev);
-       struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+       struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
        struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
        struct v4l2_mbus_config client_cfg = {.type = V4L2_MBUS_CSI2,
                                              .flags = priv->mipi_flags};
@@ -202,7 +202,7 @@ static void sh_csi2_hwinit(struct sh_csi2 *priv)
 static int sh_csi2_client_connect(struct sh_csi2 *priv)
 {
        struct sh_csi2_pdata *pdata = priv->pdev->dev.platform_data;
-       struct soc_camera_device *icd = (struct soc_camera_device *)priv->subdev.grp_id;
+       struct soc_camera_device *icd = v4l2_get_subdev_hostdata(&priv->subdev);
        struct v4l2_subdev *client_sd = soc_camera_to_subdev(icd);
        struct device *dev = v4l2_get_subdevdata(&priv->subdev);
        struct v4l2_mbus_config cfg;
index b72580c38957855434e12c265567864ddf49e583..62e4312515cb99a5e640cfe440954f1b8f5994cd 100644 (file)
@@ -1103,7 +1103,8 @@ static int soc_camera_probe(struct soc_camera_device *icd)
        }
 
        sd = soc_camera_to_subdev(icd);
-       sd->grp_id = (long)icd;
+       sd->grp_id = soc_camera_grp_id(icd);
+       v4l2_set_subdev_hostdata(sd, icd);
 
        if (v4l2_ctrl_add_handler(&icd->ctrl_handler, sd->ctrl_handler))
                goto ectrl;
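
This hunk, together with the sh_mobile_ceu and sh_csi2 changes above, stops casting the soc_camera_device pointer into the 32-bit grp_id field (a cast that truncates on 64-bit builds) and instead keeps grp_id as a plain numeric group id while the pointer travels as subdev host data. A minimal sketch of the store/retrieve pair; soc_camera_grp_id() is assumed to be the numeric-id helper introduced elsewhere in this series, and the function names are illustrative:

#include <media/v4l2-subdev.h>
#include <media/soc_camera.h>

/* Host side: tag the subdev with a numeric group id and attach the
 * soc_camera_device as host private data instead of casting the pointer. */
static void host_attach(struct v4l2_subdev *sd, struct soc_camera_device *icd)
{
        sd->grp_id = soc_camera_grp_id(icd);
        v4l2_set_subdev_hostdata(sd, icd);
}

/* Subdev side (e.g. sh_csi2): recover the device without pointer casts. */
static struct soc_camera_device *subdev_to_icd(struct v4l2_subdev *sd)
{
        return v4l2_get_subdev_hostdata(sd);
}
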
index 43c0ebb81956188b22f7ce2953eddc936f39ae3a..b7b2d3483fd4e1e1f8bb5b6a2feb6e7c349c57f3 100644 (file)
@@ -4,7 +4,7 @@
  * Debugfs support for the AB5500 MFD driver
  */
 
-#include <linux/export.h>
+#include <linux/module.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/mfd/ab5500/ab5500.h>
index 1e9173804ede2bacb93a7179d5d7c817c79dfcf5..d3d572b2317b888be174bf52d3d17c65b391ef07 100644 (file)
@@ -620,6 +620,7 @@ static struct resource __devinitdata ab8500_fg_resources[] = {
 
 static struct resource __devinitdata ab8500_chargalg_resources[] = {};
 
+#ifdef CONFIG_DEBUG_FS
 static struct resource __devinitdata ab8500_debug_resources[] = {
        {
                .name   = "IRQ_FIRST",
@@ -634,6 +635,7 @@ static struct resource __devinitdata ab8500_debug_resources[] = {
                .flags  = IORESOURCE_IRQ,
        },
 };
+#endif
 
 static struct resource __devinitdata ab8500_usb_resources[] = {
        {
index f1d88483112cac13565cce2a8f88d3092c9d516c..8d816cce8322ebecdf8d40e29f0c0b0a3aedcbdb 100644 (file)
@@ -109,7 +109,7 @@ int adp5520_set_bits(struct device *dev, int reg, uint8_t bit_mask)
 
        ret = __adp5520_read(chip->client, reg, &reg_val);
 
-       if (!ret && ((reg_val & bit_mask) == 0)) {
+       if (!ret && ((reg_val & bit_mask) != bit_mask)) {
                reg_val |= bit_mask;
                ret = __adp5520_write(chip->client, reg, reg_val);
        }
index 1b79c37fd59901b882fc0b995e6182fe43a4eedb..1924b857a0fbf6355d9acfcd96fa387b841c6976 100644 (file)
@@ -182,7 +182,7 @@ int da903x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
        if (ret)
                goto out;
 
-       if ((reg_val & bit_mask) == 0) {
+       if ((reg_val & bit_mask) != bit_mask) {
                reg_val |= bit_mask;
                ret = __da903x_write(chip->client, reg, reg_val);
        }
@@ -549,6 +549,7 @@ static int __devexit da903x_remove(struct i2c_client *client)
        struct da903x_chip *chip = i2c_get_clientdata(client);
 
        da903x_remove_subdevs(chip);
+       free_irq(client->irq, chip);
        kfree(chip);
        return 0;
 }
index 1e9ee533eacb8d204e860f9881d58e2675adf126..ef39528088f2298a47e7f62f69317b2a6af428ee 100644 (file)
@@ -16,6 +16,7 @@
  */
 
 #include <linux/err.h>
+#include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
index bba26d96c24075a3cbca9a320fabe3177b4de1a8..a5ddf31b60ca89d3f228997f7da526b126449d94 100644 (file)
@@ -197,7 +197,7 @@ int tps6586x_set_bits(struct device *dev, int reg, uint8_t bit_mask)
        if (ret)
                goto out;
 
-       if ((reg_val & bit_mask) == 0) {
+       if ((reg_val & bit_mask) != bit_mask) {
                reg_val |= bit_mask;
                ret = __tps6586x_write(to_i2c_client(dev), reg, reg_val);
        }
index 6f5b8cf2f652b8edf6accf9db107d8f63ea72c23..c1da84bc1573f563c4b698f0bc786068e7f911bc 100644 (file)
@@ -120,7 +120,7 @@ int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
                goto out;
        }
 
-       data &= mask;
+       data &= ~mask;
        err = tps65910_i2c_write(tps65910, reg, 1, &data);
        if (err)
                dev_err(tps65910->dev, "write to reg %x failed\n", reg);
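
The adp5520, da903x, tps6586x and tps65910 hunks all correct the same read-modify-write mistakes: a set may only be skipped when every bit in the mask is already set, and a clear must AND with the complement of the mask rather than the mask itself. A standalone sketch of the corrected logic (plain C, helper names illustrative):

#include <stdint.h>

/* Set bits: a write is needed if at least one masked bit is still clear. */
static uint8_t set_bits(uint8_t reg_val, uint8_t bit_mask, int *needs_write)
{
        *needs_write = (reg_val & bit_mask) != bit_mask; /* not "== 0" */
        return reg_val | bit_mask;
}

/* Clear bits: AND with the complement, not with the mask. */
static uint8_t clear_bits(uint8_t reg_val, uint8_t mask)
{
        return reg_val & ~mask;  /* "reg_val & mask" kept only the wrong bits */
}
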
index bfbd66021afd383703fad96e890975853c2bf063..61e70cfaa774fb977adcba8cc697a06edd673210 100644 (file)
@@ -363,13 +363,13 @@ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
                pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
                return -EPERM;
        }
-       sid = twl_map[mod_no].sid;
-       twl = &twl_modules[sid];
-
        if (unlikely(!inuse)) {
-               pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+               pr_err("%s: not initialized\n", DRIVER_NAME);
                return -EPERM;
        }
+       sid = twl_map[mod_no].sid;
+       twl = &twl_modules[sid];
+
        mutex_lock(&twl->xfer_lock);
        /*
         * [MSG1]: fill the register address data
@@ -420,13 +420,13 @@ int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
                pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
                return -EPERM;
        }
-       sid = twl_map[mod_no].sid;
-       twl = &twl_modules[sid];
-
        if (unlikely(!inuse)) {
-               pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
+               pr_err("%s: not initialized\n", DRIVER_NAME);
                return -EPERM;
        }
+       sid = twl_map[mod_no].sid;
+       twl = &twl_modules[sid];
+
        mutex_lock(&twl->xfer_lock);
        /* [MSG1] fill the register address data */
        msg = &twl->xfer_msg[0];
index f062c8cc6c38f3e40337444b91111646afefde88..29f11e0765feef54093b839b6e288ae56bc0bf3f 100644 (file)
@@ -432,6 +432,7 @@ struct sih_agent {
        u32                     edge_change;
 
        struct mutex            irq_lock;
+       char                    *irq_name;
 };
 
 /*----------------------------------------------------------------------*/
@@ -589,7 +590,7 @@ static inline int sih_read_isr(const struct sih *sih)
  * Generic handler for SIH interrupts ... we "know" this is called
  * in task context, with IRQs enabled.
  */
-static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
+static irqreturn_t handle_twl4030_sih(int irq, void *data)
 {
        struct sih_agent *agent = irq_get_handler_data(irq);
        const struct sih *sih = agent->sih;
@@ -602,7 +603,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
                pr_err("twl4030: %s SIH, read ISR error %d\n",
                        sih->name, isr);
                /* REVISIT:  recover; eventually mask it all, etc */
-               return;
+               return IRQ_HANDLED;
        }
 
        while (isr) {
@@ -616,6 +617,7 @@ static void handle_twl4030_sih(unsigned irq, struct irq_desc *desc)
                        pr_err("twl4030: %s SIH, invalid ISR bit %d\n",
                                sih->name, irq);
        }
+       return IRQ_HANDLED;
 }
 
 static unsigned twl4030_irq_next;
@@ -668,18 +670,19 @@ int twl4030_sih_setup(int module)
                activate_irq(irq);
        }
 
-       status = irq_base;
        twl4030_irq_next += i;
 
        /* replace generic PIH handler (handle_simple_irq) */
        irq = sih_mod + twl4030_irq_base;
        irq_set_handler_data(irq, agent);
-       irq_set_chained_handler(irq, handle_twl4030_sih);
+       agent->irq_name = kasprintf(GFP_KERNEL, "twl4030_%s", sih->name);
+       status = request_threaded_irq(irq, NULL, handle_twl4030_sih, 0,
+                                     agent->irq_name ?: sih->name, NULL);
 
        pr_info("twl4030: %s (irq %d) chaining IRQs %d..%d\n", sih->name,
                        irq, irq_base, twl4030_irq_next - 1);
 
-       return status;
+       return status < 0 ? status : irq_base;
 }
 
 /* FIXME need a call to reverse twl4030_sih_setup() ... */
@@ -733,8 +736,9 @@ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end)
        }
 
        /* install an irq handler to demultiplex the TWL4030 interrupt */
-       status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih, 0,
-                                       "TWL4030-PIH", NULL);
+       status = request_threaded_irq(irq_num, NULL, handle_twl4030_pih,
+                                     IRQF_ONESHOT,
+                                     "TWL4030-PIH", NULL);
        if (status < 0) {
                pr_err("twl4030: could not claim irq%d: %d\n", irq_num, status);
                goto fail_rqirq;
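
These twl4030-irq hunks replace the chained SIH flow handler with request_threaded_irq() (so the handler signature becomes (int irq, void *data) returning irqreturn_t) and add IRQF_ONESHOT to the top-level PIH request; with a NULL primary handler the IRQ core keeps the line masked until the thread finishes, which is what a slow I2C-based interrupt controller needs. A minimal sketch of that registration pattern, with hypothetical names:

#include <linux/interrupt.h>
#include <linux/kernel.h>

static irqreturn_t demo_irq_thread(int irq, void *data)
{
        /* Runs in task context; safe to sleep, e.g. for I2C transfers. */
        pr_info("demo: irq %d handled in thread\n", irq);
        return IRQ_HANDLED;
}

static int demo_request(int irq, void *dev)
{
        /* NULL primary handler + IRQF_ONESHOT: the line stays masked until
         * demo_irq_thread() returns, mirroring the PIH hunk above. */
        return request_threaded_irq(irq, NULL, demo_irq_thread,
                                    IRQF_ONESHOT, "demo-threaded-irq", dev);
}
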
index 5d6ba132837e8efb5f470d48784d5bb5f9809509..61894fced8ea281570bf80320fd593efa0d25740 100644 (file)
@@ -239,6 +239,7 @@ static int wm8994_suspend(struct device *dev)
 
        switch (wm8994->type) {
        case WM8958:
+       case WM1811:
                ret = wm8994_reg_read(wm8994, WM8958_MIC_DETECT_1);
                if (ret < 0) {
                        dev_err(dev, "Failed to read power status: %d\n", ret);
index a1cb21f95302c497157dc97ed85f9acd68a3c10d..1e0e27cbe98786e82abf3d5d9e6ede36cc8f0eb2 100644 (file)
@@ -1606,6 +1606,14 @@ static const struct mmc_fixup blk_fixups[] =
                  MMC_QUIRK_BLK_NO_CMD23),
        MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
                  MMC_QUIRK_BLK_NO_CMD23),
+
+       /*
+        * Some Micron MMC cards needs longer data read timeout than
+        * indicated in CSD.
+        */
+       MMC_FIXUP(CID_NAME_ANY, 0x13, 0x200, add_quirk_mmc,
+                 MMC_QUIRK_LONG_READ_TIME),
+
        END_FIXUP
 };
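
The new fixup tags the affected Micron parts with MMC_QUIRK_LONG_READ_TIME, and the core hunk further below checks it through mmc_card_long_read_time(). That helper is not visible in this excerpt; it is presumably added next to the other quirk accessors in include/linux/mmc/card.h, roughly in the shape sketched here (bit position illustrative, not authoritative):

#include <linux/mmc/card.h>

/* Assumed shape of the card.h additions (sketch only). */
#define MMC_QUIRK_LONG_READ_TIME        (1 << 9)        /* illustrative bit */

static inline int mmc_card_long_read_time(const struct mmc_card *c)
{
        return c->quirks & MMC_QUIRK_LONG_READ_TIME;
}
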
 
index 5278ffb20e74bbba9b5508e008171165ad296f16..950b97d7412a4f6f5f562401f132ed0d568a969a 100644 (file)
@@ -529,6 +529,18 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
                        data->timeout_clks = 0;
                }
        }
+
+       /*
+        * Some cards require longer data read timeout than indicated in CSD.
+        * Address this by setting the read timeout to a "reasonably high"
+        * value. For the cards tested, 300ms has proven enough. If necessary,
+        * this value can be increased if other problematic cards require this.
+        */
+       if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
+               data->timeout_ns = 300000000;
+               data->timeout_clks = 0;
+       }
+
        /*
         * Some cards need very high timeouts if driven in SPI mode.
         * The worst observed timeout was 900ms after writing a
@@ -1213,6 +1225,46 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
        mmc_host_clk_release(host);
 }
 
+static void mmc_poweroff_notify(struct mmc_host *host)
+{
+       struct mmc_card *card;
+       unsigned int timeout;
+       unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
+       int err = 0;
+
+       card = host->card;
+
+       /*
+        * Send power notify command only if card
+        * is mmc and notify state is powered ON
+        */
+       if (card && mmc_card_mmc(card) &&
+           (card->poweroff_notify_state == MMC_POWERED_ON)) {
+
+               if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
+                       notify_type = EXT_CSD_POWER_OFF_SHORT;
+                       timeout = card->ext_csd.generic_cmd6_time;
+                       card->poweroff_notify_state = MMC_POWEROFF_SHORT;
+               } else {
+                       notify_type = EXT_CSD_POWER_OFF_LONG;
+                       timeout = card->ext_csd.power_off_longtime;
+                       card->poweroff_notify_state = MMC_POWEROFF_LONG;
+               }
+
+               err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+                                EXT_CSD_POWER_OFF_NOTIFICATION,
+                                notify_type, timeout);
+
+               if (err && err != -EBADMSG)
+                       pr_err("Device failed to respond within %d poweroff "
+                              "time. Forcefully powering down the device\n",
+                              timeout);
+
+               /* Set the card state to no notification after the poweroff */
+               card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
+       }
+}
+
 /*
  * Apply power to the MMC stack.  This is a two-stage process.
  * First, we enable power to the card without the clock running.
@@ -1269,42 +1321,12 @@ static void mmc_power_up(struct mmc_host *host)
 
 void mmc_power_off(struct mmc_host *host)
 {
-       struct mmc_card *card;
-       unsigned int notify_type;
-       unsigned int timeout;
-       int err;
-
        mmc_host_clk_hold(host);
 
-       card = host->card;
        host->ios.clock = 0;
        host->ios.vdd = 0;
 
-       if (card && mmc_card_mmc(card) &&
-           (card->poweroff_notify_state == MMC_POWERED_ON)) {
-
-               if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
-                       notify_type = EXT_CSD_POWER_OFF_SHORT;
-                       timeout = card->ext_csd.generic_cmd6_time;
-                       card->poweroff_notify_state = MMC_POWEROFF_SHORT;
-               } else {
-                       notify_type = EXT_CSD_POWER_OFF_LONG;
-                       timeout = card->ext_csd.power_off_longtime;
-                       card->poweroff_notify_state = MMC_POWEROFF_LONG;
-               }
-
-               err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-                                EXT_CSD_POWER_OFF_NOTIFICATION,
-                                notify_type, timeout);
-
-               if (err && err != -EBADMSG)
-                       pr_err("Device failed to respond within %d poweroff "
-                              "time. Forcefully powering down the device\n",
-                              timeout);
-
-               /* Set the card state to no notification after the poweroff */
-               card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
-       }
+       mmc_poweroff_notify(host);
 
        /*
         * Reset ocr mask to be the highest possible voltage supported for
@@ -2196,7 +2218,7 @@ int mmc_card_sleep(struct mmc_host *host)
 
        mmc_bus_get(host);
 
-       if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
+       if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
                err = host->bus_ops->sleep(host);
 
        mmc_bus_put(host);
@@ -2302,8 +2324,17 @@ int mmc_suspend_host(struct mmc_host *host)
                 * pre-claim the host.
                 */
                if (mmc_try_claim_host(host)) {
-                       if (host->bus_ops->suspend)
+                       if (host->bus_ops->suspend) {
+                               /*
+                                * For eMMC 4.5 device send notify command
+                                * before sleep, because in sleep state eMMC 4.5
+                                * devices respond to only RESET and AWAKE cmd
+                                */
+                               mmc_poweroff_notify(host);
                                err = host->bus_ops->suspend(host);
+                       }
+                       mmc_do_release_host(host);
+
                        if (err == -ENOSYS || !host->bus_ops->resume) {
                                /*
                                 * We simply "remove" the card in this case.
@@ -2318,7 +2349,6 @@ int mmc_suspend_host(struct mmc_host *host)
                                host->pm_flags = 0;
                                err = 0;
                        }
-                       mmc_do_release_host(host);
                } else {
                        err = -EBUSY;
                }
index e8a5eb38748be34a002aa24a5ba28af9c472c182..d31c78b72b0fd8d1e53adb2f403454790896fafe 100644 (file)
@@ -302,17 +302,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
        host->max_blk_size = 512;
        host->max_blk_count = PAGE_CACHE_SIZE / 512;
 
-       /*
-        * Enable runtime power management by default. This flag was added due
-        * to runtime power management causing disruption for some users, but
-        * the power on/off code has been improved since then.
-        *
-        * We'll enable this flag by default as an experiment, and if no
-        * problems are reported, we will follow up later and remove the flag
-        * altogether.
-        */
-       host->caps = MMC_CAP_POWER_OFF_CARD;
-
        return host;
 
 free:
index dbf421a6279c702d91baeede5bb5f1e55dcb6fcd..d240427c12462dd545b233d6d6e75787e4b48abb 100644 (file)
@@ -876,17 +876,21 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
         * set the notification byte in the ext_csd register of device
         */
        if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
-           (card->poweroff_notify_state == MMC_NO_POWER_NOTIFICATION)) {
+           (card->ext_csd.rev >= 6)) {
                err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_POWER_OFF_NOTIFICATION,
                                 EXT_CSD_POWER_ON,
                                 card->ext_csd.generic_cmd6_time);
                if (err && err != -EBADMSG)
                        goto free_card;
-       }
 
-       if (!err)
-               card->poweroff_notify_state = MMC_POWERED_ON;
+               /*
+                * The err can be -EBADMSG or 0,
+                * so check for success and update the flag
+                */
+               if (!err)
+                       card->poweroff_notify_state = MMC_POWERED_ON;
+       }
 
        /*
         * Activate high speed (if supported)
index 50b5f9926f6462d7836434c89a71213f418faa9c..0726e59fd418fb4b8520de940d76214db15c2ccc 100644 (file)
@@ -675,7 +675,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
              unsigned int status)
 {
        /* First check for errors */
-       if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
+       if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+                     MCI_TXUNDERRUN|MCI_RXOVERRUN)) {
                u32 remain, success;
 
                /* Terminate the DMA transfer */
@@ -754,8 +755,12 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
        }
 
        if (!cmd->data || cmd->error) {
-               if (host->data)
+               if (host->data) {
+                       /* Terminate the DMA transfer */
+                       if (dma_inprogress(host))
+                               mmci_dma_data_error(host);
                        mmci_stop_data(host);
+               }
                mmci_request_end(host, cmd->mrq);
        } else if (!(cmd->data->flags & MMC_DATA_READ)) {
                mmci_start_data(host, cmd->data);
@@ -955,8 +960,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
                dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
 
                data = host->data;
-               if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|
-                             MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data)
+               if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
+                             MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
+                             MCI_DATABLOCKEND) && data)
                        mmci_data_irq(host, data, status);
 
                cmd = host->cmd;
index 325ea61e12d37d4d47cb9f7ee82d795f11fc8ccf..8e0fbe99404778ee1cc4f512b767479d7dad9855 100644 (file)
@@ -732,6 +732,7 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                                "failed to config DMA channel. Falling back to PIO\n");
                        dma_release_channel(host->dma);
                        host->do_dma = 0;
+                       host->dma = NULL;
                }
        }
 
index 101cd31c82207982cf5986390a80f91c1053c535..d5fe43d53c51894ff9e861dbced02bfde10ef705 100644 (file)
@@ -1010,6 +1010,7 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
                        host->data->sg_len,
                        omap_hsmmc_get_dma_dir(host, host->data));
                omap_free_dma(dma_ch);
+               host->data->host_cookie = 0;
        }
        host->data = NULL;
 }
@@ -1575,8 +1576,10 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
        struct mmc_data *data = mrq->data;
 
        if (host->use_dma) {
-               dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
-                            omap_hsmmc_get_dma_dir(host, data));
+               if (data->host_cookie)
+                       dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+                                    data->sg_len,
+                                    omap_hsmmc_get_dma_dir(host, data));
                data->host_cookie = 0;
        }
 }
index 4b920b7621cfe7cd214d7c3ed30d848de383e1ad..b4257e700617c6e4f51455a462df1f063dfa0294 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/mmc/host.h>
+#include <linux/module.h>
 #include <mach/cns3xxx.h>
 #include "sdhci-pltfm.h"
 
@@ -108,13 +109,10 @@ static struct platform_driver sdhci_cns3xxx_driver = {
        .driver         = {
                .name   = "sdhci-cns3xxx",
                .owner  = THIS_MODULE,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .probe          = sdhci_cns3xxx_probe,
        .remove         = __devexit_p(sdhci_cns3xxx_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_cns3xxx_init(void)
index f2d29dca442074e319da8fc050ea28777ddf32fc..a81312c91f7086625d10f963df9f31c0b64e0c4c 100644 (file)
@@ -82,13 +82,10 @@ static struct platform_driver sdhci_dove_driver = {
        .driver         = {
                .name   = "sdhci-dove",
                .owner  = THIS_MODULE,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .probe          = sdhci_dove_probe,
        .remove         = __devexit_p(sdhci_dove_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_dove_init(void)
index 4b976f00ea85137913a2b54e7dce63e590b809ac..38ebc4ea259fcb659f855fe446e45e0dc7fa9952 100644 (file)
@@ -599,14 +599,11 @@ static struct platform_driver sdhci_esdhc_imx_driver = {
                .name   = "sdhci-esdhc-imx",
                .owner  = THIS_MODULE,
                .of_match_table = imx_esdhc_dt_ids,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .id_table       = imx_esdhc_devtype,
        .probe          = sdhci_esdhc_imx_probe,
        .remove         = __devexit_p(sdhci_esdhc_imx_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_esdhc_imx_init(void)
index 59e9d003e5891c0f9d76b8788055399bb1c3c070..01e5f627e0f047b2a7c63474a931fbedab0a1444 100644 (file)
@@ -125,13 +125,10 @@ static struct platform_driver sdhci_esdhc_driver = {
                .name = "sdhci-esdhc",
                .owner = THIS_MODULE,
                .of_match_table = sdhci_esdhc_of_match,
+               .pm = SDHCI_PLTFM_PMOPS,
        },
        .probe = sdhci_esdhc_probe,
        .remove = __devexit_p(sdhci_esdhc_remove),
-#ifdef CONFIG_PM
-       .suspend = sdhci_pltfm_suspend,
-       .resume = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_esdhc_init(void)
index 9b0d794a4f692911d05347fe917722635b96594b..3619adc7d9fc36548283386f82e08be2d195a608 100644 (file)
@@ -87,13 +87,10 @@ static struct platform_driver sdhci_hlwd_driver = {
                .name = "sdhci-hlwd",
                .owner = THIS_MODULE,
                .of_match_table = sdhci_hlwd_of_match,
+               .pm = SDHCI_PLTFM_PMOPS,
        },
        .probe = sdhci_hlwd_probe,
        .remove = __devexit_p(sdhci_hlwd_remove),
-#ifdef CONFIG_PM
-       .suspend = sdhci_pltfm_suspend,
-       .resume = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_hlwd_init(void)
index d833d9c2f7e338425262402af53d6e2f2b63e848..6878a94626bc35b18bf493a4dcbf522dc3403255 100644 (file)
@@ -54,8 +54,7 @@ struct sdhci_pci_fixes {
        int                     (*probe_slot) (struct sdhci_pci_slot *);
        void                    (*remove_slot) (struct sdhci_pci_slot *, int);
 
-       int                     (*suspend) (struct sdhci_pci_chip *,
-                                       pm_message_t);
+       int                     (*suspend) (struct sdhci_pci_chip *);
        int                     (*resume) (struct sdhci_pci_chip *);
 };
 
@@ -549,7 +548,7 @@ static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)
                jmicron_enable_mmc(slot->host, 0);
 }
 
-static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state)
+static int jmicron_suspend(struct sdhci_pci_chip *chip)
 {
        int i;
 
@@ -993,8 +992,9 @@ static struct sdhci_ops sdhci_pci_ops = {
 
 #ifdef CONFIG_PM
 
-static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int sdhci_pci_suspend(struct device *dev)
 {
+       struct pci_dev *pdev = to_pci_dev(dev);
        struct sdhci_pci_chip *chip;
        struct sdhci_pci_slot *slot;
        mmc_pm_flag_t slot_pm_flags;
@@ -1010,7 +1010,7 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
                if (!slot)
                        continue;
 
-               ret = sdhci_suspend_host(slot->host, state);
+               ret = sdhci_suspend_host(slot->host);
 
                if (ret) {
                        for (i--; i >= 0; i--)
@@ -1026,7 +1026,7 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
        }
 
        if (chip->fixes && chip->fixes->suspend) {
-               ret = chip->fixes->suspend(chip, state);
+               ret = chip->fixes->suspend(chip);
                if (ret) {
                        for (i = chip->num_slots - 1; i >= 0; i--)
                                sdhci_resume_host(chip->slots[i]->host);
@@ -1042,16 +1042,17 @@ static int sdhci_pci_suspend(struct pci_dev *pdev, pm_message_t state)
                }
                pci_set_power_state(pdev, PCI_D3hot);
        } else {
-               pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+               pci_enable_wake(pdev, PCI_D3hot, 0);
                pci_disable_device(pdev);
-               pci_set_power_state(pdev, pci_choose_state(pdev, state));
+               pci_set_power_state(pdev, PCI_D3hot);
        }
 
        return 0;
 }
 
-static int sdhci_pci_resume(struct pci_dev *pdev)
+static int sdhci_pci_resume(struct device *dev)
 {
+       struct pci_dev *pdev = to_pci_dev(dev);
        struct sdhci_pci_chip *chip;
        struct sdhci_pci_slot *slot;
        int i, ret;
@@ -1099,7 +1100,6 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
        struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
        struct sdhci_pci_chip *chip;
        struct sdhci_pci_slot *slot;
-       pm_message_t state = { .event = PM_EVENT_SUSPEND };
        int i, ret;
 
        chip = pci_get_drvdata(pdev);
@@ -1121,7 +1121,7 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
        }
 
        if (chip->fixes && chip->fixes->suspend) {
-               ret = chip->fixes->suspend(chip, state);
+               ret = chip->fixes->suspend(chip);
                if (ret) {
                        for (i = chip->num_slots - 1; i >= 0; i--)
                                sdhci_runtime_resume_host(chip->slots[i]->host);
@@ -1176,6 +1176,8 @@ static int sdhci_pci_runtime_idle(struct device *dev)
 #endif
 
 static const struct dev_pm_ops sdhci_pci_pm_ops = {
+       .suspend = sdhci_pci_suspend,
+       .resume = sdhci_pci_resume,
        .runtime_suspend = sdhci_pci_runtime_suspend,
        .runtime_resume = sdhci_pci_runtime_resume,
        .runtime_idle = sdhci_pci_runtime_idle,
@@ -1428,8 +1430,6 @@ static struct pci_driver sdhci_driver = {
        .id_table =     pci_ids,
        .probe =        sdhci_pci_probe,
        .remove =       __devexit_p(sdhci_pci_remove),
-       .suspend =      sdhci_pci_suspend,
-       .resume =       sdhci_pci_resume,
        .driver =       {
                .pm =   &sdhci_pci_pm_ops
        },
index a9e12ea05583bfe3da78ef48f5442c3294367ff5..03970bcb3495ee7f542a12fc118074635dcd78ff 100644 (file)
@@ -194,21 +194,25 @@ int sdhci_pltfm_unregister(struct platform_device *pdev)
 EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);
 
 #ifdef CONFIG_PM
-int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state)
+static int sdhci_pltfm_suspend(struct device *dev)
 {
-       struct sdhci_host *host = platform_get_drvdata(dev);
+       struct sdhci_host *host = dev_get_drvdata(dev);
 
-       return sdhci_suspend_host(host, state);
+       return sdhci_suspend_host(host);
 }
-EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend);
 
-int sdhci_pltfm_resume(struct platform_device *dev)
+static int sdhci_pltfm_resume(struct device *dev)
 {
-       struct sdhci_host *host = platform_get_drvdata(dev);
+       struct sdhci_host *host = dev_get_drvdata(dev);
 
        return sdhci_resume_host(host);
 }
-EXPORT_SYMBOL_GPL(sdhci_pltfm_resume);
+
+const struct dev_pm_ops sdhci_pltfm_pmops = {
+       .suspend        = sdhci_pltfm_suspend,
+       .resume         = sdhci_pltfm_resume,
+};
+EXPORT_SYMBOL_GPL(sdhci_pltfm_pmops);
 #endif /* CONFIG_PM */
 
 static int __init sdhci_pltfm_drv_init(void)
index 3a9fc3f40840eb5a3a09704c2282863b32375405..37e0e184a0bbf1f23fe9ca6233f85341633802b6 100644 (file)
@@ -99,8 +99,10 @@ extern int sdhci_pltfm_register(struct platform_device *pdev,
 extern int sdhci_pltfm_unregister(struct platform_device *pdev);
 
 #ifdef CONFIG_PM
-extern int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state);
-extern int sdhci_pltfm_resume(struct platform_device *dev);
+extern const struct dev_pm_ops sdhci_pltfm_pmops;
+#define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops)
+#else
+#define SDHCI_PLTFM_PMOPS NULL
 #endif
 
 #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */
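
Every sdhci-* glue driver touched above drops the legacy platform_driver .suspend/.resume hooks (which took a platform_device and a pm_message_t) and instead points .driver.pm at a shared const struct dev_pm_ops, with a NULL fallback when CONFIG_PM is off. A minimal sketch of that conversion for a hypothetical platform driver:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM
static int demo_suspend(struct device *dev)
{
        /* dev_get_drvdata(dev) replaces platform_get_drvdata(pdev) here. */
        return 0;
}

static int demo_resume(struct device *dev)
{
        return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
        .suspend = demo_suspend,
        .resume  = demo_resume,
};
#define DEMO_PM_OPS (&demo_pm_ops)
#else
#define DEMO_PM_OPS NULL
#endif

static struct platform_driver demo_driver = {
        .driver = {
                .name  = "demo",
                .owner = THIS_MODULE,
                .pm    = DEMO_PM_OPS,   /* dev_pm_ops instead of legacy hooks */
        },
};

The same #ifdef/#define arrangement is what SDHCI_PLTFM_PMOPS and SDHCI_S3C_PMOPS implement in the hunks above.
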
index d4bf6d30c7baa3ea5a6c6bb1abc01d58e41ad853..7a039c3cb1f10a8651ae04091c8303008ad2cbe2 100644 (file)
@@ -218,13 +218,10 @@ static struct platform_driver sdhci_pxav2_driver = {
        .driver         = {
                .name   = "sdhci-pxav2",
                .owner  = THIS_MODULE,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .probe          = sdhci_pxav2_probe,
        .remove         = __devexit_p(sdhci_pxav2_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 static int __init sdhci_pxav2_init(void)
 {
index cff4ad3e7a59c94c599716bfcf3bf63b0f28d3d7..15673a7ee6a59a70d7455179f3afe30b3fb15588 100644 (file)
@@ -264,13 +264,10 @@ static struct platform_driver sdhci_pxav3_driver = {
        .driver         = {
                .name   = "sdhci-pxav3",
                .owner  = THIS_MODULE,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .probe          = sdhci_pxav3_probe,
        .remove         = __devexit_p(sdhci_pxav3_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 static int __init sdhci_pxav3_init(void)
 {
index 3d00e722efc9cec34e9cbdd1807863326f58e828..0d33ff0d67fbf7abf46f496c14e61dbdd3a9f6a4 100644 (file)
@@ -622,33 +622,38 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
 
 #ifdef CONFIG_PM
 
-static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
+static int sdhci_s3c_suspend(struct device *dev)
 {
-       struct sdhci_host *host = platform_get_drvdata(dev);
+       struct sdhci_host *host = dev_get_drvdata(dev);
 
-       return sdhci_suspend_host(host, pm);
+       return sdhci_suspend_host(host);
 }
 
-static int sdhci_s3c_resume(struct platform_device *dev)
+static int sdhci_s3c_resume(struct device *dev)
 {
-       struct sdhci_host *host = platform_get_drvdata(dev);
+       struct sdhci_host *host = dev_get_drvdata(dev);
 
        return sdhci_resume_host(host);
 }
 
+static const struct dev_pm_ops sdhci_s3c_pmops = {
+       .suspend        = sdhci_s3c_suspend,
+       .resume         = sdhci_s3c_resume,
+};
+
+#define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops)
+
 #else
-#define sdhci_s3c_suspend NULL
-#define sdhci_s3c_resume NULL
+#define SDHCI_S3C_PMOPS NULL
 #endif
 
 static struct platform_driver sdhci_s3c_driver = {
        .probe          = sdhci_s3c_probe,
        .remove         = __devexit_p(sdhci_s3c_remove),
-       .suspend        = sdhci_s3c_suspend,
-       .resume         = sdhci_s3c_resume,
        .driver         = {
                .owner  = THIS_MODULE,
                .name   = "s3c-sdhci",
+               .pm     = SDHCI_S3C_PMOPS,
        },
 };
 
index 89699e861fc1c90c8c127ac798a3e208dd2d839e..e2e18d3f949c45dda79395646484a848a84d9326 100644 (file)
@@ -318,13 +318,10 @@ static struct platform_driver sdhci_tegra_driver = {
                .name   = "sdhci-tegra",
                .owner  = THIS_MODULE,
                .of_match_table = sdhci_tegra_dt_match,
+               .pm     = SDHCI_PLTFM_PMOPS,
        },
        .probe          = sdhci_tegra_probe,
        .remove         = __devexit_p(sdhci_tegra_remove),
-#ifdef CONFIG_PM
-       .suspend        = sdhci_pltfm_suspend,
-       .resume         = sdhci_pltfm_resume,
-#endif
 };
 
 static int __init sdhci_tegra_init(void)
index 6d8eea3235411e875250b3a8b7f588c2290304ec..19ed580f2cabf2fca09ae59d52cb86f5265f9ab8 100644 (file)
@@ -2327,7 +2327,7 @@ out:
 
 #ifdef CONFIG_PM
 
-int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
+int sdhci_suspend_host(struct sdhci_host *host)
 {
        int ret;
 
index 0a5b65460d8a760de4c37cbdb2d9a89dd7cf430d..a04d4d0c6fd20911c29f59a4f0132eea2cffa6b7 100644 (file)
@@ -374,7 +374,7 @@ extern int sdhci_add_host(struct sdhci_host *host);
 extern void sdhci_remove_host(struct sdhci_host *host, int dead);
 
 #ifdef CONFIG_PM
-extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
+extern int sdhci_suspend_host(struct sdhci_host *host);
 extern int sdhci_resume_host(struct sdhci_host *host);
 extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
 #endif
index 369366c8e205e4ed8aa27746e3a8940b89e54287..d5505f3fe2a170d04fc9011c5b5d2ff886a31df7 100644 (file)
@@ -908,7 +908,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                if (host->power) {
                        pm_runtime_put(&host->pd->dev);
                        host->power = false;
-                       if (p->down_pwr)
+                       if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
                                p->down_pwr(host->pd);
                }
                host->state = STATE_IDLE;
index d85a60cda16781ae25509a28705122597b87dd2a..4208b3958069119e68e8da8ad03e3eadf5862b8c 100644 (file)
@@ -798,7 +798,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                /* start bus clock */
                tmio_mmc_clk_start(host);
        } else if (ios->power_mode != MMC_POWER_UP) {
-               if (host->set_pwr)
+               if (host->set_pwr && ios->power_mode == MMC_POWER_OFF)
                        host->set_pwr(host->pdev, 0);
                if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
                    pdata->power) {
index e8f6e65183d77d1a6eeb8d8137af21d57f07ca90..2ec978bc32ba47232b6618db96e2db1bebb25038 100644 (file)
@@ -259,7 +259,7 @@ static int firmware_rom_wait_states = 0x04;
 static int firmware_rom_wait_states = 0x1C;
 #endif
 
-module_param(firmware_rom_wait_states, bool, 0644);
+module_param(firmware_rom_wait_states, int, 0644);
 MODULE_PARM_DESC(firmware_rom_wait_states,
                 "ROM wait states byte=RRRIIEEE (Reserved Internal External)");
 
index 94f553489725280d2ec851023f17745e86c48d9a..45876d0e5b8e500b321466fad5789d7a9bfbe182 100644 (file)
@@ -227,10 +227,14 @@ static int platram_probe(struct platform_device *pdev)
        if (!err)
                dev_info(&pdev->dev, "registered mtd device\n");
 
-       /* add the whole device. */
-       err = mtd_device_register(info->mtd, NULL, 0);
-       if (err)
-               dev_err(&pdev->dev, "failed to register the entire device\n");
+       if (pdata->nr_partitions) {
+               /* add the whole device. */
+               err = mtd_device_register(info->mtd, NULL, 0);
+               if (err) {
+                       dev_err(&pdev->dev,
+                               "failed to register the entire device\n");
+               }
+       }
 
        return err;
 
index 411a17df9fc13ee2fd78d8e54456e18095bdd91f..2a25b6789af4d5633ee7fcadf104fc08e9183928 100644 (file)
@@ -98,7 +98,7 @@ static int __devinit pxa2xx_flash_probe(struct platform_device *pdev)
        }
        info->mtd->owner = THIS_MODULE;
 
-       mtd_device_parse_register(info->mtd, probes, 0, NULL, 0);
+       mtd_device_parse_register(info->mtd, probes, 0, flash->parts, flash->nr_parts);
 
        platform_set_drvdata(pdev, info);
        return 0;
index 071b63420f0e213a2ba9df41495bce2ba468b2af..493ec2fcf97fe7f8c21e55420e4ecb3ddaa6c845 100644 (file)
@@ -21,9 +21,9 @@
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
+#include <linux/module.h>
 #include <linux/mtd/gpmi-nand.h>
 #include <linux/mtd/partitions.h>
-
 #include "gpmi-nand.h"
 
 /* add our owner bbt descriptor */
index ee1713907b92b0684a64de524ca564214699f705..f8aacf48ecddf6c12a63a10026e8b948d5f0a8e2 100644 (file)
@@ -188,7 +188,7 @@ static int ndfc_chip_init(struct ndfc_controller *ndfc,
        if (!flash_np)
                return -ENODEV;
 
-       ppdata->of_node = flash_np;
+       ppdata.of_node = flash_np;
        ndfc->mtd.name = kasprintf(GFP_KERNEL, "%s.%s",
                        dev_name(&ndfc->ofdev->dev), flash_np->name);
        if (!ndfc->mtd.name) {
index a73d9dc80ff674d7b90de88226511470b9c8c873..84fb6349a59ab7d78c2ba37fe4d06538e5d52bd8 100644 (file)
@@ -4,7 +4,7 @@
 
 menuconfig ARCNET
        depends on NETDEVICES && (ISA || PCI || PCMCIA)
-       bool "ARCnet support"
+       tristate "ARCnet support"
        ---help---
          If you have a network card of this type, say Y and check out the
          (arguably) beautiful poetry in
index b0c577256487b4b7c35d690da3a2ca619d2deb4c..7f8756825b8abf62f924d569b3e923c7f955c413 100644 (file)
@@ -2553,30 +2553,6 @@ re_arm:
        }
 }
 
-static __be32 bond_glean_dev_ip(struct net_device *dev)
-{
-       struct in_device *idev;
-       struct in_ifaddr *ifa;
-       __be32 addr = 0;
-
-       if (!dev)
-               return 0;
-
-       rcu_read_lock();
-       idev = __in_dev_get_rcu(dev);
-       if (!idev)
-               goto out;
-
-       ifa = idev->ifa_list;
-       if (!ifa)
-               goto out;
-
-       addr = ifa->ifa_local;
-out:
-       rcu_read_unlock();
-       return addr;
-}
-
 static int bond_has_this_ip(struct bonding *bond, __be32 ip)
 {
        struct vlan_entry *vlan;
@@ -3322,6 +3298,10 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
        struct bonding *bond;
        struct vlan_entry *vlan;
 
+       /* we only care about primary address */
+       if(ifa->ifa_flags & IFA_F_SECONDARY)
+               return NOTIFY_DONE;
+
        list_for_each_entry(bond, &bn->dev_list, bond_list) {
                if (bond->dev == event_dev) {
                        switch (event) {
@@ -3329,7 +3309,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
                                bond->master_ip = ifa->ifa_local;
                                return NOTIFY_OK;
                        case NETDEV_DOWN:
-                               bond->master_ip = bond_glean_dev_ip(bond->dev);
+                               bond->master_ip = 0;
                                return NOTIFY_OK;
                        default:
                                return NOTIFY_DONE;
@@ -3345,8 +3325,7 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
                                        vlan->vlan_ip = ifa->ifa_local;
                                        return NOTIFY_OK;
                                case NETDEV_DOWN:
-                                       vlan->vlan_ip =
-                                               bond_glean_dev_ip(vlan_dev);
+                                       vlan->vlan_ip = 0;
                                        return NOTIFY_OK;
                                default:
                                        return NOTIFY_DONE;
index 905bce0b3a4328bdc59243481d2624c63f55b0e8..2c7f5036f570fd70d1078c1fabef3b7336d2d89e 100644 (file)
@@ -20,7 +20,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/version.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
index 4cf835dbc1222f6c90154b601bc02b76d6efbbb6..3fb66d09ece59e463c81eb20b77ba7cfa7df9fd0 100644 (file)
@@ -608,7 +608,7 @@ static void b44_tx(struct b44 *bp)
                                 skb->len,
                                 DMA_TO_DEVICE);
                rp->skb = NULL;
-               dev_kfree_skb(skb);
+               dev_kfree_skb_irq(skb);
        }
 
        bp->tx_cons = cons;
index bce203fa4b9e274c24e06e52b133a0936328c4e5..882f48f0a03cdb47568bff2e67b95cc1cdab5cb3 100644 (file)
@@ -10327,6 +10327,43 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
        return 0;
 }
 
+
+static void bnx2x_5461x_set_link_led(struct bnx2x_phy *phy,
+                                      struct link_params *params, u8 mode)
+{
+       struct bnx2x *bp = params->bp;
+       u16 temp;
+
+       bnx2x_cl22_write(bp, phy,
+               MDIO_REG_GPHY_SHADOW,
+               MDIO_REG_GPHY_SHADOW_LED_SEL1);
+       bnx2x_cl22_read(bp, phy,
+               MDIO_REG_GPHY_SHADOW,
+               &temp);
+       temp &= 0xff00;
+
+       DP(NETIF_MSG_LINK, "54618x set link led (mode=%x)\n", mode);
+       switch (mode) {
+       case LED_MODE_FRONT_PANEL_OFF:
+       case LED_MODE_OFF:
+               temp |= 0x00ee;
+               break;
+       case LED_MODE_OPER:
+               temp |= 0x0001;
+               break;
+       case LED_MODE_ON:
+               temp |= 0x00ff;
+               break;
+       default:
+               break;
+       }
+       bnx2x_cl22_write(bp, phy,
+               MDIO_REG_GPHY_SHADOW,
+               MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+       return;
+}
+
+
 static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
                                     struct link_params *params)
 {
@@ -11103,7 +11140,7 @@ static struct bnx2x_phy phy_54618se = {
        .config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
        .format_fw_ver  = (format_fw_ver_t)NULL,
        .hw_reset       = (hw_reset_t)NULL,
-       .set_link_led   = (set_link_led_t)NULL,
+       .set_link_led   = (set_link_led_t)bnx2x_5461x_set_link_led,
        .phy_specific_func = (phy_specific_func_t)NULL
 };
 /*****************************************************************/
index fc7bd0f23c0b7e656905eb590ea6d8c0d360ea44..e58073ef33b47e5dc45708d794726a7a038470a2 100644 (file)
@@ -6990,6 +6990,7 @@ The other bits are reserved and should be zero*/
 #define MDIO_REG_INTR_MASK                             0x1b
 #define MDIO_REG_INTR_MASK_LINK_STATUS                 (0x1 << 1)
 #define MDIO_REG_GPHY_SHADOW                           0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL1                  (0x0d << 10)
 #define MDIO_REG_GPHY_SHADOW_LED_SEL2                  (0x0e << 10)
 #define MDIO_REG_GPHY_SHADOW_WR_ENA                    (0x1 << 15)
 #define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED              (0x1e << 10)
index 438f4580bf66207539761761e37c42e0ef303b78..2a22f52563532201891fa4a720d11180886b714c 100644 (file)
@@ -613,7 +613,7 @@ static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
 
                if (!dm->wake_state)
                        irq_set_irq_wake(dm->irq_wake, 1);
-               else if (dm->wake_state & !opts)
+               else if (dm->wake_state && !opts)
                        irq_set_irq_wake(dm->irq_wake, 0);
        }
 
index c520cfd3b29805440508acff0a8748b5956fee11..5272f9d4dda9448faece5a061d413c71620f6da4 100644 (file)
@@ -24,6 +24,7 @@ config FEC
        bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
        depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
                   ARCH_MXC || ARCH_MXS)
+       default ARCH_MXC || ARCH_MXS if ARM
        select PHYLIB
        ---help---
          Say Y here if you want to use the built-in 10/100 Fast ethernet
index 1124ce0a15944a36dd119efae6fe47a101264b72..c136230d50bb125e6ca1f4de21ae72485e0dbec2 100644 (file)
@@ -232,6 +232,7 @@ struct fec_enet_private {
        struct  platform_device *pdev;
 
        int     opened;
+       int     dev_id;
 
        /* Phylib and MDIO interface */
        struct  mii_bus *mii_bus;
@@ -837,7 +838,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
 
        /* Adjust MAC if using macaddr */
        if (iap == macaddr)
-                ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
+                ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
 }
 
 /* ------------------------------------------------------------------------- */
@@ -953,7 +954,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
        char mdio_bus_id[MII_BUS_ID_SIZE];
        char phy_name[MII_BUS_ID_SIZE + 3];
        int phy_id;
-       int dev_id = fep->pdev->id;
+       int dev_id = fep->dev_id;
 
        fep->phy_dev = NULL;
 
@@ -1031,7 +1032,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
         * mdio interface in board design, and need to be configured by
         * fec0 mii_bus.
         */
-       if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id > 0) {
+       if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
                /* fec1 uses fec0 mii_bus */
                fep->mii_bus = fec0_mii_bus;
                return 0;
@@ -1063,7 +1064,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
        fep->mii_bus->read = fec_enet_mdio_read;
        fep->mii_bus->write = fec_enet_mdio_write;
        fep->mii_bus->reset = fec_enet_mdio_reset;
-       snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
+       snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", fep->dev_id + 1);
        fep->mii_bus->priv = fep;
        fep->mii_bus->parent = &pdev->dev;
 
@@ -1521,6 +1522,7 @@ fec_probe(struct platform_device *pdev)
        int i, irq, ret = 0;
        struct resource *r;
        const struct of_device_id *of_id;
+       static int dev_id;
 
        of_id = of_match_device(fec_dt_ids, &pdev->dev);
        if (of_id)
@@ -1548,6 +1550,7 @@ fec_probe(struct platform_device *pdev)
 
        fep->hwp = ioremap(r->start, resource_size(r));
        fep->pdev = pdev;
+       fep->dev_id = dev_id++;
 
        if (!fep->hwp) {
                ret = -ENOMEM;
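
The fec hunks stop deriving the instance number from pdev->id, which is -1 for devices instantiated from the device tree, and instead hand out ids from a static counter inside probe. A minimal sketch of the same pattern for a hypothetical driver; like the original it assumes probes of this driver do not race, since the counter is not locked:

#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_priv {
        int dev_id;             /* stable instance number, independent of pdev->id */
};

static int demo_probe(struct platform_device *pdev)
{
        static int instance;    /* pdev->id is -1 for DT-instantiated devices */
        struct demo_priv *priv;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->dev_id = instance++;      /* mirrors fep->dev_id = dev_id++ above */
        platform_set_drvdata(pdev, priv);
        return 0;
}
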
index 52f4e8ad48e77c84b8bd4ed9cd52faa3f5adb419..4d9f84b8ab9773ef91d75f395907fd9b3f9c25de 100644 (file)
@@ -183,28 +183,10 @@ void fsl_pq_mdio_bus_name(char *name, struct device_node *np)
 }
 EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
 
-/* Scan the bus in reverse, looking for an empty spot */
-static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
-{
-       int i;
-
-       for (i = PHY_MAX_ADDR; i > 0; i--) {
-               u32 phy_id;
-
-               if (get_phy_id(new_bus, i, &phy_id))
-                       return -1;
-
-               if (phy_id == 0xffffffff)
-                       break;
-       }
-
-       return i;
-}
-
 
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
 static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
 {
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
        struct gfar __iomem *enet_regs;
 
        /*
@@ -220,15 +202,15 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct devi
        } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
                        of_device_is_compatible(np, "fsl,etsec2-tbi")) {
                return of_iomap(np, 1);
-       } else
-               return NULL;
-}
+       }
 #endif
+       return NULL;
+}
 
 
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
 static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
 {
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
        struct device_node *np = NULL;
        int err = 0;
 
@@ -261,9 +243,10 @@ static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
                return err;
        else
                return -EINVAL;
-}
+#else
+       return -ENODEV;
 #endif
-
+}
 
 static int fsl_pq_mdio_probe(struct platform_device *ofdev)
 {
@@ -339,19 +322,13 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
                        of_device_is_compatible(np, "fsl,etsec2-mdio") ||
                        of_device_is_compatible(np, "fsl,etsec2-tbi") ||
                        of_device_is_compatible(np, "gianfar")) {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
                tbipa = get_gfar_tbipa(regs, np);
                if (!tbipa) {
                        err = -EINVAL;
                        goto err_free_irqs;
                }
-#else
-               err = -ENODEV;
-               goto err_free_irqs;
-#endif
        } else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
                        of_device_is_compatible(np, "ucc_geth_phy")) {
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
                u32 id;
                static u32 mii_mng_master;
 
@@ -364,10 +341,6 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
                        mii_mng_master = id;
                        ucc_set_qe_mux_mii_mng(id - 1);
                }
-#else
-               err = -ENODEV;
-               goto err_free_irqs;
-#endif
        } else {
                err = -ENODEV;
                goto err_free_irqs;
@@ -386,16 +359,6 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
        }
 
        if (tbiaddr == -1) {
-               out_be32(tbipa, 0);
-
-               tbiaddr = fsl_pq_mdio_find_free(new_bus);
-       }
-
-       /*
-        * We define TBIPA at 0 to be illegal, opting to fail for boards that
-        * have PHYs at 1-31, rather than change tbipa and rescan.
-        */
-       if (tbiaddr == 0) {
                err = -EBUSY;
 
                goto err_free_irqs;
index 410d6a1984ed400863c7e5eabd63e7041629d101..6650068c996c48158c3b09c8975b912440e00aff 100644 (file)
@@ -61,9 +61,9 @@
 #ifdef EHEA_SMALL_QUEUES
 #define EHEA_MAX_CQE_COUNT      1023
 #define EHEA_DEF_ENTRIES_SQ     1023
-#define EHEA_DEF_ENTRIES_RQ1    4095
+#define EHEA_DEF_ENTRIES_RQ1    1023
 #define EHEA_DEF_ENTRIES_RQ2    1023
-#define EHEA_DEF_ENTRIES_RQ3    1023
+#define EHEA_DEF_ENTRIES_RQ3    511
 #else
 #define EHEA_MAX_CQE_COUNT      4080
 #define EHEA_DEF_ENTRIES_SQ     4080
index 37b70f7052b68ad9a16593cb299cf6eb380a6491..bfeccbfde236237dc0c8486d0dd1d12d192d3a02 100644 (file)
@@ -371,7 +371,8 @@ static void ehea_update_stats(struct work_struct *work)
 out_herr:
        free_page((unsigned long)cb2);
 resched:
-       schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+       schedule_delayed_work(&port->stats_work,
+                             round_jiffies_relative(msecs_to_jiffies(1000)));
 }
 
 static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
@@ -2434,7 +2435,8 @@ static int ehea_open(struct net_device *dev)
        }
 
        mutex_unlock(&port->port_lock);
-       schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+       schedule_delayed_work(&port->stats_work,
+                             round_jiffies_relative(msecs_to_jiffies(1000)));
 
        return ret;
 }
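/*
 * Illustrative sketch, not part of the ehea patch above: the hunks swap a
 * plain 1 s delay for round_jiffies_relative() so the periodic stats worker
 * wakes on a rounded jiffy boundary and can coalesce with other timers.
 * The stats_work/stats_work_fn/stats_start names below are made up for the
 * example.
 */
#include <linux/workqueue.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct delayed_work stats_work;

static void stats_work_fn(struct work_struct *work)
{
        /* ... gather statistics ... */

        /* Re-arm roughly once a second, aligned to a full jiffy boundary. */
        schedule_delayed_work(&stats_work,
                              round_jiffies_relative(msecs_to_jiffies(1000)));
}

static void stats_start(void)
{
        INIT_DELAYED_WORK(&stats_work, stats_work_fn);
        schedule_delayed_work(&stats_work,
                              round_jiffies_relative(msecs_to_jiffies(1000)));
}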
index 4326681df382ac461c24e21eb6a6ea0ee64d4426..acc31af6594a243b91d7c9f666363581f68ddf05 100644 (file)
@@ -1421,7 +1421,7 @@ static void veth_receive(struct veth_lpar_connection *cnx,
 
                /* FIXME: do we need this? */
                memset(local_list, 0, sizeof(local_list));
-               memset(remote_list, 0, sizeof(VETH_MAX_FRAMES_PER_MSG));
+               memset(remote_list, 0, sizeof(remote_list));
 
                /* a 0 address marks the end of the valid entries */
                if (senddata->addr[startchunk] == 0)
index 7becff1f387d7c9d84fb26ab3db6f7299bb64f15..76b84573566bd850a267c02a9d533fac5b68c7c4 100644 (file)
@@ -1744,6 +1744,112 @@ jme_phy_off(struct jme_adapter *jme)
                jme_new_phy_off(jme);
 }
 
+static int
+jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
+{
+       u32 phy_addr;
+
+       phy_addr = JM_PHY_SPEC_REG_READ | specreg;
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+                       phy_addr);
+       return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
+                       JM_PHY_SPEC_DATA_REG);
+}
+
+static void
+jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
+{
+       u32 phy_addr;
+
+       phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
+                       phy_data);
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+                       phy_addr);
+}
+
+static int
+jme_phy_calibration(struct jme_adapter *jme)
+{
+       u32 ctrl1000, phy_data;
+
+       jme_phy_off(jme);
+       jme_phy_on(jme);
+       /*  Enable PHY test mode 1 */
+       ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+       ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+       ctrl1000 |= PHY_GAD_TEST_MODE_1;
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+
+       phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+       phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
+       phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
+                       JM_PHY_EXT_COMM_2_CALI_ENABLE;
+       jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+       msleep(20);
+       phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+       phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE |
+                       JM_PHY_EXT_COMM_2_CALI_MODE_0 |
+                       JM_PHY_EXT_COMM_2_CALI_LATCH);
+       jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+
+       /*  Disable PHY test mode */
+       ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+       ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+       jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+       return 0;
+}
+
+static int
+jme_phy_setEA(struct jme_adapter *jme)
+{
+       u32 phy_comm0 = 0, phy_comm1 = 0;
+       u8 nic_ctrl;
+
+       pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
+       if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE)
+               return 0;
+
+       switch (jme->pdev->device) {
+       case PCI_DEVICE_ID_JMICRON_JMC250:
+               if (((jme->chip_main_rev == 5) &&
+                       ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+                       (jme->chip_sub_rev == 3))) ||
+                       (jme->chip_main_rev >= 6)) {
+                       phy_comm0 = 0x008A;
+                       phy_comm1 = 0x4109;
+               }
+               if ((jme->chip_main_rev == 3) &&
+                       ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+                       phy_comm0 = 0xE088;
+               break;
+       case PCI_DEVICE_ID_JMICRON_JMC260:
+               if (((jme->chip_main_rev == 5) &&
+                       ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+                       (jme->chip_sub_rev == 3))) ||
+                       (jme->chip_main_rev >= 6)) {
+                       phy_comm0 = 0x008A;
+                       phy_comm1 = 0x4109;
+               }
+               if ((jme->chip_main_rev == 3) &&
+                       ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+                       phy_comm0 = 0xE088;
+               if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0))
+                       phy_comm0 = 0x608A;
+               if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2))
+                       phy_comm0 = 0x408A;
+               break;
+       default:
+               return -ENODEV;
+       }
+       if (phy_comm0)
+               jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
+       if (phy_comm1)
+               jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);
+
+       return 0;
+}
+
 static int
 jme_open(struct net_device *netdev)
 {
@@ -1769,7 +1875,8 @@ jme_open(struct net_device *netdev)
                jme_set_settings(netdev, &jme->old_ecmd);
        else
                jme_reset_phy_processor(jme);
-
+       jme_phy_calibration(jme);
+       jme_phy_setEA(jme);
        jme_reset_link(jme);
 
        return 0;
@@ -3184,7 +3291,8 @@ jme_resume(struct device *dev)
                jme_set_settings(netdev, &jme->old_ecmd);
        else
                jme_reset_phy_processor(jme);
-
+       jme_phy_calibration(jme);
+       jme_phy_setEA(jme);
        jme_start_irq(jme);
        netif_device_attach(netdev);
 
@@ -3239,4 +3347,3 @@ MODULE_DESCRIPTION("JMicron JMC2x0 PCI Express Ethernet driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
-
index 02ea27c1dcb5a464f06d9b38d98259f6f080fd4c..4304072bd3c536e852a38cf6c37716df8500ed66 100644 (file)
@@ -760,6 +760,25 @@ enum jme_rxmcs_bits {
                                  RXMCS_CHECKSUM,
 };
 
+/*     Extern PHY common register 2    */
+
+#define PHY_GAD_TEST_MODE_1                    0x00002000
+#define PHY_GAD_TEST_MODE_MSK                  0x0000E000
+#define JM_PHY_SPEC_REG_READ                   0x00004000
+#define JM_PHY_SPEC_REG_WRITE                  0x00008000
+#define PHY_CALIBRATION_DELAY                  20
+#define JM_PHY_SPEC_ADDR_REG                   0x1E
+#define JM_PHY_SPEC_DATA_REG                   0x1F
+
+#define JM_PHY_EXT_COMM_0_REG                  0x30
+#define JM_PHY_EXT_COMM_1_REG                  0x31
+#define JM_PHY_EXT_COMM_2_REG                  0x32
+#define JM_PHY_EXT_COMM_2_CALI_ENABLE          0x01
+#define JM_PHY_EXT_COMM_2_CALI_MODE_0          0x02
+#define JM_PHY_EXT_COMM_2_CALI_LATCH           0x10
+#define PCI_PRIV_SHARE_NICCTRL                 0xF5
+#define JME_FLAG_PHYEA_ENABLE                  0x2
+
 /*
  * Wakeup Frame setup interface registers
  */
index c7b60839ac9951caa4b3de0f0577bfd7d39bb1ca..dea0cb4400e25e20edbdd3fcf730d9280e23bb1a 100644 (file)
@@ -2606,6 +2606,9 @@ static int skge_up(struct net_device *dev)
        spin_unlock_irq(&hw->hw_lock);
 
        napi_enable(&skge->napi);
+
+       skge_set_multicast(dev);
+
        return 0;
 
  free_tx_ring:
index 227997d775e858b08a7cde4e92036738c803acc9..5829e0b47e7e5963a8d5913505f2c3f5392dfbab 100644 (file)
@@ -147,6 +147,7 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
        mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
        if (priv->mdev->dev->caps.comp_pool && cq->vector)
                mlx4_release_eq(priv->mdev->dev, cq->vector);
+       cq->vector = 0;
        cq->buf_size = 0;
        cq->buf = NULL;
 }
index 05db5434bafc3284eb0a3b6162a64c8ed2c436d6..90497ffb1ac39d5635d258730f8cc229275aa240 100644 (file)
@@ -2,4 +2,5 @@
 # Makefile for the A Semi network device drivers.
 #
 
-obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o pasemi_mac_ethtool.o
+obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
+pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
index 8731f79c9efc40439bac7af0363225d885f42036..b8478aab050e76efa2b734572b3f866f98da321f 100644 (file)
 
 
 #define TX_DESC_PER_IOCB 8
-/* The maximum number of frags we handle is based
- * on PAGE_SIZE...
- */
-#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13)   /* 4k & 8k pages */
+
+#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
 #define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
 #else /* all other page sizes */
 #define TX_DESC_PER_OAL 0
@@ -1353,7 +1351,7 @@ struct tx_ring_desc {
        struct ob_mac_iocb_req *queue_entry;
        u32 index;
        struct oal oal;
-       struct map_list map[MAX_SKB_FRAGS + 1];
+       struct map_list map[MAX_SKB_FRAGS + 2];
        int map_cnt;
        struct tx_ring_desc *next;
 };
index 6f06aa10f0d729a040a6e34a244d57338ba3025f..c8f47f17186f1449a10dd8eb14fb2e1f81a3c978 100644 (file)
@@ -477,7 +477,6 @@ enum rtl_register_content {
        /* Config1 register p.24 */
        LEDS1           = (1 << 7),
        LEDS0           = (1 << 6),
-       MSIEnable       = (1 << 5),     /* Enable Message Signaled Interrupt */
        Speed_down      = (1 << 4),
        MEMMAP          = (1 << 3),
        IOMAP           = (1 << 2),
@@ -485,6 +484,7 @@ enum rtl_register_content {
        PMEnable        = (1 << 0),     /* Power Management Enable */
 
        /* Config2 register p. 25 */
+       MSIEnable       = (1 << 5),     /* 8169 only. Reserved in the 8168. */
        PCI_Clock_66MHz = 0x01,
        PCI_Clock_33MHz = 0x00,
 
@@ -1183,11 +1183,13 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
        return value;
 }
 
-static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
+static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
 {
-       RTL_W16(IntrMask, 0x0000);
+       void __iomem *ioaddr = tp->mmio_addr;
 
-       RTL_W16(IntrStatus, 0xffff);
+       RTL_W16(IntrMask, 0x0000);
+       RTL_W16(IntrStatus, tp->intr_event);
+       RTL_R8(ChipCmd);
 }
 
 static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
@@ -3424,22 +3426,24 @@ static const struct rtl_cfg_info {
 };
 
 /* Cfg9346_Unlock assumed. */
-static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
+static unsigned rtl_try_msi(struct rtl8169_private *tp,
                            const struct rtl_cfg_info *cfg)
 {
+       void __iomem *ioaddr = tp->mmio_addr;
        unsigned msi = 0;
        u8 cfg2;
 
        cfg2 = RTL_R8(Config2) & ~MSIEnable;
        if (cfg->features & RTL_FEATURE_MSI) {
-               if (pci_enable_msi(pdev)) {
-                       dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
+               if (pci_enable_msi(tp->pci_dev)) {
+                       netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
                } else {
                        cfg2 |= MSIEnable;
                        msi = RTL_FEATURE_MSI;
                }
        }
-       RTL_W8(Config2, cfg2);
+       if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+               RTL_W8(Config2, cfg2);
        return msi;
 }
 
@@ -3933,8 +3937,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
                        break;
                udelay(100);
        }
-
-       rtl8169_init_ring_indexes(tp);
 }
 
 static int __devinit
@@ -4077,7 +4079,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                tp->features |= RTL_FEATURE_WOL;
        if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
                tp->features |= RTL_FEATURE_WOL;
-       tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
+       tp->features |= rtl_try_msi(tp, cfg);
        RTL_W8(Cfg9346, Cfg9346_Lock);
 
        if (rtl_tbi_enabled(tp)) {
@@ -4339,7 +4341,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
        void __iomem *ioaddr = tp->mmio_addr;
 
        /* Disable interrupts */
-       rtl8169_irq_mask_and_ack(ioaddr);
+       rtl8169_irq_mask_and_ack(tp);
 
        rtl_rx_close(tp);
 
@@ -4885,8 +4887,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
        RTL_W16(IntrMitigate, 0x5151);
 
        /* Work around for RxFIFO overflow. */
-       if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
-           tp->mac_version == RTL_GIGA_MAC_VER_22) {
+       if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
                tp->intr_event |= RxFIFOOver | PCSTimeout;
                tp->intr_event &= ~RxOverflow;
        }
@@ -5076,6 +5077,11 @@ static void rtl_hw_start_8101(struct net_device *dev)
        void __iomem *ioaddr = tp->mmio_addr;
        struct pci_dev *pdev = tp->pci_dev;
 
+       if (tp->mac_version >= RTL_GIGA_MAC_VER_30) {
+               tp->intr_event &= ~RxFIFOOver;
+               tp->napi_event &= ~RxFIFOOver;
+       }
+
        if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
            tp->mac_version == RTL_GIGA_MAC_VER_16) {
                int cap = pci_pcie_cap(pdev);
@@ -5342,7 +5348,7 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev)
        /* Wait for any pending NAPI task to complete */
        napi_disable(&tp->napi);
 
-       rtl8169_irq_mask_and_ack(ioaddr);
+       rtl8169_irq_mask_and_ack(tp);
 
        tp->intr_mask = 0xffff;
        RTL_W16(IntrMask, tp->intr_event);
@@ -5389,14 +5395,16 @@ static void rtl8169_reset_task(struct work_struct *work)
        if (!netif_running(dev))
                goto out_unlock;
 
+       rtl8169_hw_reset(tp);
+
        rtl8169_wait_for_quiescence(dev);
 
        for (i = 0; i < NUM_RX_DESC; i++)
                rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
 
        rtl8169_tx_clear(tp);
+       rtl8169_init_ring_indexes(tp);
 
-       rtl8169_hw_reset(tp);
        rtl_hw_start(dev);
        netif_wake_queue(dev);
        rtl8169_check_link_status(dev, tp, tp->mmio_addr);
@@ -5407,11 +5415,6 @@ out_unlock:
 
 static void rtl8169_tx_timeout(struct net_device *dev)
 {
-       struct rtl8169_private *tp = netdev_priv(dev);
-
-       rtl8169_hw_reset(tp);
-
-       /* Let's wait a bit while any (async) irq lands on */
        rtl8169_schedule_work(dev, rtl8169_reset_task);
 }
 
@@ -5804,6 +5807,10 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
         */
        status = RTL_R16(IntrStatus);
        while (status && status != 0xffff) {
+               status &= tp->intr_event;
+               if (!status)
+                       break;
+
                handled = 1;
 
                /* Handle all of the error cases first. These will reset
@@ -5818,27 +5825,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
                        switch (tp->mac_version) {
                        /* Work around for rx fifo overflow */
                        case RTL_GIGA_MAC_VER_11:
-                       case RTL_GIGA_MAC_VER_22:
-                       case RTL_GIGA_MAC_VER_26:
                                netif_stop_queue(dev);
                                rtl8169_tx_timeout(dev);
                                goto done;
-                       /* Testers needed. */
-                       case RTL_GIGA_MAC_VER_17:
-                       case RTL_GIGA_MAC_VER_19:
-                       case RTL_GIGA_MAC_VER_20:
-                       case RTL_GIGA_MAC_VER_21:
-                       case RTL_GIGA_MAC_VER_23:
-                       case RTL_GIGA_MAC_VER_24:
-                       case RTL_GIGA_MAC_VER_27:
-                       case RTL_GIGA_MAC_VER_28:
-                       case RTL_GIGA_MAC_VER_31:
-                       /* Experimental science. Pktgen proof. */
-                       case RTL_GIGA_MAC_VER_12:
-                       case RTL_GIGA_MAC_VER_25:
-                               if (status == RxFIFOOver)
-                                       goto done;
-                               break;
                        default:
                                break;
                        }
index 8ea770a89f2556b8c762f2af345e5044d45ff9ba..72cd190b9c1a0734d401b193e1bd7b12cb49b3af 100644 (file)
@@ -781,10 +781,15 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
        unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
                            MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
 
-       /* Do not manage MMC IRQ (FIXME) */
+       /* Mask the MMC irq; counters are managed in SW and the registers
+        * are cleared on each read (reset-on-read mode). */
        dwmac_mmc_intr_all_mask(priv->ioaddr);
-       dwmac_mmc_ctrl(priv->ioaddr, mode);
-       memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+
+       if (priv->dma_cap.rmon) {
+               dwmac_mmc_ctrl(priv->ioaddr, mode);
+               memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+       } else
+               pr_info(" No MAC Management Counters available");
 }
 
 static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
@@ -1012,8 +1017,7 @@ static int stmmac_open(struct net_device *dev)
        memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
        priv->xstats.threshold = tc;
 
-       if (priv->dma_cap.rmon)
-               stmmac_mmc_setup(priv);
+       stmmac_mmc_setup(priv);
 
        /* Start the ball rolling... */
        DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
index dca9d3369cdd9de8eb5f1156778369d76006c246..c97d2f59085504274eddcc2d5a25ecf352c824f0 100644 (file)
@@ -836,11 +836,13 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
        chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
 
        /* handle completed packets */
+       spin_unlock_irqrestore(&chan->lock, flags);
        do {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
        } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+       spin_lock_irqsave(&chan->lock, flags);
 
        /* remaining packets haven't been tx/rx'ed, clean them up */
        while (chan->head) {
index 10826d8a2a2df36b3731051b2d4abb504c15ce39..1187a1169eb21b50351599939cc4cb9728ad3b57 100644 (file)
@@ -926,7 +926,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
                goto done;
 
        /* Re-enable the ingress interrupt. */
-       enable_percpu_irq(priv->intr_id);
+       enable_percpu_irq(priv->intr_id, 0);
 
        /* HACK: Avoid the "rotting packet" problem (see above). */
        if (qup->__packet_receive_read !=
@@ -1296,7 +1296,7 @@ static void tile_net_open_enable(void *dev_ptr)
        info->napi_enabled = true;
 
        /* Enable the ingress interrupt. */
-       enable_percpu_irq(priv->intr_id);
+       enable_percpu_irq(priv->intr_id, 0);
 }
 
 
@@ -1697,7 +1697,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
        for (i = 0; i < sh->nr_frags; i++) {
 
                skb_frag_t *f = &sh->frags[i];
-               unsigned long pfn = page_to_pfn(f->page);
+               unsigned long pfn = page_to_pfn(skb_frag_page(f));
 
                /* FIXME: Compute "hash_for_home" properly. */
                /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
@@ -1706,7 +1706,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
                /* FIXME: Hmmm. */
                if (!hash_default) {
                        void *va = pfn_to_kaddr(pfn) + f->page_offset;
-                       BUG_ON(PageHighMem(f->page));
+                       BUG_ON(PageHighMem(skb_frag_page(f)));
                        finv_buffer_remote(va, f->size, 0);
                }
 
index bb88e12101c78b86b91def144677e4ecb10d8091..a70244306c9462830c4ebde0b87334ef7667fa0e 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 menuconfig PHYLIB
-       bool "PHY Device support and infrastructure"
+       tristate "PHY Device support and infrastructure"
        depends on !S390
        depends on NETDEVICES
        help
index 89f829f5f7257fcacd54902d3963520ab5b47525..f8a6853b692ed3edf29e996739d70261988bc80b 100644 (file)
@@ -423,10 +423,8 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
        lock_sock(sk);
 
        opt->src_addr = sp->sa_addr.pptp;
-       if (add_chan(po)) {
-               release_sock(sk);
+       if (add_chan(po))
                error = -EBUSY;
-       }
 
        release_sock(sk);
        return error;
index e6fed4d4cb77f70c8c6d62af16d53a57ed31c99c..e95f0e60a9bc7bc9dd18587986c53e5af8f79d42 100644 (file)
@@ -1655,6 +1655,10 @@ static const struct usb_device_id        products [] = {
        // ASIX 88772a
        USB_DEVICE(0x0db0, 0xa877),
        .driver_info = (unsigned long) &ax88772_info,
+}, {
+       // Asus USB Ethernet Adapter
+       USB_DEVICE (0x0b95, 0x7e2b),
+       .driver_info = (unsigned long) &ax88772_info,
 },
        { },            // END
 };
index 93fbe6f4089890df98b98904921879ab97835b98..d2348a5a7809bd1e2233f39a2b6f192443234f2f 100644 (file)
@@ -286,7 +286,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
                        ath_start_ani(common);
        }
 
-       if (ath9k_hw_ops(ah)->antdiv_comb_conf_get && sc->ant_rx != 3) {
+       if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3) {
                struct ath_hw_antcomb_conf div_ant_conf;
                u8 lna_conf;
 
index 888abc2be3a547d6f85ad32723fd2d2910c5582e..528d5f3e868c712a7b2372dc8616e1581205bddc 100644 (file)
@@ -1271,7 +1271,9 @@ static void ath_rc_init(struct ath_softc *sc,
 
        ath_rc_priv->max_valid_rate = k;
        ath_rc_sort_validrates(rate_table, ath_rc_priv);
-       ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
+       ath_rc_priv->rate_max_phy = (k > 4) ?
+                                       ath_rc_priv->valid_rate_index[k-4] :
+                                       ath_rc_priv->valid_rate_index[k-1];
        ath_rc_priv->rate_table = rate_table;
 
        ath_dbg(common, ATH_DBG_CONFIG,
index e12b48c2cff6ab71e097d38c9632eeb11044bed0..dd008b0e6417a3b3e15679399252c53af9af990d 100644 (file)
@@ -191,6 +191,7 @@ static struct iwl_base_params iwl1000_base_params = {
        .chain_noise_scale = 1000,
        .wd_timeout = IWL_DEF_WD_TIMEOUT,
        .max_event_log_size = 128,
+       .wd_disable = true,
 };
 static struct iwl_ht_params iwl1000_ht_params = {
        .ht_greenfield_support = true,
index c511c98a89a81284e03f621705cbd09736616297..f55fb2d1af52f3891e8d29bc7a21ab193b7f243f 100644 (file)
@@ -364,6 +364,7 @@ static struct iwl_base_params iwl5000_base_params = {
        .wd_timeout = IWL_LONG_WD_TIMEOUT,
        .max_event_log_size = 512,
        .no_idle_support = true,
+       .wd_disable = true,
 };
 static struct iwl_ht_params iwl5000_ht_params = {
        .ht_greenfield_support = true,
index 58a381c01c89c9a367c7b592d65a49eb02457be0..5c7c17c7166ac7a307407ddff67eee59ca148771 100644 (file)
@@ -528,6 +528,24 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        return 0;
 }
 
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+       struct iwl_rxon_context *ctx)
+{
+       if (conf_is_ht40_minus(conf)) {
+               ctx->ht.extension_chan_offset =
+                       IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+               ctx->ht.is_40mhz = true;
+       } else if (conf_is_ht40_plus(conf)) {
+               ctx->ht.extension_chan_offset =
+                       IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+               ctx->ht.is_40mhz = true;
+       } else {
+               ctx->ht.extension_chan_offset =
+                       IEEE80211_HT_PARAM_CHA_SEC_NONE;
+               ctx->ht.is_40mhz = false;
+       }
+}
+
 int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
 {
        struct iwl_priv *priv = hw->priv;
@@ -586,19 +604,11 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
                                ctx->ht.enabled = conf_is_ht(conf);
 
                        if (ctx->ht.enabled) {
-                               if (conf_is_ht40_minus(conf)) {
-                                       ctx->ht.extension_chan_offset =
-                                               IEEE80211_HT_PARAM_CHA_SEC_BELOW;
-                                       ctx->ht.is_40mhz = true;
-                               } else if (conf_is_ht40_plus(conf)) {
-                                       ctx->ht.extension_chan_offset =
-                                               IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-                                       ctx->ht.is_40mhz = true;
-                               } else {
-                                       ctx->ht.extension_chan_offset =
-                                               IEEE80211_HT_PARAM_CHA_SEC_NONE;
-                                       ctx->ht.is_40mhz = false;
-                               }
+                               /* If HT40 is used, it must not change after
+                                * association except on a channel switch */
+                               if (!ctx->ht.is_40mhz ||
+                                               !iwl_is_associated_ctx(ctx))
+                                       iwlagn_config_ht40(conf, ctx);
                        } else
                                ctx->ht.is_40mhz = false;
 
index ed6283623932e2436ddc364d8429ee085df28249..4b2aa1da09532a98e4505afe70f2ec3114fa6510 100644 (file)
@@ -1268,9 +1268,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
 
        switch (keyconf->cipher) {
        case WLAN_CIPHER_SUITE_TKIP:
-               keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-               keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-
                if (sta)
                        addr = sta->addr;
                else /* station mode case only */
@@ -1283,8 +1280,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
                                          seq.tkip.iv32, p1k, CMD_SYNC);
                break;
        case WLAN_CIPHER_SUITE_CCMP:
-               keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-               /* fall through */
        case WLAN_CIPHER_SUITE_WEP40:
        case WLAN_CIPHER_SUITE_WEP104:
                ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
index 35a6b71f358ce7a563f6507b58f0743eb6ded16a..df1540ca6102f641ed491cd50899e5eec17b8963 100644 (file)
@@ -91,7 +91,10 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
                tx_cmd->tid_tspec = qc[0] & 0xf;
                tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
        } else {
-               tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+               if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+                       tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+               else
+                       tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
        }
 
        iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
index ccba69b7f8a78f9b54635328bee5c3e3d4d4553a..e0e9a3dfbc00a2febd8ce146bc8c3d02bc8e312a 100644 (file)
@@ -2316,6 +2316,17 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                return -EOPNOTSUPP;
        }
 
+       switch (key->cipher) {
+       case WLAN_CIPHER_SUITE_TKIP:
+               key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+               /* fall through */
+       case WLAN_CIPHER_SUITE_CCMP:
+               key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+               break;
+       default:
+               break;
+       }
+
        /*
         * We could program these keys into the hardware as well, but we
         * don't expect much multicast traffic in IBSS and having keys
@@ -2599,21 +2610,9 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
 
        /* Configure HT40 channels */
        ctx->ht.enabled = conf_is_ht(conf);
-       if (ctx->ht.enabled) {
-               if (conf_is_ht40_minus(conf)) {
-                       ctx->ht.extension_chan_offset =
-                               IEEE80211_HT_PARAM_CHA_SEC_BELOW;
-                       ctx->ht.is_40mhz = true;
-               } else if (conf_is_ht40_plus(conf)) {
-                       ctx->ht.extension_chan_offset =
-                               IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
-                       ctx->ht.is_40mhz = true;
-               } else {
-                       ctx->ht.extension_chan_offset =
-                               IEEE80211_HT_PARAM_CHA_SEC_NONE;
-                       ctx->ht.is_40mhz = false;
-               }
-       } else
+       if (ctx->ht.enabled)
+               iwlagn_config_ht40(conf, ctx);
+       else
                ctx->ht.is_40mhz = false;
 
        if ((le16_to_cpu(ctx->staging.channel) != ch))
@@ -2851,6 +2850,9 @@ static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
        int ret;
        u8 sta_id;
 
+       if (ctx->ctxid != IWL_RXON_CTX_PAN)
+               return 0;
+
        IWL_DEBUG_MAC80211(priv, "enter\n");
        mutex_lock(&priv->shrd->mutex);
 
@@ -2899,6 +2901,9 @@ static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
        struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
        struct iwl_rxon_context *ctx = vif_priv->ctx;
 
+       if (ctx->ctxid != IWL_RXON_CTX_PAN)
+               return;
+
        IWL_DEBUG_MAC80211(priv, "enter\n");
        mutex_lock(&priv->shrd->mutex);
 
@@ -3499,9 +3504,10 @@ MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
 module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
 MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
 
-module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO);
+module_param_named(wd_disable, iwlagn_mod_params.wd_disable, int, S_IRUGO);
 MODULE_PARM_DESC(wd_disable,
-               "Disable stuck queue watchdog timer (default: 0 [enabled])");
+               "Disable stuck queue watchdog timer 0=system default, "
+               "1=disable, 2=enable (default: 0)");
 
 /*
  * set bt_coex_active to true, uCode will do kill/defer
index 5b936ec1a541ecc1ecd213771f7d4e75f7960136..3856abaea50792885442b78b084e4c328ace3fc6 100644 (file)
@@ -86,6 +86,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
                             struct ieee80211_vif *vif,
                             struct ieee80211_bss_conf *bss_conf,
                             u32 changes);
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+                       struct iwl_rxon_context *ctx);
 
 /* uCode */
 int iwlagn_rx_calib_result(struct iwl_priv *priv,
index 001fdf140abbb72de6d12fe95dac3d8211158ff0..fcf54160e4ed51b3f89a84447898c7948acaaf59 100644 (file)
@@ -1810,11 +1810,23 @@ void iwl_setup_watchdog(struct iwl_priv *priv)
 {
        unsigned int timeout = priv->cfg->base_params->wd_timeout;
 
-       if (timeout && !iwlagn_mod_params.wd_disable)
-               mod_timer(&priv->watchdog,
-                         jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
-       else
-               del_timer(&priv->watchdog);
+       if (!iwlagn_mod_params.wd_disable) {
+               /* use system default */
+               if (timeout && !priv->cfg->base_params->wd_disable)
+                       mod_timer(&priv->watchdog,
+                               jiffies +
+                               msecs_to_jiffies(IWL_WD_TICK(timeout)));
+               else
+                       del_timer(&priv->watchdog);
+       } else {
+               /* module parameter overwrite default configuration */
+               if (timeout && iwlagn_mod_params.wd_disable == 2)
+                       mod_timer(&priv->watchdog,
+                               jiffies +
+                               msecs_to_jiffies(IWL_WD_TICK(timeout)));
+               else
+                       del_timer(&priv->watchdog);
+       }
 }
 
 /**
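/*
 * Condensed sketch of the policy implemented in iwl_setup_watchdog() above.
 * The helper below is hypothetical and only restates the decision:
 * wd_disable == 0 follows the per-device default (base_params->wd_disable),
 * wd_disable == 1 forces the watchdog off, wd_disable == 2 forces it on,
 * and a zero timeout always disables it.
 */
#include <linux/types.h>

static bool iwl_watchdog_enabled(unsigned int timeout, int wd_disable,
                                 bool dev_default_disabled)
{
        if (!timeout)
                return false;
        if (wd_disable == 0)
                return !dev_default_disabled;   /* system default */
        return wd_disable == 2;                 /* explicit override */
}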
index 137da33807044bedd162a501a65454037abce49f..f2fc288f3dd31f5068e1bb29fbe3307fa1d2045d 100644 (file)
@@ -113,6 +113,7 @@ struct iwl_lib_ops {
  * @shadow_reg_enable: HW shadhow register bit
  * @no_idle_support: do not support idle mode
  * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
+ * @wd_disable: disable watchdog timer
  */
 struct iwl_base_params {
        int eeprom_size;
@@ -134,6 +135,7 @@ struct iwl_base_params {
        const bool shadow_reg_enable;
        const bool no_idle_support;
        const bool hd_v2;
+       const bool wd_disable;
 };
 /*
  * @advanced_bt_coexist: support advanced bt coexist
index 1f7a93c67c45ed67998389ac406917875e826125..14eaf37ce3b1c6a06fdad12d9cc74371274ba894 100644 (file)
@@ -120,7 +120,7 @@ extern struct iwl_mod_params iwlagn_mod_params;
  * @restart_fw: restart firmware, default = 1
  * @plcp_check: enable plcp health check, default = true
  * @ack_check: disable ack health check, default = false
- * @wd_disable: enable stuck queue check, default = false
+ * @wd_disable: disable stuck queue watchdog timer: 0 = system default, 1 = disable, 2 = enable (default: 0)
  * @bt_coex_active: enable bt coex, default = true
  * @led_mode: system default, default = 0
  * @no_sleep_autoadjust: disable autoadjust, default = true
@@ -141,7 +141,7 @@ struct iwl_mod_params {
        int restart_fw;
        bool plcp_check;
        bool ack_check;
-       bool wd_disable;
+       int  wd_disable;
        bool bt_coex_active;
        int led_mode;
        bool no_sleep_autoadjust;
index ce918980e97799a51fc3b0b9a732d28fe706c84a..5f17ab8e76bacbc7e4521951c15a716a532afc45 100644 (file)
@@ -1197,9 +1197,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
        iwl_print_hex_dump(trans, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
 
        /* Set up entry for this TFD in Tx byte-count array */
-       if (is_agg)
-               iwl_trans_txq_update_byte_cnt_tbl(trans, txq,
-                                              le16_to_cpu(tx_cmd->len));
+       iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
 
        dma_sync_single_for_device(bus(trans)->dev, txcmd_phys, firstlen,
                        DMA_BIDIRECTIONAL);
index ac278156d390ea901d37eda9819abdf2de26edcf..6e0a3eaecf7070bcdcce90eb91e63d7fe6cc6aab 100644 (file)
@@ -939,7 +939,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
 {
        struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
        unsigned long cmd_flags;
-       unsigned long cmd_pending_q_flags;
        unsigned long scan_pending_q_flags;
        uint16_t cancel_scan_cmd = false;
 
@@ -949,12 +948,9 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
                cmd_node = adapter->curr_cmd;
                cmd_node->wait_q_enabled = false;
                cmd_node->cmd_flag |= CMD_F_CANCELED;
-               spin_lock_irqsave(&adapter->cmd_pending_q_lock,
-                                 cmd_pending_q_flags);
-               list_del(&cmd_node->list);
-               spin_unlock_irqrestore(&adapter->cmd_pending_q_lock,
-                                      cmd_pending_q_flags);
                mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+               mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+               adapter->curr_cmd = NULL;
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
        }
 
@@ -981,7 +977,6 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
                spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
        }
        adapter->cmd_wait_q.status = -1;
-       mwifiex_complete_cmd(adapter, adapter->curr_cmd);
 }
 
 /*
index f18df82eeb92caccedb8381bb95e4f55cd4d1901..78d0d6988553d40354b9fbf2396e1657eaefd9c6 100644 (file)
@@ -588,8 +588,6 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
 
        WARN_ON(priv->fw_state != FW_STATE_READY);
 
-       cancel_work_sync(&priv->work);
-
        p54spi_power_off(priv);
        spin_lock_irqsave(&priv->tx_lock, flags);
        INIT_LIST_HEAD(&priv->tx_pending);
@@ -597,6 +595,8 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
 
        priv->fw_state = FW_STATE_OFF;
        mutex_unlock(&priv->mutex);
+
+       cancel_work_sync(&priv->work);
 }
 
 static int __devinit p54spi_probe(struct spi_device *spi)
@@ -656,6 +656,7 @@ static int __devinit p54spi_probe(struct spi_device *spi)
        init_completion(&priv->fw_comp);
        INIT_LIST_HEAD(&priv->tx_pending);
        mutex_init(&priv->mutex);
+       spin_lock_init(&priv->tx_lock);
        SET_IEEE80211_DEV(hw, &spi->dev);
        priv->common.open = p54spi_op_start;
        priv->common.stop = p54spi_op_stop;
index d97a2caf582b3997f2378434117758a928cefbd3..bc2ba80c47bb9ff0105d8ed2c119ab3b3589364e 100644 (file)
@@ -778,7 +778,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
                dwrq->flags = 0;
                dwrq->length = 0;
        }
-       essid->octets[essid->length] = '\0';
+       essid->octets[dwrq->length] = '\0';
        memcpy(extra, essid->octets, dwrq->length);
        kfree(essid);
 
index 3f183a15186e09b5e05a2e296c5c66bd94426b23..1ba079dffb11573e86f1b547ac8868e313840f26 100644 (file)
@@ -3771,7 +3771,7 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
        /* Apparently the data is read from end to start */
        rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
        /* The returned value is in CPU order, but eeprom is le */
-       rt2x00dev->eeprom[i] = cpu_to_le32(reg);
+       *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
        rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
        *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
        rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
index db526284454336e47bf2c530e1b0630123809524..55c8e50f45fd143b7e606894c71e072c97480f2a 100644 (file)
@@ -395,7 +395,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
        if (mac->link_state != MAC80211_LINKED)
                return;
 
-       spin_lock(&rtlpriv->locks.lps_lock);
+       spin_lock_irq(&rtlpriv->locks.lps_lock);
 
        /* Idle for a while if we connect to AP a while ago. */
        if (mac->cnt_after_linked >= 2) {
@@ -407,7 +407,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
                }
        }
 
-       spin_unlock(&rtlpriv->locks.lps_lock);
+       spin_unlock_irq(&rtlpriv->locks.lps_lock);
 }
 
 /*Leave the leisure power save mode.*/
@@ -416,8 +416,9 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+       unsigned long flags;
 
-       spin_lock(&rtlpriv->locks.lps_lock);
+       spin_lock_irqsave(&rtlpriv->locks.lps_lock, flags);
 
        if (ppsc->fwctrl_lps) {
                if (ppsc->dot11_psmode != EACTIVE) {
@@ -438,7 +439,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
                        rtl_lps_set_psmode(hw, EACTIVE);
                }
        }
-       spin_unlock(&rtlpriv->locks.lps_lock);
+       spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flags);
 }
 
 /* For sw LPS*/
@@ -539,9 +540,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
                RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
        }
 
-       spin_lock(&rtlpriv->locks.lps_lock);
+       spin_lock_irq(&rtlpriv->locks.lps_lock);
        rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
-       spin_unlock(&rtlpriv->locks.lps_lock);
+       spin_unlock_irq(&rtlpriv->locks.lps_lock);
 }
 
 void rtl_swlps_rfon_wq_callback(void *data)
@@ -574,9 +575,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
        if (rtlpriv->link_info.busytraffic)
                return;
 
-       spin_lock(&rtlpriv->locks.lps_lock);
+       spin_lock_irq(&rtlpriv->locks.lps_lock);
        rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
-       spin_unlock(&rtlpriv->locks.lps_lock);
+       spin_unlock_irq(&rtlpriv->locks.lps_lock);
 
        if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
                !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
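/*
 * Illustrative sketch, not taken from the patch: the rtlwifi hunks switch
 * the lps_lock users to irq-safe locking. The irqsave form shown below is
 * the variant that is safe whether or not the caller already has interrupts
 * disabled; example_lock/example_enter are hypothetical names.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_enter(void)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        /* ... touch state shared with irq/softirq context ... */
        spin_unlock_irqrestore(&example_lock, flags);
}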
index 592a10ac59299ba0b243bdbf6ddfd676b4dfcf79..3b585aadabfcdae61bc7aed6b8b2694266ed1f14 100644 (file)
@@ -569,7 +569,7 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
                }
        case ERFSLEEP:{
                        if (ppsc->rfpwr_state == ERFOFF)
-                               break;
+                               return false;
                        for (queue_id = 0, i = 0;
                             queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
                                ring = &pcipriv->dev.tx_ring[queue_id];
index 72852900df84f4dbcd40d40887ae67a645731816..e49cf2244c7568af1958a57608804a61763cd23d 100644 (file)
@@ -548,7 +548,7 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
                break;
        case ERFSLEEP:
                if (ppsc->rfpwr_state == ERFOFF)
-                       break;
+                       return false;
                for (queue_id = 0, i = 0;
                     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
                        ring = &pcipriv->dev.tx_ring[queue_id];
index 3ac7af1c5509c3eec85943a5f570c24e542e473d..0883349e1c8371f9828deee3bb78ac04ccf0c4d9 100644 (file)
@@ -3374,7 +3374,7 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
                break;
        case ERFSLEEP:
                if (ppsc->rfpwr_state == ERFOFF)
-                       break;
+                       return false;
 
                for (queue_id = 0, i = 0;
                     queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
index f27171af979cd7716d23d48614116a2ab74677cd..f10ac1ad9087e594747d9e4c71b29863fe623da8 100644 (file)
@@ -602,7 +602,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
                }
        case ERFSLEEP:
                        if (ppsc->rfpwr_state == ERFOFF)
-                               break;
+                               return false;
 
                        for (queue_id = 0, i = 0;
                             queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
index 0cb594c86090fa7c25f3578132528f5f616aef26..15e332d08c8d7825382e21bfba8c868636c2c1f6 100644 (file)
@@ -1021,7 +1021,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
                pending_idx = *((u16 *)skb->data);
                xen_netbk_idx_release(netbk, pending_idx);
                for (j = start; j < i; j++) {
-                       pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
+                       pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
                        xen_netbk_idx_release(netbk, pending_idx);
                }
 
@@ -1668,7 +1668,7 @@ static int __init netback_init(void)
                                             "netback/%u", group);
 
                if (IS_ERR(netbk->task)) {
-                       printk(KERN_ALERT "kthread_run() fails at netback\n");
+                       printk(KERN_ALERT "kthread_create() fails at netback\n");
                        del_timer(&netbk->net_timer);
                        rc = PTR_ERR(netbk->task);
                        goto failed_init;
index 19c0115092dd7a692c0d035063a3e83c111618a2..0f0cfa3bca301e2d824ad64ebdaba23d6c617825 100644 (file)
 #include <linux/string.h>
 #include <linux/slab.h>
 
-/* For archs that don't support NO_IRQ (such as x86), provide a dummy value */
-#ifndef NO_IRQ
-#define NO_IRQ 0
-#endif
-
 /**
  * irq_of_parse_and_map - Parse and map an interrupt into linux virq space
  * @device: Device node of the device whose interrupt is to be mapped
@@ -44,7 +39,7 @@ unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
        struct of_irq oirq;
 
        if (of_irq_map_one(dev, index, &oirq))
-               return NO_IRQ;
+               return 0;
 
        return irq_create_of_mapping(oirq.controller, oirq.specifier,
                                     oirq.size);
@@ -345,7 +340,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
 
        /* Only dereference the resource if both the
         * resource and the irq are valid. */
-       if (r && irq != NO_IRQ) {
+       if (r && irq) {
                r->start = r->end = irq;
                r->flags = IORESOURCE_IRQ;
                r->name = dev->full_name;
@@ -363,7 +358,7 @@ int of_irq_count(struct device_node *dev)
 {
        int nr = 0;
 
-       while (of_irq_to_resource(dev, nr, NULL) != NO_IRQ)
+       while (of_irq_to_resource(dev, nr, NULL))
                nr++;
 
        return nr;
@@ -383,7 +378,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res,
        int i;
 
        for (i = 0; i < nr_irqs; i++, res++)
-               if (of_irq_to_resource(dev, i, res) == NO_IRQ)
+               if (!of_irq_to_resource(dev, i, res))
                        break;
 
        return i;
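/*
 * Illustrative sketch, not from this patch: with the NO_IRQ fallback gone,
 * irq_of_parse_and_map() returns 0 on failure and callers test the mapped
 * virq directly. The example_map_irq() helper name is hypothetical.
 */
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_irq.h>

static int example_map_irq(struct device_node *np)
{
        unsigned int virq = irq_of_parse_and_map(np, 0);

        if (!virq)
                return -ENXIO;  /* no usable interrupt described in the DT */

        return virq;
}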
index cbd5d701c7e086f632f74bf8bec29fe1a23e6a55..63b3ec48c203a43f3d8a9d395e459d765fd7f7b0 100644 (file)
@@ -314,7 +314,7 @@ static const struct of_dev_auxdata *of_dev_lookup(const struct of_dev_auxdata *l
        if (!lookup)
                return NULL;
 
-       for(; lookup->name != NULL; lookup++) {
+       for(; lookup->compatible != NULL; lookup++) {
                if (!of_device_is_compatible(np, lookup->compatible))
                        continue;
                if (of_address_to_resource(np, 0, &res))
index dccd8636095cb361e2e0e1e2bb8fb7fdd57ecee2..f8c752e408a663d55adf7e84ca4fb42aa93d1d02 100644 (file)
@@ -239,26 +239,45 @@ int oprofile_set_ulong(unsigned long *addr, unsigned long val)
        return err;
 }
 
+static int timer_mode;
+
 static int __init oprofile_init(void)
 {
        int err;
 
+       /* always init architecture to setup backtrace support */
        err = oprofile_arch_init(&oprofile_ops);
-       if (err < 0 || timer) {
-               printk(KERN_INFO "oprofile: using timer interrupt.\n");
+
+       timer_mode = err || timer;      /* fall back to timer mode on errors */
+       if (timer_mode) {
+               if (!err)
+                       oprofile_arch_exit();
                err = oprofile_timer_init(&oprofile_ops);
                if (err)
                        return err;
        }
-       return oprofilefs_register();
+
+       err = oprofilefs_register();
+       if (!err)
+               return 0;
+
+       /* failed */
+       if (timer_mode)
+               oprofile_timer_exit();
+       else
+               oprofile_arch_exit();
+
+       return err;
 }
 
 
 static void __exit oprofile_exit(void)
 {
-       oprofile_timer_exit();
        oprofilefs_unregister();
-       oprofile_arch_exit();
+       if (timer_mode)
+               oprofile_timer_exit();
+       else
+               oprofile_arch_exit();
 }
 
 
index 89f63456646fa4c67b6ee74f7c657917be05a425..84a208dbed939afa20743804d8e4d75ca5ff741a 100644 (file)
@@ -45,7 +45,7 @@ static ssize_t timeout_write(struct file *file, char const __user *buf,
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        retval = oprofile_set_timeout(val);
@@ -84,7 +84,7 @@ static ssize_t depth_write(struct file *file, char const __user *buf, size_t cou
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        retval = oprofile_set_ulong(&oprofile_backtrace_depth, val);
@@ -141,9 +141,10 @@ static ssize_t enable_write(struct file *file, char const __user *buf, size_t co
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&val, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
+       retval = 0;
        if (val)
                retval = oprofile_start();
        else
index d0de6cc2d7a5e56e36d2dfa8b2cc8b6ad17074f5..2f0aa0f700e63985a0abbe29573b3816e1ef1e47 100644 (file)
@@ -60,6 +60,13 @@ ssize_t oprofilefs_ulong_to_user(unsigned long val, char __user *buf, size_t cou
 }
 
 
+/*
+ * Note: If oprofilefs_ulong_from_user() returns 0, then *val remains
+ * unchanged and might be uninitialized. This follows write syscall
+ * implementation when count is zero: "If count is zero ... [and if]
+ * no errors are detected, 0 will be returned without causing any
+ * other effect." (man 2 write)
+ */
 int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_t count)
 {
        char tmpbuf[TMPBUFSIZE];
@@ -79,7 +86,7 @@ int oprofilefs_ulong_from_user(unsigned long *val, char const __user *buf, size_
        raw_spin_lock_irqsave(&oprofilefs_lock, flags);
        *val = simple_strtoul(tmpbuf, NULL, 0);
        raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);
-       return 0;
+       return count;
 }
 
 
@@ -99,7 +106,7 @@ static ssize_t ulong_write_file(struct file *file, char const __user *buf, size_
                return -EINVAL;
 
        retval = oprofilefs_ulong_from_user(&value, buf, count);
-       if (retval)
+       if (retval <= 0)
                return retval;
 
        retval = oprofile_set_ulong(file->private_data, value);
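/*
 * Illustrative caller sketch (hypothetical example_write(), written as it
 * would sit in oprofilefs.c next to the helper above), mirroring the
 * converted call sites: oprofilefs_ulong_from_user() now returns the number
 * of bytes consumed, so 0 (a zero-length write) and negative errors are
 * passed straight back and *val is only used on a positive return.
 */
static ssize_t example_write(struct file *file, char const __user *buf,
                             size_t count, loff_t *offset)
{
        unsigned long value;
        int retval;

        if (*offset)
                return -EINVAL;

        retval = oprofilefs_ulong_from_user(&value, buf, count);
        if (retval <= 0)
                return retval;

        /* ... apply value to the setting this file controls ... */

        return count;
}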
index 3ef44624f5103ddaf405e76fcafd0afe6b27a132..878fba1265829cdab586a145d86a332b5ce32874 100644 (file)
@@ -110,6 +110,7 @@ int oprofile_timer_init(struct oprofile_operations *ops)
        ops->start = oprofile_hrtimer_start;
        ops->stop = oprofile_hrtimer_stop;
        ops->cpu_type = "timer";
+       printk(KERN_INFO "oprofile: using timer interrupt.\n");
        return 0;
 }
 
index 7ec56fb0bd78aca5aec916aefd8788cc1b101027..b0dd08e6a9da1cc4f8ee78ebf53f1e4e5f1de2b2 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/export.h>
 #include <linux/pci-ats.h>
 #include <linux/pci.h>
+#include <linux/slab.h>
 
 #include "pci.h"
 
index fce1c54a0c8d8acf480c4b208ced89fb060ac95a..9ddf69e3bbef03487e12ea814be79bb3258fc0fa 100644 (file)
@@ -132,6 +132,18 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
        if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
                return AE_OK;
 
+       pdev = pbus->self;
+       if (pdev && pci_is_pcie(pdev)) {
+               tmp = acpi_find_root_bridge_handle(pdev);
+               if (tmp) {
+                       struct acpi_pci_root *root = acpi_pci_find_root(tmp);
+
+                       if (root && (root->osc_control_set &
+                                       OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
+                               return AE_OK;
+               }
+       }
+
        acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
        device = (adr >> 16) & 0xffff;
        function = adr & 0xffff;
@@ -213,7 +225,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
 
        pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
        if (pdev) {
-               pdev->current_state = PCI_D0;
                slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
                pci_dev_put(pdev);
        }
@@ -459,17 +470,8 @@ static int add_bridge(acpi_handle handle)
 {
        acpi_status status;
        unsigned long long tmp;
-       struct acpi_pci_root *root;
        acpi_handle dummy_handle;
 
-       /*
-        * We shouldn't use this bridge if PCIe native hotplug control has been
-        * granted by the BIOS for it.
-        */
-       root = acpi_pci_find_root(handle);
-       if (root && (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
-               return -ENODEV;
-
        /* if the bridge doesn't have _STA, we assume it is always there */
        status = acpi_get_handle(handle, "_STA", &dummy_handle);
        if (ACPI_SUCCESS(status)) {
@@ -1385,19 +1387,11 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type,
 static acpi_status
 find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
 {
-       struct acpi_pci_root *root;
        int *count = (int *)context;
 
        if (!acpi_is_root_bridge(handle))
                return AE_OK;
 
-       root = acpi_pci_find_root(handle);
-       if (!root)
-               return AE_OK;
-
-       if (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
-               return AE_OK;
-
        (*count)++;
        acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
                                    handle_hotplug_event_bridge, NULL);
index b82c155d7b37f539eb85ce28512219a6b701cf52..1969a3ee3058328e469a0fc6e529f9841f5708ab 100644 (file)
@@ -283,6 +283,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
        struct resource *res;
        struct pci_dev *pdev;
        struct pci_sriov *iov = dev->sriov;
+       int bars = 0;
 
        if (!nr_virtfn)
                return 0;
@@ -307,6 +308,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
 
        nres = 0;
        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+               bars |= (1 << (i + PCI_IOV_RESOURCES));
                res = dev->resource + PCI_IOV_RESOURCES + i;
                if (res->parent)
                        nres++;
@@ -324,6 +326,11 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
                return -ENOMEM;
        }
 
+       if (pci_enable_resources(dev, bars)) {
+               dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
+               return -ENOMEM;
+       }
+
        if (iov->link != dev->devfn) {
                pdev = pci_get_slot(dev->bus, iov->link);
                if (!pdev)
index 6f45a73c6e9fa38c9e09fbf3d5a4853d8cc3396c..6d4a5319148d7eb293eb499cb98cfc56e496f078 100644 (file)
@@ -664,6 +664,9 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
                error = platform_pci_set_power_state(dev, state);
                if (!error)
                        pci_update_current_state(dev, state);
+               /* Fall back to PCI_D0 if native PM is not supported */
+               if (!dev->pm_cap)
+                       dev->current_state = PCI_D0;
        } else {
                error = -ENODEV;
                /* Fall back to PCI_D0 if native PM is not supported */
@@ -1126,7 +1129,11 @@ static int __pci_enable_device_flags(struct pci_dev *dev,
        if (atomic_add_return(1, &dev->enable_cnt) > 1)
                return 0;               /* already enabled */
 
-       for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+       /* only skip SR-IOV related resources */
+       for (i = 0; i <= PCI_ROM_RESOURCE; i++)
+               if (dev->resource[i].flags & flags)
+                       bars |= (1 << i);
+       for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
                if (dev->resource[i].flags & flags)
                        bars |= (1 << i);
 
index 13ef8c37471d0a6575fed59d6cf92dece74c34ba..dcdc1f4a4624d782d35f3776d1a69bbfdf983cdb 100644 (file)
@@ -121,6 +121,7 @@ struct toshiba_acpi_dev {
        int illumination_supported:1;
        int video_supported:1;
        int fan_supported:1;
+       int system_event_supported:1;
 
        struct mutex mutex;
 };
@@ -724,7 +725,7 @@ static int keys_proc_show(struct seq_file *m, void *v)
        u32 hci_result;
        u32 value;
 
-       if (!dev->key_event_valid) {
+       if (!dev->key_event_valid && dev->system_event_supported) {
                hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
                if (hci_result == HCI_SUCCESS) {
                        dev->key_event_valid = 1;
@@ -964,6 +965,8 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
 
        /* enable event fifo */
        hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
+       if (hci_result == HCI_SUCCESS)
+               dev->system_event_supported = 1;
 
        props.type = BACKLIGHT_PLATFORM;
        props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
@@ -1032,12 +1035,15 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
 {
        struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
        u32 hci_result, value;
+       int retries = 3;
 
-       if (event != 0x80)
+       if (!dev->system_event_supported || event != 0x80)
                return;
+
        do {
                hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
-               if (hci_result == HCI_SUCCESS) {
+               switch (hci_result) {
+               case HCI_SUCCESS:
                        if (value == 0x100)
                                continue;
                        /* act on key press; ignore key release */
@@ -1049,14 +1055,19 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event)
                                pr_info("Unknown key %x\n",
                                       value);
                        }
-               } else if (hci_result == HCI_NOT_SUPPORTED) {
+                       break;
+               case HCI_NOT_SUPPORTED:
                        /* This is a workaround for an unresolved issue on
                         * some machines where system events sporadically
                         * become disabled. */
                        hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
                        pr_notice("Re-enabled hotkeys\n");
+                       /* fall through */
+               default:
+                       retries--;
+                       break;
                }
-       } while (hci_result != HCI_EMPTY);
+       } while (retries && hci_result != HCI_EMPTY);
 }
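
The rewritten notify handler above replaces the open-ended do/while drain with a loop bounded by a retry counter, so firmware that keeps returning something other than HCI_SUCCESS or HCI_EMPTY can no longer wedge the handler. A self-contained, user-space sketch of the same bounded-drain pattern; every name here is a stand-in, not a Toshiba HCI call:

#include <stdio.h>

enum status { ST_OK, ST_EMPTY, ST_NOT_SUPPORTED };

/* Stub event source standing in for hci_read1(); purely illustrative. */
static enum status read_event(int *value)
{
        static int calls;

        *value = ++calls;
        return calls < 3 ? ST_OK : ST_EMPTY;
}

int main(void)
{
        int retries = 3, value;
        enum status st;

        do {
                st = read_event(&value);
                switch (st) {
                case ST_OK:
                        printf("event %d\n", value);
                        break;
                case ST_NOT_SUPPORTED:
                        /* re-enable events here, then fall through */
                default:
                        retries--;      /* anything unexpected burns a retry */
                        break;
                }
        } while (retries && st != ST_EMPTY);

        return 0;
}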
 
 
index cffcb7c00b0068da7b32a467358b7e71a0388e13..01fa671ec97f6af9a31bf1a8c39ceef86691617b 100644 (file)
@@ -61,7 +61,8 @@ MODULE_PARM_DESC(debug, "Flag to enable PMIC Battery debug messages.");
 #define PMIC_BATT_CHR_SBATDET_MASK     (1 << 5)
 #define PMIC_BATT_CHR_SDCLMT_MASK      (1 << 6)
 #define PMIC_BATT_CHR_SUSBOVP_MASK     (1 << 7)
-#define PMIC_BATT_CHR_EXCPT_MASK       0xC6
+#define PMIC_BATT_CHR_EXCPT_MASK       0x86
+
 #define PMIC_BATT_ADC_ACCCHRG_MASK     (1 << 31)
 #define PMIC_BATT_ADC_ACCCHRGVAL_MASK  0x7FFFFFFF
 
@@ -304,11 +305,6 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
                        pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
                        pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT);
                        batt_exception = 1;
-               } else if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
-                       pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
-                       pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
-                       pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
-                       batt_exception = 1;
                } else if (r8 & PMIC_BATT_CHR_STEMP_MASK) {
                        pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT;
                        pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
@@ -316,6 +312,10 @@ static void pmic_battery_read_status(struct pmic_power_module_info *pbi)
                        batt_exception = 1;
                } else {
                        pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
+                       if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
+                               /* PMIC will change charging current automatically */
+                               pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
+                       }
                }
        }
 
index cf3f9997546dc41d10a143390e834d0926720964..10451a15e8284f33be26996363c5ffd6c79ad56a 100644 (file)
@@ -101,7 +101,9 @@ static s32 scaled_ppm_to_ppb(long ppm)
 
 static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
 {
-       return 1; /* always round timer functions to one nanosecond */
+       tp->tv_sec = 0;
+       tp->tv_nsec = 1;
+       return 0;
 }
 
 static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
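
With the change above, ptp_clock_getres() reports a 1 ns resolution through the POSIX clock interface instead of returning the bare value 1, which user space would have seen as an error from clock_getres(). The corrected behaviour looks like this from user space (CLOCK_REALTIME is used only so the sketch runs anywhere; a PTP clock would use the dynamic clock id derived from its /dev/ptpN file descriptor):

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec res;

        /* Older glibc may need -lrt for clock_getres(). */
        if (clock_getres(CLOCK_REALTIME, &res) == 0)
                printf("resolution: %ld.%09ld s\n",
                       (long)res.tv_sec, res.tv_nsec);
        return 0;
}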
index 5225930a10cd24fd5003d553bfd65ada9e10eebc..691b1ab1a3d0499d85815bd03551e9d22518e494 100644 (file)
@@ -851,14 +851,12 @@ static int tsi721_doorbell_init(struct tsi721_device *priv)
        INIT_WORK(&priv->idb_work, tsi721_db_dpc);
 
        /* Allocate buffer for inbound doorbells queue */
-       priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
+       priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
                                IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
                                &priv->idb_dma, GFP_KERNEL);
        if (!priv->idb_base)
                return -ENOMEM;
 
-       memset(priv->idb_base, 0, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE);
-
        dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
                priv->idb_base, (unsigned long long)priv->idb_dma);
 
@@ -904,7 +902,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
         */
 
        /* Allocate space for DMA descriptors */
-       bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
+       bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
                                        bd_num * sizeof(struct tsi721_dma_desc),
                                        &bd_phys, GFP_KERNEL);
        if (!bd_ptr)
@@ -913,8 +911,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
        priv->bdma[chnum].bd_phys = bd_phys;
        priv->bdma[chnum].bd_base = bd_ptr;
 
-       memset(bd_ptr, 0, bd_num * sizeof(struct tsi721_dma_desc));
-
        dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
                bd_ptr, (unsigned long long)bd_phys);
 
@@ -922,7 +918,7 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
        sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
                                        bd_num : TSI721_DMA_MINSTSSZ;
        sts_size = roundup_pow_of_two(sts_size);
-       sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
+       sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
                                     sts_size * sizeof(struct tsi721_dma_sts),
                                     &sts_phys, GFP_KERNEL);
        if (!sts_ptr) {
@@ -938,8 +934,6 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
        priv->bdma[chnum].sts_base = sts_ptr;
        priv->bdma[chnum].sts_size = sts_size;
 
-       memset(sts_ptr, 0, sts_size);
-
        dev_dbg(&priv->pdev->dev,
                "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
                sts_ptr, (unsigned long long)sts_phys, sts_size);
@@ -1400,7 +1394,7 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
 
        /* Outbound message descriptor status FIFO allocation */
        priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
-       priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
+       priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
                        priv->omsg_ring[mbox].sts_size *
                                                sizeof(struct tsi721_dma_sts),
                        &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
@@ -1412,9 +1406,6 @@ static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
                goto out_desc;
        }
 
-       memset(priv->omsg_ring[mbox].sts_base, 0,
-               entries * sizeof(struct tsi721_dma_sts));
-
        /*
         * Configure Outbound Messaging Engine
         */
@@ -2116,8 +2107,8 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
        INIT_LIST_HEAD(&mport->dbells);
 
        rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
-       rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
-       rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
+       rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
+       rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
        strcpy(mport->name, "Tsi721 mport");
 
        /* Hook up interrupt handler */
@@ -2163,7 +2154,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
                                  const struct pci_device_id *id)
 {
        struct tsi721_device *priv;
-       int i;
+       int i, cap;
        int err;
        u32 regval;
 
@@ -2271,10 +2262,20 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
                        dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
        }
 
-       /* Clear "no snoop" and "relaxed ordering" bits. */
-       pci_read_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, &regval);
-       regval &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN);
-       pci_write_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, regval);
+       cap = pci_pcie_cap(pdev);
+       BUG_ON(cap == 0);
+
+       /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
+       pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &regval);
+       regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
+                   PCI_EXP_DEVCTL_NOSNOOP_EN);
+       regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT;
+       pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval);
+
+       /* Adjust PCIe completion timeout. */
+       pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval);
+       regval &= ~(0x0f);
+       pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2);
 
        /*
         * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
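
Several tsi721.c allocations switch from dma_alloc_coherent() followed by an explicit memset() to dma_zalloc_coherent(), which hands back already-zeroed coherent memory; in the status-FIFO cases above the old memset() also cleared fewer bytes than were actually allocated, so the conversion fixes that as well. A minimal sketch of the idiom, assuming a driver that already includes <linux/dma-mapping.h> (the helper name is illustrative):

static int alloc_zeroed_ring(struct device *dev, size_t size,
                             void **vaddr, dma_addr_t *paddr)
{
        /* Zero-filled, DMA-coherent memory: no separate memset(), and no
         * chance of clearing a different size than was allocated. */
        *vaddr = dma_zalloc_coherent(dev, size, paddr, GFP_KERNEL);
        return *vaddr ? 0 : -ENOMEM;
}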
index 58be4deb1402ad9f2a119c196e208a8b0d2f2593..822e54c394d5cd0690ee80d2c5173eb05140018f 100644 (file)
@@ -72,6 +72,8 @@
 #define TSI721_MSIXPBA_OFFSET  0x2a000
 #define TSI721_PCIECFG_EPCTL   0x400
 
+#define MAX_READ_REQUEST_SZ_SHIFT      12
+
 /*
  * Event Management Registers
  */
index e8326f26fa2f5c5ca11d26901f5e70052a1ffea6..dc4c2748bbc38bfac593cc47a2ff7bac6a34c8fe 100644 (file)
@@ -63,7 +63,7 @@ static int rtc_suspend(struct device *dev, pm_message_t mesg)
         */
        delta = timespec_sub(old_system, old_rtc);
        delta_delta = timespec_sub(delta, old_delta);
-       if (abs(delta_delta.tv_sec)  >= 2) {
+       if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
                /*
                 * if delta_delta is too large, assume time correction
                 * has occurred and set old_delta to the current delta.
@@ -97,9 +97,8 @@ static int rtc_resume(struct device *dev)
        rtc_tm_to_time(&tm, &new_rtc.tv_sec);
        new_rtc.tv_nsec = 0;
 
-       if (new_rtc.tv_sec <= old_rtc.tv_sec) {
-               if (new_rtc.tv_sec < old_rtc.tv_sec)
-                       pr_debug("%s:  time travel!\n", dev_name(&rtc->dev));
+       if (new_rtc.tv_sec < old_rtc.tv_sec) {
+               pr_debug("%s:  time travel!\n", dev_name(&rtc->dev));
                return 0;
        }
 
@@ -116,7 +115,8 @@ static int rtc_resume(struct device *dev)
        sleep_time = timespec_sub(sleep_time,
                        timespec_sub(new_system, old_system));
 
-       timekeeping_inject_sleeptime(&sleep_time);
+       if (sleep_time.tv_sec >= 0)
+               timekeeping_inject_sleeptime(&sleep_time);
        return 0;
 }
 
index 8e286259a007fbc5921b4c569cdb959f62762f7a..3bcc7cfcaba7ff1e370fee78cf85fb5bb22a6358 100644 (file)
@@ -73,6 +73,8 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
                err = -EINVAL;
 
        mutex_unlock(&rtc->ops_lock);
+       /* A timer might have just expired */
+       schedule_work(&rtc->irqwork);
        return err;
 }
 EXPORT_SYMBOL_GPL(rtc_set_time);
@@ -112,6 +114,8 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
                err = -EINVAL;
 
        mutex_unlock(&rtc->ops_lock);
+       /* A timer might have just expired */
+       schedule_work(&rtc->irqwork);
 
        return err;
 }
@@ -319,6 +323,20 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 }
 EXPORT_SYMBOL_GPL(rtc_read_alarm);
 
+static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+{
+       int err;
+
+       if (!rtc->ops)
+               err = -ENODEV;
+       else if (!rtc->ops->set_alarm)
+               err = -EINVAL;
+       else
+               err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+
+       return err;
+}
+
 static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
        struct rtc_time tm;
@@ -342,14 +360,7 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
         * over right here, before we set the alarm.
         */
 
-       if (!rtc->ops)
-               err = -ENODEV;
-       else if (!rtc->ops->set_alarm)
-               err = -EINVAL;
-       else
-               err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
-
-       return err;
+       return ___rtc_set_alarm(rtc, alarm);
 }
 
 int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
@@ -396,6 +407,8 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
                timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
        }
        mutex_unlock(&rtc->ops_lock);
+       /* Maybe that was in the past. */
+       schedule_work(&rtc->irqwork);
        return err;
 }
 EXPORT_SYMBOL_GPL(rtc_initialize_alarm);
@@ -763,6 +776,20 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
        return 0;
 }
 
+static void rtc_alarm_disable(struct rtc_device *rtc)
+{
+       struct rtc_wkalrm alarm;
+       struct rtc_time tm;
+
+       __rtc_read_time(rtc, &tm);
+
+       alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm),
+                                    ktime_set(300, 0)));
+       alarm.enabled = 0;
+
+       ___rtc_set_alarm(rtc, &alarm);
+}
+
 /**
  * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
  * @rtc rtc device
@@ -784,8 +811,10 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
                struct rtc_wkalrm alarm;
                int err;
                next = timerqueue_getnext(&rtc->timerqueue);
-               if (!next)
+               if (!next) {
+                       rtc_alarm_disable(rtc);
                        return;
+               }
                alarm.time = rtc_ktime_to_tm(next->expires);
                alarm.enabled = 1;
                err = __rtc_set_alarm(rtc, &alarm);
@@ -847,7 +876,8 @@ again:
                err = __rtc_set_alarm(rtc, &alarm);
                if (err == -ETIME)
                        goto again;
-       }
+       } else
+               rtc_alarm_disable(rtc);
 
        mutex_unlock(&rtc->ops_lock);
 }
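
Two themes run through the rtc hunks above: rtc_set_time(), rtc_set_mmss() and rtc_initialize_alarm() now kick rtc->irqwork after dropping ops_lock, because a freshly written wall-clock time may already lie past a queued timer; and the new rtc_alarm_disable() parks the hardware alarm (a far-future time with enabled = 0) whenever the timer queue runs empty. The scheduling half of that pattern, as a sketch (only schedule_work(), the mutex and the fields named in the diff are assumed; the surrounding function is illustrative):

        mutex_lock(&rtc->ops_lock);
        err = rtc->ops->set_time(rtc->dev.parent, tm);  /* update hardware */
        mutex_unlock(&rtc->ops_lock);

        /* The new time may be past an already-queued timer; let the irq
         * worker re-scan the timer queue instead of doing it here with
         * the lock held. */
        schedule_work(&rtc->irqwork);
        return err;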
index eda128fc1d38729ebb1a385c10fd30e6345e852b..64aedd8cc095810e4134ceb727538a24b2af6232 100644 (file)
@@ -357,10 +357,19 @@ static int m41t80_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *t)
 static struct rtc_class_ops m41t80_rtc_ops = {
        .read_time = m41t80_rtc_read_time,
        .set_time = m41t80_rtc_set_time,
+       /*
+        * XXX - m41t80 alarm functionality is reported broken.
+        * Until it is fixed, don't register alarm functions.
+        *
        .read_alarm = m41t80_rtc_read_alarm,
        .set_alarm = m41t80_rtc_set_alarm,
+       */
        .proc = m41t80_rtc_proc,
+       /*
+        * See above comment on broken alarm
+        *
        .alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
+       */
 };
 
 #if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
index 7639ab906f02e35ceec0a75bfaddd418e17478e8..5b979d9cc3324ffccd455da21e09a35b88f753ad 100644 (file)
@@ -202,7 +202,6 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
        void __iomem *base = s3c_rtc_base;
        int year = tm->tm_year - 100;
 
-       clk_enable(rtc_clk);
        pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
                 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
                 tm->tm_hour, tm->tm_min, tm->tm_sec);
@@ -214,6 +213,7 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm)
                return -EINVAL;
        }
 
+       clk_enable(rtc_clk);
        writeb(bin2bcd(tm->tm_sec),  base + S3C2410_RTCSEC);
        writeb(bin2bcd(tm->tm_min),  base + S3C2410_RTCMIN);
        writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR);
index 75c3f1f8fd434301c3ba4a07a632e0ffefa6aac3..a84631a7391d3ed50c680b939129d159fc25daa3 100644 (file)
@@ -529,10 +529,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
 int chsc_chp_vary(struct chp_id chpid, int on)
 {
        struct channel_path *chp = chpid_to_chp(chpid);
-       struct chp_link link;
 
-       memset(&link, 0, sizeof(struct chp_link));
-       link.chpid = chpid;
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        /*
@@ -542,10 +539,10 @@ int chsc_chp_vary(struct chp_id chpid, int on)
                /* Try to update the channel path descriptor. */
                chsc_determine_base_channel_path_desc(chpid, &chp->desc);
                for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
-                                          __s390_vary_chpid_on, &link);
+                                          __s390_vary_chpid_on, &chpid);
        } else
                for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
-                                          NULL, &link);
+                                          NULL, &chpid);
 
        return 0;
 }
index 155a82bcb9e545e2888430337d0b1c2b50239acd..4a1ff5c2eb881355204ffe8e047cfbe3bb8e3706 100644 (file)
@@ -68,8 +68,13 @@ struct schib {
        __u8 mda[4];             /* model dependent area */
 } __attribute__ ((packed,aligned(4)));
 
+/*
+ * When rescheduled, todos with higher values will overwrite those
+ * with lower values.
+ */
 enum sch_todo {
        SCH_TODO_NOTHING,
+       SCH_TODO_EVAL,
        SCH_TODO_UNREG,
 };
 
index 92d7324acb1c78fbab348a2ea190c8901351df9d..21908e67bf6745d8dc91f791347a9d9cee538aa1 100644 (file)
@@ -195,51 +195,6 @@ void css_sch_device_unregister(struct subchannel *sch)
 }
 EXPORT_SYMBOL_GPL(css_sch_device_unregister);
 
-static void css_sch_todo(struct work_struct *work)
-{
-       struct subchannel *sch;
-       enum sch_todo todo;
-
-       sch = container_of(work, struct subchannel, todo_work);
-       /* Find out todo. */
-       spin_lock_irq(sch->lock);
-       todo = sch->todo;
-       CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
-                     sch->schid.sch_no, todo);
-       sch->todo = SCH_TODO_NOTHING;
-       spin_unlock_irq(sch->lock);
-       /* Perform todo. */
-       if (todo == SCH_TODO_UNREG)
-               css_sch_device_unregister(sch);
-       /* Release workqueue ref. */
-       put_device(&sch->dev);
-}
-
-/**
- * css_sched_sch_todo - schedule a subchannel operation
- * @sch: subchannel
- * @todo: todo
- *
- * Schedule the operation identified by @todo to be performed on the slow path
- * workqueue. Do nothing if another operation with higher priority is already
- * scheduled. Needs to be called with subchannel lock held.
- */
-void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
-{
-       CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
-                     sch->schid.ssid, sch->schid.sch_no, todo);
-       if (sch->todo >= todo)
-               return;
-       /* Get workqueue ref. */
-       if (!get_device(&sch->dev))
-               return;
-       sch->todo = todo;
-       if (!queue_work(cio_work_q, &sch->todo_work)) {
-               /* Already queued, release workqueue ref. */
-               put_device(&sch->dev);
-       }
-}
-
 static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
 {
        int i;
@@ -466,6 +421,65 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
                css_schedule_eval(schid);
 }
 
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+       CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+                     sch->schid.ssid, sch->schid.sch_no, todo);
+       if (sch->todo >= todo)
+               return;
+       /* Get workqueue ref. */
+       if (!get_device(&sch->dev))
+               return;
+       sch->todo = todo;
+       if (!queue_work(cio_work_q, &sch->todo_work)) {
+               /* Already queued, release workqueue ref. */
+               put_device(&sch->dev);
+       }
+}
+
+static void css_sch_todo(struct work_struct *work)
+{
+       struct subchannel *sch;
+       enum sch_todo todo;
+       int ret;
+
+       sch = container_of(work, struct subchannel, todo_work);
+       /* Find out todo. */
+       spin_lock_irq(sch->lock);
+       todo = sch->todo;
+       CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+                     sch->schid.sch_no, todo);
+       sch->todo = SCH_TODO_NOTHING;
+       spin_unlock_irq(sch->lock);
+       /* Perform todo. */
+       switch (todo) {
+       case SCH_TODO_NOTHING:
+               break;
+       case SCH_TODO_EVAL:
+               ret = css_evaluate_known_subchannel(sch, 1);
+               if (ret == -EAGAIN) {
+                       spin_lock_irq(sch->lock);
+                       css_sched_sch_todo(sch, todo);
+                       spin_unlock_irq(sch->lock);
+               }
+               break;
+       case SCH_TODO_UNREG:
+               css_sch_device_unregister(sch);
+               break;
+       }
+       /* Release workqueue ref. */
+       put_device(&sch->dev);
+}
+
 static struct idset *slow_subchannel_set;
 static spinlock_t slow_subchannel_lock;
 static wait_queue_head_t css_eval_wq;
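
Besides adding SCH_TODO_EVAL, the css.c hunks move css_sched_sch_todo() below css_evaluate_subchannel() and turn css_sch_todo() into a switch, but the scheduling rule itself is unchanged: a pending todo is only ever escalated, and a device reference is held for as long as the work item is queued. The generic shape of that escalate-only deferred-work pattern, as a sketch (everything except get_device()/put_device()/queue_work() is illustrative):

        /* Caller holds obj->lock; todo values are ordered by priority. */
        if (todo <= obj->todo)
                return;                 /* something at least as urgent is pending */
        if (!get_device(obj->dev))
                return;                 /* object is being torn down */
        obj->todo = todo;
        if (!queue_work(wq, &obj->work))
                put_device(obj->dev);   /* already queued, drop the extra ref */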
index d734f4a0ecac23cea1d821b316a563b71087ccc6..47269858ecb662af862c38a5e96fc4f9aacfe2ca 100644 (file)
@@ -1868,9 +1868,9 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
         */
        cdev->private->flags.resuming = 1;
        cdev->private->path_new_mask = LPM_ANYPATH;
-       css_schedule_eval(sch->schid);
+       css_sched_sch_todo(sch, SCH_TODO_EVAL);
        spin_unlock_irq(sch->lock);
-       css_complete_work();
+       css_wait_for_slow_path();
 
        /* cdev may have been moved to a different subchannel. */
        sch = to_subchannel(cdev->dev.parent);
index 52c233fa2b1281d14a2881618465606447bef365..1b853513c891ca2f010f8703b110f49c6f65afb3 100644 (file)
@@ -496,8 +496,26 @@ static void ccw_device_reset_path_events(struct ccw_device *cdev)
        cdev->private->pgid_reset_mask = 0;
 }
 
-void
-ccw_device_verify_done(struct ccw_device *cdev, int err)
+static void create_fake_irb(struct irb *irb, int type)
+{
+       memset(irb, 0, sizeof(*irb));
+       if (type == FAKE_CMD_IRB) {
+               struct cmd_scsw *scsw = &irb->scsw.cmd;
+               scsw->cc = 1;
+               scsw->fctl = SCSW_FCTL_START_FUNC;
+               scsw->actl = SCSW_ACTL_START_PEND;
+               scsw->stctl = SCSW_STCTL_STATUS_PEND;
+       } else if (type == FAKE_TM_IRB) {
+               struct tm_scsw *scsw = &irb->scsw.tm;
+               scsw->x = 1;
+               scsw->cc = 1;
+               scsw->fctl = SCSW_FCTL_START_FUNC;
+               scsw->actl = SCSW_ACTL_START_PEND;
+               scsw->stctl = SCSW_STCTL_STATUS_PEND;
+       }
+}
+
+void ccw_device_verify_done(struct ccw_device *cdev, int err)
 {
        struct subchannel *sch;
 
@@ -520,12 +538,8 @@ callback:
                ccw_device_done(cdev, DEV_STATE_ONLINE);
                /* Deliver fake irb to device driver, if needed. */
                if (cdev->private->flags.fake_irb) {
-                       memset(&cdev->private->irb, 0, sizeof(struct irb));
-                       cdev->private->irb.scsw.cmd.cc = 1;
-                       cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
-                       cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
-                       cdev->private->irb.scsw.cmd.stctl =
-                               SCSW_STCTL_STATUS_PEND;
+                       create_fake_irb(&cdev->private->irb,
+                                       cdev->private->flags.fake_irb);
                        cdev->private->flags.fake_irb = 0;
                        if (cdev->handler)
                                cdev->handler(cdev, cdev->private->intparm,
index f98698d5735e887e0fb6cc46f00a63012ecdccb5..ec7fb6d3b479a25a32bfad67ecc36a3539782b39 100644 (file)
@@ -198,7 +198,7 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
        if (cdev->private->state == DEV_STATE_VERIFY) {
                /* Remember to fake irb when finished. */
                if (!cdev->private->flags.fake_irb) {
-                       cdev->private->flags.fake_irb = 1;
+                       cdev->private->flags.fake_irb = FAKE_CMD_IRB;
                        cdev->private->intparm = intparm;
                        return 0;
                } else
@@ -213,9 +213,9 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
        ret = cio_set_options (sch, flags);
        if (ret)
                return ret;
-       /* Adjust requested path mask to excluded varied off paths. */
+       /* Adjust requested path mask to exclude unusable paths. */
        if (lpm) {
-               lpm &= sch->opm;
+               lpm &= sch->lpm;
                if (lpm == 0)
                        return -EACCES;
        }
@@ -605,11 +605,21 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw,
        sch = to_subchannel(cdev->dev.parent);
        if (!sch->schib.pmcw.ena)
                return -EINVAL;
+       if (cdev->private->state == DEV_STATE_VERIFY) {
+               /* Remember to fake irb when finished. */
+               if (!cdev->private->flags.fake_irb) {
+                       cdev->private->flags.fake_irb = FAKE_TM_IRB;
+                       cdev->private->intparm = intparm;
+                       return 0;
+               } else
+                       /* There's already a fake I/O around. */
+                       return -EBUSY;
+       }
        if (cdev->private->state != DEV_STATE_ONLINE)
                return -EIO;
-       /* Adjust requested path mask to excluded varied off paths. */
+       /* Adjust requested path mask to exclude unusable paths. */
        if (lpm) {
-               lpm &= sch->opm;
+               lpm &= sch->lpm;
                if (lpm == 0)
                        return -EACCES;
        }
index 2ebb492a5c17dcb8a9e05fb23209162edb43d849..76253dfcc1be86a18eba7ac7ea4d6f53c134bc7e 100644 (file)
@@ -111,6 +111,9 @@ enum cdev_todo {
        CDEV_TODO_UNREG_EVAL,
 };
 
+#define FAKE_CMD_IRB   1
+#define FAKE_TM_IRB    2
+
 struct ccw_device_private {
        struct ccw_device *cdev;
        struct subchannel *sch;
@@ -138,7 +141,7 @@ struct ccw_device_private {
                unsigned int doverify:1;    /* delayed path verification */
                unsigned int donotify:1;    /* call notify function */
                unsigned int recog_done:1;  /* dev. recog. complete */
-               unsigned int fake_irb:1;    /* deliver faked irb */
+               unsigned int fake_irb:2;    /* deliver faked irb */
                unsigned int resuming:1;    /* recognition while resume */
                unsigned int pgroup:1;      /* pathgroup is set up */
                unsigned int mpath:1;       /* multipathing is set up */
index ec94f049e99543849ed56c90c665102c40c5b87e..96bbe9d12a79fbef17fa9b4df331adf5a24a96c3 100644 (file)
@@ -1552,6 +1552,8 @@ static void ap_reset(struct ap_device *ap_dev)
        rc = ap_init_queue(ap_dev->qid);
        if (rc == -ENODEV)
                ap_dev->unregistered = 1;
+       else
+               __ap_schedule_poll_timer();
 }
 
 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
index 11f07f888223d92748e684b4000155b7e37eae9c..b79576b64f451e72352bbc3c92582f23e9a92191 100644 (file)
@@ -55,6 +55,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
 {
        struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
+       /* if previous slave_alloc returned early, there is nothing to do */
+       if (!zfcp_sdev->port)
+               return;
+
        zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
        put_device(&zfcp_sdev->port->dev);
 }
index 5f94d22c491ecbf1c619e79b0e1ff8a75e23ccd4..542668292900fa9fe2e5afca92215f46104e0520 100644 (file)
@@ -233,13 +233,9 @@ int bbc_i2c_write_buf(struct bbc_i2c_client *client,
        int ret = 0;
 
        while (len > 0) {
-               int err = bbc_i2c_writeb(client, *buf, off);
-
-               if (err < 0) {
-                       ret = err;
+               ret = bbc_i2c_writeb(client, *buf, off);
+               if (ret < 0)
                        break;
-               }
-
                len--;
                buf++;
                off++;
@@ -253,11 +249,9 @@ int bbc_i2c_read_buf(struct bbc_i2c_client *client,
        int ret = 0;
 
        while (len > 0) {
-               int err = bbc_i2c_readb(client, buf, off);
-               if (err < 0) {
-                       ret = err;
+               ret = bbc_i2c_readb(client, buf, off);
+               if (ret < 0)
                        break;
-               }
                len--;
                buf++;
                off++;
@@ -422,17 +416,6 @@ static struct platform_driver bbc_i2c_driver = {
        .remove         = __devexit_p(bbc_i2c_remove),
 };
 
-static int __init bbc_i2c_init(void)
-{
-       return platform_driver_register(&bbc_i2c_driver);
-}
-
-static void __exit bbc_i2c_exit(void)
-{
-       platform_driver_unregister(&bbc_i2c_driver);
-}
-
-module_init(bbc_i2c_init);
-module_exit(bbc_i2c_exit);
+module_platform_driver(bbc_i2c_driver);
 
 MODULE_LICENSE("GPL");
index 965a1fccd66a8898270d45756468e868a4f8acc5..4b9939726c342f3b5e32b1cc5f3ebfe5c88ef945 100644 (file)
@@ -275,15 +275,4 @@ static struct platform_driver d7s_driver = {
        .remove         = __devexit_p(d7s_remove),
 };
 
-static int __init d7s_init(void)
-{
-       return platform_driver_register(&d7s_driver);
-}
-
-static void __exit d7s_exit(void)
-{
-       platform_driver_unregister(&d7s_driver);
-}
-
-module_init(d7s_init);
-module_exit(d7s_exit);
+module_platform_driver(d7s_driver);
index be7b4e56154f9da35b81bee8fcbc2a238180f314..339fd6f65eda7ff3ba3d90914e8b79c73ae15d34 100644 (file)
@@ -1138,16 +1138,6 @@ static struct platform_driver envctrl_driver = {
        .remove         = __devexit_p(envctrl_remove),
 };
 
-static int __init envctrl_init(void)
-{
-       return platform_driver_register(&envctrl_driver);
-}
-
-static void __exit envctrl_exit(void)
-{
-       platform_driver_unregister(&envctrl_driver);
-}
+module_platform_driver(envctrl_driver);
 
-module_init(envctrl_init);
-module_exit(envctrl_exit);
 MODULE_LICENSE("GPL");
index 73dd4e7afaaa0b84fb8a6fd9de3552cab37a8a19..826157f386943940367f663b20415e2c01fc571f 100644 (file)
@@ -216,16 +216,6 @@ static struct platform_driver flash_driver = {
        .remove         = __devexit_p(flash_remove),
 };
 
-static int __init flash_init(void)
-{
-       return platform_driver_register(&flash_driver);
-}
-
-static void __exit flash_cleanup(void)
-{
-       platform_driver_unregister(&flash_driver);
-}
+module_platform_driver(flash_driver);
 
-module_init(flash_init);
-module_exit(flash_cleanup);
 MODULE_LICENSE("GPL");
index ebce9639a26abba176a3aff226a89abc3d13f8bc..0b31658ccde5cd8be14d3b1f54a06670c1c375df 100644 (file)
@@ -435,16 +435,6 @@ static struct platform_driver uctrl_driver = {
 };
 
 
-static int __init uctrl_init(void)
-{
-       return platform_driver_register(&uctrl_driver);
-}
-
-static void __exit uctrl_exit(void)
-{
-       platform_driver_unregister(&uctrl_driver);
-}
+module_platform_driver(uctrl_driver);
 
-module_init(uctrl_init);
-module_exit(uctrl_exit);
 MODULE_LICENSE("GPL");
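
The five sparc drivers above all drop their hand-written module init/exit functions in favour of module_platform_driver(), which generates exactly that platform_driver_register()/platform_driver_unregister() boilerplate. A minimal driver using the macro might look like this (the driver and device names are made up for illustration):

#include <linux/module.h>
#include <linux/platform_device.h>

static int __devinit example_probe(struct platform_device *pdev)
{
        return 0;       /* claim and set up the device here */
}

static int __devexit example_remove(struct platform_device *pdev)
{
        return 0;       /* undo probe */
}

static struct platform_driver example_driver = {
        .driver         = {
                .name   = "example-device",
                .owner  = THIS_MODULE,
        },
        .probe          = example_probe,
        .remove         = __devexit_p(example_remove),
};

/* Expands to the module_init()/module_exit() pair that registers and
 * unregisters example_driver. */
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");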
index dba72a4e6a1cd607288ce533c64a0313f132c2b4..1ad0b8225560f89fedae6a9e0f8318666df10fcb 100644 (file)
@@ -1906,18 +1906,19 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
        spin_lock(&session->lock);
        task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
                                 cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
-       if (!task) {
+       if (!task || !task->sc) {
                spin_unlock(&session->lock);
                return -EINVAL;
        }
        sc = task->sc;
-       spin_unlock(&session->lock);
 
        if (!blk_rq_cpu_valid(sc->request))
                cpu = smp_processor_id();
        else
                cpu = sc->request->cpu;
 
+       spin_unlock(&session->lock);
+
        p = &per_cpu(bnx2i_percpu, cpu);
        spin_lock(&p->p_work_lock);
        if (unlikely(!p->iothread)) {
index cefbe44bb84a1293db665510543ab8cccaec1622..8d67467dd9cec100f52b51803fbe943192421a58 100644 (file)
@@ -31,6 +31,8 @@
 #include <linux/sysfs.h>
 #include <linux/ctype.h>
 #include <linux/workqueue.h>
+#include <net/dcbnl.h>
+#include <net/dcbevent.h>
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
 #include <scsi/scsi_transport.h>
@@ -101,6 +103,8 @@ static int fcoe_ddp_done(struct fc_lport *, u16);
 static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
                           unsigned int);
 static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+                                    ulong event, void *ptr);
 
 static bool fcoe_match(struct net_device *netdev);
 static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
@@ -129,6 +133,11 @@ static struct notifier_block fcoe_cpu_notifier = {
        .notifier_call = fcoe_cpu_callback,
 };
 
+/* notification function for DCB events */
+static struct notifier_block dcb_notifier = {
+       .notifier_call = fcoe_dcb_app_notification,
+};
+
 static struct scsi_transport_template *fcoe_nport_scsi_transport;
 static struct scsi_transport_template *fcoe_vport_scsi_transport;
 
@@ -1522,6 +1531,8 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
        skb_reset_network_header(skb);
        skb->mac_len = elen;
        skb->protocol = htons(ETH_P_FCOE);
+       skb->priority = port->priority;
+
        if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
            fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
                skb->vlan_tci = VLAN_TAG_PRESENT |
@@ -1624,6 +1635,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
        stats->InvalidCRCCount++;
        if (stats->InvalidCRCCount < 5)
                printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
+       put_cpu();
        return -EINVAL;
 }
 
@@ -1746,6 +1758,7 @@ int fcoe_percpu_receive_thread(void *arg)
  */
 static void fcoe_dev_setup(void)
 {
+       register_dcbevent_notifier(&dcb_notifier);
        register_netdevice_notifier(&fcoe_notifier);
 }
 
@@ -1754,9 +1767,69 @@ static void fcoe_dev_setup(void)
  */
 static void fcoe_dev_cleanup(void)
 {
+       unregister_dcbevent_notifier(&dcb_notifier);
        unregister_netdevice_notifier(&fcoe_notifier);
 }
 
+static struct fcoe_interface *
+fcoe_hostlist_lookup_realdev_port(struct net_device *netdev)
+{
+       struct fcoe_interface *fcoe;
+       struct net_device *real_dev;
+
+       list_for_each_entry(fcoe, &fcoe_hostlist, list) {
+               if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+                       real_dev = vlan_dev_real_dev(fcoe->netdev);
+               else
+                       real_dev = fcoe->netdev;
+
+               if (netdev == real_dev)
+                       return fcoe;
+       }
+       return NULL;
+}
+
+static int fcoe_dcb_app_notification(struct notifier_block *notifier,
+                                    ulong event, void *ptr)
+{
+       struct dcb_app_type *entry = ptr;
+       struct fcoe_interface *fcoe;
+       struct net_device *netdev;
+       struct fcoe_port *port;
+       int prio;
+
+       if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE)
+               return NOTIFY_OK;
+
+       netdev = dev_get_by_index(&init_net, entry->ifindex);
+       if (!netdev)
+               return NOTIFY_OK;
+
+       fcoe = fcoe_hostlist_lookup_realdev_port(netdev);
+       dev_put(netdev);
+       if (!fcoe)
+               return NOTIFY_OK;
+
+       if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
+               prio = ffs(entry->app.priority) - 1;
+       else
+               prio = entry->app.priority;
+
+       if (prio < 0)
+               return NOTIFY_OK;
+
+       if (entry->app.protocol == ETH_P_FIP ||
+           entry->app.protocol == ETH_P_FCOE)
+               fcoe->ctlr.priority = prio;
+
+       if (entry->app.protocol == ETH_P_FCOE) {
+               port = lport_priv(fcoe->ctlr.lp);
+               port->priority = prio;
+       }
+
+       return NOTIFY_OK;
+}
+
 /**
  * fcoe_device_notification() - Handler for net device events
  * @notifier: The context of the notification
@@ -1964,6 +2037,46 @@ static bool fcoe_match(struct net_device *netdev)
        return true;
 }
 
+/**
+ * fcoe_dcb_create() - Initialize DCB attributes and hooks
+ * @fcoe: The fcoe_interface whose FCoE and FIP frame priorities are set up
+ */
+static void fcoe_dcb_create(struct fcoe_interface *fcoe)
+{
+#ifdef CONFIG_DCB
+       int dcbx;
+       u8 fup, up;
+       struct net_device *netdev = fcoe->realdev;
+       struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
+       struct dcb_app app = {
+                               .priority = 0,
+                               .protocol = ETH_P_FCOE
+                            };
+
+       /* setup DCB priority attributes. */
+       if (netdev && netdev->dcbnl_ops && netdev->dcbnl_ops->getdcbx) {
+               dcbx = netdev->dcbnl_ops->getdcbx(netdev);
+
+               if (dcbx & DCB_CAP_DCBX_VER_IEEE) {
+                       app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
+                       up = dcb_ieee_getapp_mask(netdev, &app);
+                       app.protocol = ETH_P_FIP;
+                       fup = dcb_ieee_getapp_mask(netdev, &app);
+               } else {
+                       app.selector = DCB_APP_IDTYPE_ETHTYPE;
+                       up = dcb_getapp(netdev, &app);
+                       app.protocol = ETH_P_FIP;
+                       fup = dcb_getapp(netdev, &app);
+               }
+
+               port->priority = ffs(up) ? ffs(up) - 1 : 0;
+               fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
+       }
+#endif
+}
+
 /**
  * fcoe_create() - Create a fcoe interface
  * @netdev  : The net_device object the Ethernet interface to create on
@@ -2007,6 +2120,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
        /* Make this the "master" N_Port */
        fcoe->ctlr.lp = lport;
 
+       /* setup DCB priority attributes. */
+       fcoe_dcb_create(fcoe);
+
        /* add to lports list */
        fcoe_hostlist_add(lport);
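
fcoe_dcb_create() above looks up the DCB APP priority negotiated for the FCoE and FIP ethertypes and stores it, so that fcoe_xmit() and the FIP controller can stamp skb->priority on outgoing frames. The core of the CEE-flavour lookup, as a sketch assuming netdev and port are in scope as in the function above (dcb_getapp() returns a priority bitmap, hence the ffs()):

        struct dcb_app app = {
                .selector = DCB_APP_IDTYPE_ETHTYPE,
                .protocol = ETH_P_FCOE,
        };
        u8 up = 0;

        if (netdev->dcbnl_ops && netdev->dcbnl_ops->getdcbx)
                up = dcb_getapp(netdev, &app);

        /* Lowest configured priority wins; fall back to 0 if none is set. */
        port->priority = ffs(up) ? ffs(up) - 1 : 0;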
 
index c74c4b8e71ef03c0c8a8111efe1152bb5586e26a..e7522dcc296eb8bb9c425da842f2a159d05862f2 100644 (file)
@@ -320,6 +320,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
 
        skb_put(skb, sizeof(*sol));
        skb->protocol = htons(ETH_P_FIP);
+       skb->priority = fip->priority;
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        fip->send(fip, skb);
@@ -474,6 +475,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
        }
        skb_put(skb, len);
        skb->protocol = htons(ETH_P_FIP);
+       skb->priority = fip->priority;
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        fip->send(fip, skb);
@@ -566,6 +568,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
        cap->fip.fip_dl_len = htons(dlen / FIP_BPW);
 
        skb->protocol = htons(ETH_P_FIP);
+       skb->priority = fip->priority;
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        return 0;
@@ -1911,6 +1914,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,
 
        skb_put(skb, len);
        skb->protocol = htons(ETH_P_FIP);
+       skb->priority = fip->priority;
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
 
index 4e041f6d808cd6e49666b35c8d8aec0f1b0a76c5..d570573b7963ec47179d15ec7ac613221f0ce4bc 100644 (file)
@@ -4335,7 +4335,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
        /* insert into event log */
        sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
             sizeof(Mpi2EventDataSasDeviceStatusChange_t);
-       event_reply = kzalloc(sz, GFP_KERNEL);
+       event_reply = kzalloc(sz, GFP_ATOMIC);
        if (!event_reply) {
                printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
                    ioc->name, __FILE__, __LINE__, __func__);
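
The mpt2sas hunk swaps GFP_KERNEL for GFP_ATOMIC in _scsih_smart_predicted_fault(); GFP_KERNEL may sleep while reclaiming memory, which is not permitted if this path can run in atomic context (the likely motivation here, though the commit message is not part of this diff). The rule of thumb, as a sketch:

        /* In atomic context (IRQ handler, softirq, or with a spinlock held),
         * GFP_KERNEL is not allowed because it may sleep; GFP_ATOMIC never
         * sleeps but fails more readily, so the result must be checked. */
        event_reply = kzalloc(sz, GFP_ATOMIC);
        if (!event_reply)
                return;         /* degrade gracefully on allocation failure */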
index ac326c41e931dcba508c6357708be557986b8d88..6465dae5883a9dcb44ddde60b9c5f6f98fafd89b 100644 (file)
@@ -1762,12 +1762,31 @@ qla2x00_get_host_port_state(struct Scsi_Host *shost)
        scsi_qla_host_t *vha = shost_priv(shost);
        struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
 
-       if (!base_vha->flags.online)
+       if (!base_vha->flags.online) {
                fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
-       else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
-               fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
-       else
+               return;
+       }
+
+       switch (atomic_read(&base_vha->loop_state)) {
+       case LOOP_UPDATE:
+               fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+               break;
+       case LOOP_DOWN:
+               if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
+                       fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
+               else
+                       fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+               break;
+       case LOOP_DEAD:
+               fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+               break;
+       case LOOP_READY:
                fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+               break;
+       default:
+               fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+               break;
+       }
 }
 
 static int
index 9df4787715c0828df3dadd48cb149550600919e5..f3cddd5800c307e219bb8a8c6a84db569117df03 100644 (file)
  * |             Level            |   Last Value Used  |     Holes     |
  * ----------------------------------------------------------------------
  * | Module Init and Probe        |       0x0116       |               |
- * | Mailbox commands             |       0x1129       |               |
+ * | Mailbox commands             |       0x112b       |               |
  * | Device Discovery             |       0x2083       |               |
  * | Queue Command and IO tracing |       0x302e       |     0x3008     |
  * | DPC Thread                   |       0x401c       |               |
  * | Async Events                 |       0x5059       |               |
- * | Timer Routines               |       0x600d       |               |
+ * | Timer Routines               |       0x6010       | 0x600e,0x600f  |
  * | User Space Interactions      |       0x709d       |               |
- * | Task Management              |       0x8041       |               |
+ * | Task Management              |       0x8041       | 0x800b         |
  * | AER/EEH                      |       0x900f       |               |
  * | Virtual Port                 |       0xa007       |               |
- * | ISP82XX Specific             |       0xb051       |               |
+ * | ISP82XX Specific             |       0xb052       |               |
  * | MultiQ                       |       0xc00b       |               |
  * | Misc                         |       0xd00b       |               |
  * ----------------------------------------------------------------------
index ce32d8135c9e36335f53722664eab61db46a0f18..c0c11afb685c450e473faaaf711b00d8ec0d7d82 100644 (file)
@@ -578,6 +578,7 @@ extern int qla82xx_check_md_needed(scsi_qla_host_t *);
 extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
 extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
 extern char *qdev_state(uint32_t);
+extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
 
 /* BSG related functions */
 extern int qla24xx_bsg_request(struct fc_bsg_job *);
index f03e915f187729f6b208a0e89398b2a0b8642214..54ea68cec4c58c3cd0ee80c48bfaeb3f96e637d4 100644 (file)
@@ -1509,7 +1509,8 @@ enable_82xx_npiv:
                                    &ha->fw_xcb_count, NULL, NULL,
                                    &ha->max_npiv_vports, NULL);
 
-                               if (!fw_major_version && ql2xallocfwdump)
+                               if (!fw_major_version && ql2xallocfwdump
+                                   && !IS_QLA82XX(ha))
                                        qla2x00_alloc_fw_dump(vha);
                        }
                } else {
index dbec89622a0fa09d77ae6ad6c45e7df9237da3e0..a4b267e60a352b7cb72f620bd099ef19e12ddd13 100644 (file)
@@ -120,11 +120,10 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
  * Returns a pointer to the continuation type 1 IOCB packet.
  */
 static inline cont_a64_entry_t *
-qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
+qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
 {
        cont_a64_entry_t *cont_pkt;
 
-       struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
@@ -292,7 +291,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
-                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }
@@ -684,7 +683,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
-                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }
@@ -2070,7 +2069,8 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
                        * Five DSDs are available in the Cont.
                        * Type 1 IOCB.
                               */
-                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+                           vha->hw->req_q_map[0]);
                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                        cont_iocb_prsnt = 1;
@@ -2096,6 +2096,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
        int index;
        uint16_t tot_dsds;
         scsi_qla_host_t *vha = sp->fcport->vha;
+       struct qla_hw_data *ha = vha->hw;
        struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
        int loop_iterartion = 0;
        int cont_iocb_prsnt = 0;
@@ -2141,7 +2142,8 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
                        * Five DSDs are available in the Cont.
                        * Type 1 IOCB.
                               */
-                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
+                           ha->req_q_map[0]);
                        cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                        cont_iocb_prsnt = 1;
index 2516adf1aeeaa946372ac137da595d37466418c7..7b91b290ffd6bd23c1b137a7a3c73074f3f1580a 100644 (file)
@@ -1741,7 +1741,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                                    resid, scsi_bufflen(cp));
 
                                cp->result = DID_ERROR << 16 | lscsi_status;
-                               break;
+                               goto check_scsi_status;
                        }
 
                        if (!lscsi_status &&
index 3b3cec9f6ac295dab131050b8a92d47eadb06999..82a33533ed26c25dc87e90cfe1a35f95c33ccb44 100644 (file)
@@ -79,8 +79,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                mcp->mb[0] = MBS_LINK_DOWN_ERROR;
                ql_log(ql_log_warn, base_vha, 0x1004,
                    "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
-               rval = QLA_FUNCTION_FAILED;
-               goto premature_exit;
+               return QLA_FUNCTION_TIMEOUT;
        }
 
        /*
@@ -163,6 +162,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                                HINT_MBX_INT_PENDING) {
                                spin_unlock_irqrestore(&ha->hardware_lock,
                                        flags);
+                               ha->flags.mbox_busy = 0;
                                ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
                                    "Pending mailbox timeout, exiting.\n");
                                rval = QLA_FUNCTION_TIMEOUT;
@@ -188,6 +188,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                                HINT_MBX_INT_PENDING) {
                                spin_unlock_irqrestore(&ha->hardware_lock,
                                        flags);
+                               ha->flags.mbox_busy = 0;
                                ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
                                    "Pending mailbox timeout, exiting.\n");
                                rval = QLA_FUNCTION_TIMEOUT;
@@ -302,7 +303,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                        if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
                            !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
                            !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+                               if (IS_QLA82XX(ha)) {
+                                       ql_dbg(ql_dbg_mbx, vha, 0x112a,
+                                           "disabling pause transmit on port "
+                                           "0 & 1.\n");
+                                       qla82xx_wr_32(ha,
+                                           QLA82XX_CRB_NIU + 0x98,
+                                           CRB_NIU_XG_PAUSE_CTL_P0|
+                                           CRB_NIU_XG_PAUSE_CTL_P1);
+                               }
                                ql_log(ql_log_info, base_vha, 0x101c,
                                    "Mailbox cmd timeout occurred. "
                                    "Scheduling ISP abort eeh_busy=0x%x.\n",
@@ -318,7 +327,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                        if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
                            !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
                            !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
-
+                               if (IS_QLA82XX(ha)) {
+                                       ql_dbg(ql_dbg_mbx, vha, 0x112b,
+                                           "disabling pause transmit on port "
+                                           "0 & 1.\n");
+                                       qla82xx_wr_32(ha,
+                                           QLA82XX_CRB_NIU + 0x98,
+                                           CRB_NIU_XG_PAUSE_CTL_P0|
+                                           CRB_NIU_XG_PAUSE_CTL_P1);
+                               }
                                ql_log(ql_log_info, base_vha, 0x101e,
                                    "Mailbox cmd timeout occurred. "
                                    "Scheduling ISP abort.\n");
index 94bded5ddce4fe2f958dcdb8387c471eeaf790cf..03554934b0a58629848117649737a6fbabc15713 100644 (file)
@@ -3817,6 +3817,20 @@ exit:
        return rval;
 }
 
+void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
+{
+       struct qla_hw_data *ha = vha->hw;
+
+       if (ha->flags.mbox_busy) {
+               ha->flags.mbox_int = 1;
+               ha->flags.mbox_busy = 0;
+               ql_log(ql_log_warn, vha, 0x6010,
+                   "Doing premature completion of mbx command.\n");
+               if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
+                       complete(&ha->mbx_intr_comp);
+       }
+}
+
 void qla82xx_watchdog(scsi_qla_host_t *vha)
 {
        uint32_t dev_state, halt_status;
@@ -3839,9 +3853,13 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
                        qla2xxx_wake_dpc(vha);
                } else {
                        if (qla82xx_check_fw_alive(vha)) {
+                               ql_dbg(ql_dbg_timer, vha, 0x6011,
+                                   "disabling pause transmit on port 0 & 1.\n");
+                               qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+                                   CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1);
                                halt_status = qla82xx_rd_32(ha,
                                    QLA82XX_PEG_HALT_STATUS1);
-                               ql_dbg(ql_dbg_timer, vha, 0x6005,
+                               ql_log(ql_log_info, vha, 0x6005,
                                    "dumping hw/fw registers:.\n "
                                    " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
                                    " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
@@ -3858,6 +3876,11 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
                                            QLA82XX_CRB_PEG_NET_3 + 0x3c),
                                    qla82xx_rd_32(ha,
                                            QLA82XX_CRB_PEG_NET_4 + 0x3c));
+                               if (LSW(MSB(halt_status)) == 0x67)
+                                       ql_log(ql_log_warn, vha, 0xb052,
+                                           "Firmware aborted with "
+                                           "error code 0x00006700. Device is "
+                                           "being reset.\n");
                                if (halt_status & HALT_STATUS_UNRECOVERABLE) {
                                        set_bit(ISP_UNRECOVERABLE,
                                            &vha->dpc_flags);
@@ -3869,16 +3892,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
                                }
                                qla2xxx_wake_dpc(vha);
                                ha->flags.isp82xx_fw_hung = 1;
-                               if (ha->flags.mbox_busy) {
-                                       ha->flags.mbox_int = 1;
-                                       ql_log(ql_log_warn, vha, 0x6007,
-                                           "Due to FW hung, doing "
-                                           "premature completion of mbx "
-                                           "command.\n");
-                                       if (test_bit(MBX_INTR_WAIT,
-                                           &ha->mbx_cmd_flags))
-                                               complete(&ha->mbx_intr_comp);
-                               }
+                               ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
+                               qla82xx_clear_pending_mbx(vha);
                        }
                }
        }
@@ -4073,10 +4088,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
                        msleep(1000);
                        if (qla82xx_check_fw_alive(vha)) {
                                ha->flags.isp82xx_fw_hung = 1;
-                               if (ha->flags.mbox_busy) {
-                                       ha->flags.mbox_int = 1;
-                                       complete(&ha->mbx_intr_comp);
-                               }
+                               qla82xx_clear_pending_mbx(vha);
                                break;
                        }
                }
index 57820c199bc225858b836feb1d38b457dce14430..57a226be339aa2fe438c6a40a5674062552b006c 100644 (file)
@@ -1173,4 +1173,8 @@ struct qla82xx_md_entry_queue {
 
 static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
        0x410000B8, 0x410000BC };
+
+#define CRB_NIU_XG_PAUSE_CTL_P0        0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1        0x8
+
 #endif
index fd14c7bfc62665f698d9950210dfa3b2e1cf1f42..f9e5b85e84d83e6d147eebca647e2e986358cc0c 100644 (file)
@@ -201,12 +201,12 @@ MODULE_PARM_DESC(ql2xmdcapmask,
                "Set the Minidump driver capture mask level. "
                "Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
 
-int ql2xmdenable;
+int ql2xmdenable = 1;
 module_param(ql2xmdenable, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xmdenable,
                "Enable/disable MiniDump. "
-               "0 (Default) - MiniDump disabled. "
-               "1 - MiniDump enabled.");
+               "0 - MiniDump disabled. "
+               "1 (Default) - MiniDump enabled.");
 
 /*
  * SCSI host template entry points
@@ -423,6 +423,7 @@ fail2:
        qla25xx_delete_queues(vha);
        destroy_workqueue(ha->wq);
        ha->wq = NULL;
+       vha->req = ha->req_q_map[0];
 fail:
        ha->mqenable = 0;
        kfree(ha->req_q_map);
@@ -814,49 +815,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
        return return_status;
 }
 
-/*
- * qla2x00_wait_for_loop_ready
- *    Wait for MAX_LOOP_TIMEOUT(5 min) value for loop
- *    to be in LOOP_READY state.
- * Input:
- *     ha - pointer to host adapter structure
- *
- * Note:
- *    Does context switching-Release SPIN_LOCK
- *    (if any) before calling this routine.
- *
- *
- * Return:
- *    Success (LOOP_READY) : 0
- *    Failed  (LOOP_NOT_READY) : 1
- */
-static inline int
-qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
-{
-       int      return_status = QLA_SUCCESS;
-       unsigned long loop_timeout ;
-       struct qla_hw_data *ha = vha->hw;
-       scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
-
-       /* wait for 5 min at the max for loop to be ready */
-       loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);
-
-       while ((!atomic_read(&base_vha->loop_down_timer) &&
-           atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
-           atomic_read(&base_vha->loop_state) != LOOP_READY) {
-               if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
-                       return_status = QLA_FUNCTION_FAILED;
-                       break;
-               }
-               msleep(1000);
-               if (time_after_eq(jiffies, loop_timeout)) {
-                       return_status = QLA_FUNCTION_FAILED;
-                       break;
-               }
-       }
-       return (return_status);
-}
-
 static void
 sp_get(struct srb *sp)
 {
@@ -1035,12 +993,6 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
                    "Wait for hba online failed for cmd=%p.\n", cmd);
                goto eh_reset_failed;
        }
-       err = 1;
-       if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
-               ql_log(ql_log_warn, vha, 0x800b,
-                   "Wait for loop ready failed for cmd=%p.\n", cmd);
-               goto eh_reset_failed;
-       }
        err = 2;
        if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
                != QLA_SUCCESS) {
@@ -1137,10 +1089,9 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
                goto eh_bus_reset_done;
        }
 
-       if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
-               if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
-                       ret = SUCCESS;
-       }
+       if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
+               ret = SUCCESS;
+
        if (ret == FAILED)
                goto eh_bus_reset_done;
 
@@ -1206,15 +1157,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
        if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
                goto eh_host_reset_lock;
 
-       /*
-        * Fixme-may be dpc thread is active and processing
-        * loop_resync,so wait a while for it to
-        * be completed and then issue big hammer.Otherwise
-        * it may cause I/O failure as big hammer marks the
-        * devices as lost kicking of the port_down_timer
-        * while dpc is stuck for the mailbox to complete.
-        */
-       qla2x00_wait_for_loop_ready(vha);
        if (vha != base_vha) {
                if (qla2x00_vp_abort_isp(vha))
                        goto eh_host_reset_lock;
@@ -1297,16 +1239,13 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
                atomic_set(&vha->loop_state, LOOP_DOWN);
                atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
                qla2x00_mark_all_devices_lost(vha, 0);
-               qla2x00_wait_for_loop_ready(vha);
        }
 
        if (ha->flags.enable_lip_reset) {
                ret = qla2x00_lip_reset(vha);
-               if (ret != QLA_SUCCESS) {
+               if (ret != QLA_SUCCESS)
                        ql_dbg(ql_dbg_taskm, vha, 0x802e,
                            "lip_reset failed (%d).\n", ret);
-               } else
-                       qla2x00_wait_for_loop_ready(vha);
        }
 
        /* Issue marker command only when we are going to start the I/O */
@@ -4070,13 +4009,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
                /* For ISP82XX complete any pending mailbox cmd */
                if (IS_QLA82XX(ha)) {
                        ha->flags.isp82xx_fw_hung = 1;
-                       if (ha->flags.mbox_busy) {
-                               ha->flags.mbox_int = 1;
-                               ql_dbg(ql_dbg_aer, vha, 0x9001,
-                                   "Due to pci channel io frozen, doing premature "
-                                   "completion of mbx command.\n");
-                               complete(&ha->mbx_intr_comp);
-                       }
+                       ql_dbg(ql_dbg_aer, vha, 0x9001, "Pci channel io frozen\n");
+                       qla82xx_clear_pending_mbx(vha);
                }
                qla2x00_free_irqs(vha);
                pci_disable_device(pdev);
index 13b6357c1fa2ae297c811b7588b3fb1172f4dd3f..23f33a6d52d7278b5cf5d1d42a5bf44330aba183 100644 (file)
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.03.07.07-k"
+#define QLA2XXX_VERSION      "8.03.07.12-k"
 
 #define QLA_DRIVER_MAJOR_VER   8
 #define QLA_DRIVER_MINOR_VER   3
index ace637bf254e1ca2b8c9d8be7490f41027945044..fd5edc6e166dec140854e737912fec75a4eebf20 100644 (file)
 #define ISCSI_ALIAS_SIZE               32      /* ISCSI Alias name size */
 #define ISCSI_NAME_SIZE                        0xE0    /* ISCSI Name size */
 
-#define QL4_SESS_RECOVERY_TMO          30      /* iSCSI session */
+#define QL4_SESS_RECOVERY_TMO          120     /* iSCSI session */
                                                /* recovery timeout */
 
 #define LSDW(x) ((u32)((u64)(x)))
 #define ISNS_DEREG_TOV                 5
 #define HBA_ONLINE_TOV                 30
 #define DISABLE_ACB_TOV                        30
+#define IP_CONFIG_TOV                  30
+#define LOGIN_TOV                      12
 
 #define MAX_RESET_HA_RETRIES           2
 
@@ -240,6 +242,45 @@ struct ddb_entry {
 
        uint16_t fw_ddb_index;  /* DDB firmware index */
        uint32_t fw_ddb_device_state; /* F/W Device State  -- see ql4_fw.h */
+       uint16_t ddb_type;
+#define FLASH_DDB 0x01
+
+       struct dev_db_entry fw_ddb_entry;
+       int (*unblock_sess)(struct iscsi_cls_session *cls_session);
+       int (*ddb_change)(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+                         struct ddb_entry *ddb_entry, uint32_t state);
+
+       /* Driver Re-login  */
+       unsigned long flags;              /* DDB Flags */
+       uint16_t default_relogin_timeout; /*  Max time to wait for
+                                          *  relogin to complete */
+       atomic_t retry_relogin_timer;     /* Min Time between relogins
+                                          * (4000 only) */
+       atomic_t relogin_timer;           /* Max Time to wait for
+                                          * relogin to complete */
+       atomic_t relogin_retry_count;     /* Num of times relogin has been
+                                          * retried */
+       uint32_t default_time2wait;       /* Default Min time between
+                                          * relogins (+aens) */
+
+};
+
+struct qla_ddb_index {
+       struct list_head list;
+       uint16_t fw_ddb_idx;
+       struct dev_db_entry fw_ddb;
+};
+
+#define DDB_IPADDR_LEN 64
+
+struct ql4_tuple_ddb {
+       int port;
+       int tpgt;
+       char ip_addr[DDB_IPADDR_LEN];
+       char iscsi_name[ISCSI_NAME_SIZE];
+       uint16_t options;
+#define DDB_OPT_IPV6 0x0e0e
+#define DDB_OPT_IPV4 0x0f0f
 };
 
 /*
@@ -411,7 +452,7 @@ struct scsi_qla_host {
 #define AF_FW_RECOVERY                 19 /* 0x00080000 */
 #define AF_EEH_BUSY                    20 /* 0x00100000 */
 #define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
-
+#define AF_BUILD_DDB_LIST              22 /* 0x00400000 */
        unsigned long dpc_flags;
 
 #define DPC_RESET_HA                   1 /* 0x00000002 */
@@ -604,6 +645,7 @@ struct scsi_qla_host {
        uint16_t bootload_minor;
        uint16_t bootload_patch;
        uint16_t bootload_build;
+       uint16_t def_timeout; /* Default login timeout */
 
        uint32_t flash_state;
 #define        QLFLASH_WAITING         0
@@ -623,6 +665,11 @@ struct scsi_qla_host {
        uint16_t iscsi_pci_func_cnt;
        uint8_t model_name[16];
        struct completion disable_acb_comp;
+       struct dma_pool *fw_ddb_dma_pool;
+#define DDB_DMA_BLOCK_SIZE 512
+       uint16_t pri_ddb_idx;
+       uint16_t sec_ddb_idx;
+       int is_reset;
 };
 
 struct ql4_task_data {
@@ -835,6 +882,10 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
 /*---------------------------------------------------------------------------*/
 
 /* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
+
+#define INIT_ADAPTER    0
+#define RESET_ADAPTER   1
+
 #define PRESERVE_DDB_LIST      0
 #define REBUILD_DDB_LIST       1
 
index cbd5a20dbbd150c7a1fbe4840b7a547a1097dc2a..4ac07f882521307ef0f018806408e4dded971499 100644 (file)
@@ -12,6 +12,7 @@
 #define MAX_PRST_DEV_DB_ENTRIES                64
 #define MIN_DISC_DEV_DB_ENTRY          MAX_PRST_DEV_DB_ENTRIES
 #define MAX_DEV_DB_ENTRIES             512
+#define MAX_DEV_DB_ENTRIES_40XX                256
 
 /*************************************************************************
  *
@@ -604,6 +605,13 @@ struct addr_ctrl_blk {
        uint8_t res14[140];     /* 274-2FF */
 };
 
+#define IP_ADDR_COUNT  4 /* Total of 4 IP addresses supported on one interface:
+                          * one IPv4, one IPv6 link-local and two IPv6
+                          */
+
+#define IP_STATE_MASK  0x0F000000
+#define IP_STATE_SHIFT 24
+
 struct init_fw_ctrl_blk {
        struct addr_ctrl_blk pri;
 /*     struct addr_ctrl_blk sec;*/
index 160db9d5ea2101e8ccb2ef4772c81429747a4b0d..d0dd4b33020643dd5bad1cf7d3c30a1d5d02575c 100644 (file)
@@ -13,7 +13,7 @@ struct iscsi_cls_conn;
 int qla4xxx_hw_reset(struct scsi_qla_host *ha);
 int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
 int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha);
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset);
 int qla4xxx_soft_reset(struct scsi_qla_host *ha);
 irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
 
@@ -153,10 +153,13 @@ int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
                          uint32_t *mbx_sts);
 int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index);
 int qla4xxx_send_passthru0(struct iscsi_task *task);
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha);
 int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
                          uint16_t stats_size, dma_addr_t stats_dma);
 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
                                       struct ddb_entry *ddb_entry);
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+                                            struct ddb_entry *ddb_entry);
 int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
                            struct dev_db_entry *fw_ddb_entry,
                            dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
@@ -169,11 +172,22 @@ int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
 int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
                                     uint32_t region, uint32_t field0,
                                     uint32_t field1);
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index);
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session);
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+                            struct ddb_entry *ddb_entry, uint32_t state);
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+                      struct ddb_entry *ddb_entry, uint32_t state);
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);
 
 /* BSG Functions */
 int qla4xxx_bsg_request(struct bsg_job *bsg_job);
 int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
 
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
+
 extern int ql4xextended_error_logging;
 extern int ql4xdontresethba;
 extern int ql4xenablemsix;
index 3075fbaef5533d6574cedce72f9c24b722696afa..1bdfa8120ac888c65c304c28dc3f3aba806ea403 100644 (file)
@@ -773,22 +773,24 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
  * be freed so that when login happens from user space there are free DDB
  * indices available.
  **/
-static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
+void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
 {
        int max_ddbs;
        int ret;
        uint32_t idx = 0, next_idx = 0;
        uint32_t state = 0, conn_err = 0;
 
-       max_ddbs =  is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
                                     MAX_DEV_DB_ENTRIES;
 
        for (idx = 0; idx < max_ddbs; idx = next_idx) {
                ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
                                              &next_idx, &state, &conn_err,
                                                NULL, NULL);
-               if (ret == QLA_ERROR)
+               if (ret == QLA_ERROR) {
+                       next_idx++;
                        continue;
+               }
                if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
                    state == DDB_DS_SESSION_FAILED) {
                        DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -804,7 +806,6 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
        }
 }
 
-
 /**
  * qla4xxx_initialize_adapter - initializes hba
  * @ha: Pointer to host adapter structure.
@@ -812,7 +813,7 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
  * This routine performs all of the steps necessary to initialize the adapter.
  *
  **/
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
 {
        int status = QLA_ERROR;
 
@@ -840,7 +841,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
        if (status == QLA_ERROR)
                goto exit_init_hba;
 
-       qla4xxx_free_ddb_index(ha);
+       if (is_reset == RESET_ADAPTER)
+               qla4xxx_build_ddb_list(ha, is_reset);
 
        set_bit(AF_ONLINE, &ha->flags);
 exit_init_hba:
@@ -855,38 +857,12 @@ exit_init_hba:
        return status;
 }
 
-/**
- * qla4xxx_process_ddb_changed - process ddb state change
- * @ha - Pointer to host adapter structure.
- * @fw_ddb_index - Firmware's device database index
- * @state - Device state
- *
- * This routine processes a Decive Database Changed AEN Event.
- **/
-int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
-               uint32_t state, uint32_t conn_err)
+int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+                      struct ddb_entry *ddb_entry, uint32_t state)
 {
-       struct ddb_entry * ddb_entry;
        uint32_t old_fw_ddb_device_state;
        int status = QLA_ERROR;
 
-       /* check for out of range index */
-       if (fw_ddb_index >= MAX_DDB_ENTRIES)
-               goto exit_ddb_event;
-
-       /* Get the corresponging ddb entry */
-       ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
-       /* Device does not currently exist in our database. */
-       if (ddb_entry == NULL) {
-               ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
-                          __func__, fw_ddb_index);
-
-               if (state == DDB_DS_NO_CONNECTION_ACTIVE)
-                       clear_bit(fw_ddb_index, ha->ddb_idx_map);
-
-               goto exit_ddb_event;
-       }
-
        old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
        DEBUG2(ql4_printk(KERN_INFO, ha,
                          "%s: DDB - old state = 0x%x, new state = 0x%x for "
@@ -900,9 +876,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
                switch (state) {
                case DDB_DS_SESSION_ACTIVE:
                case DDB_DS_DISCOVERY:
-                       iscsi_conn_start(ddb_entry->conn);
-                       iscsi_conn_login_event(ddb_entry->conn,
-                                              ISCSI_CONN_STATE_LOGGED_IN);
+                       ddb_entry->unblock_sess(ddb_entry->sess);
                        qla4xxx_update_session_conn_param(ha, ddb_entry);
                        status = QLA_SUCCESS;
                        break;
@@ -936,9 +910,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
                switch (state) {
                case DDB_DS_SESSION_ACTIVE:
                case DDB_DS_DISCOVERY:
-                       iscsi_conn_start(ddb_entry->conn);
-                       iscsi_conn_login_event(ddb_entry->conn,
-                                              ISCSI_CONN_STATE_LOGGED_IN);
+                       ddb_entry->unblock_sess(ddb_entry->sess);
                        qla4xxx_update_session_conn_param(ha, ddb_entry);
                        status = QLA_SUCCESS;
                        break;
@@ -954,7 +926,198 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
                                __func__));
                break;
        }
+       return status;
+}
+
+void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry)
+{
+       /*
+        * This triggers a relogin.  After the relogin_timer
+        * expires, the relogin gets scheduled.  We must wait a
+        * minimum amount of time since receiving an 0x8014 AEN
+        * with failed device_state or a logout response before
+        * we can issue another relogin.
+        *
+        * Firmware pads this timeout: (time2wait +1).
+        * Firmware pads this timeout: (time2wait + 1).
+        * The driver's relogin retry should be longer than the
+        * firmware's; otherwise the firmware will fail the
+        * set_ddb() mbx cmd with 0x4005 since it is still
+        * counting down its time2wait.
+       atomic_set(&ddb_entry->relogin_timer, 0);
+       atomic_set(&ddb_entry->retry_relogin_timer,
+                  ddb_entry->default_time2wait + 4);
+
+}
+
+int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+                            struct ddb_entry *ddb_entry, uint32_t state)
+{
+       uint32_t old_fw_ddb_device_state;
+       int status = QLA_ERROR;
+
+       old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "%s: DDB - old state = 0x%x, new state = 0x%x for "
+                         "index [%d]\n", __func__,
+                         ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
+
+       ddb_entry->fw_ddb_device_state = state;
+
+       switch (old_fw_ddb_device_state) {
+       case DDB_DS_LOGIN_IN_PROCESS:
+       case DDB_DS_NO_CONNECTION_ACTIVE:
+               switch (state) {
+               case DDB_DS_SESSION_ACTIVE:
+                       ddb_entry->unblock_sess(ddb_entry->sess);
+                       qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+                       status = QLA_SUCCESS;
+                       break;
+               case DDB_DS_SESSION_FAILED:
+                       iscsi_block_session(ddb_entry->sess);
+                       if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+                               qla4xxx_arm_relogin_timer(ddb_entry);
+                       status = QLA_SUCCESS;
+                       break;
+               }
+               break;
+       case DDB_DS_SESSION_ACTIVE:
+               switch (state) {
+               case DDB_DS_SESSION_FAILED:
+                       iscsi_block_session(ddb_entry->sess);
+                       if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+                               qla4xxx_arm_relogin_timer(ddb_entry);
+                       status = QLA_SUCCESS;
+                       break;
+               }
+               break;
+       case DDB_DS_SESSION_FAILED:
+               switch (state) {
+               case DDB_DS_SESSION_ACTIVE:
+                       ddb_entry->unblock_sess(ddb_entry->sess);
+                       qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
+                       status = QLA_SUCCESS;
+                       break;
+               case DDB_DS_SESSION_FAILED:
+                       if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+                               qla4xxx_arm_relogin_timer(ddb_entry);
+                       status = QLA_SUCCESS;
+                       break;
+               }
+               break;
+       default:
+               DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
+                                 __func__));
+               break;
+       }
+       return status;
+}
+
+/**
+ * qla4xxx_process_ddb_changed - process ddb state change
+ * @ha - Pointer to host adapter structure.
+ * @fw_ddb_index - Firmware's device database index
+ * @state - Device state
+ *
+ * This routine processes a Device Database Changed AEN Event.
+ **/
+int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
+                               uint32_t fw_ddb_index,
+                               uint32_t state, uint32_t conn_err)
+{
+       struct ddb_entry *ddb_entry;
+       int status = QLA_ERROR;
+
+       /* check for out of range index */
+       if (fw_ddb_index >= MAX_DDB_ENTRIES)
+               goto exit_ddb_event;
+
+       /* Get the corresponding ddb entry */
+       ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
+       /* Device does not currently exist in our database. */
+       if (ddb_entry == NULL) {
+               ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
+                          __func__, fw_ddb_index);
+
+               if (state == DDB_DS_NO_CONNECTION_ACTIVE)
+                       clear_bit(fw_ddb_index, ha->ddb_idx_map);
+
+               goto exit_ddb_event;
+       }
+
+       ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state);
 
 exit_ddb_event:
        return status;
 }
+
+/**
+ * qla4xxx_login_flash_ddb - Login to target (DDB)
+ * @cls_session: Pointer to the session to login
+ *
+ * This routine logs in to the target.
+ * Issues the set ddb and conn open mbx commands.
+ **/
+void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+       struct dev_db_entry *fw_ddb_entry = NULL;
+       dma_addr_t fw_ddb_dma;
+       uint32_t mbx_sts = 0;
+       int ret;
+
+       sess = cls_session->dd_data;
+       ddb_entry = sess->dd_data;
+       ha =  ddb_entry->ha;
+
+       if (!test_bit(AF_LINK_UP, &ha->flags))
+               return;
+
+       if (ddb_entry->ddb_type != FLASH_DDB) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "Skipping login to non-FLASH DDB"));
+               goto exit_login;
+       }
+
+       fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+                                     &fw_ddb_dma);
+       if (fw_ddb_entry == NULL) {
+               DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+               goto exit_login;
+       }
+
+       if (ddb_entry->fw_ddb_index == INVALID_ENTRY) {
+               ret = qla4xxx_get_ddb_index(ha, &ddb_entry->fw_ddb_index);
+               if (ret == QLA_ERROR)
+                       goto exit_login;
+
+               ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
+               ha->tot_ddbs++;
+       }
+
+       memcpy(fw_ddb_entry, &ddb_entry->fw_ddb_entry,
+              sizeof(struct dev_db_entry));
+       ddb_entry->sess->target_id = ddb_entry->fw_ddb_index;
+
+       ret = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
+                                   fw_ddb_dma, &mbx_sts);
+       if (ret == QLA_ERROR) {
+               DEBUG2(ql4_printk(KERN_ERR, ha, "Set DDB failed\n"));
+               goto exit_login;
+       }
+
+       ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
+       ret = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
+       if (ret == QLA_ERROR) {
+               ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
+                          sess->targetname);
+               goto exit_login;
+       }
+
+exit_login:
+       if (fw_ddb_entry)
+               dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
index 4c2b84870392e16f1ed03428aa7ed9c004c9cbd9..c2593782fbbef8c203148b1661c92a3e1dbe35f6 100644 (file)
@@ -41,6 +41,16 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
                return status;
        }
 
+       if (is_qla40XX(ha)) {
+               if (test_bit(AF_HA_REMOVAL, &ha->flags)) {
+                       DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
+                                         "prematurely completing mbx cmd as "
+                                         "adapter removal detected\n",
+                                         ha->host_no, __func__));
+                       return status;
+               }
+       }
+
        if (is_qla8022(ha)) {
                if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
                        DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
@@ -413,6 +423,7 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
        memcpy(ha->name_string, init_fw_cb->iscsi_name,
                min(sizeof(ha->name_string),
                sizeof(init_fw_cb->iscsi_name)));
+       ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
        /*memcpy(ha->alias, init_fw_cb->Alias,
               min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
 
index 30f31b127f33750dd384770cb5b1ba1b3e33807d..4169c8baa112a41266b4a05335290f7b1fdf90fd 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/blkdev.h>
 #include <linux/iscsi_boot_sysfs.h>
+#include <linux/inet.h>
 
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsicam.h>
@@ -31,6 +32,13 @@ static struct kmem_cache *srb_cachep;
 /*
  * Module parameter information and variables
  */
+int ql4xdisablesysfsboot = 1;
+module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql4xdisablesysfsboot,
+               "Set to disable exporting boot targets to sysfs\n"
+               " 0 - Export boot targets\n"
+               " 1 - Do not export boot targets (Default)");
+
 int ql4xdontresethba = 0;
 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql4xdontresethba,
@@ -63,7 +71,7 @@ static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
                "Target Session Recovery Timeout.\n"
-               " Default: 30 sec.");
+               " Default: 120 sec.");
 
 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
 /*
@@ -415,7 +423,7 @@ static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
        qla_ep = ep->dd_data;
        ha = to_qla_host(qla_ep->host);
 
-       if (adapter_up(ha))
+       if (adapter_up(ha) && !test_bit(AF_BUILD_DDB_LIST, &ha->flags))
                ret = 1;
 
        return ret;
@@ -975,6 +983,150 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
 
 }
 
+int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
+{
+       uint32_t mbx_sts = 0;
+       uint16_t tmp_ddb_index;
+       int ret;
+
+get_ddb_index:
+       tmp_ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
+
+       if (tmp_ddb_index >= MAX_DDB_ENTRIES) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "Free DDB index not available\n"));
+               ret = QLA_ERROR;
+               goto exit_get_ddb_index;
+       }
+
+       if (test_and_set_bit(tmp_ddb_index, ha->ddb_idx_map))
+               goto get_ddb_index;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Found a free DDB index at %d\n", tmp_ddb_index));
+       ret = qla4xxx_req_ddb_entry(ha, tmp_ddb_index, &mbx_sts);
+       if (ret == QLA_ERROR) {
+               if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
+                       ql4_printk(KERN_INFO, ha,
+                                  "DDB index = %d not available trying next\n",
+                                  tmp_ddb_index);
+                       goto get_ddb_index;
+               }
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "Free FW DDB not available\n"));
+       }
+
+       *ddb_index = tmp_ddb_index;
+
+exit_get_ddb_index:
+       return ret;
+}
+
+static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha,
+                                  struct ddb_entry *ddb_entry,
+                                  char *existing_ipaddr,
+                                  char *user_ipaddr)
+{
+       uint8_t dst_ipaddr[IPv6_ADDR_LEN];
+       char formatted_ipaddr[DDB_IPADDR_LEN];
+       int status = QLA_SUCCESS, ret = 0;
+
+       if (ddb_entry->fw_ddb_entry.options & DDB_OPT_IPV6_DEVICE) {
+               ret = in6_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+                              '\0', NULL);
+               if (ret == 0) {
+                       status = QLA_ERROR;
+                       goto out_match;
+               }
+               ret = sprintf(formatted_ipaddr, "%pI6", dst_ipaddr);
+       } else {
+               ret = in4_pton(user_ipaddr, strlen(user_ipaddr), dst_ipaddr,
+                              '\0', NULL);
+               if (ret == 0) {
+                       status = QLA_ERROR;
+                       goto out_match;
+               }
+               ret = sprintf(formatted_ipaddr, "%pI4", dst_ipaddr);
+       }
+
+       if (strcmp(existing_ipaddr, formatted_ipaddr))
+               status = QLA_ERROR;
+
+out_match:
+       return status;
+}
+
+static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha,
+                                     struct iscsi_cls_conn *cls_conn)
+{
+       int idx = 0, max_ddbs, rval;
+       struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn);
+       struct iscsi_session *sess, *existing_sess;
+       struct iscsi_conn *conn, *existing_conn;
+       struct ddb_entry *ddb_entry;
+
+       sess = cls_sess->dd_data;
+       conn = cls_conn->dd_data;
+
+       if (sess->targetname == NULL ||
+           conn->persistent_address == NULL ||
+           conn->persistent_port == 0)
+               return QLA_ERROR;
+
+       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+                                    MAX_DEV_DB_ENTRIES;
+
+       for (idx = 0; idx < max_ddbs; idx++) {
+               ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+               if (ddb_entry == NULL)
+                       continue;
+
+               if (ddb_entry->ddb_type != FLASH_DDB)
+                       continue;
+
+               existing_sess = ddb_entry->sess->dd_data;
+               existing_conn = ddb_entry->conn->dd_data;
+
+               if (existing_sess->targetname == NULL ||
+                   existing_conn->persistent_address == NULL ||
+                   existing_conn->persistent_port == 0)
+                       continue;
+
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "IQN = %s User IQN = %s\n",
+                                 existing_sess->targetname,
+                                 sess->targetname));
+
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "IP = %s User IP = %s\n",
+                                 existing_conn->persistent_address,
+                                 conn->persistent_address));
+
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "Port = %d User Port = %d\n",
+                                 existing_conn->persistent_port,
+                                 conn->persistent_port));
+
+               if (strcmp(existing_sess->targetname, sess->targetname))
+                       continue;
+               rval = qla4xxx_match_ipaddress(ha, ddb_entry,
+                                       existing_conn->persistent_address,
+                                       conn->persistent_address);
+               if (rval == QLA_ERROR)
+                       continue;
+               if (existing_conn->persistent_port != conn->persistent_port)
+                       continue;
+               break;
+       }
+
+       if (idx == max_ddbs)
+               return QLA_ERROR;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Match found in fwdb sessions\n"));
+       return QLA_SUCCESS;
+}
+
 static struct iscsi_cls_session *
 qla4xxx_session_create(struct iscsi_endpoint *ep,
                        uint16_t cmds_max, uint16_t qdepth,
@@ -984,8 +1136,7 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
        struct scsi_qla_host *ha;
        struct qla_endpoint *qla_ep;
        struct ddb_entry *ddb_entry;
-       uint32_t ddb_index;
-       uint32_t mbx_sts = 0;
+       uint16_t ddb_index;
        struct iscsi_session *sess;
        struct sockaddr *dst_addr;
        int ret;
@@ -1000,32 +1151,9 @@ qla4xxx_session_create(struct iscsi_endpoint *ep,
        dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
        ha = to_qla_host(qla_ep->host);
 
-get_ddb_index:
-       ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);
-
-       if (ddb_index >= MAX_DDB_ENTRIES) {
-               DEBUG2(ql4_printk(KERN_INFO, ha,
-                                 "Free DDB index not available\n"));
-               return NULL;
-       }
-
-       if (test_and_set_bit(ddb_index, ha->ddb_idx_map))
-               goto get_ddb_index;
-
-       DEBUG2(ql4_printk(KERN_INFO, ha,
-                         "Found a free DDB index at %d\n", ddb_index));
-       ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts);
-       if (ret == QLA_ERROR) {
-               if (mbx_sts == MBOX_STS_COMMAND_ERROR) {
-                       ql4_printk(KERN_INFO, ha,
-                                  "DDB index = %d not available trying next\n",
-                                  ddb_index);
-                       goto get_ddb_index;
-               }
-               DEBUG2(ql4_printk(KERN_INFO, ha,
-                                 "Free FW DDB not available\n"));
+       ret = qla4xxx_get_ddb_index(ha, &ddb_index);
+       if (ret == QLA_ERROR)
                return NULL;
-       }
 
        cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host,
                                       cmds_max, sizeof(struct ddb_entry),
@@ -1040,6 +1168,8 @@ get_ddb_index:
        ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
        ddb_entry->ha = ha;
        ddb_entry->sess = cls_sess;
+       ddb_entry->unblock_sess = qla4xxx_unblock_ddb;
+       ddb_entry->ddb_change = qla4xxx_ddb_change;
        cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
        ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
        ha->tot_ddbs++;
@@ -1077,6 +1207,9 @@ qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx)
        DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
        cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn),
                                    conn_idx);
+       if (!cls_conn)
+               return NULL;
+
        sess = cls_sess->dd_data;
        ddb_entry = sess->dd_data;
        ddb_entry->conn = cls_conn;
@@ -1109,7 +1242,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
        struct iscsi_session *sess;
        struct ddb_entry *ddb_entry;
        struct scsi_qla_host *ha;
-       struct dev_db_entry *fw_ddb_entry;
+       struct dev_db_entry *fw_ddb_entry = NULL;
        dma_addr_t fw_ddb_entry_dma;
        uint32_t mbx_sts = 0;
        int ret = 0;
@@ -1120,12 +1253,25 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
        ddb_entry = sess->dd_data;
        ha = ddb_entry->ha;
 
+       /* Check if we have a matching FW DDB; if so, do not log in
+        * to this target, as it could cause the target to log out the
+        * previous connection.
+        */
+       ret = qla4xxx_match_fwdb_session(ha, cls_conn);
+       if (ret == QLA_SUCCESS) {
+               ql4_printk(KERN_INFO, ha,
+                          "Session already exists in FW.\n");
+               ret = -EEXIST;
+               goto exit_conn_start;
+       }
+
        fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
                                          &fw_ddb_entry_dma, GFP_KERNEL);
        if (!fw_ddb_entry) {
                ql4_printk(KERN_ERR, ha,
                           "%s: Unable to allocate dma buffer\n", __func__);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto exit_conn_start;
        }
 
        ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts);
@@ -1138,9 +1284,7 @@ static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn)
                if (mbx_sts)
                        if (ddb_entry->fw_ddb_device_state ==
                                                DDB_DS_SESSION_ACTIVE) {
-                               iscsi_conn_start(ddb_entry->conn);
-                               iscsi_conn_login_event(ddb_entry->conn,
-                                               ISCSI_CONN_STATE_LOGGED_IN);
+                               ddb_entry->unblock_sess(ddb_entry->sess);
                                goto exit_set_param;
                        }
 
@@ -1167,8 +1311,9 @@ exit_set_param:
        ret = 0;
 
 exit_conn_start:
-       dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
-                         fw_ddb_entry, fw_ddb_entry_dma);
+       if (fw_ddb_entry)
+               dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                 fw_ddb_entry, fw_ddb_entry_dma);
        return ret;
 }
 
@@ -1344,6 +1489,101 @@ static int qla4xxx_task_xmit(struct iscsi_task *task)
        return -ENOSYS;
 }
 
+static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
+                                    struct dev_db_entry *fw_ddb_entry,
+                                    struct iscsi_cls_session *cls_sess,
+                                    struct iscsi_cls_conn *cls_conn)
+{
+       int buflen = 0;
+       struct iscsi_session *sess;
+       struct iscsi_conn *conn;
+       char ip_addr[DDB_IPADDR_LEN];
+       uint16_t options = 0;
+
+       sess = cls_sess->dd_data;
+       conn = cls_conn->dd_data;
+
+       conn->max_recv_dlength = BYTE_UNITS *
+                         le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
+
+       conn->max_xmit_dlength = BYTE_UNITS *
+                         le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
+
+       sess->initial_r2t_en =
+                           (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+       sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
+
+       sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
+
+       sess->first_burst = BYTE_UNITS *
+                              le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
+
+       sess->max_burst = BYTE_UNITS *
+                                le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
+
+       sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
+       sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+
+       conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
+
+       sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+
+       options = le16_to_cpu(fw_ddb_entry->options);
+       if (options & DDB_OPT_IPV6_DEVICE)
+               sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+       else
+               sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+       iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
+                       (char *)fw_ddb_entry->iscsi_name, buflen);
+       iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
+                       (char *)ha->name_string, buflen);
+       iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
+                       (char *)ip_addr, buflen);
+}
+
+void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
+                                            struct ddb_entry *ddb_entry)
+{
+       struct iscsi_cls_session *cls_sess;
+       struct iscsi_cls_conn *cls_conn;
+       uint32_t ddb_state;
+       dma_addr_t fw_ddb_entry_dma;
+       struct dev_db_entry *fw_ddb_entry;
+
+       fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                         &fw_ddb_entry_dma, GFP_KERNEL);
+       if (!fw_ddb_entry) {
+               ql4_printk(KERN_ERR, ha,
+                          "%s: Unable to allocate dma buffer\n", __func__);
+               goto exit_session_conn_fwddb_param;
+       }
+
+       if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
+                                   fw_ddb_entry_dma, NULL, NULL, &ddb_state,
+                                   NULL, NULL, NULL) == QLA_ERROR) {
+               DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed "
+                                 "get_ddb_entry for fw_ddb_index %d\n",
+                                 ha->host_no, __func__,
+                                 ddb_entry->fw_ddb_index));
+               goto exit_session_conn_fwddb_param;
+       }
+
+       cls_sess = ddb_entry->sess;
+
+       cls_conn = ddb_entry->conn;
+
+       /* Update params */
+       qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
+
+exit_session_conn_fwddb_param:
+       if (fw_ddb_entry)
+               dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                 fw_ddb_entry, fw_ddb_entry_dma);
+}
+
 void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
                                       struct ddb_entry *ddb_entry)
 {
@@ -1360,7 +1600,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
        if (!fw_ddb_entry) {
                ql4_printk(KERN_ERR, ha,
                           "%s: Unable to allocate dma buffer\n", __func__);
-               return;
+               goto exit_session_conn_param;
        }
 
        if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry,
@@ -1370,7 +1610,7 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
                                  "get_ddb_entry for fw_ddb_index %d\n",
                                  ha->host_no, __func__,
                                  ddb_entry->fw_ddb_index));
-               return;
+               goto exit_session_conn_param;
        }
 
        cls_sess = ddb_entry->sess;
@@ -1379,6 +1619,12 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
        cls_conn = ddb_entry->conn;
        conn = cls_conn->dd_data;
 
+       /* Update timers after login */
+       ddb_entry->default_relogin_timeout =
+                               le16_to_cpu(fw_ddb_entry->def_timeout);
+       ddb_entry->default_time2wait =
+                               le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+
        /* Update params */
        conn->max_recv_dlength = BYTE_UNITS *
                          le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
@@ -1407,6 +1653,11 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
 
        memcpy(sess->initiatorname, ha->name_string,
               min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
+
+exit_session_conn_param:
+       if (fw_ddb_entry)
+               dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+                                 fw_ddb_entry, fw_ddb_entry_dma);
 }
 
 /*
@@ -1607,6 +1858,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
                vfree(ha->chap_list);
        ha->chap_list = NULL;
 
+       if (ha->fw_ddb_dma_pool)
+               dma_pool_destroy(ha->fw_ddb_dma_pool);
+
        /* release io space registers  */
        if (is_qla8022(ha)) {
                if (ha->nx_pcibase)
@@ -1689,6 +1943,16 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha)
                goto mem_alloc_error_exit;
        }
 
+       ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", &ha->pdev->dev,
+                                             DDB_DMA_BLOCK_SIZE, 8, 0);
+
+       if (ha->fw_ddb_dma_pool == NULL) {
+               ql4_printk(KERN_WARNING, ha,
+                          "%s: fw_ddb_dma_pool allocation failed.\n",
+                          __func__);
+               goto mem_alloc_error_exit;
+       }
+
        return QLA_SUCCESS;
 
 mem_alloc_error_exit:
@@ -1800,6 +2064,60 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
        }
 }
 
+void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+
+       sess = cls_sess->dd_data;
+       ddb_entry = sess->dd_data;
+       ha = ddb_entry->ha;
+
+       if (!(ddb_entry->ddb_type == FLASH_DDB))
+               return;
+
+       if (adapter_up(ha) && !test_bit(DF_RELOGIN, &ddb_entry->flags) &&
+           !iscsi_is_session_online(cls_sess)) {
+               if (atomic_read(&ddb_entry->retry_relogin_timer) !=
+                   INVALID_ENTRY) {
+                       if (atomic_read(&ddb_entry->retry_relogin_timer) ==
+                                       0) {
+                               atomic_set(&ddb_entry->retry_relogin_timer,
+                                          INVALID_ENTRY);
+                               set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+                               set_bit(DF_RELOGIN, &ddb_entry->flags);
+                               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                      "%s: index [%d] login device\n",
+                                       __func__, ddb_entry->fw_ddb_index));
+                       } else
+                               atomic_dec(&ddb_entry->retry_relogin_timer);
+               }
+       }
+
+       /* Wait for relogin to timeout */
+       if (atomic_read(&ddb_entry->relogin_timer) &&
+           (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) {
+               /*
+                * If the relogin times out and the device is
+                * still NOT ONLINE, then try to relogin again.
+                */
+               if (!iscsi_is_session_online(cls_sess)) {
+                       /* Reset retry relogin timer */
+                       atomic_inc(&ddb_entry->relogin_retry_count);
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                               "%s: index[%d] relogin timed out - retrying"
+                               " relogin (%d), retry (%d)\n", __func__,
+                               ddb_entry->fw_ddb_index,
+                               atomic_read(&ddb_entry->relogin_retry_count),
+                               ddb_entry->default_time2wait + 4));
+                       set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+                       atomic_set(&ddb_entry->retry_relogin_timer,
+                                  ddb_entry->default_time2wait + 4);
+               }
+       }
+}
+
 /**
  * qla4xxx_timer - checks every second for work to do.
  * @ha: Pointer to host adapter structure.
@@ -1809,6 +2127,8 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
        int start_dpc = 0;
        uint16_t w;
 
+       iscsi_host_for_each_session(ha->host, qla4xxx_check_relogin_flash_ddb);
+
        /* If we are in the middle of AER/EEH processing
         * skip any processing and reschedule the timer
         */
@@ -2078,7 +2398,12 @@ static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session)
        sess = cls_session->dd_data;
        ddb_entry = sess->dd_data;
        ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED;
-       iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+
+       if (ddb_entry->ddb_type == FLASH_DDB)
+               iscsi_block_session(ddb_entry->sess);
+       else
+               iscsi_session_failure(cls_session->dd_data,
+                                     ISCSI_ERR_CONN_FAILED);
 }
 
 /**
@@ -2163,7 +2488,7 @@ recover_ha_init_adapter:
 
                /* NOTE: AF_ONLINE flag set upon successful completion of
                 *       qla4xxx_initialize_adapter */
-               status = qla4xxx_initialize_adapter(ha);
+               status = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
        }
 
        /* Retry failed adapter initialization, if necessary
@@ -2245,17 +2570,108 @@ static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
                        iscsi_unblock_session(ddb_entry->sess);
                } else {
                        /* Trigger relogin */
-                       iscsi_session_failure(cls_session->dd_data,
-                                             ISCSI_ERR_CONN_FAILED);
+                       if (ddb_entry->ddb_type == FLASH_DDB) {
+                               if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+                                       qla4xxx_arm_relogin_timer(ddb_entry);
+                       } else
+                               iscsi_session_failure(cls_session->dd_data,
+                                                     ISCSI_ERR_CONN_FAILED);
                }
        }
 }
 
+int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session)
+{
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+
+       sess = cls_session->dd_data;
+       ddb_entry = sess->dd_data;
+       ha = ddb_entry->ha;
+       ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+                  " unblock session\n", ha->host_no, __func__,
+                  ddb_entry->fw_ddb_index);
+
+       iscsi_unblock_session(ddb_entry->sess);
+
+       /* Start scan target */
+       if (test_bit(AF_ONLINE, &ha->flags)) {
+               ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+                          " start scan\n", ha->host_no, __func__,
+                          ddb_entry->fw_ddb_index);
+               scsi_queue_work(ha->host, &ddb_entry->sess->scan_work);
+       }
+       return QLA_SUCCESS;
+}
+
+int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session)
+{
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+
+       sess = cls_session->dd_data;
+       ddb_entry = sess->dd_data;
+       ha = ddb_entry->ha;
+       ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]"
+                  " unblock user space session\n", ha->host_no, __func__,
+                  ddb_entry->fw_ddb_index);
+       iscsi_conn_start(ddb_entry->conn);
+       iscsi_conn_login_event(ddb_entry->conn,
+                              ISCSI_CONN_STATE_LOGGED_IN);
+
+       return QLA_SUCCESS;
+}
+
 static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha)
 {
        iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices);
 }
 
+static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+{
+       uint16_t relogin_timer;
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+
+       sess = cls_sess->dd_data;
+       ddb_entry = sess->dd_data;
+       ha = ddb_entry->ha;
+
+       relogin_timer = max(ddb_entry->default_relogin_timeout,
+                           (uint16_t)RELOGIN_TOV);
+       atomic_set(&ddb_entry->relogin_timer, relogin_timer);
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no,
+                         ddb_entry->fw_ddb_index, relogin_timer));
+
+       qla4xxx_login_flash_ddb(cls_sess);
+}
+
+static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
+{
+       struct iscsi_session *sess;
+       struct ddb_entry *ddb_entry;
+       struct scsi_qla_host *ha;
+
+       sess = cls_sess->dd_data;
+       ddb_entry = sess->dd_data;
+       ha = ddb_entry->ha;
+
+       if (!(ddb_entry->ddb_type == FLASH_DDB))
+               return;
+
+       if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
+           !iscsi_is_session_online(cls_sess)) {
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "relogin issued\n"));
+               qla4xxx_relogin_flash_ddb(cls_sess);
+       }
+}
+
 void qla4xxx_wake_dpc(struct scsi_qla_host *ha)
 {
        if (ha->dpc_thread)
@@ -2356,6 +2772,12 @@ dpc_post_reset_ha:
        if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags))
                qla4xxx_get_dhcp_ip_address(ha);
 
+       /* ---- relogin device? --- */
+       if (adapter_up(ha) &&
+           test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) {
+               iscsi_host_for_each_session(ha->host, qla4xxx_dpc_relogin);
+       }
+
        /* ---- link change? --- */
        if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
                if (!test_bit(AF_LINK_UP, &ha->flags)) {
@@ -2368,8 +2790,12 @@ dpc_post_reset_ha:
                         * fatal error recovery.  Therefore, the driver must
                         * manually relogin to devices when recovering from
                         * connection failures, logouts, expired KATO, etc. */
-
-                       qla4xxx_relogin_all_devices(ha);
+                       if (test_and_clear_bit(AF_BUILD_DDB_LIST, &ha->flags)) {
+                               qla4xxx_build_ddb_list(ha, ha->is_reset);
+                               iscsi_host_for_each_session(ha->host,
+                                               qla4xxx_login_flash_ddb);
+                       } else {
+                               qla4xxx_relogin_all_devices(ha);
+                       }
                }
        }
 }
@@ -2867,6 +3293,9 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
                          " target ID %d\n", __func__, ddb_index[0],
                          ddb_index[1]));
 
+       ha->pri_ddb_idx = ddb_index[0];
+       ha->sec_ddb_idx = ddb_index[1];
+
 exit_boot_info_free:
        dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma);
 exit_boot_info:
@@ -3034,6 +3463,9 @@ static int qla4xxx_get_boot_info(struct scsi_qla_host *ha)
                return ret;
        }
 
+       if (ql4xdisablesysfsboot)
+               return QLA_SUCCESS;
+
        if (ddb_index[0] == 0xffff)
                goto sec_target;
 
@@ -3066,7 +3498,15 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
        struct iscsi_boot_kobj *boot_kobj;
 
        if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS)
-               return 0;
+               return QLA_ERROR;
+
+       if (ql4xdisablesysfsboot) {
+               ql4_printk(KERN_INFO, ha,
+                          "%s: sysfsboot disabled - driver will trigger login "
+                          "and publish session for discovery.\n", __func__);
+               return QLA_SUCCESS;
+       }
+
 
        ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no);
        if (!ha->boot_kset)
@@ -3108,7 +3548,7 @@ static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha)
        if (!boot_kobj)
                goto put_host;
 
-       return 0;
+       return QLA_SUCCESS;
 
 put_host:
        scsi_host_put(ha->host);
@@ -3174,9 +3614,507 @@ static void qla4xxx_create_chap_list(struct scsi_qla_host *ha)
 exit_chap_list:
        dma_free_coherent(&ha->pdev->dev, chap_size,
                        chap_flash_data, chap_dma);
-       return;
 }
 
+static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry,
+                                 struct ql4_tuple_ddb *tddb)
+{
+       struct scsi_qla_host *ha;
+       struct iscsi_cls_session *cls_sess;
+       struct iscsi_cls_conn *cls_conn;
+       struct iscsi_session *sess;
+       struct iscsi_conn *conn;
+
+       DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+       ha = ddb_entry->ha;
+       cls_sess = ddb_entry->sess;
+       sess = cls_sess->dd_data;
+       cls_conn = ddb_entry->conn;
+       conn = cls_conn->dd_data;
+
+       tddb->tpgt = sess->tpgt;
+       tddb->port = conn->persistent_port;
+       strncpy(tddb->iscsi_name, sess->targetname, ISCSI_NAME_SIZE);
+       strncpy(tddb->ip_addr, conn->persistent_address, DDB_IPADDR_LEN);
+}
+
+static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry,
+                                     struct ql4_tuple_ddb *tddb)
+{
+       uint16_t options = 0;
+
+       tddb->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+       memcpy(&tddb->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
+              min(sizeof(tddb->iscsi_name), sizeof(fw_ddb_entry->iscsi_name)));
+
+       options = le16_to_cpu(fw_ddb_entry->options);
+       if (options & DDB_OPT_IPV6_DEVICE)
+               sprintf(tddb->ip_addr, "%pI6", fw_ddb_entry->ip_addr);
+       else
+               sprintf(tddb->ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+
+       tddb->port = le16_to_cpu(fw_ddb_entry->port);
+}
+
+static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha,
+                                    struct ql4_tuple_ddb *old_tddb,
+                                    struct ql4_tuple_ddb *new_tddb)
+{
+       if (strcmp(old_tddb->iscsi_name, new_tddb->iscsi_name))
+               return QLA_ERROR;
+
+       if (strcmp(old_tddb->ip_addr, new_tddb->ip_addr))
+               return QLA_ERROR;
+
+       if (old_tddb->port != new_tddb->port)
+               return QLA_ERROR;
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]\n",
+                         old_tddb->port, old_tddb->tpgt, old_tddb->ip_addr,
+                         old_tddb->iscsi_name, new_tddb->port, new_tddb->tpgt,
+                         new_tddb->ip_addr, new_tddb->iscsi_name));
+
+       return QLA_SUCCESS;
+}
+
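+/*
+ * qla4xxx_is_session_exists - return QLA_SUCCESS when a session matching the
+ * firmware DDB's (name, IP, port) tuple already exists, QLA_ERROR otherwise.
+ * Allocation failures also return QLA_SUCCESS so that no duplicate session
+ * is created.
+ */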
+static int qla4xxx_is_session_exists(struct scsi_qla_host *ha,
+                                    struct dev_db_entry *fw_ddb_entry)
+{
+       struct ddb_entry *ddb_entry;
+       struct ql4_tuple_ddb *fw_tddb = NULL;
+       struct ql4_tuple_ddb *tmp_tddb = NULL;
+       int idx;
+       int ret = QLA_ERROR;
+
+       fw_tddb = vzalloc(sizeof(*fw_tddb));
+       if (!fw_tddb) {
+               DEBUG2(ql4_printk(KERN_WARNING, ha,
+                                 "Memory Allocation failed.\n"));
+               ret = QLA_SUCCESS;
+               goto exit_check;
+       }
+
+       tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+       if (!tmp_tddb) {
+               DEBUG2(ql4_printk(KERN_WARNING, ha,
+                                 "Memory Allocation failed.\n"));
+               ret = QLA_SUCCESS;
+               goto exit_check;
+       }
+
+       qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+       for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+               ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+               if (ddb_entry == NULL)
+                       continue;
+
+               qla4xxx_get_param_ddb(ddb_entry, tmp_tddb);
+               if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+                       ret = QLA_SUCCESS; /* found */
+                       goto exit_check;
+               }
+       }
+
+exit_check:
+       if (fw_tddb)
+               vfree(fw_tddb);
+       if (tmp_tddb)
+               vfree(tmp_tddb);
+       return ret;
+}
+
+static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha,
+                                      struct list_head *list_nt,
+                                      struct dev_db_entry *fw_ddb_entry)
+{
+       struct qla_ddb_index  *nt_ddb_idx, *nt_ddb_idx_tmp;
+       struct ql4_tuple_ddb *fw_tddb = NULL;
+       struct ql4_tuple_ddb *tmp_tddb = NULL;
+       int ret = QLA_ERROR;
+
+       fw_tddb = vzalloc(sizeof(*fw_tddb));
+       if (!fw_tddb) {
+               DEBUG2(ql4_printk(KERN_WARNING, ha,
+                                 "Memory Allocation failed.\n"));
+               ret = QLA_SUCCESS;
+               goto exit_check;
+       }
+
+       tmp_tddb = vzalloc(sizeof(*tmp_tddb));
+       if (!tmp_tddb) {
+               DEBUG2(ql4_printk(KERN_WARNING, ha,
+                                 "Memory Allocation failed.\n"));
+               ret = QLA_SUCCESS;
+               goto exit_check;
+       }
+
+       qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb);
+
+       list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+               qla4xxx_convert_param_ddb(&nt_ddb_idx->fw_ddb, tmp_tddb);
+               if (!qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb)) {
+                       ret = QLA_SUCCESS; /* found */
+                       goto exit_check;
+               }
+       }
+
+exit_check:
+       if (fw_tddb)
+               vfree(fw_tddb);
+       if (tmp_tddb)
+               vfree(tmp_tddb);
+       return ret;
+}
+
+static void qla4xxx_free_nt_list(struct list_head *list_nt)
+{
+       struct qla_ddb_index  *nt_ddb_idx, *nt_ddb_idx_tmp;
+
+       /* Free up the normal-target (NT) list */
+       list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
+               list_del_init(&nt_ddb_idx->list);
+               vfree(nt_ddb_idx);
+       }
+}
+
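+/*
+ * qla4xxx_get_ep_fwdb - build a sockaddr from the firmware DDB's IP address
+ * and port (IPv4 or IPv6) and open an iscsi_endpoint to it via
+ * qla4xxx_ep_connect().
+ */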
+static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+                                       struct dev_db_entry *fw_ddb_entry)
+{
+       struct iscsi_endpoint *ep;
+       struct sockaddr_in *addr;
+       struct sockaddr_in6 *addr6;
+       struct sockaddr *dst_addr;
+       char *ip;
+
+       /* TODO: the iscsi_endpoint needs to be destroyed on unload */
+       dst_addr = vmalloc(sizeof(*dst_addr));
+       if (!dst_addr)
+               return NULL;
+
+       if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
+               dst_addr->sa_family = AF_INET6;
+               addr6 = (struct sockaddr_in6 *)dst_addr;
+               ip = (char *)&addr6->sin6_addr;
+               memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
+               addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
+
+       } else {
+               dst_addr->sa_family = AF_INET;
+               addr = (struct sockaddr_in *)dst_addr;
+               ip = (char *)&addr->sin_addr;
+               memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
+               addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
+       }
+
+       ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
+       vfree(dst_addr);
+       return ep;
+}
+
+static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
+{
+       if (ql4xdisablesysfsboot)
+               return QLA_SUCCESS;
+       if (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx)
+               return QLA_ERROR;
+       return QLA_SUCCESS;
+}
+
+static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
+                                         struct ddb_entry *ddb_entry)
+{
+       ddb_entry->ddb_type = FLASH_DDB;
+       ddb_entry->fw_ddb_index = INVALID_ENTRY;
+       ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
+       ddb_entry->ha = ha;
+       ddb_entry->unblock_sess = qla4xxx_unblock_flash_ddb;
+       ddb_entry->ddb_change = qla4xxx_flash_ddb_change;
+
+       atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+       atomic_set(&ddb_entry->relogin_timer, 0);
+       atomic_set(&ddb_entry->relogin_retry_count, 0);
+
+       ddb_entry->default_relogin_timeout =
+               le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+       ddb_entry->default_time2wait =
+               le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
+}
+
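+/*
+ * qla4xxx_wait_for_ip_configuration - poll the firmware IP address state of
+ * the four IP interfaces until each one reaches a settled state (or the
+ * query fails), giving up after IP_CONFIG_TOV seconds.
+ */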
+static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
+{
+       uint32_t idx = 0;
+       uint32_t ip_idx[IP_ADDR_COUNT] = {0, 1, 2, 3}; /* 4 IP interfaces */
+       uint32_t sts[MBOX_REG_COUNT];
+       uint32_t ip_state;
+       unsigned long wtime;
+       int ret;
+
+       wtime = jiffies + (HZ * IP_CONFIG_TOV);
+       do {
+               for (idx = 0; idx < IP_ADDR_COUNT; idx++) {
+                       if (ip_idx[idx] == -1)
+                               continue;
+
+                       ret = qla4xxx_get_ip_state(ha, 0, ip_idx[idx], sts);
+
+                       if (ret == QLA_ERROR) {
+                               ip_idx[idx] = -1;
+                               continue;
+                       }
+
+                       ip_state = (sts[1] & IP_STATE_MASK) >> IP_STATE_SHIFT;
+
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                                         "Waiting for IP state for idx = %d, state = 0x%x\n",
+                                         ip_idx[idx], ip_state));
+                       if (ip_state == IP_ADDRSTATE_UNCONFIGURED ||
+                           ip_state == IP_ADDRSTATE_INVALID ||
+                           ip_state == IP_ADDRSTATE_PREFERRED ||
+                           ip_state == IP_ADDRSTATE_DEPRICATED ||
+                           ip_state == IP_ADDRSTATE_DISABLING)
+                               ip_idx[idx] = -1;
+
+               }
+
+               /* Break if all IP states checked */
+               if ((ip_idx[0] == -1) &&
+                   (ip_idx[1] == -1) &&
+                   (ip_idx[2] == -1) &&
+                   (ip_idx[3] == -1))
+                       break;
+               schedule_timeout_uninterruptible(HZ);
+       } while (time_after(wtime, jiffies));
+}
+
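+/**
+ * qla4xxx_build_ddb_list - build driver sessions from flash DDB entries
+ * @ha: pointer to adapter structure
+ * @is_reset: INIT_ADAPTER or RESET_ADAPTER
+ *
+ * Walks the firmware DDB entries, fires the sendtarget (ST) entries once all
+ * IP addresses are configured, waits for them to complete, and then creates
+ * an iSCSI session and connection for every normal-target (NT) entry that is
+ * not already known.  On RESET_ADAPTER the new sessions are blocked and
+ * flagged for immediate relogin through the DPC.
+ */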
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+{
+       int max_ddbs;
+       int ret;
+       uint32_t idx = 0, next_idx = 0;
+       uint32_t state = 0, conn_err = 0;
+       uint16_t conn_id;
+       struct dev_db_entry *fw_ddb_entry;
+       struct ddb_entry *ddb_entry = NULL;
+       dma_addr_t fw_ddb_dma;
+       struct iscsi_cls_session *cls_sess;
+       struct iscsi_session *sess;
+       struct iscsi_cls_conn *cls_conn;
+       struct iscsi_endpoint *ep;
+       uint16_t cmds_max = 32, tmo = 0;
+       uint32_t initial_cmdsn = 0;
+       struct list_head list_st, list_nt; /* ST and NT entry lists */
+       struct qla_ddb_index  *st_ddb_idx, *st_ddb_idx_tmp;
+       int fw_idx_size;
+       unsigned long wtime;
+       struct qla_ddb_index  *nt_ddb_idx;
+
+       if (!test_bit(AF_LINK_UP, &ha->flags)) {
+               set_bit(AF_BUILD_DDB_LIST, &ha->flags);
+               ha->is_reset = is_reset;
+               return;
+       }
+       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+                                    MAX_DEV_DB_ENTRIES;
+
+       fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+                                     &fw_ddb_dma);
+       if (fw_ddb_entry == NULL) {
+               DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+               goto exit_ddb_list;
+       }
+
+       INIT_LIST_HEAD(&list_st);
+       INIT_LIST_HEAD(&list_nt);
+       fw_idx_size = sizeof(struct qla_ddb_index);
+
+       for (idx = 0; idx < max_ddbs; idx = next_idx) {
+               ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
+                                             fw_ddb_dma, NULL,
+                                             &next_idx, &state, &conn_err,
+                                             NULL, &conn_id);
+               if (ret == QLA_ERROR)
+                       break;
+
+               if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+                       goto continue_next_st;
+
+               /* ST entries have an empty iscsi_name; add them to list_st */
+               if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
+                       goto continue_next_st;
+
+               st_ddb_idx = vzalloc(fw_idx_size);
+               if (!st_ddb_idx)
+                       break;
+
+               st_ddb_idx->fw_ddb_idx = idx;
+
+               list_add_tail(&st_ddb_idx->list, &list_st);
+continue_next_st:
+               if (next_idx == 0)
+                       break;
+       }
+
+       /* Before issuing the conn open mailbox command, ensure all IP
+        * addresses are configured; conn open fails if they are not.
+        */
+       qla4xxx_wait_for_ip_configuration(ha);
+
+       /* Go through the STs and fire the sendtargets via conn open mbox */
+       list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+               qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+       }
+
+       /* Wait for all sendtargets to complete (minimum 12 second wait) */
+       tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout);
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Default time to wait for build ddb %d\n", tmo));
+
+       wtime = jiffies + (HZ * tmo);
+       do {
+               list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st,
+                                        list) {
+                       ret = qla4xxx_get_fwddb_entry(ha,
+                                                     st_ddb_idx->fw_ddb_idx,
+                                                     NULL, 0, NULL, &next_idx,
+                                                     &state, &conn_err, NULL,
+                                                     NULL);
+                       if (ret == QLA_ERROR)
+                               continue;
+
+                       if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+                           state == DDB_DS_SESSION_FAILED) {
+                               list_del_init(&st_ddb_idx->list);
+                               vfree(st_ddb_idx);
+                       }
+               }
+               schedule_timeout_uninterruptible(HZ / 10);
+       } while (time_after(wtime, jiffies));
+
+       /* Free up the sendtargets list */
+       list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+               list_del_init(&st_ddb_idx->list);
+               vfree(st_ddb_idx);
+       }
+
+       for (idx = 0; idx < max_ddbs; idx = next_idx) {
+               ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
+                                             fw_ddb_dma, NULL,
+                                             &next_idx, &state, &conn_err,
+                                             NULL, &conn_id);
+               if (ret == QLA_ERROR)
+                       break;
+
+               if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
+                       goto continue_next_nt;
+
+               /* Check if NT, then add it to the list */
+               if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
+                       goto continue_next_nt;
+
+               if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+                   state == DDB_DS_SESSION_FAILED) {
+                       DEBUG2(ql4_printk(KERN_INFO, ha,
+                                         "Adding DDB to session = 0x%x\n",
+                                         idx));
+                       if (is_reset == INIT_ADAPTER) {
+                               nt_ddb_idx = vmalloc(fw_idx_size);
+                               if (!nt_ddb_idx)
+                                       break;
+
+                               nt_ddb_idx->fw_ddb_idx = idx;
+
+                               memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
+                                      sizeof(struct dev_db_entry));
+
+                               if (qla4xxx_is_flash_ddb_exists(ha, &list_nt,
+                                               fw_ddb_entry) == QLA_SUCCESS) {
+                                       vfree(nt_ddb_idx);
+                                       goto continue_next_nt;
+                               }
+                               list_add_tail(&nt_ddb_idx->list, &list_nt);
+                       } else if (is_reset == RESET_ADAPTER) {
+                               if (qla4xxx_is_session_exists(ha,
+                                                  fw_ddb_entry) == QLA_SUCCESS)
+                                       goto continue_next_nt;
+                       }
+
+                       /* Create the session object with INVALID_ENTRY;
+                        * the target_id gets set when the login is issued.
+                        */
+                       cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport,
+                                               ha->host, cmds_max,
+                                               sizeof(struct ddb_entry),
+                                               sizeof(struct ql4_task_data),
+                                               initial_cmdsn, INVALID_ENTRY);
+                       if (!cls_sess)
+                               goto exit_ddb_list;
+
+                       /*
+                        * iscsi_session_setup() takes a reference on the
+                        * driver module, which would prevent the driver from
+                        * being unloaded, so call module_put() to drop that
+                        * reference again.
+                        */
+                       module_put(qla4xxx_iscsi_transport.owner);
+                       sess = cls_sess->dd_data;
+                       ddb_entry = sess->dd_data;
+                       ddb_entry->sess = cls_sess;
+
+                       cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+                       memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+                              sizeof(struct dev_db_entry));
+
+                       qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+
+                       cls_conn = iscsi_conn_setup(cls_sess,
+                                                   sizeof(struct qla_conn),
+                                                   conn_id);
+                       if (!cls_conn)
+                               goto exit_ddb_list;
+
+                       ddb_entry->conn = cls_conn;
+
+                       /* Set up the ep so its attributes show up in sysfs */
+                       ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
+                       if (ep) {
+                               ep->conn = cls_conn;
+                               cls_conn->ep = ep;
+                       } else {
+                               DEBUG2(ql4_printk(KERN_ERR, ha,
+                                                 "Unable to get ep\n"));
+                       }
+
+                       /* Update sess/conn params */
+                       qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess,
+                                                cls_conn);
+
+                       if (is_reset == RESET_ADAPTER) {
+                               iscsi_block_session(cls_sess);
+                               /* Use the relogin path to discover new devices,
+                                * short-circuiting the relogin timer logic by
+                                * setting the flags to initiate login right
+                                * away.
+                                */
+                               set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+                               set_bit(DF_RELOGIN, &ddb_entry->flags);
+                       }
+               }
+continue_next_nt:
+               if (next_idx == 0)
+                       break;
+       }
+exit_ddb_list:
+       qla4xxx_free_nt_list(&list_nt);
+       if (fw_ddb_entry)
+               dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+
+       qla4xxx_free_ddb_index(ha);
+}
+
 /**
  * qla4xxx_probe_adapter - callback function to probe HBA
  * @pdev: pointer to pci_dev structure
@@ -3298,7 +4236,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
         * firmware
         * NOTE: interrupts enabled upon successful completion
         */
-       status = qla4xxx_initialize_adapter(ha);
+       status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
        while ((!test_bit(AF_ONLINE, &ha->flags)) &&
            init_retry_count++ < MAX_INIT_RETRIES) {
 
@@ -3319,7 +4257,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
                if (ha->isp_ops->reset_chip(ha) == QLA_ERROR)
                        continue;
 
-               status = qla4xxx_initialize_adapter(ha);
+               status = qla4xxx_initialize_adapter(ha, INIT_ADAPTER);
        }
 
        if (!test_bit(AF_ONLINE, &ha->flags)) {
@@ -3386,12 +4324,16 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
               ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
               ha->patch_number, ha->build_number);
 
-       qla4xxx_create_chap_list(ha);
-
        if (qla4xxx_setup_boot_info(ha))
                ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n",
                           __func__);
 
+       /* Build the ddb list and log in to each target */
+       qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
+       iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
+
+       qla4xxx_create_chap_list(ha);
+
        qla4xxx_create_ifaces(ha);
        return 0;
 
@@ -3449,6 +4391,38 @@ static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha)
        }
 }
 
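+/*
+ * qla4xxx_destroy_fw_ddb_session - log out and tear down every flash DDB
+ * session at driver removal.  The module reference dropped at session setup
+ * is taken back with try_module_get() before the endpoint is destroyed, the
+ * DDB freed and the session torn down.
+ */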
+static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
+{
+       struct ddb_entry *ddb_entry;
+       int options;
+       int idx;
+
+       for (idx = 0; idx < MAX_DDB_ENTRIES; idx++) {
+
+               ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+               if ((ddb_entry != NULL) &&
+                   (ddb_entry->ddb_type == FLASH_DDB)) {
+
+                       options = LOGOUT_OPTION_CLOSE_SESSION;
+                       if (qla4xxx_session_logout_ddb(ha, ddb_entry, options)
+                           == QLA_ERROR)
+                               ql4_printk(KERN_ERR, ha, "%s: Logout failed\n",
+                                          __func__);
+
+                       qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
+                       /*
+                        * The driver module reference was dropped when the
+                        * session was set up, so that the driver could be
+                        * unloaded seamlessly without destroying the session;
+                        * take that reference back before tearing it down.
+                        */
+                       try_module_get(qla4xxx_iscsi_transport.owner);
+                       iscsi_destroy_endpoint(ddb_entry->conn->ep);
+                       qla4xxx_free_ddb(ha, ddb_entry);
+                       iscsi_session_teardown(ddb_entry->sess);
+               }
+       }
+}
+
 /**
  * qla4xxx_remove_adapter - callback function to remove adapter.
  * @pci_dev: PCI device pointer
@@ -3465,9 +4439,11 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
        /* destroy iface from sysfs */
        qla4xxx_destroy_ifaces(ha);
 
-       if (ha->boot_kset)
+       if ((!ql4xdisablesysfsboot) && ha->boot_kset)
                iscsi_boot_destroy_kset(ha->boot_kset);
 
+       qla4xxx_destroy_fw_ddb_session(ha);
+
        scsi_remove_host(ha->host);
 
        qla4xxx_free_adapter(ha);
@@ -4115,7 +5091,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
 
                qla4_8xxx_idc_unlock(ha);
                clear_bit(AF_FW_RECOVERY, &ha->flags);
-               rval = qla4xxx_initialize_adapter(ha);
+               rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
                qla4_8xxx_idc_lock(ha);
 
                if (rval != QLA_SUCCESS) {
@@ -4151,7 +5127,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
                if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) ==
                    QLA82XX_DEV_READY)) {
                        clear_bit(AF_FW_RECOVERY, &ha->flags);
-                       rval = qla4xxx_initialize_adapter(ha);
+                       rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
                        if (rval == QLA_SUCCESS) {
                                ret = qla4xxx_request_irqs(ha);
                                if (ret) {
index c15347d3f532099ef70127371a91caef2d90c3ee..5254e57968f5cf64a24c04731fe9ef2e240c4068 100644 (file)
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k8"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k9"
index a1fd73df5416129c62d8889f87c1d003b0b6b47c..8ba4510a95195392da12a65ce641ea5f164f3c20 100644 (file)
@@ -199,7 +199,7 @@ config SPI_FSL_LIB
        depends on FSL_SOC
 
 config SPI_FSL_SPI
-       tristate "Freescale SPI controller"
+       bool "Freescale SPI controller"
        depends on FSL_SOC
        select SPI_FSL_LIB
        help
@@ -208,7 +208,7 @@ config SPI_FSL_SPI
          MPC8569 uses the controller in QE mode, MPC8610 in cpu mode.
 
 config SPI_FSL_ESPI
-       tristate "Freescale eSPI controller"
+       bool "Freescale eSPI controller"
        depends on FSL_SOC
        select SPI_FSL_LIB
        help
index 024b48aed5ca6bba6f32857965e8904fe012ca1b..acc88b4d28693da4d59d315b350f0e717837e607 100644 (file)
@@ -13,6 +13,7 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/spinlock.h>
index e093d3ec41ba26f1da89a100e3f8774511e65a21..0094c645ff0d4cc4562412ef020e6ba838966779 100644 (file)
@@ -256,7 +256,7 @@ static void spi_gpio_cleanup(struct spi_device *spi)
        spi_bitbang_cleanup(spi);
 }
 
-static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
+static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
 {
        int value;
 
@@ -270,7 +270,7 @@ static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
        return value;
 }
 
-static int __init
+static int __devinit
 spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label,
        u16 *res_flags)
 {
index 21c70b2b83119e333ebe4c5b5e18ab70f2ce6a0a..182e9c873822a64b6e9e25976470044eae8bc338 100644 (file)
@@ -8,6 +8,7 @@
  *
  */
 
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
index 84c934c0a5455950f1244de76ebcfe3b8b5a4aaf..520e8286db28f92cee618acdbaa00a310e5d41ab 100644 (file)
@@ -517,10 +517,14 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
 
 static void __devinit ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
 {
-       ssb_pcicore_fix_sprom_core_index(pc);
+       struct ssb_device *pdev = pc->dev;
+       struct ssb_bus *bus = pdev->bus;
+
+       if (bus->bustype == SSB_BUSTYPE_PCI)
+               ssb_pcicore_fix_sprom_core_index(pc);
 
        /* Disable PCI interrupts. */
-       ssb_write32(pc->dev, SSB_INTVEC, 0);
+       ssb_write32(pdev, SSB_INTVEC, 0);
 
        /* Additional PCIe always once-executed workarounds */
        if (pc->dev->id.coreid == SSB_DEV_PCIE) {
index 21d8c1c16cd891e42bf1621d7ff1ba8f0dc5e580..5e78c77d5a08277611c183c92dae273362caed58 100644 (file)
@@ -671,7 +671,7 @@ static int do_insnlist_ioctl(struct comedi_device *dev,
        }
 
        insns =
-           kmalloc(sizeof(struct comedi_insn) * insnlist.n_insns, GFP_KERNEL);
+           kcalloc(insnlist.n_insns, sizeof(struct comedi_insn), GFP_KERNEL);
        if (!insns) {
                DPRINTK("kmalloc failed\n");
                ret = -ENOMEM;
@@ -1432,7 +1432,21 @@ static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
        return ret;
 }
 
-static void comedi_unmap(struct vm_area_struct *area)
+
+static void comedi_vm_open(struct vm_area_struct *area)
+{
+       struct comedi_async *async;
+       struct comedi_device *dev;
+
+       async = area->vm_private_data;
+       dev = async->subdevice->device;
+
+       mutex_lock(&dev->mutex);
+       async->mmap_count++;
+       mutex_unlock(&dev->mutex);
+}
+
+static void comedi_vm_close(struct vm_area_struct *area)
 {
        struct comedi_async *async;
        struct comedi_device *dev;
@@ -1446,15 +1460,13 @@ static void comedi_unmap(struct vm_area_struct *area)
 }
 
 static struct vm_operations_struct comedi_vm_ops = {
-       .close = comedi_unmap,
+       .open = comedi_vm_open,
+       .close = comedi_vm_close,
 };
 
 static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
 {
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
        struct comedi_async *async = NULL;
        unsigned long start = vma->vm_start;
        unsigned long size;
@@ -1462,6 +1474,15 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
        int i;
        int retval;
        struct comedi_subdevice *s;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+
+       dev_file_info = comedi_get_device_file_info(minor);
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        mutex_lock(&dev->mutex);
        if (!dev->attached) {
@@ -1528,11 +1549,17 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
 {
        unsigned int mask = 0;
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
        struct comedi_subdevice *read_subdev;
        struct comedi_subdevice *write_subdev;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
+
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        mutex_lock(&dev->mutex);
        if (!dev->attached) {
@@ -1578,9 +1605,15 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
        int n, m, count = 0, retval = 0;
        DECLARE_WAITQUEUE(wait, current);
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
+
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        if (!dev->attached) {
                DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1640,11 +1673,11 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
                                retval = -EAGAIN;
                                break;
                        }
+                       schedule();
                        if (signal_pending(current)) {
                                retval = -ERESTARTSYS;
                                break;
                        }
-                       schedule();
                        if (!s->busy)
                                break;
                        if (s->busy != file) {
@@ -1683,9 +1716,15 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
        int n, m, count = 0, retval = 0;
        DECLARE_WAITQUEUE(wait, current);
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
+
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        if (!dev->attached) {
                DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1741,11 +1780,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
                                retval = -EAGAIN;
                                break;
                        }
+                       schedule();
                        if (signal_pending(current)) {
                                retval = -ERESTARTSYS;
                                break;
                        }
-                       schedule();
                        if (!s->busy) {
                                retval = 0;
                                break;
@@ -1885,11 +1924,17 @@ ok:
 static int comedi_close(struct inode *inode, struct file *file)
 {
        const unsigned minor = iminor(inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
-       struct comedi_device *dev = dev_file_info->device;
        struct comedi_subdevice *s = NULL;
        int i;
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
+
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        mutex_lock(&dev->mutex);
 
@@ -1923,10 +1968,15 @@ static int comedi_close(struct inode *inode, struct file *file)
 static int comedi_fasync(int fd, struct file *file, int on)
 {
        const unsigned minor = iminor(file->f_dentry->d_inode);
-       struct comedi_device_file_info *dev_file_info =
-           comedi_get_device_file_info(minor);
+       struct comedi_device_file_info *dev_file_info;
+       struct comedi_device *dev;
+       dev_file_info = comedi_get_device_file_info(minor);
 
-       struct comedi_device *dev = dev_file_info->device;
+       if (dev_file_info == NULL)
+               return -ENODEV;
+       dev = dev_file_info->device;
+       if (dev == NULL)
+               return -ENODEV;
 
        return fasync_helper(fd, file, on, &dev->async_queue);
 }
index a8fea9a9173349799c0f2f629d23ce30412cf356..6144afb8cbaaf92a1ca09dfa5404787d72ee35ed 100644 (file)
@@ -1,4 +1,4 @@
-#define DRIVER_VERSION "v0.5"
+#define DRIVER_VERSION "v0.6"
 #define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com"
 #define DRIVER_DESC "Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com"
 /*
@@ -25,7 +25,7 @@ Driver: usbduxsigma
 Description: University of Stirling USB DAQ & INCITE Technology Limited
 Devices: [ITL] USB-DUX (usbduxsigma.o)
 Author: Bernd Porr <BerndPorr@f2s.com>
-Updated: 21 Jul 2011
+Updated: 8 Nov 2011
 Status: testing
 */
 /*
@@ -44,6 +44,7 @@ Status: testing
  *   0.3: proper vendor ID and driver name
  *   0.4: fixed D/A voltage range
  *   0.5: various bug fixes, health check at startup
+ *   0.6: corrected wrong input range
  */
 
 /* generates loads of debug info */
@@ -175,7 +176,7 @@ Status: testing
 /* comedi constants */
 static const struct comedi_lrange range_usbdux_ai_range = { 1, {
                                                                BIP_RANGE
-                                                               (2.65)
+                                                               (2.65/2.0)
                                                                }
 };
 
index fb2e89c3056c0c0c0ff1b1b02ada8f60eab8a551..5385da2e9cdbcdf23186738a787a7b8dba2e553a 100644 (file)
@@ -89,6 +89,7 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
        {USB_DEVICE(0x0DF6, 0x0045)},
        {USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
        {USB_DEVICE(0x0DF6, 0x004B)},
+       {USB_DEVICE(0x0DF6, 0x005D)},
        {USB_DEVICE(0x0DF6, 0x0063)},
        /* Sweex */
        {USB_DEVICE(0x177F, 0x0154)},
index 480b0ed2e4de8975ea315504190c5eff02e5ac59..115635f9502456d7e8e3f16470b7f4a269b23b5b 100644 (file)
@@ -1021,6 +1021,7 @@ static int __devinit rtsx_probe(struct pci_dev *pci,
        th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
        if (IS_ERR(th)) {
                printk(KERN_ERR "Unable to start the device-scanning thread\n");
+               complete(&dev->scanning_done);
                quiesce_and_remove_host(dev);
                err = PTR_ERR(th);
                goto errout;
index 3d1279c424a85155163c498ca463d0838fdfab65..7eb56178fb641d5090c5cbb69c3782112ca9b786 100644 (file)
@@ -54,6 +54,7 @@
 
 /* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */
 #define DMT_ID(id) ((id) + 4)
+#define DM_TIMER_CLOCKS                4
 
 /* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */
 #define MCBSP_ID(id) ((id) - 6)
@@ -114,8 +115,13 @@ static s8 get_clk_type(u8 id)
  */
 void dsp_clk_exit(void)
 {
+       int i;
+
        dsp_clock_disable_all(dsp_clocks);
 
+       for (i = 0; i < DM_TIMER_CLOCKS; i++)
+               omap_dm_timer_free(timer[i]);
+
        clk_put(iva2_clk);
        clk_put(ssi.sst_fck);
        clk_put(ssi.ssr_fck);
@@ -130,9 +136,13 @@ void dsp_clk_exit(void)
 void dsp_clk_init(void)
 {
        static struct platform_device dspbridge_device;
+       int i, id;
 
        dspbridge_device.dev.bus = &platform_bus_type;
 
+       for (i = 0, id = 5; i < DM_TIMER_CLOCKS; i++, id++)
+               timer[i] = omap_dm_timer_request_specific(id);
+
        iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
        if (IS_ERR(iva2_clk))
                dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
@@ -204,8 +214,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
                clk_enable(iva2_clk);
                break;
        case GPT_CLK:
-               timer[clk_id - 1] =
-                               omap_dm_timer_request_specific(DMT_ID(clk_id));
+               status = omap_dm_timer_start(timer[clk_id - 1]);
                break;
 #ifdef CONFIG_OMAP_MCBSP
        case MCBSP_CLK:
@@ -281,7 +290,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
                clk_disable(iva2_clk);
                break;
        case GPT_CLK:
-               omap_dm_timer_free(timer[clk_id - 1]);
+               status = omap_dm_timer_stop(timer[clk_id - 1]);
                break;
 #ifdef CONFIG_OMAP_MCBSP
        case MCBSP_CLK:
index c43c7e3421c851f48f1942c31cd9fd755ce2b0f3..76cfc6edecd9e26d46f7c3b37fbbf6d81cebdcd3 100644 (file)
 #include <linux/types.h>
 #include <linux/platform_device.h>
 #include <linux/pm.h>
-
-#ifdef MODULE
 #include <linux/module.h>
-#endif
-
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/moduleparam.h>
index 09c44abb89e8239c322bacb1671418dcd4b32e0a..3872b8cccdcf715067f273e091f3fd2cf419d6ac 100644 (file)
@@ -68,6 +68,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 {
        struct usbip_device *ud = &vdev->ud;
        struct urb *urb;
+       unsigned long flags;
 
        spin_lock(&vdev->priv_lock);
        urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
@@ -101,9 +102,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 
        usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
 
-       spin_lock(&the_controller->lock);
+       spin_lock_irqsave(&the_controller->lock, flags);
        usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
-       spin_unlock(&the_controller->lock);
+       spin_unlock_irqrestore(&the_controller->lock, flags);
 
        usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
 
@@ -141,6 +142,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 {
        struct vhci_unlink *unlink;
        struct urb *urb;
+       unsigned long flags;
 
        usbip_dump_header(pdu);
 
@@ -170,9 +172,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
                urb->status = pdu->u.ret_unlink.status;
                pr_info("urb->status %d\n", urb->status);
 
-               spin_lock(&the_controller->lock);
+               spin_lock_irqsave(&the_controller->lock, flags);
                usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
-               spin_unlock(&the_controller->lock);
+               spin_unlock_irqrestore(&the_controller->lock, flags);
 
                usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
                                     urb->status);
index 0fd96c10271d8364c9b6e69de8dc80ecba567501..8599545cdf9e6d6c90a07510844f6290ab0a1be6 100644 (file)
@@ -614,13 +614,12 @@ int iscsit_add_reject(
        hdr     = (struct iscsi_reject *) cmd->pdu;
        hdr->reason = reason;
 
-       cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+       cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
        if (!cmd->buf_ptr) {
                pr_err("Unable to allocate memory for cmd->buf_ptr\n");
                iscsit_release_cmd(cmd);
                return -1;
        }
-       memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
 
        spin_lock_bh(&conn->cmd_lock);
        list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
@@ -661,13 +660,12 @@ int iscsit_add_reject_from_cmd(
        hdr     = (struct iscsi_reject *) cmd->pdu;
        hdr->reason = reason;
 
-       cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+       cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
        if (!cmd->buf_ptr) {
                pr_err("Unable to allocate memory for cmd->buf_ptr\n");
                iscsit_release_cmd(cmd);
                return -1;
        }
-       memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
 
        if (add_to_conn) {
                spin_lock_bh(&conn->cmd_lock);
@@ -1017,11 +1015,6 @@ done:
                                " non-existent or non-exported iSCSI LUN:"
                                " 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
                }
-               if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
-                       return iscsit_add_reject_from_cmd(
-                                       ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-                                       1, 1, buf, cmd);
-
                send_check_condition = 1;
                goto attach_cmd;
        }
@@ -1044,6 +1037,8 @@ done:
                 */
                send_check_condition = 1;
        } else {
+               cmd->data_length = cmd->se_cmd.data_length;
+
                if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
                        return iscsit_add_reject_from_cmd(
                                ISCSI_REASON_BOOKMARK_NO_RESOURCES,
@@ -1123,7 +1118,7 @@ attach_cmd:
         * the backend memory allocation.
         */
        ret = transport_generic_new_cmd(&cmd->se_cmd);
-       if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
+       if (ret < 0) {
                immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
                dump_immediate_data = 1;
                goto after_immediate_data;
@@ -1341,7 +1336,7 @@ static int iscsit_handle_data_out(struct iscsi_conn *conn, unsigned char *buf)
 
                spin_lock_irqsave(&se_cmd->t_state_lock, flags);
                if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
-                    (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
+                    (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
                        dump_unsolicited_data = 1;
                spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 
@@ -2513,10 +2508,10 @@ static int iscsit_send_data_in(
        if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
                if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
                        hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
-                       hdr->residual_count = cpu_to_be32(cmd->residual_count);
+                       hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
                } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
                        hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
-                       hdr->residual_count = cpu_to_be32(cmd->residual_count);
+                       hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
                }
        }
        hton24(hdr->dlength, datain.length);
@@ -3018,10 +3013,10 @@ static int iscsit_send_status(
        hdr->flags              |= ISCSI_FLAG_CMD_FINAL;
        if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
                hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
-               hdr->residual_count = cpu_to_be32(cmd->residual_count);
+               hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
        } else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
                hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
-               hdr->residual_count = cpu_to_be32(cmd->residual_count);
+               hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
        }
        hdr->response           = cmd->iscsi_response;
        hdr->cmd_status         = cmd->se_cmd.scsi_status;
@@ -3133,6 +3128,7 @@ static int iscsit_send_task_mgt_rsp(
        hdr                     = (struct iscsi_tm_rsp *) cmd->pdu;
        memset(hdr, 0, ISCSI_HDR_LEN);
        hdr->opcode             = ISCSI_OP_SCSI_TMFUNC_RSP;
+       hdr->flags              = ISCSI_FLAG_CMD_FINAL;
        hdr->response           = iscsit_convert_tcm_tmr_rsp(se_tmr);
        hdr->itt                = cpu_to_be32(cmd->init_task_tag);
        cmd->stat_sn            = conn->stat_sn++;
index beb39469e7f1e4f91c971f6da468c742a338699d..1cd6ce373b83508fd396f82b80290b91372c9e96 100644 (file)
 
 static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
 {
-       int j = DIV_ROUND_UP(len, 2);
+       int j = DIV_ROUND_UP(len, 2), rc;
 
-       hex2bin(dst, src, j);
+       rc = hex2bin(dst, src, j);
+       if (rc < 0)
+               pr_debug("CHAP string contains non hex digit symbols\n");
 
        dst[j] = '\0';
        return j;
index 3723d90d5ae573db84b658fbd70bf2cd7a82bafa..f1a02dad05a02855b4ef59a6341e4bb61660ef30 100644 (file)
@@ -398,7 +398,6 @@ struct iscsi_cmd {
        u32                     pdu_send_order;
        /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
        u32                     pdu_start;
-       u32                     residual_count;
        /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
        u32                     seq_send_order;
        /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
@@ -535,7 +534,6 @@ struct iscsi_conn {
        atomic_t                connection_exit;
        atomic_t                connection_recovery;
        atomic_t                connection_reinstatement;
-       atomic_t                connection_wait;
        atomic_t                connection_wait_rcfr;
        atomic_t                sleep_on_conn_wait_comp;
        atomic_t                transport_failed;
@@ -643,7 +641,6 @@ struct iscsi_session {
        atomic_t                session_reinstatement;
        atomic_t                session_stop_active;
        atomic_t                sleep_on_sess_wait_comp;
-       atomic_t                transport_wait_cmds;
        /* connection list */
        struct list_head        sess_conn_list;
        struct list_head        cr_active_list;
index c4c68da3e5004b3fa39eeb71829bbefab4e38632..101b1beb3bca205aed7611ec4424f54cc5b20671 100644 (file)
@@ -938,8 +938,7 @@ int iscsit_execute_cmd(struct iscsi_cmd *cmd, int ooo)
                 * handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
                 */
                if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
-                       if (se_cmd->se_cmd_flags &
-                                       SCF_SCSI_RESERVATION_CONFLICT) {
+                       if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
                                cmd->i_state = ISTATE_SEND_STATUS;
                                spin_unlock_bh(&cmd->istate_lock);
                                iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
index daad362a93cecebeca5c9bcce26233da3b202df1..d734bdec24f9cf2b451a7f905b07fab421a19b3b 100644 (file)
@@ -224,7 +224,7 @@ static int iscsi_login_zero_tsih_s1(
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
                pr_err("Could not allocate memory for session\n");
-               return -1;
+               return -ENOMEM;
        }
 
        iscsi_login_set_conn_values(sess, conn, pdu->cid);
@@ -250,7 +250,8 @@ static int iscsi_login_zero_tsih_s1(
                pr_err("idr_pre_get() for sess_idr failed\n");
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
-               return -1;
+               kfree(sess);
+               return -ENOMEM;
        }
        spin_lock(&sess_idr_lock);
        idr_get_new(&sess_idr, NULL, &sess->session_index);
@@ -270,14 +271,16 @@ static int iscsi_login_zero_tsih_s1(
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
                pr_err("Unable to allocate memory for"
                                " struct iscsi_sess_ops.\n");
-               return -1;
+               kfree(sess);
+               return -ENOMEM;
        }
 
        sess->se_sess = transport_init_session();
-       if (!sess->se_sess) {
+       if (IS_ERR(sess->se_sess)) {
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
-               return -1;
+               kfree(sess);
+               return -ENOMEM;
        }
 
        return 0;
index 426cd4bf6a9aab344425723cd51766aa932cc536..98936cb7c2947ceb0edbaa41dda91d6641a9b892 100644 (file)
@@ -981,14 +981,13 @@ struct iscsi_login *iscsi_target_init_negotiation(
                return NULL;
        }
 
-       login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+       login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
        if (!login->req) {
                pr_err("Unable to allocate memory for Login Request.\n");
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
                goto out;
        }
-       memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
 
        login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
        if (!login->req_buf) {
index 3df1c9b8ae6b7e07118575ebbf24a59124644b01..81d5832fbbd537e7bbffe2c21b1792c1e7a2acde 100644 (file)
@@ -113,11 +113,9 @@ static struct se_cmd *tcm_loop_allocate_core_cmd(
                        scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
                        &tl_cmd->tl_sense_buf[0]);
 
-       /*
-        * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
-        */
        if (scsi_bidi_cmnd(sc))
-               se_cmd->t_tasks_bidi = 1;
+               se_cmd->se_cmd_flags |= SCF_BIDI;
+
        /*
         * Locate the struct se_lun pointer and attach it to struct se_cmd
         */
@@ -148,27 +146,13 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
         * Allocate the necessary tasks to complete the received CDB+data
         */
        ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
-       if (ret == -ENOMEM) {
-               /* Out of Resources */
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
-       } else if (ret == -EINVAL) {
-               /*
-                * Handle case for SAM_STAT_RESERVATION_CONFLICT
-                */
-               if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
-               /*
-                * Otherwise, return SAM_STAT_CHECK_CONDITION and return
-                * sense data.
-                */
-               return PYX_TRANSPORT_USE_SENSE_REASON;
-       }
-
+       if (ret != 0)
+               return ret;
        /*
         * For BIDI commands, pass in the extra READ buffer
         * to transport_generic_map_mem_to_cmd() below..
         */
-       if (se_cmd->t_tasks_bidi) {
+       if (se_cmd->se_cmd_flags & SCF_BIDI) {
                struct scsi_data_buffer *sdb = scsi_in(sc);
 
                sgl_bidi = sdb->table.sgl;
@@ -194,12 +178,8 @@ static int tcm_loop_new_cmd_map(struct se_cmd *se_cmd)
        }
 
        /* Tell the core about our preallocated memory */
-       ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+       return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
                        scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
-       if (ret < 0)
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
-
-       return 0;
 }
 
 /*
@@ -1360,17 +1340,16 @@ void tcm_loop_drop_scsi_hba(
 {
        struct tcm_loop_hba *tl_hba = container_of(wwn,
                                struct tcm_loop_hba, tl_hba_wwn);
-       int host_no = tl_hba->sh->host_no;
+
+       pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
+               " SAS Address: %s at Linux/SCSI Host ID: %d\n",
+               tl_hba->tl_wwn_address, tl_hba->sh->host_no);
        /*
         * Call device_unregister() on the original tl_hba->dev.
         * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
         * release *tl_hba;
         */
        device_unregister(&tl_hba->dev);
-
-       pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
-               " SAS Address: %s at Linux/SCSI Host ID: %d\n",
-               config_item_name(&wwn->wwn_group.cg_item), host_no);
 }
 
 /* Start items for tcm_loop_cit */
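
The hunks above and below follow one conversion applied across the target code in this commit: instead of returning an opaque PYX_TRANSPORT_* status, a handler records the SCSI sense reason on the struct se_cmd and returns an ordinary negative errno, with 0 replacing PYX_TRANSPORT_SENT_TO_TRANSPORT on success. A minimal sketch of the new convention, using example_emulate_op() and backend_supports_op() as hypothetical names:

        static int example_emulate_op(struct se_task *task)
        {
                struct se_cmd *cmd = task->task_se_cmd;

                /* unsupported request: record the sense reason, return an errno */
                if (!backend_supports_op(cmd)) {
                        cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
                        return -ENOSYS;
                }

                /* success: complete the task and return 0 */
                task->task_scsi_status = GOOD;
                transport_complete_task(task, 1);
                return 0;
        }

The core can then build the CHECK CONDITION response from cmd->scsi_sense_reason instead of decoding the old PYX_TRANSPORT_* values.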
index 88f2ad43ec8b589922e34de340dac32325080f13..1dcbef499d6a09f2add951dee10c955f3541ccb8 100644 (file)
@@ -191,9 +191,10 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        int alua_access_state, primary = 0, rc;
        u16 tg_pt_id, rtpi;
 
-       if (!l_port)
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
-
+       if (!l_port) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
+       }
        buf = transport_kmap_first_data_page(cmd);
 
        /*
@@ -203,7 +204,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
        if (!l_tg_pt_gp_mem) {
                pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
-               rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               rc = -EINVAL;
                goto out;
        }
        spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
@@ -211,7 +213,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        if (!l_tg_pt_gp) {
                spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
                pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
-               rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               rc = -EINVAL;
                goto out;
        }
        rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
@@ -220,7 +223,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        if (!rc) {
                pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
                                " while TPGS_EXPLICT_ALUA is disabled\n");
-               rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               rc = -EINVAL;
                goto out;
        }
 
@@ -245,7 +249,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
                         * REQUEST, and the additional sense code set to INVALID
                         * FIELD IN PARAMETER LIST.
                         */
-                       rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       rc = -EINVAL;
                        goto out;
                }
                rc = -1;
@@ -298,7 +303,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
                         * throw an exception with ASCQ: INVALID_PARAMETER_LIST
                         */
                        if (rc != 0) {
-                               rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                               rc = -EINVAL;
                                goto out;
                        }
                } else {
@@ -335,7 +341,8 @@ int target_emulate_set_target_port_groups(struct se_task *task)
                         * INVALID_PARAMETER_LIST
                         */
                        if (rc != 0) {
-                               rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                               rc = -EINVAL;
                                goto out;
                        }
                }
@@ -1184,7 +1191,6 @@ void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
         * struct t10_alua_lu_gp.
         */
        spin_lock(&lu_gps_lock);
-       atomic_set(&lu_gp->lu_gp_shutdown, 1);
        list_del(&lu_gp->lu_gp_node);
        alua_lu_gps_count--;
        spin_unlock(&lu_gps_lock);
@@ -1438,7 +1444,6 @@ struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
 
        tg_pt_gp_mem->tg_pt = port;
        port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
-       atomic_set(&port->sep_tg_pt_gp_active, 1);
 
        return tg_pt_gp_mem;
 }
index 683ba02b8247feddd92777fc46824196e5a60180..831468b3163d777f3eb5c982fc05819d37dea3e6 100644 (file)
@@ -478,7 +478,7 @@ target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
        if (cmd->data_length < 60)
                return 0;
 
-       buf[2] = 0x3c;
+       buf[3] = 0x3c;
        /* Set HEADSUP, ORDSUP, SIMPSUP */
        buf[5] = 0x07;
 
@@ -703,6 +703,7 @@ int target_emulate_inquiry(struct se_task *task)
        if (cmd->data_length < 4) {
                pr_err("SCSI Inquiry payload length: %u"
                        " too small for EVPD=1\n", cmd->data_length);
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
                return -EINVAL;
        }
 
@@ -719,6 +720,7 @@ int target_emulate_inquiry(struct se_task *task)
        }
 
        pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+       cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
        ret = -EINVAL;
 
 out_unmap:
@@ -969,7 +971,8 @@ int target_emulate_modesense(struct se_task *task)
        default:
                pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
                       cdb[2] & 0x3f, cdb[3]);
-               return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
+               cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
+               return -EINVAL;
        }
        offset += length;
 
@@ -1027,7 +1030,8 @@ int target_emulate_request_sense(struct se_task *task)
        if (cdb[1] & 0x01) {
                pr_err("REQUEST_SENSE description emulation not"
                        " supported\n");
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -ENOSYS;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -1100,7 +1104,8 @@ int target_emulate_unmap(struct se_task *task)
        if (!dev->transport->do_discard) {
                pr_err("UNMAP emulation not supported for: %s\n",
                                dev->transport->name);
-               return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               return -ENOSYS;
        }
 
        /* First UNMAP block descriptor starts at 8 byte offset */
@@ -1157,7 +1162,8 @@ int target_emulate_write_same(struct se_task *task)
        if (!dev->transport->do_discard) {
                pr_err("WRITE_SAME emulation not supported"
                                " for: %s\n", dev->transport->name);
-               return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               return -ENOSYS;
        }
 
        if (cmd->t_task_cdb[0] == WRITE_SAME)
@@ -1193,11 +1199,13 @@ int target_emulate_write_same(struct se_task *task)
 int target_emulate_synchronize_cache(struct se_task *task)
 {
        struct se_device *dev = task->task_se_cmd->se_dev;
+       struct se_cmd *cmd = task->task_se_cmd;
 
        if (!dev->transport->do_sync_cache) {
                pr_err("SYNCHRONIZE_CACHE emulation not supported"
                        " for: %s\n", dev->transport->name);
-               return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               return -ENOSYS;
        }
 
        dev->transport->do_sync_cache(task);
index e0c1e8a8dd4e2140e13d7adedb4c84d414b45725..93d4f6a1b7980c597c119ae7f7f0506d57846993 100644 (file)
@@ -67,9 +67,6 @@ static struct config_group target_core_hbagroup;
 static struct config_group alua_group;
 static struct config_group alua_lu_gps_group;
 
-static DEFINE_SPINLOCK(se_device_lock);
-static LIST_HEAD(se_dev_list);
-
 static inline struct se_hba *
 item_to_hba(struct config_item *item)
 {
@@ -2741,7 +2738,6 @@ static struct config_group *target_core_make_subdev(
                                " struct se_subsystem_dev\n");
                goto unlock;
        }
-       INIT_LIST_HEAD(&se_dev->se_dev_node);
        INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
@@ -2777,9 +2773,6 @@ static struct config_group *target_core_make_subdev(
                        " from allocate_virtdevice()\n");
                goto out;
        }
-       spin_lock(&se_device_lock);
-       list_add_tail(&se_dev->se_dev_node, &se_dev_list);
-       spin_unlock(&se_device_lock);
 
        config_group_init_type_name(&se_dev->se_dev_group, name,
                        &target_core_dev_cit);
@@ -2874,10 +2867,6 @@ static void target_core_drop_subdev(
        mutex_lock(&hba->hba_access_mutex);
        t = hba->transport;
 
-       spin_lock(&se_device_lock);
-       list_del(&se_dev->se_dev_node);
-       spin_unlock(&se_device_lock);
-
        dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
        for (i = 0; dev_stat_grp->default_groups[i]; i++) {
                df_item = &dev_stat_grp->default_groups[i]->cg_item;
index ba5edec2c5f858edaa011a463fae7bb31ad2a008..9b8639425472d8322aab749c6ae03fb56ea2a377 100644 (file)
@@ -104,7 +104,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                se_cmd->se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
-               se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
        }
        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
@@ -137,7 +136,6 @@ int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->orig_fe_lun = 0;
-               se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
        }
        /*
@@ -200,7 +198,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
                se_lun = deve->se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->orig_fe_lun = unpacked_lun;
-               se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
        }
        spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
 
@@ -708,7 +705,7 @@ done:
 
        se_task->task_scsi_status = GOOD;
        transport_complete_task(se_task, 1);
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 }
 
 /*     se_release_device_for_hba():
@@ -957,8 +954,12 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
                return -EINVAL;
        }
 
-       pr_err("dpo_emulated not supported\n");
-       return -EINVAL;
+       if (flag) {
+               pr_err("dpo_emulated not supported\n");
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
@@ -968,7 +969,7 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
                return -EINVAL;
        }
 
-       if (dev->transport->fua_write_emulated == 0) {
+       if (flag && dev->transport->fua_write_emulated == 0) {
                pr_err("fua_write_emulated not supported\n");
                return -EINVAL;
        }
@@ -985,8 +986,12 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
                return -EINVAL;
        }
 
-       pr_err("ua read emulated not supported\n");
-       return -EINVAL;
+       if (flag) {
+               pr_err("ua read emulated not supported\n");
+               return -EINVAL;
+       }
+
+       return 0;
 }
 
 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
@@ -995,7 +1000,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
-       if (dev->transport->write_cache_emulated == 0) {
+       if (flag && dev->transport->write_cache_emulated == 0) {
                pr_err("write_cache_emulated not supported\n");
                return -EINVAL;
        }
@@ -1056,7 +1061,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
         * We expect this value to be non-zero when generic Block Layer
         * Discard supported is detected iblock_create_virtdevice().
         */
-       if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+       if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
                pr_err("Generic Block Discard not supported\n");
                return -ENOSYS;
        }
@@ -1077,7 +1082,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
         * We expect this value to be non-zero when generic Block Layer
         * Discard supported is detected iblock_create_virtdevice().
         */
-       if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+       if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
                pr_err("Generic Block Discard not supported\n");
                return -ENOSYS;
        }
@@ -1587,7 +1592,6 @@ int core_dev_setup_virtual_lun0(void)
                ret = -ENOMEM;
                goto out;
        }
-       INIT_LIST_HEAD(&se_dev->se_dev_node);
        INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
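
The se_dev_set_emulate_*() hunks above relax the attribute setters: writing 0 now always succeeds, and only enabling a feature the backend cannot emulate is rejected. A sketch of the resulting shape, with example_set_emulate_write_cache() as a hypothetical stand-in modelled on se_dev_set_emulate_write_cache() above:

        int example_set_emulate_write_cache(struct se_device *dev, int flag)
        {
                if (flag != 0 && flag != 1) {
                        pr_err("Illegal value %d\n", flag);
                        return -EINVAL;
                }
                /* only reject when the caller tries to turn the feature on */
                if (flag && dev->transport->write_cache_emulated == 0) {
                        pr_err("write_cache_emulated not supported\n");
                        return -EINVAL;
                }
                dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
                return 0;
        }

This keeps disabling the attribute (writing 0) working against backends that never supported the emulation in the first place.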
index 67cd6fe05bfa7c751596752da1ff0dd00035908f..b4864fba4ef0d511758916a8debac60ee9f43674 100644 (file)
@@ -289,9 +289,9 @@ static int fd_do_readv(struct se_task *task)
                return -ENOMEM;
        }
 
-       for (i = 0; i < task->task_sg_nents; i++) {
-               iov[i].iov_len = sg[i].length;
-               iov[i].iov_base = sg_virt(&sg[i]);
+       for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+               iov[i].iov_len = sg->length;
+               iov[i].iov_base = sg_virt(sg);
        }
 
        old_fs = get_fs();
@@ -342,9 +342,9 @@ static int fd_do_writev(struct se_task *task)
                return -ENOMEM;
        }
 
-       for (i = 0; i < task->task_sg_nents; i++) {
-               iov[i].iov_len = sg[i].length;
-               iov[i].iov_base = sg_virt(&sg[i]);
+       for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+               iov[i].iov_len = sg->length;
+               iov[i].iov_base = sg_virt(sg);
        }
 
        old_fs = get_fs();
@@ -438,7 +438,7 @@ static int fd_do_task(struct se_task *task)
                if (ret > 0 &&
                    dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
                    dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-                   cmd->t_tasks_fua) {
+                   (cmd->se_cmd_flags & SCF_FUA)) {
                        /*
                         * We might need to be a bit smarter here
                         * and return some sense data to let the initiator
@@ -449,13 +449,15 @@ static int fd_do_task(struct se_task *task)
 
        }
 
-       if (ret < 0)
+       if (ret < 0) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                return ret;
+       }
        if (ret) {
                task->task_scsi_status = GOOD;
                transport_complete_task(task, 1);
        }
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 }
 
 /*     fd_free_task(): (Part of se_subsystem_api_t template)
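
fd_do_readv() and fd_do_writev() above switch from indexing the scatterlist array to the for_each_sg() iterator, which also follows chained scatterlists. A self-contained sketch of the converted loop, with example_build_iovec() as a hypothetical helper:

        #include <linux/scatterlist.h>
        #include <linux/slab.h>
        #include <linux/uio.h>

        static struct iovec *example_build_iovec(struct se_task *task)
        {
                struct scatterlist *sg;
                struct iovec *iov;
                int i;

                iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
                if (!iov)
                        return NULL;

                /* visit each sg entry in order, even across chained tables */
                for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
                        iov[i].iov_len = sg->length;
                        iov[i].iov_base = sg_virt(sg);
                }
                return iov;
        }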
index 7698efe29262bfd8a7cb521016da0aff1ff0517d..4aa9922044382628fc21e12d8df3ab67840c23ae 100644 (file)
@@ -531,7 +531,7 @@ static int iblock_do_task(struct se_task *task)
                 */
                if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
                    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
-                    task->task_se_cmd->t_tasks_fua))
+                    (cmd->se_cmd_flags & SCF_FUA)))
                        rw = WRITE_FUA;
                else
                        rw = WRITE;
@@ -554,12 +554,15 @@ static int iblock_do_task(struct se_task *task)
        else {
                pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
                                " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -ENOSYS;
        }
 
        bio = iblock_get_bio(task, block_lba, sg_num);
-       if (!bio)
-               return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+       if (!bio) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -ENOMEM;
+       }
 
        bio_list_init(&list);
        bio_list_add(&list, bio);
@@ -588,12 +591,13 @@ static int iblock_do_task(struct se_task *task)
                submit_bio(rw, bio);
        blk_finish_plug(&plug);
 
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 
 fail:
        while ((bio = bio_list_pop(&list)))
                bio_put(bio);
-       return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       return -ENOMEM;
 }
 
 static u32 iblock_get_device_rev(struct se_device *dev)
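
With t_tasks_fua folded into se_cmd_flags as SCF_FUA, iblock_do_task() above selects the block-layer write type as sketched here; the WRITE_FUA condition is taken from the hunk, while the READ branch and the surrounding function are assumed and trimmed:

        int rw;

        if (task->task_data_direction != DMA_TO_DEVICE) {
                rw = READ;
        } else if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
                   (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
                    (cmd->se_cmd_flags & SCF_FUA))) {
                /* no volatile write cache, or the initiator asked for FUA */
                rw = WRITE_FUA;
        } else {
                rw = WRITE;
        }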
index 5a4ebfc3a54f34791f6df93a36e02d85d752ebf7..95dee7074aeb5eb05f630fdded543e948ac206b3 100644 (file)
@@ -191,7 +191,7 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
                pr_err("Received legacy SPC-2 RESERVE/RELEASE"
                        " while active SPC-3 registrations exist,"
                        " returning RESERVATION_CONFLICT\n");
-               *ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
                return true;
        }
 
@@ -252,7 +252,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
            (cmd->t_task_cdb[1] & 0x02)) {
                pr_err("LongIO and Obselete Bits set, returning"
                                " ILLEGAL_REQUEST\n");
-               ret = PYX_TRANSPORT_ILLEGAL_REQUEST;
+               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -277,7 +278,8 @@ int target_scsi2_reservation_reserve(struct se_task *task)
                        " from %s \n", cmd->se_lun->unpacked_lun,
                        cmd->se_deve->mapped_lun,
                        sess->se_node_acl->initiatorname);
-               ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               ret = -EINVAL;
                goto out_unlock;
        }
 
@@ -1510,7 +1512,8 @@ static int core_scsi3_decode_spec_i_port(
        tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
        if (!tidh_new) {
                pr_err("Unable to allocate tidh_new\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        INIT_LIST_HEAD(&tidh_new->dest_list);
        tidh_new->dest_tpg = tpg;
@@ -1522,7 +1525,8 @@ static int core_scsi3_decode_spec_i_port(
                                sa_res_key, all_tg_pt, aptpl);
        if (!local_pr_reg) {
                kfree(tidh_new);
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -ENOMEM;
        }
        tidh_new->dest_pr_reg = local_pr_reg;
        /*
@@ -1548,7 +1552,8 @@ static int core_scsi3_decode_spec_i_port(
                pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
                        " does not equal CDB data_length: %u\n", tpdl,
                        cmd->data_length);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -1598,7 +1603,9 @@ static int core_scsi3_decode_spec_i_port(
                                        " for tmp_tpg\n");
                                atomic_dec(&tmp_tpg->tpg_pr_ref_count);
                                smp_mb__after_atomic_dec();
-                               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+                               cmd->scsi_sense_reason =
+                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                               ret = -EINVAL;
                                goto out;
                        }
                        /*
@@ -1628,7 +1635,9 @@ static int core_scsi3_decode_spec_i_port(
                                atomic_dec(&dest_node_acl->acl_pr_ref_count);
                                smp_mb__after_atomic_dec();
                                core_scsi3_tpg_undepend_item(tmp_tpg);
-                               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+                               cmd->scsi_sense_reason =
+                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                               ret = -EINVAL;
                                goto out;
                        }
 
@@ -1646,7 +1655,8 @@ static int core_scsi3_decode_spec_i_port(
                if (!dest_tpg) {
                        pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
                                        " dest_tpg\n");
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
 #if 0
@@ -1660,7 +1670,8 @@ static int core_scsi3_decode_spec_i_port(
                                " %u for Transport ID: %s\n", tid_len, ptr);
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
                /*
@@ -1678,7 +1689,8 @@ static int core_scsi3_decode_spec_i_port(
 
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
 
@@ -1690,7 +1702,9 @@ static int core_scsi3_decode_spec_i_port(
                        smp_mb__after_atomic_dec();
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       ret = -EINVAL;
                        goto out;
                }
 #if 0
@@ -1727,7 +1741,9 @@ static int core_scsi3_decode_spec_i_port(
                        core_scsi3_lunacl_undepend_item(dest_se_deve);
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
-                       ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       ret = -ENOMEM;
                        goto out;
                }
                INIT_LIST_HEAD(&tidh_new->dest_list);
@@ -1759,7 +1775,8 @@ static int core_scsi3_decode_spec_i_port(
                        core_scsi3_nodeacl_undepend_item(dest_node_acl);
                        core_scsi3_tpg_undepend_item(dest_tpg);
                        kfree(tidh_new);
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
                tidh_new->dest_pr_reg = dest_pr_reg;
@@ -2098,7 +2115,8 @@ static int core_scsi3_emulate_pro_register(
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        se_tpg = se_sess->se_tpg;
        se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2117,13 +2135,14 @@ static int core_scsi3_emulate_pro_register(
                if (res_key) {
                        pr_warn("SPC-3 PR: Reservation Key non-zero"
                                " for SA REGISTER, returning CONFLICT\n");
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EINVAL;
                }
                /*
                 * Do nothing but return GOOD status.
                 */
                if (!sa_res_key)
-                       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+                       return 0;
 
                if (!spec_i_pt) {
                        /*
@@ -2138,7 +2157,8 @@ static int core_scsi3_emulate_pro_register(
                        if (ret != 0) {
                                pr_err("Unable to allocate"
                                        " struct t10_pr_registration\n");
-                               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                               return -EINVAL;
                        }
                } else {
                        /*
@@ -2197,14 +2217,16 @@ static int core_scsi3_emulate_pro_register(
                                        " 0x%016Lx\n", res_key,
                                        pr_reg->pr_res_key);
                                core_scsi3_put_pr_reg(pr_reg);
-                               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                               return -EINVAL;
                        }
                }
                if (spec_i_pt) {
                        pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
                                " set while sa_res_key=0\n");
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       return -EINVAL;
                }
                /*
                 * An existing ALL_TG_PT=1 registration being released
@@ -2215,7 +2237,8 @@ static int core_scsi3_emulate_pro_register(
                                " registration exists, but ALL_TG_PT=1 bit not"
                                " present in received PROUT\n");
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_INVALID_CDB_FIELD;
+                       cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+                       return -EINVAL;
                }
                /*
                 * Allocate APTPL metadata buffer used for UNREGISTER ops
@@ -2227,7 +2250,9 @@ static int core_scsi3_emulate_pro_register(
                                pr_err("Unable to allocate"
                                        " pr_aptpl_buf\n");
                                core_scsi3_put_pr_reg(pr_reg);
-                               return PYX_TRANSPORT_LU_COMM_FAILURE;
+                               cmd->scsi_sense_reason =
+                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                               return -EINVAL;
                        }
                }
                /*
@@ -2241,7 +2266,8 @@ static int core_scsi3_emulate_pro_register(
                        if (pr_holder < 0) {
                                kfree(pr_aptpl_buf);
                                core_scsi3_put_pr_reg(pr_reg);
-                               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                               return -EINVAL;
                        }
 
                        spin_lock(&pr_tmpl->registration_lock);
@@ -2405,7 +2431,8 @@ static int core_scsi3_pro_reserve(
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        se_tpg = se_sess->se_tpg;
        se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
@@ -2417,7 +2444,8 @@ static int core_scsi3_pro_reserve(
        if (!pr_reg) {
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for RESERVE\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * From spc4r17 Section 5.7.9: Reserving:
@@ -2433,7 +2461,8 @@ static int core_scsi3_pro_reserve(
                        " does not match existing SA REGISTER res_key:"
                        " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * From spc4r17 Section 5.7.9: Reserving:
@@ -2448,7 +2477,8 @@ static int core_scsi3_pro_reserve(
        if (scope != PR_SCOPE_LU_SCOPE) {
                pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
        /*
         * See if we have an existing PR reservation holder pointer at
@@ -2480,7 +2510,8 @@ static int core_scsi3_pro_reserve(
 
                        spin_unlock(&dev->dev_reservation_lock);
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EINVAL;
                }
                /*
                 * From spc4r17 Section 5.7.9: Reserving:
@@ -2503,7 +2534,8 @@ static int core_scsi3_pro_reserve(
 
                        spin_unlock(&dev->dev_reservation_lock);
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EINVAL;
                }
                /*
                 * From spc4r17 Section 5.7.9: Reserving:
@@ -2517,7 +2549,7 @@ static int core_scsi3_pro_reserve(
                 */
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+               return 0;
        }
        /*
         * Otherwise, our *pr_reg becomes the PR reservation holder for said
@@ -2574,7 +2606,8 @@ static int core_scsi3_emulate_pro_reserve(
        default:
                pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
                        " 0x%02x\n", type);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        return ret;
@@ -2630,7 +2663,8 @@ static int core_scsi3_emulate_pro_release(
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * Locate the existing *pr_reg via struct se_node_acl pointers
@@ -2639,7 +2673,8 @@ static int core_scsi3_emulate_pro_release(
        if (!pr_reg) {
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for RELEASE\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * From spc4r17 Section 5.7.11.2 Releasing:
@@ -2661,7 +2696,7 @@ static int core_scsi3_emulate_pro_release(
                 */
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+               return 0;
        }
        if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
            (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
@@ -2675,7 +2710,7 @@ static int core_scsi3_emulate_pro_release(
                 */
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+               return 0;
        }
        /*
         * From spc4r17 Section 5.7.11.2 Releasing:
@@ -2697,7 +2732,8 @@ static int core_scsi3_emulate_pro_release(
                        " 0x%016Lx\n", res_key, pr_reg->pr_res_key);
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * From spc4r17 Section 5.7.11.2 Releasing and above:
@@ -2719,7 +2755,8 @@ static int core_scsi3_emulate_pro_release(
 
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * In response to a persistent reservation release request from the
@@ -2802,7 +2839,8 @@ static int core_scsi3_emulate_pro_clear(
        if (!pr_reg_n) {
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for CLEAR\n");
-                       return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * From spc4r17 section 5.7.11.6, Clearing:
@@ -2821,7 +2859,8 @@ static int core_scsi3_emulate_pro_clear(
                        " existing SA REGISTER res_key:"
                        " 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
                core_scsi3_put_pr_reg(pr_reg_n);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * a) Release the persistent reservation, if any;
@@ -2979,8 +3018,10 @@ static int core_scsi3_pro_preempt(
        int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
        int prh_type = 0, prh_scope = 0, ret;
 
-       if (!se_sess)
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+       if (!se_sess) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
+       }
 
        se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
        pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
@@ -2989,16 +3030,19 @@ static int core_scsi3_pro_preempt(
                pr_err("SPC-3 PR: Unable to locate"
                        " PR_REGISTERED *pr_reg for PREEMPT%s\n",
                        (abort) ? "_AND_ABORT" : "");
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        if (pr_reg_n->pr_res_key != res_key) {
                core_scsi3_put_pr_reg(pr_reg_n);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        if (scope != PR_SCOPE_LU_SCOPE) {
                pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
                core_scsi3_put_pr_reg(pr_reg_n);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
        INIT_LIST_HEAD(&preempt_and_abort_list);
 
@@ -3012,7 +3056,8 @@ static int core_scsi3_pro_preempt(
        if (!all_reg && !sa_res_key) {
                spin_unlock(&dev->dev_reservation_lock);
                core_scsi3_put_pr_reg(pr_reg_n);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
        /*
         * From spc4r17, section 5.7.11.4.4 Removing Registrations:
@@ -3106,7 +3151,8 @@ static int core_scsi3_pro_preempt(
                if (!released_regs) {
                        spin_unlock(&dev->dev_reservation_lock);
                        core_scsi3_put_pr_reg(pr_reg_n);
-                       return PYX_TRANSPORT_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EINVAL;
                }
                /*
                 * For an existing all registrants type reservation
@@ -3297,7 +3343,8 @@ static int core_scsi3_emulate_pro_preempt(
        default:
                pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
                        " Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        return ret;
@@ -3331,7 +3378,8 @@ static int core_scsi3_emulate_pro_register_and_move(
 
        if (!se_sess || !se_lun) {
                pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        memset(dest_iport, 0, 64);
        memset(i_buf, 0, PR_REG_ISID_ID_LEN);
@@ -3349,7 +3397,8 @@ static int core_scsi3_emulate_pro_register_and_move(
        if (!pr_reg) {
                pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
                        " *pr_reg for REGISTER_AND_MOVE\n");
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
        }
        /*
         * The provided reservation key must match the existing reservation key
@@ -3360,7 +3409,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " res_key: 0x%016Lx does not match existing SA REGISTER"
                        " res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
        /*
         * The service action reservation key needs to be non-zero
@@ -3369,7 +3419,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
                        " sa_res_key\n");
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
 
        /*
@@ -3392,7 +3443,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " does not equal CDB data_length: %u\n", tid_len,
                        cmd->data_length);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
 
        spin_lock(&dev->se_port_lock);
@@ -3417,7 +3469,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
                        smp_mb__after_atomic_dec();
                        core_scsi3_put_pr_reg(pr_reg);
-                       return PYX_TRANSPORT_LU_COMM_FAILURE;
+                       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       return -EINVAL;
                }
 
                spin_lock(&dev->se_port_lock);
@@ -3430,7 +3483,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " fabric ops from Relative Target Port Identifier:"
                        " %hu\n", rtpi);
                core_scsi3_put_pr_reg(pr_reg);
-               return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -3445,14 +3499,16 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " from fabric: %s\n", proto_ident,
                        dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
                        dest_tf_ops->get_fabric_name());
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
                pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
                        " containg a valid tpg_parse_pr_out_transport_id"
                        " function pointer\n");
-               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               ret = -EINVAL;
                goto out;
        }
        initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
@@ -3460,7 +3516,8 @@ static int core_scsi3_emulate_pro_register_and_move(
        if (!initiator_str) {
                pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
                        " initiator_str from Transport ID\n");
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3489,7 +3546,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
                        " matches: %s on received I_T Nexus\n", initiator_str,
                        pr_reg_nacl->initiatorname);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
@@ -3497,7 +3555,8 @@ static int core_scsi3_emulate_pro_register_and_move(
                        " matches: %s %s on received I_T Nexus\n",
                        initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
                        pr_reg->pr_reg_isid);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 after_iport_check:
@@ -3517,7 +3576,8 @@ after_iport_check:
                pr_err("Unable to locate %s dest_node_acl for"
                        " TransportID%s\n", dest_tf_ops->get_fabric_name(),
                        initiator_str);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
@@ -3527,7 +3587,8 @@ after_iport_check:
                atomic_dec(&dest_node_acl->acl_pr_ref_count);
                smp_mb__after_atomic_dec();
                dest_node_acl = NULL;
-               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 #if 0
@@ -3543,7 +3604,8 @@ after_iport_check:
        if (!dest_se_deve) {
                pr_err("Unable to locate %s dest_se_deve from RTPI:"
                        " %hu\n",  dest_tf_ops->get_fabric_name(), rtpi);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3553,7 +3615,8 @@ after_iport_check:
                atomic_dec(&dest_se_deve->pr_ref_count);
                smp_mb__after_atomic_dec();
                dest_se_deve = NULL;
-               ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               ret = -EINVAL;
                goto out;
        }
 #if 0
@@ -3572,7 +3635,8 @@ after_iport_check:
                pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
                        " currently held\n");
                spin_unlock(&dev->dev_reservation_lock);
-               ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -3585,7 +3649,8 @@ after_iport_check:
                pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
                        " Nexus is not reservation holder\n");
                spin_unlock(&dev->dev_reservation_lock);
-               ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -3603,7 +3668,8 @@ after_iport_check:
                        " reservation for type: %s\n",
                        core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
                spin_unlock(&dev->dev_reservation_lock);
-               ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               ret = -EINVAL;
                goto out;
        }
        pr_res_nacl = pr_res_holder->pr_reg_nacl;
@@ -3640,7 +3706,8 @@ after_iport_check:
                                sa_res_key, 0, aptpl, 2, 1);
                if (ret != 0) {
                        spin_unlock(&dev->dev_reservation_lock);
-                       ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+                       ret = -EINVAL;
                        goto out;
                }
                dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
@@ -3771,7 +3838,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
                pr_err("Received PERSISTENT_RESERVE CDB while legacy"
                        " SPC-2 reservation is held, returning"
                        " RESERVATION_CONFLICT\n");
-               ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3779,13 +3847,16 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
         * FIXME: A NULL struct se_session pointer means this is not coming from
         * a $FABRIC_MOD's nexus, but from internal passthrough ops.
         */
-       if (!cmd->se_sess)
-               return PYX_TRANSPORT_LU_COMM_FAILURE;
+       if (!cmd->se_sess) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               return -EINVAL;
+       }
 
        if (cmd->data_length < 24) {
                pr_warn("SPC-PR: Received PR OUT parameter list"
                        " length too small: %u\n", cmd->data_length);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -3820,7 +3891,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
         * SPEC_I_PT=1 is only valid for Service action: REGISTER
         */
        if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
 
@@ -3837,7 +3909,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
            (cmd->data_length != 24)) {
                pr_warn("SPC-PR: Received PR OUT illegal parameter"
                        " list length: %u\n", cmd->data_length);
-               ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+               ret = -EINVAL;
                goto out;
        }
        /*
@@ -3878,7 +3951,8 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
        default:
                pr_err("Unknown PERSISTENT_RESERVE_OUT service"
                        " action: 0x%02x\n", cdb[1] & 0x1f);
-               ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               ret = -EINVAL;
                break;
        }
 
@@ -3906,7 +3980,8 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
        if (cmd->data_length < 8) {
                pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
                        " too small\n", cmd->data_length);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -3965,7 +4040,8 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
        if (cmd->data_length < 8) {
                pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
                        " too small\n", cmd->data_length);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -4047,7 +4123,8 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
        if (cmd->data_length < 6) {
                pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
                        " %u too small\n", cmd->data_length);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -4108,7 +4185,8 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
        if (cmd->data_length < 8) {
                pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
                        " too small\n", cmd->data_length);
-               return PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               return -EINVAL;
        }
 
        buf = transport_kmap_first_data_page(cmd);
@@ -4255,7 +4333,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
                pr_err("Received PERSISTENT_RESERVE CDB while legacy"
                        " SPC-2 reservation is held, returning"
                        " RESERVATION_CONFLICT\n");
-               return PYX_TRANSPORT_RESERVATION_CONFLICT;
+               cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+               return -EINVAL;
        }
 
        switch (cmd->t_task_cdb[1] & 0x1f) {
@@ -4274,7 +4353,8 @@ int target_scsi3_emulate_pr_in(struct se_task *task)
        default:
                pr_err("Unknown PERSISTENT_RESERVE_IN service"
                        " action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
-               ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               ret = -EINVAL;
                break;
        }
 
index ed32e1efe42906bbfe91a309889c0acde60a8da5..8b15e56b038461169872d964055316c0318e7e31 100644 (file)
@@ -963,6 +963,7 @@ static inline struct bio *pscsi_get_bio(int sg_num)
 static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
                struct bio **hbio)
 {
+       struct se_cmd *cmd = task->task_se_cmd;
        struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
        u32 task_sg_num = task->task_sg_nents;
        struct bio *bio = NULL, *tbio = NULL;
@@ -971,7 +972,7 @@ static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
        u32 data_len = task->task_size, i, len, bytes, off;
        int nr_pages = (task->task_size + task_sg[0].offset +
                        PAGE_SIZE - 1) >> PAGE_SHIFT;
-       int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+       int nr_vecs = 0, rc;
        int rw = (task->task_data_direction == DMA_TO_DEVICE);
 
        *hbio = NULL;
@@ -1058,11 +1059,13 @@ fail:
                bio->bi_next = NULL;
                bio_endio(bio, 0);      /* XXX: should be error */
        }
-       return ret;
+       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       return -ENOMEM;
 }
 
 static int pscsi_do_task(struct se_task *task)
 {
+       struct se_cmd *cmd = task->task_se_cmd;
        struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
        struct pscsi_plugin_task *pt = PSCSI_TASK(task);
        struct request *req;
@@ -1078,7 +1081,9 @@ static int pscsi_do_task(struct se_task *task)
                if (!req || IS_ERR(req)) {
                        pr_err("PSCSI: blk_get_request() failed: %ld\n",
                                        req ? IS_ERR(req) : -ENOMEM);
-                       return PYX_TRANSPORT_LU_COMM_FAILURE;
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       return -ENODEV;
                }
        } else {
                BUG_ON(!task->task_size);
@@ -1087,8 +1092,11 @@ static int pscsi_do_task(struct se_task *task)
                 * Setup the main struct request for the task->task_sg[] payload
                 */
                ret = pscsi_map_sg(task, task->task_sg, &hbio);
-               if (ret < 0)
-                       return PYX_TRANSPORT_LU_COMM_FAILURE;
+               if (ret < 0) {
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       return ret;
+               }
 
                req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
                                       GFP_KERNEL);
@@ -1115,7 +1123,7 @@ static int pscsi_do_task(struct se_task *task)
                        (task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
                        pscsi_req_done);
 
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 
 fail:
        while (hbio) {
@@ -1124,7 +1132,8 @@ fail:
                bio->bi_next = NULL;
                bio_endio(bio, 0);      /* XXX: should be error */
        }
-       return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       return -ENOMEM;
 }
 
 /*     pscsi_get_sense_buffer():
@@ -1198,9 +1207,8 @@ static inline void pscsi_process_SAM_status(
                        " 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
                        pt->pscsi_result);
                task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
-               task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
-               task->task_se_cmd->transport_error_status =
-                                       PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+               task->task_se_cmd->scsi_sense_reason =
+                                       TCM_UNSUPPORTED_SCSI_OPCODE;
                transport_complete_task(task, 0);
                break;
        }
index 5158d3846f19cf8f79f69e7efe97f72558b0d413..02e51faa2f4ea168f0a6139c8e303fc9fca81c28 100644 (file)
@@ -343,235 +343,74 @@ static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
        return NULL;
 }
 
-/*     rd_MEMCPY_read():
- *
- *
- */
-static int rd_MEMCPY_read(struct rd_request *req)
+static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
 {
        struct se_task *task = &req->rd_task;
        struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
        struct rd_dev_sg_table *table;
-       struct scatterlist *sg_d, *sg_s;
-       void *dst, *src;
-       u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
-       u32 length, page_end = 0, table_sg_end;
+       struct scatterlist *rd_sg;
+       struct sg_mapping_iter m;
        u32 rd_offset = req->rd_offset;
+       u32 src_len;
 
        table = rd_get_sg_table(dev, req->rd_page);
        if (!table)
                return -EINVAL;
 
-       table_sg_end = (table->page_end_offset - req->rd_page);
-       sg_d = task->task_sg;
-       sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+       rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
 
-       pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
-               " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
-               req->rd_page, req->rd_offset);
-
-       src_offset = rd_offset;
+       pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+                       dev->rd_dev_id, read_rd ? "Read" : "Write",
+                       task->task_lba, req->rd_size, req->rd_page,
+                       rd_offset);
 
+       src_len = PAGE_SIZE - rd_offset;
+       sg_miter_start(&m, task->task_sg, task->task_sg_nents,
+                       read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
        while (req->rd_size) {
-               if ((sg_d[i].length - dst_offset) <
-                   (sg_s[j].length - src_offset)) {
-                       length = (sg_d[i].length - dst_offset);
-
-                       pr_debug("Step 1 - sg_d[%d]: %p length: %d"
-                               " offset: %u sg_s[%d].length: %u\n", i,
-                               &sg_d[i], sg_d[i].length, sg_d[i].offset, j,
-                               sg_s[j].length);
-                       pr_debug("Step 1 - length: %u dst_offset: %u"
-                               " src_offset: %u\n", length, dst_offset,
-                               src_offset);
-
-                       if (length > req->rd_size)
-                               length = req->rd_size;
-
-                       dst = sg_virt(&sg_d[i++]) + dst_offset;
-                       BUG_ON(!dst);
-
-                       src = sg_virt(&sg_s[j]) + src_offset;
-                       BUG_ON(!src);
-
-                       dst_offset = 0;
-                       src_offset = length;
-                       page_end = 0;
-               } else {
-                       length = (sg_s[j].length - src_offset);
-
-                       pr_debug("Step 2 - sg_d[%d]: %p length: %d"
-                               " offset: %u sg_s[%d].length: %u\n", i,
-                               &sg_d[i], sg_d[i].length, sg_d[i].offset,
-                               j, sg_s[j].length);
-                       pr_debug("Step 2 - length: %u dst_offset: %u"
-                               " src_offset: %u\n", length, dst_offset,
-                               src_offset);
-
-                       if (length > req->rd_size)
-                               length = req->rd_size;
-
-                       dst = sg_virt(&sg_d[i]) + dst_offset;
-                       BUG_ON(!dst);
-
-                       if (sg_d[i].length == length) {
-                               i++;
-                               dst_offset = 0;
-                       } else
-                               dst_offset = length;
-
-                       src = sg_virt(&sg_s[j++]) + src_offset;
-                       BUG_ON(!src);
-
-                       src_offset = 0;
-                       page_end = 1;
-               }
+               u32 len;
+               void *rd_addr;
 
-               memcpy(dst, src, length);
+               sg_miter_next(&m);
+               len = min((u32)m.length, src_len);
+               m.consumed = len;
 
-               pr_debug("page: %u, remaining size: %u, length: %u,"
-                       " i: %u, j: %u\n", req->rd_page,
-                       (req->rd_size - length), length, i, j);
+               rd_addr = sg_virt(rd_sg) + rd_offset;
 
-               req->rd_size -= length;
-               if (!req->rd_size)
-                       return 0;
+               if (read_rd)
+                       memcpy(m.addr, rd_addr, len);
+               else
+                       memcpy(rd_addr, m.addr, len);
 
-               if (!page_end)
+               req->rd_size -= len;
+               if (!req->rd_size)
                        continue;
 
-               if (++req->rd_page <= table->page_end_offset) {
-                       pr_debug("page: %u in same page table\n",
-                               req->rd_page);
+               src_len -= len;
+               if (src_len) {
+                       rd_offset += len;
                        continue;
                }
 
-               pr_debug("getting new page table for page: %u\n",
-                               req->rd_page);
-
-               table = rd_get_sg_table(dev, req->rd_page);
-               if (!table)
-                       return -EINVAL;
-
-               sg_s = &table->sg_table[j = 0];
-       }
-
-       return 0;
-}
-
-/*     rd_MEMCPY_write():
- *
- *
- */
-static int rd_MEMCPY_write(struct rd_request *req)
-{
-       struct se_task *task = &req->rd_task;
-       struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
-       struct rd_dev_sg_table *table;
-       struct scatterlist *sg_d, *sg_s;
-       void *dst, *src;
-       u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
-       u32 length, page_end = 0, table_sg_end;
-       u32 rd_offset = req->rd_offset;
-
-       table = rd_get_sg_table(dev, req->rd_page);
-       if (!table)
-               return -EINVAL;
-
-       table_sg_end = (table->page_end_offset - req->rd_page);
-       sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
-       sg_s = task->task_sg;
-
-       pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
-               " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
-               req->rd_page, req->rd_offset);
-
-       dst_offset = rd_offset;
-
-       while (req->rd_size) {
-               if ((sg_s[i].length - src_offset) <
-                   (sg_d[j].length - dst_offset)) {
-                       length = (sg_s[i].length - src_offset);
-
-                       pr_debug("Step 1 - sg_s[%d]: %p length: %d"
-                               " offset: %d sg_d[%d].length: %u\n", i,
-                               &sg_s[i], sg_s[i].length, sg_s[i].offset,
-                               j, sg_d[j].length);
-                       pr_debug("Step 1 - length: %u src_offset: %u"
-                               " dst_offset: %u\n", length, src_offset,
-                               dst_offset);
-
-                       if (length > req->rd_size)
-                               length = req->rd_size;
-
-                       src = sg_virt(&sg_s[i++]) + src_offset;
-                       BUG_ON(!src);
-
-                       dst = sg_virt(&sg_d[j]) + dst_offset;
-                       BUG_ON(!dst);
-
-                       src_offset = 0;
-                       dst_offset = length;
-                       page_end = 0;
-               } else {
-                       length = (sg_d[j].length - dst_offset);
-
-                       pr_debug("Step 2 - sg_s[%d]: %p length: %d"
-                               " offset: %d sg_d[%d].length: %u\n", i,
-                               &sg_s[i], sg_s[i].length, sg_s[i].offset,
-                               j, sg_d[j].length);
-                       pr_debug("Step 2 - length: %u src_offset: %u"
-                               " dst_offset: %u\n", length, src_offset,
-                               dst_offset);
-
-                       if (length > req->rd_size)
-                               length = req->rd_size;
-
-                       src = sg_virt(&sg_s[i]) + src_offset;
-                       BUG_ON(!src);
-
-                       if (sg_s[i].length == length) {
-                               i++;
-                               src_offset = 0;
-                       } else
-                               src_offset = length;
-
-                       dst = sg_virt(&sg_d[j++]) + dst_offset;
-                       BUG_ON(!dst);
-
-                       dst_offset = 0;
-                       page_end = 1;
-               }
-
-               memcpy(dst, src, length);
-
-               pr_debug("page: %u, remaining size: %u, length: %u,"
-                       " i: %u, j: %u\n", req->rd_page,
-                       (req->rd_size - length), length, i, j);
-
-               req->rd_size -= length;
-               if (!req->rd_size)
-                       return 0;
-
-               if (!page_end)
-                       continue;
-
-               if (++req->rd_page <= table->page_end_offset) {
-                       pr_debug("page: %u in same page table\n",
-                               req->rd_page);
+               /* rd page completed, next one please */
+               req->rd_page++;
+               rd_offset = 0;
+               src_len = PAGE_SIZE;
+               if (req->rd_page <= table->page_end_offset) {
+                       rd_sg++;
                        continue;
                }
 
-               pr_debug("getting new page table for page: %u\n",
-                               req->rd_page);
-
                table = rd_get_sg_table(dev, req->rd_page);
-               if (!table)
+               if (!table) {
+                       sg_miter_stop(&m);
                        return -EINVAL;
+               }
 
-               sg_d = &table->sg_table[j = 0];
+               /* since we increment, the first sg entry is correct */
+               rd_sg = table->sg_table;
        }
-
+       sg_miter_stop(&m);
        return 0;
 }
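
The rewritten rd_MEMCPY() above replaces the hand-rolled dual scatterlist walk with the kernel's sg_mapping_iter API (sg_miter_start()/sg_miter_next()/sg_miter_stop()), iterating only the task's scatterlist and indexing the ramdisk pages directly. As a standalone reference for that iterator, here is a small, hedged sketch that copies a scatterlist out into a flat buffer; sg_copy_out() is a made-up name (the kernel's own sg_copy_to_buffer() does essentially this).

    #include <linux/scatterlist.h>
    #include <linux/string.h>

    /* Copy up to buflen bytes out of an sg list into buf; returns bytes copied. */
    static size_t sg_copy_out(struct scatterlist *sgl, unsigned int nents,
                              void *buf, size_t buflen)
    {
            struct sg_mapping_iter m;
            size_t copied = 0;

            /* SG_MITER_FROM_SG: the scatterlist is the data source */
            sg_miter_start(&m, sgl, nents, SG_MITER_FROM_SG);
            while (copied < buflen && sg_miter_next(&m)) {
                    size_t len = min(m.length, buflen - copied);

                    memcpy(buf + copied, m.addr, len);
                    m.consumed = len;       /* advance by what we actually used */
                    copied += len;
            }
            sg_miter_stop(&m);              /* required to release the mapping */
            return copied;
    }
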
 
@@ -583,28 +422,21 @@ static int rd_MEMCPY_do_task(struct se_task *task)
 {
        struct se_device *dev = task->task_se_cmd->se_dev;
        struct rd_request *req = RD_REQ(task);
-       unsigned long long lba;
+       u64 tmp;
        int ret;
 
-       req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
-       lba = task->task_lba;
-       req->rd_offset = (do_div(lba,
-                         (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
-                          dev->se_sub_dev->se_dev_attrib.block_size;
+       tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+       req->rd_offset = do_div(tmp, PAGE_SIZE);
+       req->rd_page = tmp;
        req->rd_size = task->task_size;
 
-       if (task->task_data_direction == DMA_FROM_DEVICE)
-               ret = rd_MEMCPY_read(req);
-       else
-               ret = rd_MEMCPY_write(req);
-
+       ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
        if (ret != 0)
                return ret;
 
        task->task_scsi_status = GOOD;
        transport_complete_task(task, 1);
-
-       return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+       return 0;
 }
 
 /*     rd_free_task(): (Part of se_subsystem_api_t template)
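
The simplified rd_MEMCPY_do_task() above computes the starting backing page and intra-page offset in one step: the LBA is scaled to an absolute byte offset and do_div() splits it by PAGE_SIZE, leaving the quotient (page index) in the variable and returning the remainder (offset within that page). A worked example, assuming a 512-byte block size and 4 KiB pages purely for illustration:

    u64 tmp = 9 * 512;                      /* LBA 9 at 512 bytes/block = byte 4608 */
    u32 rd_offset = do_div(tmp, 4096);      /* remainder: 512 bytes into the page */
    u32 rd_page = tmp;                      /* quotient left in tmp: page 1 */
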
index 217e29df62977559d1886320ea54c6dbe7d04fab..684522805a1f370a99a5745c2fa3815a9c722912 100644 (file)
@@ -345,10 +345,6 @@ static void core_tmr_drain_cmd_list(
                        " %d t_fe_count: %d\n", (preempt_and_abort_list) ?
                        "Preempt" : "", cmd, cmd->t_state,
                        atomic_read(&cmd->t_fe_count));
-               /*
-                * Signal that the command has failed via cmd->se_cmd_flags,
-                */
-               transport_new_cmd_failure(cmd);
 
                core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
                                atomic_read(&cmd->t_fe_count));
index 3400ae6e93f83d2ae5b25395b97bbd6158877ec0..0257658e2e3ea8a75642ae0dcabc77547ac2379b 100644 (file)
@@ -61,7 +61,6 @@
 static int sub_api_initialized;
 
 static struct workqueue_struct *target_completion_wq;
-static struct kmem_cache *se_cmd_cache;
 static struct kmem_cache *se_sess_cache;
 struct kmem_cache *se_tmr_req_cache;
 struct kmem_cache *se_ua_cache;
@@ -82,24 +81,18 @@ static int transport_generic_get_mem(struct se_cmd *cmd);
 static void transport_put_cmd(struct se_cmd *cmd);
 static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
 static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
-static void transport_generic_request_failure(struct se_cmd *, int, int);
+static void transport_generic_request_failure(struct se_cmd *);
 static void target_complete_ok_work(struct work_struct *work);
 
 int init_se_kmem_caches(void)
 {
-       se_cmd_cache = kmem_cache_create("se_cmd_cache",
-                       sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
-       if (!se_cmd_cache) {
-               pr_err("kmem_cache_create for struct se_cmd failed\n");
-               goto out;
-       }
        se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
                        sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
                        0, NULL);
        if (!se_tmr_req_cache) {
                pr_err("kmem_cache_create() for struct se_tmr_req"
                                " failed\n");
-               goto out_free_cmd_cache;
+               goto out;
        }
        se_sess_cache = kmem_cache_create("se_sess_cache",
                        sizeof(struct se_session), __alignof__(struct se_session),
@@ -182,8 +175,6 @@ out_free_sess_cache:
        kmem_cache_destroy(se_sess_cache);
 out_free_tmr_req_cache:
        kmem_cache_destroy(se_tmr_req_cache);
-out_free_cmd_cache:
-       kmem_cache_destroy(se_cmd_cache);
 out:
        return -ENOMEM;
 }
@@ -191,7 +182,6 @@ out:
 void release_se_kmem_caches(void)
 {
        destroy_workqueue(target_completion_wq);
-       kmem_cache_destroy(se_cmd_cache);
        kmem_cache_destroy(se_tmr_req_cache);
        kmem_cache_destroy(se_sess_cache);
        kmem_cache_destroy(se_ua_cache);
@@ -680,9 +670,9 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)
                task->task_scsi_status = GOOD;
        } else {
                task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
-               task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
-               task->task_se_cmd->transport_error_status =
-                                       PYX_TRANSPORT_ILLEGAL_REQUEST;
+               task->task_se_cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
        }
 
        transport_complete_task(task, good);
@@ -693,7 +683,7 @@ static void target_complete_failure_work(struct work_struct *work)
 {
        struct se_cmd *cmd = container_of(work, struct se_cmd, work);
 
-       transport_generic_request_failure(cmd, 1, 1);
+       transport_generic_request_failure(cmd);
 }
 
 /*     transport_complete_task():
@@ -755,10 +745,11 @@ void transport_complete_task(struct se_task *task, int success)
        if (cmd->t_tasks_failed) {
                if (!task->task_error_status) {
                        task->task_error_status =
-                               PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
-                       cmd->transport_error_status =
-                               PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       cmd->scsi_sense_reason =
+                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                }
+
                INIT_WORK(&cmd->work, target_complete_failure_work);
        } else {
                atomic_set(&cmd->t_transport_complete, 1);
@@ -1335,23 +1326,17 @@ struct se_device *transport_add_device_to_core_hba(
        dev->se_hba             = hba;
        dev->se_sub_dev         = se_dev;
        dev->transport          = transport;
-       atomic_set(&dev->active_cmds, 0);
        INIT_LIST_HEAD(&dev->dev_list);
        INIT_LIST_HEAD(&dev->dev_sep_list);
        INIT_LIST_HEAD(&dev->dev_tmr_list);
        INIT_LIST_HEAD(&dev->execute_task_list);
        INIT_LIST_HEAD(&dev->delayed_cmd_list);
-       INIT_LIST_HEAD(&dev->ordered_cmd_list);
        INIT_LIST_HEAD(&dev->state_task_list);
        INIT_LIST_HEAD(&dev->qf_cmd_list);
        spin_lock_init(&dev->execute_task_lock);
        spin_lock_init(&dev->delayed_cmd_lock);
-       spin_lock_init(&dev->ordered_cmd_lock);
-       spin_lock_init(&dev->state_task_lock);
-       spin_lock_init(&dev->dev_alua_lock);
        spin_lock_init(&dev->dev_reservation_lock);
        spin_lock_init(&dev->dev_status_lock);
-       spin_lock_init(&dev->dev_status_thr_lock);
        spin_lock_init(&dev->se_port_lock);
        spin_lock_init(&dev->se_tmr_lock);
        spin_lock_init(&dev->qf_cmd_lock);
@@ -1507,7 +1492,6 @@ void transport_init_se_cmd(
 {
        INIT_LIST_HEAD(&cmd->se_lun_node);
        INIT_LIST_HEAD(&cmd->se_delayed_node);
-       INIT_LIST_HEAD(&cmd->se_ordered_node);
        INIT_LIST_HEAD(&cmd->se_qf_node);
        INIT_LIST_HEAD(&cmd->se_queue_node);
        INIT_LIST_HEAD(&cmd->se_cmd_list);
@@ -1573,6 +1557,8 @@ int transport_generic_allocate_tasks(
                pr_err("Received SCSI CDB with command_size: %d that"
                        " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                        scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
+               cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
                return -EINVAL;
        }
        /*
@@ -1588,6 +1574,9 @@ int transport_generic_allocate_tasks(
                                " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
                                scsi_command_size(cdb),
                                (unsigned long)sizeof(cmd->__t_task_cdb));
+                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+                       cmd->scsi_sense_reason =
+                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                        return -ENOMEM;
                }
        } else
@@ -1658,11 +1647,9 @@ int transport_handle_cdb_direct(
         * and call transport_generic_request_failure() if necessary..
         */
        ret = transport_generic_new_cmd(cmd);
-       if (ret < 0) {
-               cmd->transport_error_status = ret;
-               transport_generic_request_failure(cmd, 0,
-                               (cmd->data_direction != DMA_TO_DEVICE));
-       }
+       if (ret < 0)
+               transport_generic_request_failure(cmd);
+
        return 0;
 }
 EXPORT_SYMBOL(transport_handle_cdb_direct);
@@ -1798,20 +1785,16 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 /*
  * Handle SAM-esque emulation for generic transport request failures.
  */
-static void transport_generic_request_failure(
-       struct se_cmd *cmd,
-       int complete,
-       int sc)
+static void transport_generic_request_failure(struct se_cmd *cmd)
 {
        int ret = 0;
 
        pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
                " CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
                cmd->t_task_cdb[0]);
-       pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",
+       pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
                cmd->se_tfo->get_cmd_state(cmd),
-               cmd->t_state,
-               cmd->transport_error_status);
+               cmd->t_state, cmd->scsi_sense_reason);
        pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
                " t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
                " t_transport_active: %d t_transport_stop: %d"
@@ -1829,46 +1812,19 @@ static void transport_generic_request_failure(
        if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
                transport_complete_task_attr(cmd);
 
-       if (complete) {
-               cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
-       }
-
-       switch (cmd->transport_error_status) {
-       case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
-               cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
-               break;
-       case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
-               cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
-               break;
-       case PYX_TRANSPORT_INVALID_CDB_FIELD:
-               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
-               break;
-       case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               break;
-       case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
-               if (!sc)
-                       transport_new_cmd_failure(cmd);
-               /*
-                * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
-                * we force this session to fall back to session
-                * recovery.
-                */
-               cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
-               cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
-
-               goto check_stop;
-       case PYX_TRANSPORT_LU_COMM_FAILURE:
-       case PYX_TRANSPORT_ILLEGAL_REQUEST:
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               break;
-       case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
-               cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
-               break;
-       case PYX_TRANSPORT_WRITE_PROTECTED:
-               cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+       switch (cmd->scsi_sense_reason) {
+       case TCM_NON_EXISTENT_LUN:
+       case TCM_UNSUPPORTED_SCSI_OPCODE:
+       case TCM_INVALID_CDB_FIELD:
+       case TCM_INVALID_PARAMETER_LIST:
+       case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+       case TCM_UNKNOWN_MODE_PAGE:
+       case TCM_WRITE_PROTECTED:
+       case TCM_CHECK_CONDITION_ABORT_CMD:
+       case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+       case TCM_CHECK_CONDITION_NOT_READY:
                break;
-       case PYX_TRANSPORT_RESERVATION_CONFLICT:
+       case TCM_RESERVATION_CONFLICT:
                /*
                 * No SENSE Data payload for this case, set SCSI Status
                 * and queue the response to $FABRIC_MOD.
@@ -1893,15 +1849,9 @@ static void transport_generic_request_failure(
                if (ret == -EAGAIN || ret == -ENOMEM)
                        goto queue_full;
                goto check_stop;
-       case PYX_TRANSPORT_USE_SENSE_REASON:
-               /*
-                * struct se_cmd->scsi_sense_reason already set
-                */
-               break;
        default:
                pr_err("Unknown transport error for CDB 0x%02x: %d\n",
-                       cmd->t_task_cdb[0],
-                       cmd->transport_error_status);
+                       cmd->t_task_cdb[0], cmd->scsi_sense_reason);
                cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
                break;
        }
@@ -1912,14 +1862,10 @@ static void transport_generic_request_failure(
         * transport_send_check_condition_and_sense() after handling
         * possible unsoliticied write data payloads.
         */
-       if (!sc && !cmd->se_tfo->new_cmd_map)
-               transport_new_cmd_failure(cmd);
-       else {
-               ret = transport_send_check_condition_and_sense(cmd,
-                               cmd->scsi_sense_reason, 0);
-               if (ret == -EAGAIN || ret == -ENOMEM)
-                       goto queue_full;
-       }
+       ret = transport_send_check_condition_and_sense(cmd,
+                       cmd->scsi_sense_reason, 0);
+       if (ret == -EAGAIN || ret == -ENOMEM)
+               goto queue_full;
 
 check_stop:
        transport_lun_remove_cmd(cmd);
@@ -2002,19 +1948,12 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
         * to allow the passed struct se_cmd list of tasks to the front of the list.
         */
         if (cmd->sam_task_attr == MSG_HEAD_TAG) {
-               atomic_inc(&cmd->se_dev->dev_hoq_count);
-               smp_mb__after_atomic_inc();
                pr_debug("Added HEAD_OF_QUEUE for CDB:"
                        " 0x%02x, se_ordered_id: %u\n",
                        cmd->t_task_cdb[0],
                        cmd->se_ordered_id);
                return 1;
        } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
-               spin_lock(&cmd->se_dev->ordered_cmd_lock);
-               list_add_tail(&cmd->se_ordered_node,
-                               &cmd->se_dev->ordered_cmd_list);
-               spin_unlock(&cmd->se_dev->ordered_cmd_lock);
-
                atomic_inc(&cmd->se_dev->dev_ordered_sync);
                smp_mb__after_atomic_inc();
 
@@ -2076,9 +2015,9 @@ static int transport_execute_tasks(struct se_cmd *cmd)
 {
        int add_tasks;
 
-       if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
-               cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
-               transport_generic_request_failure(cmd, 0, 1);
+       if (se_dev_check_online(cmd->se_dev) != 0) {
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               transport_generic_request_failure(cmd);
                return 0;
        }
 
@@ -2163,14 +2102,13 @@ check_depth:
        else
                error = dev->transport->do_task(task);
        if (error != 0) {
-               cmd->transport_error_status = error;
                spin_lock_irqsave(&cmd->t_state_lock, flags);
                task->task_flags &= ~TF_ACTIVE;
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                atomic_set(&cmd->t_transport_sent, 0);
                transport_stop_tasks_for_cmd(cmd);
                atomic_inc(&dev->depth_left);
-               transport_generic_request_failure(cmd, 0, 1);
+               transport_generic_request_failure(cmd);
        }
 
        goto check_depth;
@@ -2178,19 +2116,6 @@ check_depth:
        return 0;
 }
 
-void transport_new_cmd_failure(struct se_cmd *se_cmd)
-{
-       unsigned long flags;
-       /*
-        * Any unsolicited data will get dumped for failed command inside of
-        * the fabric plugin
-        */
-       spin_lock_irqsave(&se_cmd->t_state_lock, flags);
-       se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
-       se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-       spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-}
-
 static inline u32 transport_get_sectors_6(
        unsigned char *cdb,
        struct se_cmd *cmd,
@@ -2213,10 +2138,15 @@ static inline u32 transport_get_sectors_6(
 
        /*
         * Everything else assume TYPE_DISK Sector CDB location.
-        * Use 8-bit sector value.
+        * Use 8-bit sector value.  SBC-3 says:
+        *
+        *   A TRANSFER LENGTH field set to zero specifies that 256
+        *   logical blocks shall be written.  Any other value
+        *   specifies the number of logical blocks that shall be
+        *   written.
         */
 type_disk:
-       return (u32)cdb[4];
+       return cdb[4] ? : 256;
 }
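
The change above implements the SBC-3 rule quoted in the comment: for the 6-byte READ/WRITE CDBs, a TRANSFER LENGTH of zero means 256 logical blocks. `cdb[4] ? : 256` is the GNU "elvis" shorthand for `cdb[4] ? cdb[4] : 256`; spelled out (helper name invented for illustration):

    /* Transfer length for a 6-byte CDB, per SBC-3 */
    static inline u32 cdb6_xfer_len(const unsigned char *cdb)
    {
            return cdb[4] ? cdb[4] : 256;   /* 0 encodes 256 blocks */
    }
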
 
 static inline u32 transport_get_sectors_10(
@@ -2460,27 +2390,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)
        return -1;
 }
 
-static int
-transport_handle_reservation_conflict(struct se_cmd *cmd)
-{
-       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-       cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
-       cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
-       /*
-        * For UA Interlock Code 11b, a RESERVATION CONFLICT will
-        * establish a UNIT ATTENTION with PREVIOUS RESERVATION
-        * CONFLICT STATUS.
-        *
-        * See spc4r17, section 7.4.6 Control Mode Page, Table 349
-        */
-       if (cmd->se_sess &&
-           cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
-               core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
-                       cmd->orig_fe_lun, 0x2C,
-                       ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
-       return -EINVAL;
-}
-
 static inline long long transport_dev_end_lba(struct se_device *dev)
 {
        return dev->transport->get_blocks(dev) + 1;
@@ -2595,8 +2504,12 @@ static int transport_generic_cmd_sequencer(
         */
        if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
                if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
-                                       cmd, cdb, pr_reg_type) != 0)
-                       return transport_handle_reservation_conflict(cmd);
+                                       cmd, cdb, pr_reg_type) != 0) {
+                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+                       cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+                       cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+                       return -EBUSY;
+               }
                /*
                 * This means the CDB is allowed for the SCSI Initiator port
                 * when said port is *NOT* holding the legacy SPC-2 or
@@ -2658,7 +2571,8 @@ static int transport_generic_cmd_sequencer(
                        goto out_unsupported_cdb;
                size = transport_get_size(sectors, cdb, cmd);
                cmd->t_task_lba = transport_lba_32(cdb);
-               cmd->t_tasks_fua = (cdb[1] & 0x8);
+               if (cdb[1] & 0x8)
+                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
                break;
        case WRITE_12:
@@ -2667,7 +2581,8 @@ static int transport_generic_cmd_sequencer(
                        goto out_unsupported_cdb;
                size = transport_get_size(sectors, cdb, cmd);
                cmd->t_task_lba = transport_lba_32(cdb);
-               cmd->t_tasks_fua = (cdb[1] & 0x8);
+               if (cdb[1] & 0x8)
+                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
                break;
        case WRITE_16:
@@ -2676,12 +2591,13 @@ static int transport_generic_cmd_sequencer(
                        goto out_unsupported_cdb;
                size = transport_get_size(sectors, cdb, cmd);
                cmd->t_task_lba = transport_lba_64(cdb);
-               cmd->t_tasks_fua = (cdb[1] & 0x8);
+               if (cdb[1] & 0x8)
+                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
                break;
        case XDWRITEREAD_10:
                if ((cmd->data_direction != DMA_TO_DEVICE) ||
-                   !(cmd->t_tasks_bidi))
+                   !(cmd->se_cmd_flags & SCF_BIDI))
                        goto out_invalid_cdb_field;
                sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
                if (sector_ret)
@@ -2700,7 +2616,8 @@ static int transport_generic_cmd_sequencer(
                 * Setup BIDI XOR callback to be run after I/O completion.
                 */
                cmd->transport_complete_callback = &transport_xor_callback;
-               cmd->t_tasks_fua = (cdb[1] & 0x8);
+               if (cdb[1] & 0x8)
+                       cmd->se_cmd_flags |= SCF_FUA;
                break;
        case VARIABLE_LENGTH_CMD:
                service_action = get_unaligned_be16(&cdb[8]);
@@ -2728,7 +2645,8 @@ static int transport_generic_cmd_sequencer(
                         * completion.
                         */
                        cmd->transport_complete_callback = &transport_xor_callback;
-                       cmd->t_tasks_fua = (cdb[10] & 0x8);
+                       if (cdb[1] & 0x8)
+                               cmd->se_cmd_flags |= SCF_FUA;
                        break;
                case WRITE_SAME_32:
                        sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
@@ -3171,18 +3089,13 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
                        " SIMPLE: %u\n", dev->dev_cur_ordered_id,
                        cmd->se_ordered_id);
        } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
-               atomic_dec(&dev->dev_hoq_count);
-               smp_mb__after_atomic_dec();
                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev_cur_ordered_id: %u for"
                        " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
                        cmd->se_ordered_id);
        } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
-               spin_lock(&dev->ordered_cmd_lock);
-               list_del(&cmd->se_ordered_node);
                atomic_dec(&dev->dev_ordered_sync);
                smp_mb__after_atomic_dec();
-               spin_unlock(&dev->ordered_cmd_lock);
 
                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -3495,6 +3408,18 @@ int transport_generic_map_mem_to_cmd(
 
        if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
            (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+               /*
+                * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+                * scatterlists already have been set to follow what the fabric
+                * passes for the original expected data transfer length.
+                */
+               if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+                       pr_warn("Rejecting SCSI DATA overflow for fabric using"
+                               " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+                       cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+                       cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+                       return -EINVAL;
+               }
 
                cmd->t_data_sg = sgl;
                cmd->t_data_nents = sgl_count;
@@ -3813,7 +3738,7 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
            cmd->data_length) {
                ret = transport_generic_get_mem(cmd);
                if (ret < 0)
-                       return ret;
+                       goto out_fail;
        }
 
        /*
@@ -3842,8 +3767,15 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
                task_cdbs = transport_allocate_control_task(cmd);
        }
 
-       if (task_cdbs <= 0)
+       if (task_cdbs < 0)
                goto out_fail;
+       else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+               cmd->t_state = TRANSPORT_COMPLETE;
+               atomic_set(&cmd->t_transport_active, 1);
+               INIT_WORK(&cmd->work, target_complete_ok_work);
+               queue_work(target_completion_wq, &cmd->work);
+               return 0;
+       }
 
        if (set_counts) {
                atomic_inc(&cmd->t_fe_count);
@@ -3929,7 +3861,7 @@ static int transport_generic_write_pending(struct se_cmd *cmd)
        else if (ret < 0)
                return ret;
 
-       return PYX_TRANSPORT_WRITE_PENDING;
+       return 1;
 
 queue_full:
        pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
@@ -4602,9 +4534,6 @@ void transport_send_task_abort(struct se_cmd *cmd)
                if (cmd->se_tfo->write_pending_status(cmd) != 0) {
                        atomic_inc(&cmd->t_transport_aborted);
                        smp_mb__after_atomic_inc();
-                       cmd->scsi_status = SAM_STAT_TASK_ABORTED;
-                       transport_new_cmd_failure(cmd);
-                       return;
                }
        }
        cmd->scsi_status = SAM_STAT_TASK_ABORTED;
@@ -4670,8 +4599,6 @@ static int transport_processing_thread(void *param)
        struct se_cmd *cmd;
        struct se_device *dev = (struct se_device *) param;
 
-       set_user_nice(current, -20);
-
        while (!kthread_should_stop()) {
                ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
                                atomic_read(&dev->dev_queue_obj.queue_cnt) ||
@@ -4698,18 +4625,13 @@ get_cmd:
                        }
                        ret = cmd->se_tfo->new_cmd_map(cmd);
                        if (ret < 0) {
-                               cmd->transport_error_status = ret;
-                               transport_generic_request_failure(cmd,
-                                               0, (cmd->data_direction !=
-                                                   DMA_TO_DEVICE));
+                               transport_generic_request_failure(cmd);
                                break;
                        }
                        ret = transport_generic_new_cmd(cmd);
                        if (ret < 0) {
-                               cmd->transport_error_status = ret;
-                               transport_generic_request_failure(cmd,
-                                       0, (cmd->data_direction !=
-                                        DMA_TO_DEVICE));
+                               transport_generic_request_failure(cmd);
+                               break;
                        }
                        break;
                case TRANSPORT_PROCESS_WRITE:
index 4fac37c4c615263abbfa60594cfcfc3f1917dbc7..71fc9cea5dc9ba120b725a4e0f6bf94c879d1340 100644 (file)
@@ -200,7 +200,7 @@ int ft_write_pending(struct se_cmd *se_cmd)
        lport = ep->lp;
        fp = fc_frame_alloc(lport, sizeof(*txrdy));
        if (!fp)
-               return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+               return -ENOMEM; /* Signal QUEUE_FULL */
 
        txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
        memset(txrdy, 0, sizeof(*txrdy));
index 5f770412ca403265ddfc19856ef4934470a25c63..9402b7387cac570d91ff6001a885daed3bbd8b6d 100644 (file)
@@ -436,8 +436,7 @@ static void ft_del_lport(struct se_wwn *wwn)
        struct ft_lport_acl *lacl = container_of(wwn,
                                struct ft_lport_acl, fc_lport_wwn);
 
-       pr_debug("del lport %s\n",
-                       config_item_name(&wwn->wwn_group.cg_item));
+       pr_debug("del lport %s\n", lacl->name);
        mutex_lock(&ft_lport_lock);
        list_del(&lacl->list);
        mutex_unlock(&ft_lport_lock);
index e8c564a533469f00f7e40435933ae496187aa20a..a8078d0638fa09de0f0c0be43c70dd1d94909ee5 100644 (file)
@@ -1458,6 +1458,16 @@ static const struct usb_device_id acm_ids[] = {
        },
        { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
        },
+       /* Motorola H24 HSPA module: */
+       { USB_DEVICE(0x22b8, 0x2d91) }, /* modem                                */
+       { USB_DEVICE(0x22b8, 0x2d92) }, /* modem           + diagnostics        */
+       { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port                      */
+       { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics        */
+       { USB_DEVICE(0x22b8, 0x2d96) }, /* modem                         + NMEA */
+       { USB_DEVICE(0x22b8, 0x2d97) }, /* modem           + diagnostics + NMEA */
+       { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port               + NMEA */
+       { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
+
        { USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
        .driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
                                           data interface instead of
index 717ebc9ff941808e2ddd52c93db09cc998456565..600d82348511068b141dc65f7a6e7e7c79037a14 100644 (file)
@@ -264,7 +264,7 @@ static int __devinit dwc3_core_init(struct dwc3 *dwc)
                ret = -ENODEV;
                goto err0;
        }
-       dwc->revision = reg & DWC3_GSNPSREV_MASK;
+       dwc->revision = reg;
 
        dwc3_core_soft_reset(dwc);
 
index 4730016d7cd42d8644e8fb5fa8791fc9df080591..45f422ac103fb61678633ee08241b33b7dfe235a 100644 (file)
@@ -1959,7 +1959,7 @@ static int amd5536_start(struct usb_gadget_driver *driver,
        u32 tmp;
 
        if (!driver || !bind || !driver->setup
-                       || driver->speed != USB_SPEED_HIGH)
+                       || driver->speed < USB_SPEED_HIGH)
                return -EINVAL;
        if (!dev)
                return -ENODEV;
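
This and the similar UDC hunks below relax the gadget-driver speed check from an exact match to a minimum: enum usb_device_speed is ordered (USB_SPEED_UNKNOWN < LOW < FULL < HIGH < ...), so `driver->speed < USB_SPEED_HIGH` rejects only drivers that cannot run at high speed while still accepting ones that declare a faster maximum (e.g. USB_SPEED_SUPER) on a high-speed-only controller. The pattern in isolation, as a sketch:

    /* Reject only drivers whose declared maximum speed is too low for this
     * controller; anything >= USB_SPEED_HIGH is acceptable here. */
    if (!driver || !bind || !driver->setup ||
        driver->speed < USB_SPEED_HIGH)
            return -EINVAL;
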
index 596a0b464e61f069f93c8278a3c8193ab44a7873..4dff83d2f265235338f3b92f0e5b59690a96de25 100644 (file)
@@ -130,9 +130,6 @@ ep_matches (
                        num_req_streams = ep_comp->bmAttributes & 0x1f;
                        if (num_req_streams > ep->max_streams)
                                return 0;
-                       /* Update the ep_comp descriptor if needed */
-                       if (num_req_streams != ep->max_streams)
-                               ep_comp->bmAttributes = ep->max_streams;
                }
 
        }
index c39d58860fa0414a2c862941dbc4382ddb2e1b0f..1a6f415c0d022f34d94e6a609658e47c5a31ed06 100644 (file)
@@ -2975,6 +2975,7 @@ static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
        fsg_common_put(common);
        usb_free_descriptors(fsg->function.descriptors);
        usb_free_descriptors(fsg->function.hs_descriptors);
+       usb_free_descriptors(fsg->function.ss_descriptors);
        kfree(fsg);
 }
 
index 91fdf790ed20b122bf0a13df0d3c8aed5285ac3c..cf33a8d0fd5df46ec339f5b481b68dc61121243a 100644 (file)
@@ -131,8 +131,8 @@ static int gser_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
        }
        if (!gser->port.in->desc || !gser->port.out->desc) {
                DBG(cdev, "activate generic ttyGS%d\n", gser->port_num);
-               if (!config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
-                   !config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
+               if (config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
+                   config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
                        gser->port.in->desc = NULL;
                        gser->port.out->desc = NULL;
                        return -EINVAL;
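
The f_serial fix above corrects an inverted error check: config_ep_by_speed() follows the usual kernel convention of returning 0 on success and a negative errno on failure, so the failure branch must run when the return value is non-zero, not when it is zero. A hedged sketch of the general pattern (error handling simplified relative to the function above):

    int ret;

    ret = config_ep_by_speed(cdev->gadget, f, gser->port.in);
    if (ret)                /* 0 = success, negative errno = failure */
            return ret;
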
index 43a49ecc1f36ed1009eb836350bd07800a4d8c09..dcbc0a2e48dde8be9b27a5c429a4c004bfb8aa9c 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/err.h>
 #include <linux/fsl_devices.h>
 #include <linux/platform_device.h>
+#include <linux/io.h>
 
 #include <mach/hardware.h>
 
@@ -88,7 +89,6 @@ eenahb:
 void fsl_udc_clk_finalize(struct platform_device *pdev)
 {
        struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
-#if defined(CONFIG_SOC_IMX35)
        if (cpu_is_mx35()) {
                unsigned int v;
 
@@ -101,7 +101,6 @@ void fsl_udc_clk_finalize(struct platform_device *pdev)
                                        USBPHYCTRL_OTGBASE_OFFSET));
                }
        }
-#endif
 
        /* ULPI transceivers don't need usbpll */
        if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
index 2a03e4de11c1a277cfe27b670ac48f11cabc006a..e00cf92409ce5114f061801c3d3e29b6b6bae23e 100644 (file)
@@ -2336,8 +2336,7 @@ static int fsl_qe_start(struct usb_gadget_driver *driver,
        if (!udc_controller)
                return -ENODEV;
 
-       if (!driver || (driver->speed != USB_SPEED_FULL
-                       && driver->speed != USB_SPEED_HIGH)
+       if (!driver || driver->speed < USB_SPEED_FULL
                        || !bind || !driver->disconnect || !driver->setup)
                return -EINVAL;
 
index b3b3d83b7c3354744ec1fc4d4dc2765f19293a7c..dd28ef3def71f394f281b70f8ee2335b629f42e6 100644 (file)
@@ -696,12 +696,31 @@ static void fsl_free_request(struct usb_ep *_ep, struct usb_request *_req)
                kfree(req);
 }
 
-/*-------------------------------------------------------------------------*/
+/* Actually add a dTD chain to an empty dQH and let go */
+static void fsl_prime_ep(struct fsl_ep *ep, struct ep_td_struct *td)
+{
+       struct ep_queue_head *qh = get_qh_by_ep(ep);
+
+       /* Write dQH next pointer and terminate bit to 0 */
+       qh->next_dtd_ptr = cpu_to_hc32(td->td_dma
+                       & EP_QUEUE_HEAD_NEXT_POINTER_MASK);
+
+       /* Clear active and halt bit */
+       qh->size_ioc_int_sts &= cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
+                                       | EP_QUEUE_HEAD_STATUS_HALT));
+
+       /* Ensure that updates to the QH will occur before priming. */
+       wmb();
+
+       /* Prime endpoint by writing correct bit to ENDPTPRIME */
+       fsl_writel(ep_is_in(ep) ? (1 << (ep_index(ep) + 16))
+                       : (1 << (ep_index(ep))), &dr_regs->endpointprime);
+}
+
+/* Add dTD chain to the dQH of an EP */
 static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
 {
-       int i = ep_index(ep) * 2 + ep_is_in(ep);
        u32 temp, bitmask, tmp_stat;
-       struct ep_queue_head *dQH = &ep->udc->ep_qh[i];
 
        /* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
        VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */
@@ -719,7 +738,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
                        cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
                /* Read prime bit, if 1 goto done */
                if (fsl_readl(&dr_regs->endpointprime) & bitmask)
-                       goto out;
+                       return;
 
                do {
                        /* Set ATDTW bit in USBCMD */
@@ -736,28 +755,10 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
                fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd);
 
                if (tmp_stat)
-                       goto out;
+                       return;
        }
 
-       /* Write dQH next pointer and terminate bit to 0 */
-       temp = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
-       dQH->next_dtd_ptr = cpu_to_hc32(temp);
-
-       /* Clear active and halt bit */
-       temp = cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
-                       | EP_QUEUE_HEAD_STATUS_HALT));
-       dQH->size_ioc_int_sts &= temp;
-
-       /* Ensure that updates to the QH will occur before priming. */
-       wmb();
-
-       /* Prime endpoint by writing 1 to ENDPTPRIME */
-       temp = ep_is_in(ep)
-               ? (1 << (ep_index(ep) + 16))
-               : (1 << (ep_index(ep)));
-       fsl_writel(temp, &dr_regs->endpointprime);
-out:
-       return;
+       fsl_prime_ep(ep, req->head);
 }
 
 /* Fill in the dTD structure
@@ -877,7 +878,7 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
                VDBG("%s, bad ep", __func__);
                return -EINVAL;
        }
-       if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+       if (usb_endpoint_xfer_isoc(ep->desc)) {
                if (req->req.length > ep->ep.maxpacket)
                        return -EMSGSIZE;
        }
@@ -973,25 +974,20 @@ static int fsl_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
 
                /* The request isn't the last request in this ep queue */
                if (req->queue.next != &ep->queue) {
-                       struct ep_queue_head *qh;
                        struct fsl_req *next_req;
 
-                       qh = ep->qh;
                        next_req = list_entry(req->queue.next, struct fsl_req,
                                        queue);
 
-                       /* Point the QH to the first TD of next request */
-                       fsl_writel((u32) next_req->head, &qh->curr_dtd_ptr);
+                       /* prime with dTD of next request */
+                       fsl_prime_ep(ep, next_req->head);
                }
-
-               /* The request hasn't been processed, patch up the TD chain */
+       /* The request hasn't been processed, patch up the TD chain */
        } else {
                struct fsl_req *prev_req;
 
                prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
-               fsl_writel(fsl_readl(&req->tail->next_td_ptr),
-                               &prev_req->tail->next_td_ptr);
-
+               prev_req->tail->next_td_ptr = req->tail->next_td_ptr;
        }
 
        done(ep, req, -ECONNRESET);
@@ -1032,7 +1028,7 @@ static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
                goto out;
        }
 
-       if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+       if (usb_endpoint_xfer_isoc(ep->desc)) {
                status = -EOPNOTSUPP;
                goto out;
        }
@@ -1068,7 +1064,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
        struct fsl_udc *udc;
        int size = 0;
        u32 bitmask;
-       struct ep_queue_head *d_qh;
+       struct ep_queue_head *qh;
 
        ep = container_of(_ep, struct fsl_ep, ep);
        if (!_ep || (!ep->desc && ep_index(ep) != 0))
@@ -1079,13 +1075,13 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep)
        if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
                return -ESHUTDOWN;
 
-       d_qh = &ep->udc->ep_qh[ep_index(ep) * 2 + ep_is_in(ep)];
+       qh = get_qh_by_ep(ep);
 
        bitmask = (ep_is_in(ep)) ? (1 << (ep_index(ep) + 16)) :
            (1 << (ep_index(ep)));
 
        if (fsl_readl(&dr_regs->endptstatus) & bitmask)
-               size = (d_qh->size_ioc_int_sts & DTD_PACKET_SIZE)
+               size = (qh->size_ioc_int_sts & DTD_PACKET_SIZE)
                    >> DTD_LENGTH_BIT_POS;
 
        pr_debug("%s %u\n", __func__, size);
@@ -1938,8 +1934,7 @@ static int fsl_start(struct usb_gadget_driver *driver,
        if (!udc_controller)
                return -ENODEV;
 
-       if (!driver || (driver->speed != USB_SPEED_FULL
-                               && driver->speed != USB_SPEED_HIGH)
+       if (!driver || driver->speed < USB_SPEED_FULL
                        || !bind || !driver->disconnect || !driver->setup)
                return -EINVAL;
 
index 1d51be83fda87402d4a77a28ff4fa636d17bff62..f781f5dec41776629584a33a1be5817d8e6778d6 100644 (file)
@@ -569,6 +569,16 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
                                        * 2 + ((windex & USB_DIR_IN) ? 1 : 0))
 #define get_pipe_by_ep(EP)     (ep_index(EP) * 2 + ep_is_in(EP))
 
+static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep)
+{
+       /* we only have one ep0 structure but two queue heads */
+       if (ep_index(ep) != 0)
+               return ep->qh;
+       else
+               return &ep->udc->ep_qh[(ep->udc->ep0_dir ==
+                               USB_DIR_IN) ? 1 : 0];
+}
+
 struct platform_device;
 #ifdef CONFIG_ARCH_MXC
 int fsl_udc_clk_init(struct platform_device *pdev);
index 91d0af2a24a8537728fb5c5cb176ff0108095ab5..9aa1cbbee45b64597ef9bcc44d5ba5ed219b2d6a 100644 (file)
@@ -1472,7 +1472,7 @@ static int m66592_start(struct usb_gadget_driver *driver,
        int retval;
 
        if (!driver
-                       || driver->speed != USB_SPEED_HIGH
+                       || driver->speed < USB_SPEED_HIGH
                        || !bind
                        || !driver->setup)
                return -EINVAL;
index 7f1bc9a73cda5a1fe45f8c053f426407758e0f50..da2b9d0be3ca0d444d9df8ca5f0b7a9800c3d078 100644 (file)
@@ -1881,7 +1881,7 @@ static int net2280_start(struct usb_gadget *_gadget,
         * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
         * "must not be used in normal operation"
         */
-       if (!driver || driver->speed != USB_SPEED_HIGH
+       if (!driver || driver->speed < USB_SPEED_HIGH
                        || !driver->setup)
                return -EINVAL;
 
index 24f84b210ce116cfd78e5bae3a1bee383d198ca0..fc719a3f855717b88417aaff8c2e89d9a2723109 100644 (file)
@@ -1746,7 +1746,7 @@ static int r8a66597_start(struct usb_gadget *gadget,
        struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
 
        if (!driver
-                       || driver->speed != USB_SPEED_HIGH
+                       || driver->speed < USB_SPEED_HIGH
                        || !driver->setup)
                return -EINVAL;
        if (!r8a66597)
index a552453dc94632cacee3ea5212f60aa4563fa519..b31448229f0b26c3a38014cf59e3339060253cb3 100644 (file)
@@ -2586,10 +2586,8 @@ static int s3c_hsotg_start(struct usb_gadget_driver *driver,
                return -EINVAL;
        }
 
-       if (driver->speed != USB_SPEED_HIGH &&
-           driver->speed != USB_SPEED_FULL) {
+       if (driver->speed < USB_SPEED_FULL)
                dev_err(hsotg->dev, "%s: bad speed\n", __func__);
-       }
 
        if (!bind || !driver->setup) {
                dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
index 8d54f893cefe9df7c97363c1812b4f30bb822ff7..20a553b46aedc1d17ecaeaf033ff62eb8f5f472c 100644 (file)
@@ -1142,8 +1142,7 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver,
        int ret;
 
        if (!driver
-               || (driver->speed != USB_SPEED_FULL &&
-                       driver->speed != USB_SPEED_HIGH)
+               || driver->speed < USB_SPEED_FULL
                || !bind
                || !driver->unbind || !driver->disconnect || !driver->setup)
                return -EINVAL;
index 56a32033adb3485db49484d23619d1f2dcf68071..a60679cbbf858e3c97a978218e16d931635ae1a3 100644 (file)
@@ -1475,6 +1475,7 @@ iso_stream_schedule (
         * jump until after the queue is primed.
         */
        else {
+               int done = 0;
                start = SCHEDULE_SLOP + (now & ~0x07);
 
                /* NOTE:  assumes URB_ISO_ASAP, to limit complexity/bugs */
@@ -1492,18 +1493,18 @@ iso_stream_schedule (
                        if (stream->highspeed) {
                                if (itd_slot_ok(ehci, mod, start,
                                                stream->usecs, period))
-                                       break;
+                                       done = 1;
                        } else {
                                if ((start % 8) >= 6)
                                        continue;
                                if (sitd_slot_ok(ehci, mod, stream,
                                                start, sched, period))
-                                       break;
+                                       done = 1;
                        }
-               } while (start > next);
+               } while (start > next && !done);
 
                /* no room in the schedule */
-               if (start == next) {
+               if (!done) {
                        ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n",
                                urb, now, now + mod);
                        status = -ENOSPC;
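
The ehci-sched change above replaces a break-on-success search with an explicit done flag: with the old code, a slot found on the final iteration left start equal to next, so the `start == next` exit test could misreport a usable schedule as full. A minimal sketch of the control-flow shape only (slot_ok() is a hypothetical stand-in for the itd_slot_ok()/sitd_slot_ok() checks, not the real scheduling logic):

    int done = 0;

    do {
            start--;
            if (slot_ok(start))
                    done = 1;
    } while (start > next && !done);

    if (!done)
            return -ENOSPC;         /* genuinely no room in the schedule */
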
index a7dc1e1d45f2a77e1cfce4de339b449b8863f778..2ac4ac2e4ef95208be6d1ae2b42af1840cb42bb9 100644 (file)
@@ -18,7 +18,7 @@
 
 #include "isp1760-hcd.h"
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_platform.h>
@@ -31,7 +31,7 @@
 #include <linux/pci.h>
 #endif
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
 struct isp1760 {
        struct usb_hcd *hcd;
        int rst_gpio;
@@ -437,7 +437,7 @@ static int __init isp1760_init(void)
        ret = platform_driver_register(&isp1760_plat_driver);
        if (!ret)
                any_ret = 0;
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
        ret = platform_driver_register(&isp1760_of_driver);
        if (!ret)
                any_ret = 0;
@@ -457,7 +457,7 @@ module_init(isp1760_init);
 static void __exit isp1760_exit(void)
 {
        platform_driver_unregister(&isp1760_plat_driver);
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
        platform_driver_unregister(&isp1760_of_driver);
 #endif
 #ifdef CONFIG_PCI
index d6e175428618d4b31f69bd3d323e7c5aef93deae..a403b53e86b9fd3dcc742b9da5759330fbc4d212 100644 (file)
@@ -124,7 +124,7 @@ void qset_clear(struct whc *whc, struct whc_qset *qset)
 {
        qset->td_start = qset->td_end = qset->ntds = 0;
 
-       qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
+       qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
        qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
        qset->qh.err_count = 0;
        qset->qh.scratch[0] = 0;
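
Editor's note: the WHCI fix swaps cpu_to_le32() for cpu_to_le64() because, by the look of the fix, qh.link is a 64-bit little-endian field; the conversion helper has to match the field width or big-endian hosts store the wrong byte layout. A tiny kernel-style fragment of the idea, with a simplified stand-in for the queue head:

#include <linux/types.h>
#include <asm/byteorder.h>

/* simplified stand-in for the WHCI queue head; only the link field matters */
struct example_qh {
        __le64 link;            /* hardware reads this as 64-bit little-endian */
};

static void example_qh_init(struct example_qh *qh, u64 flags)
{
        /*
         * The helper must match the field width: cpu_to_le32() on a __le64
         * field gives the wrong byte layout on big-endian CPUs (and sparse
         * flags the type mismatch).
         */
        qh->link = cpu_to_le64(flags);
}
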
index aa94c01957919001e6a5804c7bc0434b923f20bc..a1afb7c39f7e70c36c12a8bccdc0a0b64b7f29d3 100644 (file)
@@ -711,7 +711,10 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
        ring = xhci->cmd_ring;
        seg = ring->deq_seg;
        do {
-               memset(seg->trbs, 0, SEGMENT_SIZE);
+               memset(seg->trbs, 0,
+                       sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+               seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
+                       cpu_to_le32(~TRB_CYCLE);
                seg = seg->next;
        } while (seg != ring->deq_seg);
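
Editor's note: the xHCI hunk stops wiping whole segments and instead zeroes everything except the trailing link TRB, clearing only its cycle bit so the segment chain stays linked. A simplified userspace illustration of the per-segment arithmetic; the TRB layout, segment size, and endianness handling are deliberately simplified and are not the xHCI definitions:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define TRBS_PER_SEGMENT 16             /* illustrative, not the real value */
#define TRB_CYCLE        (1u << 0)

struct trb { uint32_t field[3]; uint32_t control; };

static void clear_segment(struct trb *trbs)
{
        /* zero everything except the trailing link TRB ... */
        memset(trbs, 0, sizeof(struct trb) * (TRBS_PER_SEGMENT - 1));
        /* ... and on the link TRB flip only the cycle bit, keeping the link */
        trbs[TRBS_PER_SEGMENT - 1].control &= ~TRB_CYCLE;
}

int main(void)
{
        struct trb seg[TRBS_PER_SEGMENT];

        memset(seg, 0xff, sizeof(seg));
        clear_segment(seg);
        printf("link control: 0x%08x\n", seg[TRBS_PER_SEGMENT - 1].control);
        return 0;
}
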
 
index c1fa12ec7a9ad456a23c761f5c328a721182a070..b63ab1570103f2219afc5b44bdea08acd0531054 100644 (file)
@@ -2301,18 +2301,12 @@ static int musb_suspend(struct device *dev)
                 */
        }
 
-       musb_save_context(musb);
-
        spin_unlock_irqrestore(&musb->lock, flags);
        return 0;
 }
 
 static int musb_resume_noirq(struct device *dev)
 {
-       struct musb     *musb = dev_to_musb(dev);
-
-       musb_restore_context(musb);
-
        /* for static cmos like DaVinci, register values were preserved
         * unless for some reason the whole soc powered down or the USB
         * module got reset through the PSC (vs just being disabled).
index d51043acfe1abc5013217e70b4854e153f8bdf96..922148ff8d2969de64a808046f7ce06c6115ace1 100644 (file)
@@ -1903,7 +1903,7 @@ static int musb_gadget_start(struct usb_gadget *g,
        unsigned long           flags;
        int                     retval = -EINVAL;
 
-       if (driver->speed != USB_SPEED_HIGH)
+       if (driver->speed < USB_SPEED_HIGH)
                goto err0;
 
        pm_runtime_get_sync(musb->controller);
index 60ddba8066ea201181b0b6ce3c69a847eafc618a..79cb0af779fa07dac0702ee9b2b2d43e0a22b738 100644 (file)
@@ -774,6 +774,10 @@ static void musb_ep_program(struct musb *musb, u8 epnum,
                        if (musb->double_buffer_not_ok)
                                musb_writew(epio, MUSB_TXMAXP,
                                                hw_ep->max_packet_sz_tx);
+                       else if (can_bulk_split(musb, qh->type))
+                               musb_writew(epio, MUSB_TXMAXP, packet_sz
+                                       | ((hw_ep->max_packet_sz_tx /
+                                               packet_sz) - 1) << 11);
                        else
                                musb_writew(epio, MUSB_TXMAXP,
                                                qh->maxpacket |
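
Editor's note: when bulk splitting is possible, the musb host hunk programs MUSB_TXMAXP with the base packet size in the low bits and a "packets per FIFO, minus one" multiplier shifted up by 11 bits, as read directly from the hunk. A standalone sketch of that encoding:

#include <stdio.h>

/* Encode a MUSB TXMAXP-style value: low bits = packet size,
 * upper field at bit 11 = (fifo_size / packet_size) - 1, per the hunk above. */
static unsigned short txmaxp_encode(unsigned packet_sz, unsigned fifo_sz)
{
        return packet_sz | (((fifo_sz / packet_sz) - 1) << 11);
}

int main(void)
{
        /* e.g. 512-byte bulk packets in a 4096-byte FIFO -> multiplier 7 */
        printf("0x%04x\n", txmaxp_encode(512, 4096));   /* 0x3a00 */
        return 0;
}
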
index 053f86d70009f4a5616c2b303c72c4988fd12d74..ad96a38967299f4895b74ae0fa7bb416384706f8 100644 (file)
@@ -349,7 +349,7 @@ void usbhs_irq_callback_update(struct usbhs_priv *priv, struct usbhs_mod *mod)
                if (mod->irq_attch)
                        intenb1 |= ATTCHE;
 
-               if (mod->irq_attch)
+               if (mod->irq_dtch)
                        intenb1 |= DTCHE;
 
                if (mod->irq_sign)
index d9717e0bc1ff65c6d23cae4bf0693c434d8873fe..7f4e803385702499b70e4a40fce377bc28a1e45f 100644 (file)
@@ -751,53 +751,32 @@ static int usbhsg_gadget_start(struct usb_gadget *gadget,
                struct usb_gadget_driver *driver)
 {
        struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
-       struct usbhs_priv *priv;
-       struct device *dev;
-       int ret;
+       struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
 
        if (!driver             ||
            !driver->setup      ||
-           driver->speed != USB_SPEED_HIGH)
+           driver->speed < USB_SPEED_FULL)
                return -EINVAL;
 
-       dev  = usbhsg_gpriv_to_dev(gpriv);
-       priv = usbhsg_gpriv_to_priv(gpriv);
-
        /* first hook up the driver ... */
        gpriv->driver = driver;
        gpriv->gadget.dev.driver = &driver->driver;
 
-       ret = device_add(&gpriv->gadget.dev);
-       if (ret) {
-               dev_err(dev, "device_add error %d\n", ret);
-               goto add_fail;
-       }
-
        return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD);
-
-add_fail:
-       gpriv->driver = NULL;
-       gpriv->gadget.dev.driver = NULL;
-
-       return ret;
 }
 
 static int usbhsg_gadget_stop(struct usb_gadget *gadget,
                struct usb_gadget_driver *driver)
 {
        struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
-       struct usbhs_priv *priv;
-       struct device *dev;
+       struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
 
        if (!driver             ||
            !driver->unbind)
                return -EINVAL;
 
-       dev  = usbhsg_gpriv_to_dev(gpriv);
-       priv = usbhsg_gpriv_to_priv(gpriv);
-
        usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD);
-       device_del(&gpriv->gadget.dev);
+       gpriv->gadget.dev.driver = NULL;
        gpriv->driver = NULL;
 
        return 0;
@@ -827,6 +806,13 @@ static int usbhsg_start(struct usbhs_priv *priv)
 
 static int usbhsg_stop(struct usbhs_priv *priv)
 {
+       struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
+
+       /* cable disconnect */
+       if (gpriv->driver &&
+           gpriv->driver->disconnect)
+               gpriv->driver->disconnect(&gpriv->gadget);
+
        return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED);
 }
 
@@ -876,12 +862,14 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
        /*
         * init gadget
         */
-       device_initialize(&gpriv->gadget.dev);
        dev_set_name(&gpriv->gadget.dev, "gadget");
        gpriv->gadget.dev.parent        = dev;
        gpriv->gadget.name              = "renesas_usbhs_udc";
        gpriv->gadget.ops               = &usbhsg_gadget_ops;
        gpriv->gadget.is_dualspeed      = 1;
+       ret = device_register(&gpriv->gadget.dev);
+       if (ret < 0)
+               goto err_add_udc;
 
        INIT_LIST_HEAD(&gpriv->gadget.ep_list);
 
@@ -912,12 +900,15 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
 
        ret = usb_add_gadget_udc(dev, &gpriv->gadget);
        if (ret)
-               goto err_add_udc;
+               goto err_register;
 
 
        dev_info(dev, "gadget probed\n");
 
        return 0;
+
+err_register:
+       device_unregister(&gpriv->gadget.dev);
 err_add_udc:
        kfree(gpriv->uep);
 
@@ -933,6 +924,8 @@ void usbhs_mod_gadget_remove(struct usbhs_priv *priv)
 
        usb_del_gadget_udc(&gpriv->gadget);
 
+       device_unregister(&gpriv->gadget.dev);
+
        usbhsg_controller_unregister(gpriv);
 
        kfree(gpriv->uep);
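
Editor's note: the renesas_usbhs gadget rework replaces device_initialize() plus device_add() with a single device_register() in probe and unwinds it with device_unregister() through a dedicated error label when usb_add_gadget_udc() fails; remove mirrors the same teardown. A hedged skeleton of that ordering, using a hypothetical priv structure rather than the usbhs types:

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/usb/gadget.h>

/* hypothetical driver state; only the fields used below */
struct example_priv {
        struct usb_gadget gadget;
        void *uep;                      /* endpoint array allocated earlier */
};

static int example_gadget_register(struct device *parent,
                                   struct example_priv *priv)
{
        int ret;

        dev_set_name(&priv->gadget.dev, "gadget");
        priv->gadget.dev.parent = parent;

        /* device_register() = device_initialize() + device_add() */
        ret = device_register(&priv->gadget.dev);
        if (ret < 0)
                goto err_free;

        ret = usb_add_gadget_udc(parent, &priv->gadget);
        if (ret)
                goto err_unregister;

        return 0;

err_unregister:
        device_unregister(&priv->gadget.dev);   /* undo device_register() */
err_free:
        kfree(priv->uep);                       /* undo the earlier allocation */
        return ret;
}
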
index bade761a1e52b783a63dfb462f8b11ef4c111999..7955de5899512ecff50c099ab084a35a36a10f07 100644 (file)
@@ -1267,6 +1267,7 @@ int usbhs_mod_host_probe(struct usbhs_priv *priv)
                dev_err(dev, "Failed to create hcd\n");
                return -ENOMEM;
        }
+       hcd->has_tt = 1; /* for low/full speed */
 
        pipe_info = kzalloc(sizeof(*pipe_info) * pipe_size, GFP_KERNEL);
        if (!pipe_info) {
index bd4298bb6750d347f825c47026b8de0acb7096a1..ff3db5d056a56484fe594039f5817ac7d6ec4024 100644 (file)
@@ -736,6 +736,7 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
        { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
index 571fa96b49c7749b983c8d4a8f8228f3500f975c..055b64ef0bbad7ad6dd20200860c6874dd120f1e 100644 (file)
 
 /* Propox devices */
 #define FTDI_PROPOX_JTAGCABLEII_PID    0xD738
+#define FTDI_PROPOX_ISPCABLEIII_PID    0xD739
 
 /* Lenz LI-USB Computer Interface. */
 #define FTDI_LENZ_LIUSB_PID    0xD780
index d865878c9f97449a6168fd69198af63f647ac497..6dd64534fad0d88c6239fa9c64279bcc82525112 100644 (file)
@@ -661,6 +661,14 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
        { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) },  /* E398 3G Modem */
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) },  /* E398 3G PC UI Interface */
+       { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) },  /* E398 3G Application Interface */
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
        { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
@@ -747,6 +755,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
index 3041a974faf39278ef8fad4033e089f8e64b7b9c..24caba79d722a74fd2dba94f9560a7168e4bb26c 100644 (file)
@@ -1854,6 +1854,13 @@ UNUSUAL_DEV(  0x1370, 0x6828, 0x0110, 0x0110,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_RESIDUE ),
 
+/* Reported by Qinglin Ye <yestyle@gmail.com> */
+UNUSUAL_DEV(  0x13fe, 0x3600, 0x0100, 0x0100,
+               "Kingston",
+               "DT 101 G2",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_BULK_IGNORE_TAG ),
+
 /* Reported by Francesco Foresti <frafore@tiscali.it> */
 UNUSUAL_DEV(  0x14cd, 0x6600, 0x0201, 0x0201,
                "Super Top",
index 03f449a430d253ca997f0e84e721748707fb674f..5b89f7d6cd0ff4bb6d2f305e025b2ad3ebd18c5b 100644 (file)
@@ -76,8 +76,6 @@ static int irq;
 static void __iomem *virtbase;
 static unsigned long coh901327_users;
 static unsigned long boot_status;
-static u16 wdogenablestore;
-static u16 irqmaskstore;
 static struct device *parent;
 
 /*
@@ -461,6 +459,10 @@ out:
 }
 
 #ifdef CONFIG_PM
+
+static u16 wdogenablestore;
+static u16 irqmaskstore;
+
 static int coh901327_suspend(struct platform_device *pdev, pm_message_t state)
 {
        irqmaskstore = readw(virtbase + U300_WDOG_IMR) & 0x0001U;
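
Editor's note: the coh901327 change moves the two save/restore variables inside the #ifdef CONFIG_PM block because only the suspend/resume callbacks touch them, avoiding "defined but not used" warnings on non-PM builds. A minimal sketch of the pattern; the register accessors are hypothetical stubs so the fragment stands alone:

#include <linux/platform_device.h>
#include <linux/types.h>

/* hypothetical register accessors, stubbed out for the sketch */
static u16 example_read_irqmask(void) { return 0; }
static void example_write_irqmask(u16 v) { (void)v; }

#ifdef CONFIG_PM

/* state used only by the suspend/resume pair, so it lives inside the #ifdef */
static u16 saved_irqmask;

static int example_suspend(struct platform_device *pdev, pm_message_t state)
{
        saved_irqmask = example_read_irqmask();
        return 0;
}

static int example_resume(struct platform_device *pdev)
{
        example_write_irqmask(saved_irqmask);
        return 0;
}

#else
#define example_suspend NULL
#define example_resume  NULL
#endif
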
index 3774c9b8dac9c6868a28c5eda079e7915b7d24cb..8464ea1c36a1080f4c4b4045333a70b963d08b5d 100644 (file)
@@ -231,6 +231,7 @@ static int __devinit cru_detect(unsigned long map_entry,
 
        cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
 
+       set_memory_x((unsigned long)bios32_entrypoint, (2 * PAGE_SIZE));
        asminline_call(&cmn_regs, bios32_entrypoint);
 
        if (cmn_regs.u1.ral != 0) {
@@ -248,8 +249,10 @@ static int __devinit cru_detect(unsigned long map_entry,
                if ((physical_bios_base + physical_bios_offset)) {
                        cru_rom_addr =
                                ioremap(cru_physical_address, cru_length);
-                       if (cru_rom_addr)
+                       if (cru_rom_addr) {
+                               set_memory_x((unsigned long)cru_rom_addr, cru_length);
                                retval = 0;
+                       }
                }
 
                printk(KERN_DEBUG "hpwdt: CRU Base Address:   0x%lx\n",
index ba6ad662635ae97776cf51c16b5fba482cc8a8c8..99796c5d913db2c9f354dbd06b503d8d65580cdf 100644 (file)
@@ -384,10 +384,10 @@ MODULE_PARM_DESC(nowayout,
        "Watchdog cannot be stopped once started (default="
                                __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
 
-static int turn_SMI_watchdog_clear_off = 0;
+static int turn_SMI_watchdog_clear_off = 1;
 module_param(turn_SMI_watchdog_clear_off, int, 0);
 MODULE_PARM_DESC(turn_SMI_watchdog_clear_off,
-       "Turn off SMI clearing watchdog (default=0)");
+       "Turn off SMI clearing watchdog (depends on TCO-version)(default=1)");
 
 /*
  * Some TCO specific functions
@@ -813,7 +813,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
                ret = -EIO;
                goto out_unmap;
        }
-       if (turn_SMI_watchdog_clear_off) {
+       if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) {
                /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
                val32 = inl(SMI_EN);
                val32 &= 0xffffdfff;    /* Turn off SMI clearing watchdog */
index cc2cfbe33b30d441a2ef6449d2babe0b1eb43a3d..bfaf9bb1ee0d1ff384a014742f5c8148e96fc040 100644 (file)
@@ -351,7 +351,7 @@ static int __devexit sp805_wdt_remove(struct amba_device *adev)
        return 0;
 }
 
-static struct amba_id sp805_wdt_ids[] __initdata = {
+static struct amba_id sp805_wdt_ids[] = {
        {
                .id     = 0x00141805,
                .mask   = 0x00ffffff,
index 8e964b91c447b117b7d4d17731201254a79365d5..284798aaf8b1391fd8d4de7e43bff52e5721272d 100644 (file)
@@ -166,7 +166,7 @@ retry:
        /*
         * Get IO TLB memory from any location.
         */
-       xen_io_tlb_start = alloc_bootmem(bytes);
+       xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
        if (!xen_io_tlb_start) {
                m = "Cannot allocate Xen-SWIOTLB buffer!\n";
                goto error;
@@ -179,7 +179,7 @@ retry:
                               bytes,
                               xen_io_tlb_nslabs);
        if (rc) {
-               free_bootmem(__pa(xen_io_tlb_start), bytes);
+               free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
                m = "Failed to get contiguous memory for DMA from Xen!\n"\
                    "You either: don't have the permissions, do not have"\
                    " enough free memory under 4GB, or the hypervisor memory"\
index b3b8f2f3ad106aea4b4f33c9f61bf7eb01919563..ede860f921df847e3c94a2abda905949737c4e3d 100644 (file)
@@ -621,15 +621,6 @@ static struct xenbus_watch *find_watch(const char *token)
        return NULL;
 }
 
-static void xs_reset_watches(void)
-{
-       int err;
-
-       err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
-       if (err && err != -EEXIST)
-               printk(KERN_WARNING "xs_reset_watches failed: %d\n", err);
-}
-
 /* Register callback to watch this node. */
 int register_xenbus_watch(struct xenbus_watch *watch)
 {
@@ -906,9 +897,5 @@ int xs_init(void)
        if (IS_ERR(task))
                return PTR_ERR(task);
 
-       /* shutdown watches for kexec boot */
-       if (xen_hvm_domain())
-               xs_reset_watches();
-
        return 0;
 }
index e24cd8986d8badacc41ce764a15cc99f2524fb8c..ea78c3a17eecd8d4bf38e182fcb4ca1ce0389492 100644 (file)
@@ -12,7 +12,7 @@ here.
 This directory is _NOT_ for adding arbitrary new firmware images. The
 place to add those is the separate linux-firmware repository:
 
-    git://git.kernel.org/pub/scm/linux/kernel/git/dwmw2/linux-firmware.git
+    git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git
 
 That repository contains all these firmware images which have been
 extracted from older drivers, as well as various new firmware images which
@@ -22,6 +22,7 @@ been permitted to redistribute under separate cover.
 To submit firmware to that repository, please send either a git binary
 diff or preferably a git pull request to:
       David Woodhouse <dwmw2@infradead.org>
+      Ben Hutchings <ben@decadent.org.uk>
 
 Your commit should include an update to the WHENCE file clearly
 identifying the licence under which the firmware is available, and
index 7ec14097fef1f3bbb9b7611a96111c55d0ba9e19..0b394580d8603becf8c5c031745687f6ba9e9aeb 100644 (file)
@@ -64,6 +64,8 @@ struct btrfs_worker_thread {
        int idle;
 };
 
+static int __btrfs_start_workers(struct btrfs_workers *workers);
+
 /*
  * btrfs_start_workers uses kthread_run, which can block waiting for memory
  * for a very long time.  It will actually throttle on page writeback,
@@ -88,27 +90,10 @@ static void start_new_worker_func(struct btrfs_work *work)
 {
        struct worker_start *start;
        start = container_of(work, struct worker_start, work);
-       btrfs_start_workers(start->queue, 1);
+       __btrfs_start_workers(start->queue);
        kfree(start);
 }
 
-static int start_new_worker(struct btrfs_workers *queue)
-{
-       struct worker_start *start;
-       int ret;
-
-       start = kzalloc(sizeof(*start), GFP_NOFS);
-       if (!start)
-               return -ENOMEM;
-
-       start->work.func = start_new_worker_func;
-       start->queue = queue;
-       ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
-       if (ret)
-               kfree(start);
-       return ret;
-}
-
 /*
  * helper function to move a thread onto the idle list after it
  * has finished some requests.
@@ -153,12 +138,20 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
 static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 {
        struct btrfs_workers *workers = worker->workers;
+       struct worker_start *start;
        unsigned long flags;
 
        rmb();
        if (!workers->atomic_start_pending)
                return;
 
+       start = kzalloc(sizeof(*start), GFP_NOFS);
+       if (!start)
+               return;
+
+       start->work.func = start_new_worker_func;
+       start->queue = workers;
+
        spin_lock_irqsave(&workers->lock, flags);
        if (!workers->atomic_start_pending)
                goto out;
@@ -170,10 +163,11 @@ static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 
        workers->num_workers_starting += 1;
        spin_unlock_irqrestore(&workers->lock, flags);
-       start_new_worker(workers);
+       btrfs_queue_worker(workers->atomic_worker_start, &start->work);
        return;
 
 out:
+       kfree(start);
        spin_unlock_irqrestore(&workers->lock, flags);
 }
 
@@ -331,7 +325,7 @@ again:
                        run_ordered_completions(worker->workers, work);
 
                        check_pending_worker_creates(worker);
-
+                       cond_resched();
                }
 
                spin_lock_irq(&worker->lock);
@@ -462,56 +456,55 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
  * starts new worker threads.  This does not enforce the max worker
  * count in case you need to temporarily go past it.
  */
-static int __btrfs_start_workers(struct btrfs_workers *workers,
-                                int num_workers)
+static int __btrfs_start_workers(struct btrfs_workers *workers)
 {
        struct btrfs_worker_thread *worker;
        int ret = 0;
-       int i;
 
-       for (i = 0; i < num_workers; i++) {
-               worker = kzalloc(sizeof(*worker), GFP_NOFS);
-               if (!worker) {
-                       ret = -ENOMEM;
-                       goto fail;
-               }
+       worker = kzalloc(sizeof(*worker), GFP_NOFS);
+       if (!worker) {
+               ret = -ENOMEM;
+               goto fail;
+       }
 
-               INIT_LIST_HEAD(&worker->pending);
-               INIT_LIST_HEAD(&worker->prio_pending);
-               INIT_LIST_HEAD(&worker->worker_list);
-               spin_lock_init(&worker->lock);
-
-               atomic_set(&worker->num_pending, 0);
-               atomic_set(&worker->refs, 1);
-               worker->workers = workers;
-               worker->task = kthread_run(worker_loop, worker,
-                                          "btrfs-%s-%d", workers->name,
-                                          workers->num_workers + i);
-               if (IS_ERR(worker->task)) {
-                       ret = PTR_ERR(worker->task);
-                       kfree(worker);
-                       goto fail;
-               }
-               spin_lock_irq(&workers->lock);
-               list_add_tail(&worker->worker_list, &workers->idle_list);
-               worker->idle = 1;
-               workers->num_workers++;
-               workers->num_workers_starting--;
-               WARN_ON(workers->num_workers_starting < 0);
-               spin_unlock_irq(&workers->lock);
+       INIT_LIST_HEAD(&worker->pending);
+       INIT_LIST_HEAD(&worker->prio_pending);
+       INIT_LIST_HEAD(&worker->worker_list);
+       spin_lock_init(&worker->lock);
+
+       atomic_set(&worker->num_pending, 0);
+       atomic_set(&worker->refs, 1);
+       worker->workers = workers;
+       worker->task = kthread_run(worker_loop, worker,
+                                  "btrfs-%s-%d", workers->name,
+                                  workers->num_workers + 1);
+       if (IS_ERR(worker->task)) {
+               ret = PTR_ERR(worker->task);
+               kfree(worker);
+               goto fail;
        }
+       spin_lock_irq(&workers->lock);
+       list_add_tail(&worker->worker_list, &workers->idle_list);
+       worker->idle = 1;
+       workers->num_workers++;
+       workers->num_workers_starting--;
+       WARN_ON(workers->num_workers_starting < 0);
+       spin_unlock_irq(&workers->lock);
+
        return 0;
 fail:
-       btrfs_stop_workers(workers);
+       spin_lock_irq(&workers->lock);
+       workers->num_workers_starting--;
+       spin_unlock_irq(&workers->lock);
        return ret;
 }
 
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+int btrfs_start_workers(struct btrfs_workers *workers)
 {
        spin_lock_irq(&workers->lock);
-       workers->num_workers_starting += num_workers;
+       workers->num_workers_starting++;
        spin_unlock_irq(&workers->lock);
-       return __btrfs_start_workers(workers, num_workers);
+       return __btrfs_start_workers(workers);
 }
 
 /*
@@ -568,9 +561,10 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
        struct btrfs_worker_thread *worker;
        unsigned long flags;
        struct list_head *fallback;
+       int ret;
 
-again:
        spin_lock_irqsave(&workers->lock, flags);
+again:
        worker = next_worker(workers);
 
        if (!worker) {
@@ -584,7 +578,10 @@ again:
                        workers->num_workers_starting++;
                        spin_unlock_irqrestore(&workers->lock, flags);
                        /* we're below the limit, start another worker */
-                       __btrfs_start_workers(workers, 1);
+                       ret = __btrfs_start_workers(workers);
+                       spin_lock_irqsave(&workers->lock, flags);
+                       if (ret)
+                               goto fallback;
                        goto again;
                }
        }
@@ -665,7 +662,7 @@ void btrfs_set_work_high_prio(struct btrfs_work *work)
 /*
  * places a struct btrfs_work into the pending queue of one of the kthreads
  */
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 {
        struct btrfs_worker_thread *worker;
        unsigned long flags;
@@ -673,7 +670,7 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 
        /* don't requeue something already on a list */
        if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
-               goto out;
+               return;
 
        worker = find_worker(workers);
        if (workers->ordered) {
@@ -712,7 +709,4 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
        if (wake)
                wake_up_process(worker->task);
        spin_unlock_irqrestore(&worker->lock, flags);
-
-out:
-       return 0;
 }
index 5077746cf85e049e87bcd8ded49b592ecc271605..f34cc31fa3c9a8d1c55f7181ca05b10a11f8ba64 100644 (file)
@@ -109,8 +109,8 @@ struct btrfs_workers {
        char *name;
 };
 
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
+int btrfs_start_workers(struct btrfs_workers *workers);
 int btrfs_stop_workers(struct btrfs_workers *workers);
 void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
                        struct btrfs_workers *async_starter);
index 50634abef9b4a51336e95b8bc2aafb4edb7844a0..67385033323d6e49817398a1df9b1596df07e839 100644 (file)
@@ -2692,7 +2692,8 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_evict_inode(struct inode *inode);
 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
-void btrfs_dirty_inode(struct inode *inode, int flags);
+int btrfs_dirty_inode(struct inode *inode);
+int btrfs_update_time(struct file *file);
 struct inode *btrfs_alloc_inode(struct super_block *sb);
 void btrfs_destroy_inode(struct inode *inode);
 int btrfs_drop_inode(struct inode *inode);
index 5b163572e0ca7ddd6d5f8ef6ff6783dbd5871c4d..9c1eccc2c503e5eec8bd20d3dfc057a417eef89a 100644 (file)
@@ -640,8 +640,8 @@ static int btrfs_delayed_inode_reserve_metadata(
         * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
         * we're accounted for.
         */
-       if (!trans->bytes_reserved &&
-           src_rsv != &root->fs_info->delalloc_block_rsv) {
+       if (!src_rsv || (!trans->bytes_reserved &&
+           src_rsv != &root->fs_info->delalloc_block_rsv)) {
                ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
                /*
                 * Since we're under a transaction reserve_metadata_bytes could
index 632f8f3cc9db67f4173ca591b1b66d9ed898627f..f44b3928dc2dc94cb62cefe72f1063f282dc09c2 100644 (file)
@@ -2194,19 +2194,27 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        fs_info->endio_meta_write_workers.idle_thresh = 2;
        fs_info->readahead_workers.idle_thresh = 2;
 
-       btrfs_start_workers(&fs_info->workers, 1);
-       btrfs_start_workers(&fs_info->generic_worker, 1);
-       btrfs_start_workers(&fs_info->submit_workers, 1);
-       btrfs_start_workers(&fs_info->delalloc_workers, 1);
-       btrfs_start_workers(&fs_info->fixup_workers, 1);
-       btrfs_start_workers(&fs_info->endio_workers, 1);
-       btrfs_start_workers(&fs_info->endio_meta_workers, 1);
-       btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
-       btrfs_start_workers(&fs_info->endio_write_workers, 1);
-       btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
-       btrfs_start_workers(&fs_info->delayed_workers, 1);
-       btrfs_start_workers(&fs_info->caching_workers, 1);
-       btrfs_start_workers(&fs_info->readahead_workers, 1);
+       /*
+        * btrfs_start_workers can really only fail because of ENOMEM so just
+        * return -ENOMEM if any of these fail.
+        */
+       ret = btrfs_start_workers(&fs_info->workers);
+       ret |= btrfs_start_workers(&fs_info->generic_worker);
+       ret |= btrfs_start_workers(&fs_info->submit_workers);
+       ret |= btrfs_start_workers(&fs_info->delalloc_workers);
+       ret |= btrfs_start_workers(&fs_info->fixup_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_write_workers);
+       ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
+       ret |= btrfs_start_workers(&fs_info->delayed_workers);
+       ret |= btrfs_start_workers(&fs_info->caching_workers);
+       ret |= btrfs_start_workers(&fs_info->readahead_workers);
+       if (ret) {
+               ret = -ENOMEM;
+               goto fail_sb_buffer;
+       }
 
        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
        fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
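
Editor's note: because btrfs_start_workers() now reports a status per pool, open_ctree() ORs the results together and collapses any failure into -ENOMEM, trading per-pool detail for one compact check. A standalone demo of the aggregation idiom:

#include <stdio.h>
#include <errno.h>

/* stand-in for btrfs_start_workers(); fails for one pool to show the idiom */
static int start_pool(int id)
{
        return id == 3 ? -ENOMEM : 0;
}

int main(void)
{
        int ret = 0, i;

        for (i = 0; i < 8; i++)
                ret |= start_pool(i);   /* any failure leaves ret non-zero */

        if (ret) {
                ret = -ENOMEM;          /* collapse to a single, known error */
                printf("worker startup failed: %d\n", ret);
        }
        return ret ? 1 : 0;
}
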
index f0d5718d2587a43c96101c3408a0749fc4abefa0..f5fbe576d2baf48519a01bd449344b49edffa070 100644 (file)
@@ -2822,7 +2822,7 @@ out_free:
        btrfs_release_path(path);
 out:
        spin_lock(&block_group->lock);
-       if (!ret)
+       if (!ret && dcs == BTRFS_DC_SETUP)
                block_group->cache_generation = trans->transid;
        block_group->disk_cache_state = dcs;
        spin_unlock(&block_group->lock);
@@ -4204,12 +4204,17 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
        u64 to_reserve = 0;
+       u64 csum_bytes;
        unsigned nr_extents = 0;
+       int extra_reserve = 0;
        int flush = 1;
        int ret;
 
+       /* Need to be holding the i_mutex here if we aren't free space cache */
        if (btrfs_is_free_space_inode(root, inode))
                flush = 0;
+       else
+               WARN_ON(!mutex_is_locked(&inode->i_mutex));
 
        if (flush && btrfs_transaction_in_commit(root->fs_info))
                schedule_timeout(1);
@@ -4220,11 +4225,9 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        BTRFS_I(inode)->outstanding_extents++;
 
        if (BTRFS_I(inode)->outstanding_extents >
-           BTRFS_I(inode)->reserved_extents) {
+           BTRFS_I(inode)->reserved_extents)
                nr_extents = BTRFS_I(inode)->outstanding_extents -
                        BTRFS_I(inode)->reserved_extents;
-               BTRFS_I(inode)->reserved_extents += nr_extents;
-       }
 
        /*
         * Add an item to reserve for updating the inode when we complete the
@@ -4232,11 +4235,12 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
         */
        if (!BTRFS_I(inode)->delalloc_meta_reserved) {
                nr_extents++;
-               BTRFS_I(inode)->delalloc_meta_reserved = 1;
+               extra_reserve = 1;
        }
 
        to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
        to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
+       csum_bytes = BTRFS_I(inode)->csum_bytes;
        spin_unlock(&BTRFS_I(inode)->lock);
 
        ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
@@ -4246,22 +4250,35 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 
                spin_lock(&BTRFS_I(inode)->lock);
                dropped = drop_outstanding_extent(inode);
-               to_free = calc_csum_metadata_size(inode, num_bytes, 0);
-               spin_unlock(&BTRFS_I(inode)->lock);
-               to_free += btrfs_calc_trans_metadata_size(root, dropped);
-
                /*
-                * Somebody could have come in and twiddled with the
-                * reservation, so if we have to free more than we would have
-                * reserved from this reservation go ahead and release those
-                * bytes.
+                * If the inode's csum_bytes is the same as the original
+                * csum_bytes then we know we haven't raced with any free()ers,
+                * so we can just reduce our inode's csum bytes and carry on.
+                * Otherwise we have to do the normal free thing to account for
+                * the case that the free side didn't free up its reserve
+                * because of this outstanding reservation.
                 */
-               to_free -= to_reserve;
+               if (BTRFS_I(inode)->csum_bytes == csum_bytes)
+                       calc_csum_metadata_size(inode, num_bytes, 0);
+               else
+                       to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+               spin_unlock(&BTRFS_I(inode)->lock);
+               if (dropped)
+                       to_free += btrfs_calc_trans_metadata_size(root, dropped);
+
                if (to_free)
                        btrfs_block_rsv_release(root, block_rsv, to_free);
                return ret;
        }
 
+       spin_lock(&BTRFS_I(inode)->lock);
+       if (extra_reserve) {
+               BTRFS_I(inode)->delalloc_meta_reserved = 1;
+               nr_extents--;
+       }
+       BTRFS_I(inode)->reserved_extents += nr_extents;
+       spin_unlock(&BTRFS_I(inode)->lock);
+
        block_rsv_add_bytes(block_rsv, to_reserve, 1);
 
        return 0;
@@ -5107,11 +5124,11 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
        struct btrfs_root *root = orig_root->fs_info->extent_root;
        struct btrfs_free_cluster *last_ptr = NULL;
        struct btrfs_block_group_cache *block_group = NULL;
+       struct btrfs_block_group_cache *used_block_group;
        int empty_cluster = 2 * 1024 * 1024;
        int allowed_chunk_alloc = 0;
        int done_chunk_alloc = 0;
        struct btrfs_space_info *space_info;
-       int last_ptr_loop = 0;
        int loop = 0;
        int index = 0;
        int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
@@ -5173,6 +5190,7 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
 ideal_cache:
                block_group = btrfs_lookup_block_group(root->fs_info,
                                                       search_start);
+               used_block_group = block_group;
                /*
                 * we don't want to use the block group if it doesn't match our
                 * allocation bits, or if its not cached.
@@ -5210,6 +5228,7 @@ search:
                u64 offset;
                int cached;
 
+               used_block_group = block_group;
                btrfs_get_block_group(block_group);
                search_start = block_group->key.objectid;
 
@@ -5286,71 +5305,62 @@ alloc:
                spin_unlock(&block_group->free_space_ctl->tree_lock);
 
                /*
-                * Ok we want to try and use the cluster allocator, so lets look
-                * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
-                * have tried the cluster allocator plenty of times at this
-                * point and not have found anything, so we are likely way too
-                * fragmented for the clustering stuff to find anything, so lets
-                * just skip it and let the allocator find whatever block it can
-                * find
+                * Ok we want to try and use the cluster allocator, so
+                * lets look there
                 */
-               if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
+               if (last_ptr) {
                        /*
                         * the refill lock keeps out other
                         * people trying to start a new cluster
                         */
                        spin_lock(&last_ptr->refill_lock);
-                       if (!last_ptr->block_group ||
-                           last_ptr->block_group->ro ||
-                           !block_group_bits(last_ptr->block_group, data))
+                       used_block_group = last_ptr->block_group;
+                       if (used_block_group != block_group &&
+                           (!used_block_group ||
+                            used_block_group->ro ||
+                            !block_group_bits(used_block_group, data))) {
+                               used_block_group = block_group;
                                goto refill_cluster;
+                       }
+
+                       if (used_block_group != block_group)
+                               btrfs_get_block_group(used_block_group);
 
-                       offset = btrfs_alloc_from_cluster(block_group, last_ptr,
-                                                num_bytes, search_start);
+                       offset = btrfs_alloc_from_cluster(used_block_group,
+                         last_ptr, num_bytes, used_block_group->key.objectid);
                        if (offset) {
                                /* we have a block, we're done */
                                spin_unlock(&last_ptr->refill_lock);
                                goto checks;
                        }
 
-                       spin_lock(&last_ptr->lock);
-                       /*
-                        * whoops, this cluster doesn't actually point to
-                        * this block group.  Get a ref on the block
-                        * group is does point to and try again
-                        */
-                       if (!last_ptr_loop && last_ptr->block_group &&
-                           last_ptr->block_group != block_group &&
-                           index <=
-                                get_block_group_index(last_ptr->block_group)) {
-
-                               btrfs_put_block_group(block_group);
-                               block_group = last_ptr->block_group;
-                               btrfs_get_block_group(block_group);
-                               spin_unlock(&last_ptr->lock);
-                               spin_unlock(&last_ptr->refill_lock);
-
-                               last_ptr_loop = 1;
-                               search_start = block_group->key.objectid;
-                               /*
-                                * we know this block group is properly
-                                * in the list because
-                                * btrfs_remove_block_group, drops the
-                                * cluster before it removes the block
-                                * group from the list
-                                */
-                               goto have_block_group;
+                       WARN_ON(last_ptr->block_group != used_block_group);
+                       if (used_block_group != block_group) {
+                               btrfs_put_block_group(used_block_group);
+                               used_block_group = block_group;
                        }
-                       spin_unlock(&last_ptr->lock);
 refill_cluster:
+                       BUG_ON(used_block_group != block_group);
+                       /* If we are on LOOP_NO_EMPTY_SIZE, we can't
+                        * set up a new cluster, so let's just skip it
+                        * and let the allocator find whatever block
+                        * it can find.  If we reach this point, we
+                        * will have tried the cluster allocator
+                        * plenty of times and not have found
+                        * anything, so we are likely way too
+                        * fragmented for the clustering stuff to find
+                        * anything.  */
+                       if (loop >= LOOP_NO_EMPTY_SIZE) {
+                               spin_unlock(&last_ptr->refill_lock);
+                               goto unclustered_alloc;
+                       }
+
                        /*
                         * this cluster didn't work out, free it and
                         * start over
                         */
                        btrfs_return_cluster_to_free_space(NULL, last_ptr);
 
-                       last_ptr_loop = 0;
-
                        /* allocate a cluster in this block group */
                        ret = btrfs_find_space_cluster(trans, root,
                                               block_group, last_ptr,
@@ -5390,6 +5400,7 @@ refill_cluster:
                        goto loop;
                }
 
+unclustered_alloc:
                offset = btrfs_find_space_for_alloc(block_group, search_start,
                                                    num_bytes, empty_size);
                /*
@@ -5416,14 +5427,14 @@ checks:
                search_start = stripe_align(root, offset);
                /* move on to the next group */
                if (search_start + num_bytes >= search_end) {
-                       btrfs_add_free_space(block_group, offset, num_bytes);
+                       btrfs_add_free_space(used_block_group, offset, num_bytes);
                        goto loop;
                }
 
                /* move on to the next group */
                if (search_start + num_bytes >
-                   block_group->key.objectid + block_group->key.offset) {
-                       btrfs_add_free_space(block_group, offset, num_bytes);
+                   used_block_group->key.objectid + used_block_group->key.offset) {
+                       btrfs_add_free_space(used_block_group, offset, num_bytes);
                        goto loop;
                }
 
@@ -5431,14 +5442,14 @@ checks:
                ins->offset = num_bytes;
 
                if (offset < search_start)
-                       btrfs_add_free_space(block_group, offset,
+                       btrfs_add_free_space(used_block_group, offset,
                                             search_start - offset);
                BUG_ON(offset > search_start);
 
-               ret = btrfs_update_reserved_bytes(block_group, num_bytes,
+               ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
                                                  alloc_type);
                if (ret == -EAGAIN) {
-                       btrfs_add_free_space(block_group, offset, num_bytes);
+                       btrfs_add_free_space(used_block_group, offset, num_bytes);
                        goto loop;
                }
 
@@ -5447,15 +5458,19 @@ checks:
                ins->offset = num_bytes;
 
                if (offset < search_start)
-                       btrfs_add_free_space(block_group, offset,
+                       btrfs_add_free_space(used_block_group, offset,
                                             search_start - offset);
                BUG_ON(offset > search_start);
+               if (used_block_group != block_group)
+                       btrfs_put_block_group(used_block_group);
                btrfs_put_block_group(block_group);
                break;
 loop:
                failed_cluster_refill = false;
                failed_alloc = false;
                BUG_ON(index != get_block_group_index(block_group));
+               if (used_block_group != block_group)
+                       btrfs_put_block_group(used_block_group);
                btrfs_put_block_group(block_group);
        }
        up_read(&space_info->groups_sem);
index be1bf627a14b292655ad10a4478c015d2bff9a44..49f3c9dc09f4c81902299fd81c62da1ed8423250 100644 (file)
@@ -935,8 +935,10 @@ again:
        node = tree_search(tree, start);
        if (!node) {
                prealloc = alloc_extent_state_atomic(prealloc);
-               if (!prealloc)
-                       return -ENOMEM;
+               if (!prealloc) {
+                       err = -ENOMEM;
+                       goto out;
+               }
                err = insert_state(tree, prealloc, start, end, &bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
@@ -992,8 +994,10 @@ hit_next:
         */
        if (state->start < start) {
                prealloc = alloc_extent_state_atomic(prealloc);
-               if (!prealloc)
-                       return -ENOMEM;
+               if (!prealloc) {
+                       err = -ENOMEM;
+                       goto out;
+               }
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
@@ -1024,8 +1028,10 @@ hit_next:
                        this_end = last_start - 1;
 
                prealloc = alloc_extent_state_atomic(prealloc);
-               if (!prealloc)
-                       return -ENOMEM;
+               if (!prealloc) {
+                       err = -ENOMEM;
+                       goto out;
+               }
 
                /*
                 * Avoid to free 'prealloc' if it can be merged with
@@ -1051,8 +1057,10 @@ hit_next:
         */
        if (state->start <= end && state->end > end) {
                prealloc = alloc_extent_state_atomic(prealloc);
-               if (!prealloc)
-                       return -ENOMEM;
+               if (!prealloc) {
+                       err = -ENOMEM;
+                       goto out;
+               }
 
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);
index dafdfa059bf66a489bd3d858990b9025fd50a72f..97fbe939c050dc7d523baeaed960741c053cbf0d 100644 (file)
@@ -1167,6 +1167,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
        nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
                     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
                     (sizeof(struct page *)));
+       nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
+       nrptrs = max(nrptrs, 8);
        pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;
@@ -1387,7 +1389,11 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
                goto out;
        }
 
-       file_update_time(file);
+       err = btrfs_update_time(file);
+       if (err) {
+               mutex_unlock(&inode->i_mutex);
+               goto out;
+       }
        BTRFS_I(inode)->sequence++;
 
        start_pos = round_down(pos, root->sectorsize);
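
Editor's note: the buffered-write hunk clamps the per-iteration page batch to the task's remaining dirty-page allowance (nr_dirtied_pause minus nr_dirtied) while keeping a floor of 8, which is a plain min/max clamp. A tiny demo of the arithmetic:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        int nrptrs = 128;       /* what the iov math asked for */
        int allowance = 3;      /* e.g. nr_dirtied_pause - nr_dirtied */

        nrptrs = MIN(nrptrs, allowance);
        nrptrs = MAX(nrptrs, 8);        /* never batch fewer than 8 pages */
        printf("nrptrs = %d\n", nrptrs);        /* 8 */
        return 0;
}
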
index 2c984f7d4c2ac581787ecb6a11962d3e6fd1df75..fd1a06df5bc637c5dad0b0b7e5ce1ce88d5c1616 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/falloc.h>
 #include <linux/slab.h>
 #include <linux/ratelimit.h>
+#include <linux/mount.h>
 #include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -2031,7 +2032,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
        /* insert an orphan item to track this unlinked/truncated file */
        if (insert >= 1) {
                ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
-               BUG_ON(ret);
+               BUG_ON(ret && ret != -EEXIST);
        }
 
        /* insert an orphan item to track subvolume contains orphan files */
@@ -2158,6 +2159,38 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                if (ret && ret != -ESTALE)
                        goto out;
 
+               if (ret == -ESTALE && root == root->fs_info->tree_root) {
+                       struct btrfs_root *dead_root;
+                       struct btrfs_fs_info *fs_info = root->fs_info;
+                       int is_dead_root = 0;
+
+                       /*
+                        * this is an orphan in the tree root. Currently these
+                        * could come from 2 sources:
+                        *  a) a snapshot deletion in progress
+                        *  b) a free space cache inode
+                        * We need to distinguish those two, as the snapshot
+                        * orphan must not get deleted.
+                        * find_dead_roots already ran before us, so if this
+                        * is a snapshot deletion, we should find the root
+                        * in the dead_roots list
+                        */
+                       spin_lock(&fs_info->trans_lock);
+                       list_for_each_entry(dead_root, &fs_info->dead_roots,
+                                           root_list) {
+                               if (dead_root->root_key.objectid ==
+                                   found_key.objectid) {
+                                       is_dead_root = 1;
+                                       break;
+                               }
+                       }
+                       spin_unlock(&fs_info->trans_lock);
+                       if (is_dead_root) {
+                               /* prevent this orphan from being found again */
+                               key.offset = found_key.objectid - 1;
+                               continue;
+                       }
+               }
                /*
                 * Inode is already gone but the orphan item is still there,
                 * kill the orphan item.
@@ -2191,7 +2224,14 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                                continue;
                        }
                        nr_truncate++;
+                       /*
+                        * Need to hold the imutex for reservation purposes, not
+                        * Need to hold the i_mutex for reservation purposes, not
+                        * btrfs_delalloc_reserve_space to catch offenders.
+                        */
+                       mutex_lock(&inode->i_mutex);
                        ret = btrfs_truncate(inode);
+                       mutex_unlock(&inode->i_mutex);
                } else {
                        nr_unlink++;
                }
@@ -3327,7 +3367,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                        u64 hint_byte = 0;
                        hole_size = last_byte - cur_offset;
 
-                       trans = btrfs_start_transaction(root, 2);
+                       trans = btrfs_start_transaction(root, 3);
                        if (IS_ERR(trans)) {
                                err = PTR_ERR(trans);
                                break;
@@ -3337,6 +3377,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                                                 cur_offset + hole_size,
                                                 &hint_byte, 1);
                        if (err) {
+                               btrfs_update_inode(trans, root, inode);
                                btrfs_end_transaction(trans, root);
                                break;
                        }
@@ -3346,6 +3387,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                                        0, hole_size, 0, hole_size,
                                        0, 0, 0);
                        if (err) {
+                               btrfs_update_inode(trans, root, inode);
                                btrfs_end_transaction(trans, root);
                                break;
                        }
@@ -3353,6 +3395,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                        btrfs_drop_extent_cache(inode, hole_start,
                                        last_byte - 1, 0);
 
+                       btrfs_update_inode(trans, root, inode);
                        btrfs_end_transaction(trans, root);
                }
                free_extent_map(em);
@@ -3370,6 +3413,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
 
 static int btrfs_setsize(struct inode *inode, loff_t newsize)
 {
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct btrfs_trans_handle *trans;
        loff_t oldsize = i_size_read(inode);
        int ret;
 
@@ -3377,16 +3422,19 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize)
                return 0;
 
        if (newsize > oldsize) {
-               i_size_write(inode, newsize);
-               btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
                truncate_pagecache(inode, oldsize, newsize);
                ret = btrfs_cont_expand(inode, oldsize, newsize);
-               if (ret) {
-                       btrfs_setsize(inode, oldsize);
+               if (ret)
                        return ret;
-               }
 
-               mark_inode_dirty(inode);
+               trans = btrfs_start_transaction(root, 1);
+               if (IS_ERR(trans))
+                       return PTR_ERR(trans);
+
+               i_size_write(inode, newsize);
+               btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
+               ret = btrfs_update_inode(trans, root, inode);
+               btrfs_end_transaction_throttle(trans, root);
        } else {
 
                /*
@@ -3426,9 +3474,9 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
 
        if (attr->ia_valid) {
                setattr_copy(inode, attr);
-               mark_inode_dirty(inode);
+               err = btrfs_dirty_inode(inode);
 
-               if (attr->ia_valid & ATTR_MODE)
+               if (!err && attr->ia_valid & ATTR_MODE)
                        err = btrfs_acl_chmod(inode);
        }
 
@@ -4204,42 +4252,80 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
  * FIXME, needs more benchmarking...there are no reasons other than performance
  * to keep or drop this code.
  */
-void btrfs_dirty_inode(struct inode *inode, int flags)
+int btrfs_dirty_inode(struct inode *inode)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_trans_handle *trans;
        int ret;
 
        if (BTRFS_I(inode)->dummy_inode)
-               return;
+               return 0;
 
        trans = btrfs_join_transaction(root);
-       BUG_ON(IS_ERR(trans));
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
 
        ret = btrfs_update_inode(trans, root, inode);
        if (ret && ret == -ENOSPC) {
                /* whoops, lets try again with the full transaction */
                btrfs_end_transaction(trans, root);
                trans = btrfs_start_transaction(root, 1);
-               if (IS_ERR(trans)) {
-                       printk_ratelimited(KERN_ERR "btrfs: fail to "
-                                      "dirty  inode %llu error %ld\n",
-                                      (unsigned long long)btrfs_ino(inode),
-                                      PTR_ERR(trans));
-                       return;
-               }
+               if (IS_ERR(trans))
+                       return PTR_ERR(trans);
 
                ret = btrfs_update_inode(trans, root, inode);
-               if (ret) {
-                       printk_ratelimited(KERN_ERR "btrfs: fail to "
-                                      "dirty  inode %llu error %d\n",
-                                      (unsigned long long)btrfs_ino(inode),
-                                      ret);
-               }
        }
        btrfs_end_transaction(trans, root);
        if (BTRFS_I(inode)->delayed_node)
                btrfs_balance_delayed_items(root);
+
+       return ret;
+}
+
+/*
+ * This is a copy of file_update_time.  We need this so we can return error on
+ * ENOSPC for updating the inode in the case of file write and mmap writes.
+ */
+int btrfs_update_time(struct file *file)
+{
+       struct inode *inode = file->f_path.dentry->d_inode;
+       struct timespec now;
+       int ret;
+       enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
+
+       /* First try to exhaust all avenues to not sync */
+       if (IS_NOCMTIME(inode))
+               return 0;
+
+       now = current_fs_time(inode->i_sb);
+       if (!timespec_equal(&inode->i_mtime, &now))
+               sync_it = S_MTIME;
+
+       if (!timespec_equal(&inode->i_ctime, &now))
+               sync_it |= S_CTIME;
+
+       if (IS_I_VERSION(inode))
+               sync_it |= S_VERSION;
+
+       if (!sync_it)
+               return 0;
+
+       /* Finally allowed to write? Takes lock. */
+       if (mnt_want_write_file(file))
+               return 0;
+
+       /* Only change inode inside the lock region */
+       if (sync_it & S_VERSION)
+               inode_inc_iversion(inode);
+       if (sync_it & S_CTIME)
+               inode->i_ctime = now;
+       if (sync_it & S_MTIME)
+               inode->i_mtime = now;
+       ret = btrfs_dirty_inode(inode);
+       if (!ret)
+               mark_inode_dirty_sync(inode);
+       mnt_drop_write(file->f_path.mnt);
+       return ret;
 }
 
 /*
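
Editor's note: the hunk above turns btrfs_dirty_inode() into an int-returning helper that, on -ENOSPC from the cheap joined transaction, retries the inode update under a full transaction, and adds btrfs_update_time() as a copy of file_update_time() so that error can reach callers. A standalone sketch of the "retry a cheap path with a heavier one on a specific error" pattern, using stand-in functions rather than the btrfs transaction API:

#include <stdio.h>
#include <errno.h>

/* stand-ins: a cheap attempt that may hit -ENOSPC and a costlier fallback */
static int cheap_update(int simulate_enospc)
{
        return simulate_enospc ? -ENOSPC : 0;
}

static int expensive_update(void)
{
        return 0;       /* assume the fully reserved path succeeds */
}

static int dirty_inode(int simulate_enospc)
{
        int ret = cheap_update(simulate_enospc);

        /* only -ENOSPC warrants the fallback; other errors return as-is */
        if (ret == -ENOSPC)
                ret = expensive_update();
        return ret;
}

int main(void)
{
        printf("%d %d\n", dirty_inode(0), dirty_inode(1));
        return 0;
}
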
@@ -4504,10 +4590,6 @@ static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
        int err = btrfs_add_link(trans, dir, inode,
                                 dentry->d_name.name, dentry->d_name.len,
                                 backref, index);
-       if (!err) {
-               d_instantiate(dentry, inode);
-               return 0;
-       }
        if (err > 0)
                err = -EEXIST;
        return err;
@@ -4555,13 +4637,21 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
+       /*
+       * If the active LSM wants to access the inode during
+       * d_instantiate it needs these. Smack checks to see
+       * if the filesystem supports xattrs by looking at the
+       * ops vector.
+       */
+
+       inode->i_op = &btrfs_special_inode_operations;
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
                drop_inode = 1;
        else {
-               inode->i_op = &btrfs_special_inode_operations;
                init_special_inode(inode, inode->i_mode, rdev);
                btrfs_update_inode(trans, root, inode);
+               d_instantiate(dentry, inode);
        }
 out_unlock:
        nr = trans->blocks_used;
@@ -4613,15 +4703,23 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
+       /*
+       * If the active LSM wants to access the inode during
+       * d_instantiate it needs these. Smack checks to see
+       * if the filesystem supports xattrs by looking at the
+       * ops vector.
+       */
+       inode->i_fop = &btrfs_file_operations;
+       inode->i_op = &btrfs_file_inode_operations;
+
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
                drop_inode = 1;
        else {
                inode->i_mapping->a_ops = &btrfs_aops;
                inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
-               inode->i_fop = &btrfs_file_operations;
-               inode->i_op = &btrfs_file_inode_operations;
                BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
+               d_instantiate(dentry, inode);
        }
 out_unlock:
        nr = trans->blocks_used;
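
The comment repeated in the mknod and create hunks above (and again in the symlink hunk further down) explains the reordering: the ops vectors are installed before d_instantiate() so that an LSM hook running from it, such as Smack probing the ops vector for xattr support, sees a fully formed inode, and d_instantiate() itself is deferred until the link step has succeeded. The following is only a schematic sketch of that ordering; the sketchfs_* names are placeholders rather than real btrfs symbols, and the directory-link step is elided.

#include <linux/fs.h>
#include <linux/dcache.h>

static const struct inode_operations sketchfs_inode_ops;
static const struct file_operations sketchfs_file_ops;

static int sketchfs_finish_new_inode(struct dentry *dentry, struct inode *inode)
{
        /* publish the ops vectors first: an LSM hook called from
         * d_instantiate() may look at inode->i_op */
        inode->i_op = &sketchfs_inode_ops;
        inode->i_fop = &sketchfs_file_ops;

        /* ... link the inode into the directory here; on failure, return
         * the error without instantiating the dentry ... */

        d_instantiate(dentry, inode);   /* only once everything succeeded */
        return 0;
}
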
@@ -4679,6 +4777,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
                struct dentry *parent = dentry->d_parent;
                err = btrfs_update_inode(trans, root, inode);
                BUG_ON(err);
+               d_instantiate(dentry, inode);
                btrfs_log_new_name(trans, inode, NULL, parent);
        }
 
@@ -6303,7 +6402,12 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        u64 page_start;
        u64 page_end;
 
+       /* Need this to keep space reservations serialized */
+       mutex_lock(&inode->i_mutex);
        ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
+       mutex_unlock(&inode->i_mutex);
+       if (!ret)
+               ret = btrfs_update_time(vma->vm_file);
        if (ret) {
                if (ret == -ENOMEM)
                        ret = VM_FAULT_OOM;
@@ -6515,8 +6619,9 @@ static int btrfs_truncate(struct inode *inode)
                        /* Just need the 1 for updating the inode */
                        trans = btrfs_start_transaction(root, 1);
                        if (IS_ERR(trans)) {
-                               err = PTR_ERR(trans);
-                               goto out;
+                               ret = err = PTR_ERR(trans);
+                               trans = NULL;
+                               break;
                        }
                }
 
@@ -7076,14 +7181,21 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                goto out_unlock;
        }
 
+       /*
+       * If the active LSM wants to access the inode during
+       * d_instantiate it needs these. Smack checks to see
+       * if the filesystem supports xattrs by looking at the
+       * ops vector.
+       */
+       inode->i_fop = &btrfs_file_operations;
+       inode->i_op = &btrfs_file_inode_operations;
+
        err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
        if (err)
                drop_inode = 1;
        else {
                inode->i_mapping->a_ops = &btrfs_aops;
                inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
-               inode->i_fop = &btrfs_file_operations;
-               inode->i_op = &btrfs_file_inode_operations;
                BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
        }
        if (drop_inode)
@@ -7132,6 +7244,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                drop_inode = 1;
 
 out_unlock:
+       if (!err)
+               d_instantiate(dentry, inode);
        nr = trans->blocks_used;
        btrfs_end_transaction_throttle(trans, root);
        if (drop_inode) {
@@ -7353,6 +7467,7 @@ static const struct inode_operations btrfs_symlink_inode_operations = {
        .follow_link    = page_follow_link_light,
        .put_link       = page_put_link,
        .getattr        = btrfs_getattr,
+       .setattr        = btrfs_setattr,
        .permission     = btrfs_permission,
        .setxattr       = btrfs_setxattr,
        .getxattr       = btrfs_getxattr,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 72d461656f606647292657f2367ee438a50d2a40..c04f02c7d5bbea215557a1d8ae2fc8ccce6e5862 100644 (file)
@@ -252,11 +252,11 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
        trans = btrfs_join_transaction(root);
        BUG_ON(IS_ERR(trans));
 
+       btrfs_update_iflags(inode);
+       inode->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode(trans, root, inode);
        BUG_ON(ret);
 
-       btrfs_update_iflags(inode);
-       inode->i_ctime = CURRENT_TIME;
        btrfs_end_transaction(trans, root);
 
        mnt_drop_write(file->f_path.mnt);
@@ -858,8 +858,10 @@ static int cluster_pages_for_defrag(struct inode *inode,
                return 0;
        file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
 
+       mutex_lock(&inode->i_mutex);
        ret = btrfs_delalloc_reserve_space(inode,
                                           num_pages << PAGE_CACHE_SHIFT);
+       mutex_unlock(&inode->i_mutex);
        if (ret)
                return ret;
 again:
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index dff29d5e151a3b80d516be44cc1ebdf0d00adad6..cfb55434a46981fa64416e68fa3fd29cf58238f5 100644 (file)
@@ -2947,7 +2947,9 @@ static int relocate_file_extent_cluster(struct inode *inode,
        index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
        last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
        while (index <= last_index) {
+               mutex_lock(&inode->i_mutex);
                ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
+               mutex_unlock(&inode->i_mutex);
                if (ret)
                        goto out;
 
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index c27bcb67f3304d806ab7e90cef8c3bcdef78b19c..ddf2c90d3fc0c475cbfabf6397c84f734abcc5e8 100644 (file)
@@ -1535,18 +1535,22 @@ static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
 static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
+       int ret = 0;
 
        mutex_lock(&fs_info->scrub_lock);
        if (fs_info->scrub_workers_refcnt == 0) {
                btrfs_init_workers(&fs_info->scrub_workers, "scrub",
                           fs_info->thread_pool_size, &fs_info->generic_worker);
                fs_info->scrub_workers.idle_thresh = 4;
-               btrfs_start_workers(&fs_info->scrub_workers, 1);
+               ret = btrfs_start_workers(&fs_info->scrub_workers);
+               if (ret)
+                       goto out;
        }
        ++fs_info->scrub_workers_refcnt;
+out:
        mutex_unlock(&fs_info->scrub_lock);
 
-       return 0;
+       return ret;
 }
 
 static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
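
The scrub hunk above stops ignoring btrfs_start_workers() failures: the shared worker pool is started lazily by the first getter, a start failure is propagated to the caller, and the reference count is only bumped on success. Below is a standalone plain-C sketch of that pattern using pthreads; start_pool() is an invented stand-in that can be forced to fail and has nothing to do with the real btrfs worker code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static int pool_refcnt;

static int start_pool(int fail)
{
        return fail ? -1 : 0;
}

static int workers_get(int fail)
{
        int ret = 0;

        pthread_mutex_lock(&pool_lock);
        if (pool_refcnt == 0) {
                ret = start_pool(fail);
                if (ret)
                        goto out;       /* do not take a reference on failure */
        }
        ++pool_refcnt;
out:
        pthread_mutex_unlock(&pool_lock);
        return ret;
}

int main(void)
{
        printf("get (fail) -> %d, refcnt %d\n", workers_get(1), pool_refcnt);
        printf("get (ok)   -> %d, refcnt %d\n", workers_get(0), pool_refcnt);
        return 0;
}
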
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index e28ad4baf483af6b4c7e5d8450dc4a73909d0247..200f63bc6675eca20cf1b55c9ced534efccaf63b 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/slab.h>
 #include <linux/cleancache.h>
 #include <linux/mnt_namespace.h>
+#include <linux/ratelimit.h>
 #include "compat.h"
 #include "delayed-inode.h"
 #include "ctree.h"
@@ -1053,7 +1054,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
        u64 avail_space;
        u64 used_space;
        u64 min_stripe_size;
-       int min_stripes = 1;
+       int min_stripes = 1, num_stripes = 1;
        int i = 0, nr_devices;
        int ret;
 
@@ -1067,12 +1068,16 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
 
        /* calc min stripe number for data space alloction */
        type = btrfs_get_alloc_profile(root, 1);
-       if (type & BTRFS_BLOCK_GROUP_RAID0)
+       if (type & BTRFS_BLOCK_GROUP_RAID0) {
                min_stripes = 2;
-       else if (type & BTRFS_BLOCK_GROUP_RAID1)
+               num_stripes = nr_devices;
+       } else if (type & BTRFS_BLOCK_GROUP_RAID1) {
                min_stripes = 2;
-       else if (type & BTRFS_BLOCK_GROUP_RAID10)
+               num_stripes = 2;
+       } else if (type & BTRFS_BLOCK_GROUP_RAID10) {
                min_stripes = 4;
+               num_stripes = 4;
+       }
 
        if (type & BTRFS_BLOCK_GROUP_DUP)
                min_stripe_size = 2 * BTRFS_STRIPE_LEN;
@@ -1141,13 +1146,16 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
        i = nr_devices - 1;
        avail_space = 0;
        while (nr_devices >= min_stripes) {
+               if (num_stripes > nr_devices)
+                       num_stripes = nr_devices;
+
                if (devices_info[i].max_avail >= min_stripe_size) {
                        int j;
                        u64 alloc_size;
 
-                       avail_space += devices_info[i].max_avail * min_stripes;
+                       avail_space += devices_info[i].max_avail * num_stripes;
                        alloc_size = devices_info[i].max_avail;
-                       for (j = i + 1 - min_stripes; j <= i; j++)
+                       for (j = i + 1 - num_stripes; j <= i; j++)
                                devices_info[j].max_avail -= alloc_size;
                }
                i--;
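
The two hunks above change the free-space estimate so that, for striped profiles, each remaining device's contribution is multiplied by the number of devices that can still take a stripe (num_stripes, capped at the remaining device count) instead of the fixed minimum. A standalone worked example with made-up per-device free-space figures, sorted largest first as the kernel code expects; it is not kernel code, just the arithmetic.

#include <stdio.h>

int main(void)
{
        unsigned long long max_avail[] = { 100, 80, 60 };       /* hypothetical free bytes */
        unsigned long long min_stripe_size = 10, avail_space = 0, alloc_size;
        int nr_devices = 3, min_stripes = 2, num_stripes = 3;   /* RAID0 across 3 devices */
        int i = nr_devices - 1, j;

        while (nr_devices >= min_stripes) {
                if (num_stripes > nr_devices)
                        num_stripes = nr_devices;

                if (max_avail[i] >= min_stripe_size) {
                        avail_space += max_avail[i] * num_stripes;
                        alloc_size = max_avail[i];
                        for (j = i + 1 - num_stripes; j <= i; j++)
                                max_avail[j] -= alloc_size;
                }
                i--;
                nr_devices--;
        }

        /* prints 220: 3-wide stripes up to 60 per device, then a 2-wide
         * stripe over the remaining 40 and 20; the last 20 cannot be striped */
        printf("estimated usable data space: %llu\n", avail_space);
        return 0;
}
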
@@ -1264,6 +1272,16 @@ static int btrfs_unfreeze(struct super_block *sb)
        return 0;
 }
 
+static void btrfs_fs_dirty_inode(struct inode *inode, int flags)
+{
+       int ret;
+
+       ret = btrfs_dirty_inode(inode);
+       if (ret)
+               printk_ratelimited(KERN_ERR "btrfs: fail to dirty inode %Lu "
+                                  "error %d\n", btrfs_ino(inode), ret);
+}
+
 static const struct super_operations btrfs_super_ops = {
        .drop_inode     = btrfs_drop_inode,
        .evict_inode    = btrfs_evict_inode,
@@ -1271,7 +1289,7 @@ static const struct super_operations btrfs_super_ops = {
        .sync_fs        = btrfs_sync_fs,
        .show_options   = btrfs_show_options,
        .write_inode    = btrfs_write_inode,
-       .dirty_inode    = btrfs_dirty_inode,
+       .dirty_inode    = btrfs_fs_dirty_inode,
        .alloc_inode    = btrfs_alloc_inode,
        .destroy_inode  = btrfs_destroy_inode,
        .statfs         = btrfs_statfs,
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c37433d3cd82464adbe13173433521a4ab1cca14..f4b839fd3c9dd5cd854cb7bada4e3831d8ea1713 100644 (file)
@@ -295,6 +295,12 @@ loop_lock:
                        btrfs_requeue_work(&device->work);
                        goto done;
                }
+               /* unplug every 64 requests just for good measure */
+               if (batch_run % 64 == 0) {
+                       blk_finish_plug(&plug);
+                       blk_start_plug(&plug);
+                       sync_pending = 0;
+               }
        }
 
        cond_resched();
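
The worker-loop hunk above breaks up very long submission batches by cycling the block plug every 64 requests, so queued I/O periodically reaches the device instead of sitting in the plug for the whole run. The sketch below shows the same pattern in isolation; submit_many() and its arguments are invented for illustration, and the batch size of 64 simply mirrors the hunk.

#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

static void submit_many(struct bio **bios, int count)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);
        for (i = 0; i < count; i++) {
                submit_bio(WRITE, bios[i]);
                if ((i + 1) % 64 == 0) {
                        /* flush what has been queued so far, then
                         * keep batching the rest */
                        blk_finish_plug(&plug);
                        blk_start_plug(&plug);
                }
        }
        blk_finish_plug(&plug);
}
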
@@ -1611,7 +1617,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
                return -EINVAL;
 
-       bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
+       bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
                                  root->fs_info->bdev_holder);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);
@@ -3258,7 +3264,7 @@ static void btrfs_end_bio(struct bio *bio, int err)
                 */
                if (atomic_read(&bbio->error) > bbio->max_errors) {
                        err = -EIO;
-               } else if (err) {
+               } else {
                        /*
                         * this bio is actually up to date, we didn't
                         * go over the max number of errors
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 4144caf2f9d3a5ef95a7a9162b1de0cfa326cf4e..173b1d22e59b5a4bf8ed714f72b233cb70537468 100644 (file)
@@ -87,7 +87,7 @@ static int ceph_set_page_dirty(struct page *page)
        snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
 
        /* dirty the head */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (ci->i_head_snapc == NULL)
                ci->i_head_snapc = ceph_get_snap_context(snapc);
        ++ci->i_wrbuffer_ref_head;
@@ -100,7 +100,7 @@ static int ceph_set_page_dirty(struct page *page)
             ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
             ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
             snapc, snapc->seq, snapc->num_snaps);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        /* now adjust page */
        spin_lock_irq(&mapping->tree_lock);
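
The ceph hunks in this and the following files are one mechanical change: ceph-private per-inode state moves from the shared VFS inode->i_lock to a dedicated i_ceph_lock spinlock, which is initialised in the ceph_alloc_inode hunk further down. The reduced sketch below only shows the shape of the new locking; the structure is cut down to the two fields the example needs and is not the real ceph_inode_info.

#include <linux/spinlock.h>

struct ceph_inode_sketch {
        spinlock_t i_ceph_lock;         /* replaces uses of vfs_inode.i_lock */
        int i_wrbuffer_ref;             /* one example of the state it protects */
};

static void sketch_init(struct ceph_inode_sketch *ci)
{
        spin_lock_init(&ci->i_ceph_lock);
        ci->i_wrbuffer_ref = 0;
}

static void sketch_get_wrbuffer(struct ceph_inode_sketch *ci)
{
        spin_lock(&ci->i_ceph_lock);    /* was: spin_lock(&inode->i_lock) */
        ci->i_wrbuffer_ref++;
        spin_unlock(&ci->i_ceph_lock);
}
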
@@ -391,7 +391,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
        struct ceph_snap_context *snapc = NULL;
        struct ceph_cap_snap *capsnap = NULL;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
                dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
                     capsnap->context, capsnap->dirty_pages);
@@ -407,7 +407,7 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
                dout(" head snapc %p has %d dirty pages\n",
                     snapc, ci->i_wrbuffer_ref_head);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return snapc;
 }
 
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 0f327c6c967954ec329f25bb44f45a1e4219cd71..8b53193e4f7ca67e3011c8501c8f1fee62357f9d 100644 (file)
@@ -309,7 +309,7 @@ void ceph_reservation_status(struct ceph_fs_client *fsc,
 /*
  * Find ceph_cap for given mds, if any.
  *
- * Called with i_lock held.
+ * Called with i_ceph_lock held.
  */
 static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
 {
@@ -332,9 +332,9 @@ struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
 {
        struct ceph_cap *cap;
 
-       spin_lock(&ci->vfs_inode.i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ci, mds);
-       spin_unlock(&ci->vfs_inode.i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return cap;
 }
 
@@ -361,15 +361,16 @@ static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
 
 int ceph_get_cap_mds(struct inode *inode)
 {
+       struct ceph_inode_info *ci = ceph_inode(inode);
        int mds;
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        mds = __ceph_get_cap_mds(ceph_inode(inode));
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return mds;
 }
 
 /*
- * Called under i_lock.
+ * Called under i_ceph_lock.
  */
 static void __insert_cap_node(struct ceph_inode_info *ci,
                              struct ceph_cap *new)
@@ -415,7 +416,7 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
  *
  * If I_FLUSH is set, leave the inode at the front of the list.
  *
- * Caller holds i_lock
+ * Caller holds i_ceph_lock
  *    -> we take mdsc->cap_delay_lock
  */
 static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
@@ -457,7 +458,7 @@ static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
 /*
  * Cancel delayed work on cap.
  *
- * Caller must hold i_lock.
+ * Caller must hold i_ceph_lock.
  */
 static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
                               struct ceph_inode_info *ci)
@@ -532,14 +533,14 @@ int ceph_add_cap(struct inode *inode,
                wanted |= ceph_caps_for_mode(fmode);
 
 retry:
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ci, mds);
        if (!cap) {
                if (new_cap) {
                        cap = new_cap;
                        new_cap = NULL;
                } else {
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                        new_cap = get_cap(mdsc, caps_reservation);
                        if (new_cap == NULL)
                                return -ENOMEM;
@@ -625,7 +626,7 @@ retry:
 
        if (fmode >= 0)
                __ceph_get_fmode(ci, fmode);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        wake_up_all(&ci->i_cap_wq);
        return 0;
 }
@@ -792,7 +793,7 @@ int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
        struct rb_node *p;
        int ret = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (__cap_is_valid(cap) &&
@@ -801,7 +802,7 @@ int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
                        break;
                }
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        dout("ceph_caps_revoking %p %s = %d\n", inode,
             ceph_cap_string(mask), ret);
        return ret;
@@ -855,7 +856,7 @@ int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
 }
 
 /*
- * called under i_lock
+ * called under i_ceph_lock
  */
 static int __ceph_is_any_caps(struct ceph_inode_info *ci)
 {
@@ -865,7 +866,7 @@ static int __ceph_is_any_caps(struct ceph_inode_info *ci)
 /*
  * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
  *
- * caller should hold i_lock.
+ * caller should hold i_ceph_lock.
  * caller will not hold session s_mutex if called from destroy_inode.
  */
 void __ceph_remove_cap(struct ceph_cap *cap)
@@ -1028,7 +1029,7 @@ static void __queue_cap_release(struct ceph_mds_session *session,
 
 /*
  * Queue cap releases when an inode is dropped from our cache.  Since
- * inode is about to be destroyed, there is no need for i_lock.
+ * inode is about to be destroyed, there is no need for i_ceph_lock.
  */
 void ceph_queue_caps_release(struct inode *inode)
 {
@@ -1049,7 +1050,7 @@ void ceph_queue_caps_release(struct inode *inode)
 
 /*
  * Send a cap msg on the given inode.  Update our caps state, then
- * drop i_lock and send the message.
+ * drop i_ceph_lock and send the message.
  *
  * Make note of max_size reported/requested from mds, revoked caps
  * that have now been implemented.
@@ -1061,13 +1062,13 @@ void ceph_queue_caps_release(struct inode *inode)
  * Return non-zero if delayed release, or we experienced an error
  * such that the caller should requeue + retry later.
  *
- * called with i_lock, then drops it.
+ * called with i_ceph_lock, then drops it.
  * caller should hold snap_rwsem (read), s_mutex.
  */
 static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
                      int op, int used, int want, int retain, int flushing,
                      unsigned *pflush_tid)
-       __releases(cap->ci->vfs_inode->i_lock)
+       __releases(cap->ci->i_ceph_lock)
 {
        struct ceph_inode_info *ci = cap->ci;
        struct inode *inode = &ci->vfs_inode;
@@ -1170,7 +1171,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
                xattr_version = ci->i_xattrs.version;
        }
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
                op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
@@ -1198,13 +1199,13 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
  * Unless @again is true, skip cap_snaps that were already sent to
  * the MDS (i.e., during this session).
  *
- * Called under i_lock.  Takes s_mutex as needed.
+ * Called under i_ceph_lock.  Takes s_mutex as needed.
  */
 void __ceph_flush_snaps(struct ceph_inode_info *ci,
                        struct ceph_mds_session **psession,
                        int again)
-               __releases(ci->vfs_inode->i_lock)
-               __acquires(ci->vfs_inode->i_lock)
+               __releases(ci->i_ceph_lock)
+               __acquires(ci->i_ceph_lock)
 {
        struct inode *inode = &ci->vfs_inode;
        int mds;
@@ -1261,7 +1262,7 @@ retry:
                        session = NULL;
                }
                if (!session) {
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                        mutex_lock(&mdsc->mutex);
                        session = __ceph_lookup_mds_session(mdsc, mds);
                        mutex_unlock(&mdsc->mutex);
@@ -1275,7 +1276,7 @@ retry:
                         * deletion or migration.  retry, and we'll
                         * get a better @mds value next time.
                         */
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        goto retry;
                }
 
@@ -1285,7 +1286,7 @@ retry:
                        list_del_init(&capsnap->flushing_item);
                list_add_tail(&capsnap->flushing_item,
                              &session->s_cap_snaps_flushing);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
 
                dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
                     inode, capsnap, capsnap->follows, capsnap->flush_tid);
@@ -1302,7 +1303,7 @@ retry:
                next_follows = capsnap->follows + 1;
                ceph_put_cap_snap(capsnap);
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                goto retry;
        }
 
@@ -1322,11 +1323,9 @@ out:
 
 static void ceph_flush_snaps(struct ceph_inode_info *ci)
 {
-       struct inode *inode = &ci->vfs_inode;
-
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        __ceph_flush_snaps(ci, NULL, 0);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -1373,7 +1372,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
  * Add dirty inode to the flushing list.  Assigned a seq number so we
  * can wait for caps to flush without starving.
  *
- * Called under i_lock.
+ * Called under i_ceph_lock.
  */
 static int __mark_caps_flushing(struct inode *inode,
                                 struct ceph_mds_session *session)
@@ -1421,9 +1420,9 @@ static int try_nonblocking_invalidate(struct inode *inode)
        struct ceph_inode_info *ci = ceph_inode(inode);
        u32 invalidating_gen = ci->i_rdcache_gen;
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        invalidate_mapping_pages(&inode->i_data, 0, -1);
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        if (inode->i_data.nrpages == 0 &&
            invalidating_gen == ci->i_rdcache_gen) {
@@ -1470,7 +1469,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
        if (mdsc->stopping)
                is_delayed = 1;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        if (ci->i_ceph_flags & CEPH_I_FLUSH)
                flags |= CHECK_CAPS_FLUSH;
@@ -1480,7 +1479,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
                __ceph_flush_snaps(ci, &session, 0);
        goto retry_locked;
 retry:
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 retry_locked:
        file_wanted = __ceph_caps_file_wanted(ci);
        used = __ceph_caps_used(ci);
@@ -1634,7 +1633,7 @@ ack:
                        if (mutex_trylock(&session->s_mutex) == 0) {
                                dout("inverting session/ino locks on %p\n",
                                     session);
-                               spin_unlock(&inode->i_lock);
+                               spin_unlock(&ci->i_ceph_lock);
                                if (took_snap_rwsem) {
                                        up_read(&mdsc->snap_rwsem);
                                        took_snap_rwsem = 0;
@@ -1648,7 +1647,7 @@ ack:
                        if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
                                dout("inverting snap/in locks on %p\n",
                                     inode);
-                               spin_unlock(&inode->i_lock);
+                               spin_unlock(&ci->i_ceph_lock);
                                down_read(&mdsc->snap_rwsem);
                                took_snap_rwsem = 1;
                                goto retry;
@@ -1664,10 +1663,10 @@ ack:
                mds = cap->mds;  /* remember mds, so we don't repeat */
                sent++;
 
-               /* __send_cap drops i_lock */
+               /* __send_cap drops i_ceph_lock */
                delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
                                      retain, flushing, NULL);
-               goto retry; /* retake i_lock and restart our cap scan. */
+               goto retry; /* retake i_ceph_lock and restart our cap scan. */
        }
 
        /*
@@ -1681,7 +1680,7 @@ ack:
        else if (!is_delayed || force_requeue)
                __cap_delay_requeue(mdsc, ci);
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (queue_invalidate)
                ceph_queue_invalidate(inode);
@@ -1704,7 +1703,7 @@ static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
        int flushing = 0;
 
 retry:
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
                dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
                goto out;
@@ -1716,7 +1715,7 @@ retry:
                int delayed;
 
                if (!session) {
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                        session = cap->session;
                        mutex_lock(&session->s_mutex);
                        goto retry;
@@ -1727,18 +1726,18 @@ retry:
 
                flushing = __mark_caps_flushing(inode, session);
 
-               /* __send_cap drops i_lock */
+               /* __send_cap drops i_ceph_lock */
                delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
                                     cap->issued | cap->implemented, flushing,
                                     flush_tid);
                if (!delayed)
                        goto out_unlocked;
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                __cap_delay_requeue(mdsc, ci);
        }
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 out_unlocked:
        if (session && unlock_session)
                mutex_unlock(&session->s_mutex);
@@ -1753,7 +1752,7 @@ static int caps_are_flushed(struct inode *inode, unsigned tid)
        struct ceph_inode_info *ci = ceph_inode(inode);
        int i, ret = 1;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        for (i = 0; i < CEPH_CAP_BITS; i++)
                if ((ci->i_flushing_caps & (1 << i)) &&
                    ci->i_cap_flush_tid[i] <= tid) {
@@ -1761,7 +1760,7 @@ static int caps_are_flushed(struct inode *inode, unsigned tid)
                        ret = 0;
                        break;
                }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return ret;
 }
 
@@ -1868,10 +1867,10 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
                struct ceph_mds_client *mdsc =
                        ceph_sb_to_client(inode->i_sb)->mdsc;
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                if (__ceph_caps_dirty(ci))
                        __cap_delay_requeue_front(mdsc, ci);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
        return err;
 }
@@ -1894,7 +1893,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
                struct inode *inode = &ci->vfs_inode;
                struct ceph_cap *cap;
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                cap = ci->i_auth_cap;
                if (cap && cap->session == session) {
                        dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
@@ -1904,7 +1903,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
                        pr_err("%p auth cap %p not mds%d ???\n", inode,
                               cap, session->s_mds);
                }
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
 }
 
@@ -1921,7 +1920,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
                struct ceph_cap *cap;
                int delayed = 0;
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                cap = ci->i_auth_cap;
                if (cap && cap->session == session) {
                        dout("kick_flushing_caps %p cap %p %s\n", inode,
@@ -1932,14 +1931,14 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
                                             cap->issued | cap->implemented,
                                             ci->i_flushing_caps, NULL);
                        if (delayed) {
-                               spin_lock(&inode->i_lock);
+                               spin_lock(&ci->i_ceph_lock);
                                __cap_delay_requeue(mdsc, ci);
-                               spin_unlock(&inode->i_lock);
+                               spin_unlock(&ci->i_ceph_lock);
                        }
                } else {
                        pr_err("%p auth cap %p not mds%d ???\n", inode,
                               cap, session->s_mds);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                }
        }
 }
@@ -1952,7 +1951,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
        struct ceph_cap *cap;
        int delayed = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap = ci->i_auth_cap;
        dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode,
             ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq);
@@ -1964,12 +1963,12 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
                                     cap->issued | cap->implemented,
                                     ci->i_flushing_caps, NULL);
                if (delayed) {
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        __cap_delay_requeue(mdsc, ci);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                }
        } else {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
 }
 
@@ -1978,7 +1977,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
  * Take references to capabilities we hold, so that we don't release
  * them to the MDS prematurely.
  *
- * Protected by i_lock.
+ * Protected by i_ceph_lock.
  */
 static void __take_cap_refs(struct ceph_inode_info *ci, int got)
 {
@@ -2016,7 +2015,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
 
        dout("get_cap_refs %p need %s want %s\n", inode,
             ceph_cap_string(need), ceph_cap_string(want));
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        /* make sure file is actually open */
        file_wanted = __ceph_caps_file_wanted(ci);
@@ -2077,7 +2076,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
                     ceph_cap_string(have), ceph_cap_string(need));
        }
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        dout("get_cap_refs %p ret %d got %s\n", inode,
             ret, ceph_cap_string(*got));
        return ret;
@@ -2094,7 +2093,7 @@ static void check_max_size(struct inode *inode, loff_t endoff)
        int check = 0;
 
        /* do we need to explicitly request a larger max_size? */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if ((endoff >= ci->i_max_size ||
             endoff > (inode->i_size << 1)) &&
            endoff > ci->i_wanted_max_size) {
@@ -2103,7 +2102,7 @@ static void check_max_size(struct inode *inode, loff_t endoff)
                ci->i_wanted_max_size = endoff;
                check = 1;
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (check)
                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
 }
@@ -2140,9 +2139,9 @@ retry:
  */
 void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
 {
-       spin_lock(&ci->vfs_inode.i_lock);
+       spin_lock(&ci->i_ceph_lock);
        __take_cap_refs(ci, caps);
-       spin_unlock(&ci->vfs_inode.i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -2160,7 +2159,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
        int last = 0, put = 0, flushsnaps = 0, wake = 0;
        struct ceph_cap_snap *capsnap;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (had & CEPH_CAP_PIN)
                --ci->i_pin_ref;
        if (had & CEPH_CAP_FILE_RD)
@@ -2193,7 +2192,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
                                }
                        }
                }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
             last ? " last" : "", put ? " put" : "");
@@ -2225,7 +2224,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
        int found = 0;
        struct ceph_cap_snap *capsnap = NULL;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ci->i_wrbuffer_ref -= nr;
        last = !ci->i_wrbuffer_ref;
 
@@ -2274,7 +2273,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
                }
        }
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (last) {
                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
@@ -2291,7 +2290,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
  * Handle a cap GRANT message from the MDS.  (Note that a GRANT may
  * actually be a revocation if it specifies a smaller cap set.)
  *
- * caller holds s_mutex and i_lock, we drop both.
+ * caller holds s_mutex and i_ceph_lock, we drop both.
  *
  * return value:
  *  0 - ok
@@ -2302,7 +2301,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
                             struct ceph_mds_session *session,
                             struct ceph_cap *cap,
                             struct ceph_buffer *xattr_buf)
-               __releases(inode->i_lock)
+               __releases(ci->i_ceph_lock)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        int mds = session->s_mds;
@@ -2453,7 +2452,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
        }
        BUG_ON(cap->issued & ~cap->implemented);
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (writeback)
                /*
                 * queue inode for writeback: we can't actually call
@@ -2483,7 +2482,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
                                 struct ceph_mds_caps *m,
                                 struct ceph_mds_session *session,
                                 struct ceph_cap *cap)
-       __releases(inode->i_lock)
+       __releases(ci->i_ceph_lock)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
@@ -2539,7 +2538,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
        wake_up_all(&ci->i_cap_wq);
 
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (drop)
                iput(inode);
 }
@@ -2562,7 +2561,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
        dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
             inode, ci, session->s_mds, follows);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
                if (capsnap->follows == follows) {
                        if (capsnap->flush_tid != flush_tid) {
@@ -2585,7 +2584,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
                             capsnap, capsnap->follows);
                }
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (drop)
                iput(inode);
 }
@@ -2598,7 +2597,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
 static void handle_cap_trunc(struct inode *inode,
                             struct ceph_mds_caps *trunc,
                             struct ceph_mds_session *session)
-       __releases(inode->i_lock)
+       __releases(ci->i_ceph_lock)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        int mds = session->s_mds;
@@ -2617,7 +2616,7 @@ static void handle_cap_trunc(struct inode *inode,
             inode, mds, seq, truncate_size, truncate_seq);
        queue_trunc = ceph_fill_file_size(inode, issued,
                                          truncate_seq, truncate_size, size);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (queue_trunc)
                ceph_queue_vmtruncate(inode);
@@ -2646,7 +2645,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
        dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
             inode, ci, mds, mseq);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        /* make sure we haven't seen a higher mseq */
        for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
@@ -2690,7 +2689,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
        }
        /* else, we already released it */
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -2745,9 +2744,9 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
        up_read(&mdsc->snap_rwsem);
 
        /* make sure we re-request max_size, if necessary */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ci->i_requested_max_size = 0;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -2762,6 +2761,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        struct ceph_mds_client *mdsc = session->s_mdsc;
        struct super_block *sb = mdsc->fsc->sb;
        struct inode *inode;
+       struct ceph_inode_info *ci;
        struct ceph_cap *cap;
        struct ceph_mds_caps *h;
        int mds = session->s_mds;
@@ -2815,6 +2815,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 
        /* lookup ino */
        inode = ceph_find_inode(sb, vino);
+       ci = ceph_inode(inode);
        dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
             vino.snap, inode);
        if (!inode) {
@@ -2844,16 +2845,16 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        }
 
        /* the rest require a cap */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ceph_inode(inode), mds);
        if (!cap) {
                dout(" no cap on %p ino %llx.%llx from mds%d\n",
                     inode, ceph_ino(inode), ceph_snap(inode), mds);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                goto flush_cap_releases;
        }
 
-       /* note that each of these drops i_lock for us */
+       /* note that each of these drops i_ceph_lock for us */
        switch (op) {
        case CEPH_CAP_OP_REVOKE:
        case CEPH_CAP_OP_GRANT:
@@ -2869,7 +2870,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
                break;
 
        default:
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
                       ceph_cap_op_name(op));
        }
@@ -2962,13 +2963,13 @@ void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
        struct inode *inode = &ci->vfs_inode;
        int last = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
             ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
        BUG_ON(ci->i_nr_by_mode[fmode] == 0);
        if (--ci->i_nr_by_mode[fmode] == 0)
                last++;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (last && ci->i_vino.snap == CEPH_NOSNAP)
                ceph_check_caps(ci, 0, NULL);
@@ -2991,7 +2992,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
        int used, dirty;
        int ret = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        used = __ceph_caps_used(ci);
        dirty = __ceph_caps_dirty(ci);
 
@@ -3046,7 +3047,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
                             inode, cap, ceph_cap_string(cap->issued));
                }
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return ret;
 }
 
@@ -3061,7 +3062,7 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
 
        /*
         * force an record for the directory caps if we have a dentry lease.
-        * this is racy (can't take i_lock and d_lock together), but it
+        * this is racy (can't take i_ceph_lock and d_lock together), but it
         * doesn't have to be perfect; the mds will revoke anything we don't
         * release.
         */
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index bca3948e9dbf6758746c896359471f4730407e24..98954003a8d313007386d4cfc214c5dba9296647 100644 (file)
@@ -281,18 +281,18 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
        }
 
        /* can we use the dcache? */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if ((filp->f_pos == 2 || fi->dentry) &&
            !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
            ceph_snap(inode) != CEPH_SNAPDIR &&
            ceph_dir_test_complete(inode) &&
            __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                err = __dcache_readdir(filp, dirent, filldir);
                if (err != -EAGAIN)
                        return err;
        } else {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
        if (fi->dentry) {
                err = note_last_dentry(fi, fi->dentry->d_name.name,
@@ -428,12 +428,12 @@ more:
         * were released during the whole readdir, and we should have
         * the complete dir contents in our cache.
         */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (ci->i_release_count == fi->dir_release_count) {
                ceph_dir_set_complete(inode);
                ci->i_max_offset = filp->f_pos;
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        dout("readdir %p filp %p done.\n", inode, filp);
        return 0;
@@ -607,7 +607,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                struct ceph_inode_info *ci = ceph_inode(dir);
                struct ceph_dentry_info *di = ceph_dentry(dentry);
 
-               spin_lock(&dir->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
                if (strncmp(dentry->d_name.name,
                            fsc->mount_options->snapdir_name,
@@ -615,13 +615,13 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                    !is_root_ceph_dentry(dir, dentry) &&
                    ceph_dir_test_complete(dir) &&
                    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
-                       spin_unlock(&dir->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                        dout(" dir %p complete, -ENOENT\n", dir);
                        d_add(dentry, NULL);
                        di->lease_shared_gen = ci->i_shared_gen;
                        return NULL;
                }
-               spin_unlock(&dir->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
 
        op = ceph_snap(dir) == CEPH_SNAPDIR ?
@@ -841,12 +841,12 @@ static int drop_caps_for_unlink(struct inode *inode)
        struct ceph_inode_info *ci = ceph_inode(inode);
        int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (inode->i_nlink == 1) {
                drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
                ci->i_ceph_flags |= CEPH_I_NODELAY;
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return drop;
 }
 
@@ -1015,10 +1015,10 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        int valid = 0;
 
-       spin_lock(&dir->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (ci->i_shared_gen == di->lease_shared_gen)
                valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
-       spin_unlock(&dir->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
             dir, (unsigned)ci->i_shared_gen, dentry,
             (unsigned)di->lease_shared_gen, valid);
@@ -1094,42 +1094,19 @@ static int ceph_snapdir_d_revalidate(struct dentry *dentry,
 /*
  * Set/clear/test dir complete flag on the dir's dentry.
  */
-static struct dentry * __d_find_any_alias(struct inode *inode)
-{
-       struct dentry *alias;
-
-       if (list_empty(&inode->i_dentry))
-               return NULL;
-       alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
-       return alias;
-}
-
 void ceph_dir_set_complete(struct inode *inode)
 {
-       struct dentry *dentry = __d_find_any_alias(inode);
-       
-       if (dentry && ceph_dentry(dentry)) {
-               dout(" marking %p (%p) complete\n", inode, dentry);
-               set_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
-       }
+       /* not yet implemented */
 }
 
 void ceph_dir_clear_complete(struct inode *inode)
 {
-       struct dentry *dentry = __d_find_any_alias(inode);
-
-       if (dentry && ceph_dentry(dentry)) {
-               dout(" marking %p (%p) NOT complete\n", inode, dentry);
-               clear_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
-       }
+       /* not yet implemented */
 }
 
 bool ceph_dir_test_complete(struct inode *inode)
 {
-       struct dentry *dentry = __d_find_any_alias(inode);
-
-       if (dentry && ceph_dentry(dentry))
-               return test_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags);
+       /* not yet implemented */
        return false;
 }
 
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ce549d31eeb7934634f4905e00861999cbe1b33f..ed72428d9c75c80a6744ccd6a996b83c1a20d333 100644 (file)
@@ -147,9 +147,9 @@ int ceph_open(struct inode *inode, struct file *file)
 
        /* trivially open snapdir */
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                __ceph_get_fmode(ci, fmode);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }
 
@@ -158,7 +158,7 @@ int ceph_open(struct inode *inode, struct file *file)
         * write) or any MDS (for read).  Update wanted set
         * asynchronously.
         */
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (__ceph_is_any_real_caps(ci) &&
            (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
                int mds_wanted = __ceph_caps_mds_wanted(ci);
@@ -168,7 +168,7 @@ int ceph_open(struct inode *inode, struct file *file)
                     inode, fmode, ceph_cap_string(wanted),
                     ceph_cap_string(issued));
                __ceph_get_fmode(ci, fmode);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
 
                /* adjust wanted? */
                if ((issued & wanted) != wanted &&
@@ -180,10 +180,10 @@ int ceph_open(struct inode *inode, struct file *file)
        } else if (ceph_snap(inode) != CEPH_NOSNAP &&
                   (ci->i_snap_caps & wanted) == wanted) {
                __ceph_get_fmode(ci, fmode);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                return ceph_init_file(inode, file, fmode);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
        req = prepare_open_request(inode->i_sb, flags, 0);
@@ -743,9 +743,9 @@ retry_snap:
                 */
                int dirty;
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                ceph_put_cap_refs(ci, got);
 
                ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
@@ -764,9 +764,9 @@ retry_snap:
 
        if (ret >= 0) {
                int dirty;
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                if (dirty)
                        __mark_inode_dirty(inode, dirty);
        }
@@ -797,7 +797,8 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
 
        mutex_lock(&inode->i_mutex);
        __ceph_do_pending_vmtruncate(inode);
-       if (origin != SEEK_CUR || origin != SEEK_SET) {
+
+       if (origin == SEEK_END || origin == SEEK_DATA || origin == SEEK_HOLE) {
                ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
                if (ret < 0) {
                        offset = ret;
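
The ceph_llseek hunk above fixes a tautology: "origin != SEEK_CUR || origin != SEEK_SET" is true for every origin, since no value equals both, so the size-refreshing getattr ran even for SEEK_CUR and SEEK_SET, which do not need it. The new test names the origins that actually depend on an up-to-date i_size. A standalone demonstration of the two conditions, restricted to SEEK_END because SEEK_DATA and SEEK_HOLE are Linux-specific constants:

#include <stdio.h>

int main(void)
{
        int origins[] = { SEEK_SET, SEEK_CUR, SEEK_END };
        int i;

        for (i = 0; i < 3; i++) {
                int o = origins[i];

                /* old test is 1 for every origin; new test only for SEEK_END */
                printf("origin %d: old test %d, new test %d\n",
                       o, o != SEEK_CUR || o != SEEK_SET, o == SEEK_END);
        }
        return 0;
}
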
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 116f36502f178f5ce0475bdfdd77073e9452e614..87fb132fb33012a9ca7839e9cca832eb472a1087 100644 (file)
@@ -297,6 +297,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
 
        dout("alloc_inode %p\n", &ci->vfs_inode);
 
+       spin_lock_init(&ci->i_ceph_lock);
+
        ci->i_version = 0;
        ci->i_time_warp_seq = 0;
        ci->i_ceph_flags = 0;
@@ -583,7 +585,7 @@ static int fill_inode(struct inode *inode,
                               iinfo->xattr_len);
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        /*
         * provided version will be odd if inode value is projected,
@@ -680,7 +682,7 @@ static int fill_inode(struct inode *inode,
                        char *sym;
 
                        BUG_ON(symlen != inode->i_size);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
 
                        err = -ENOMEM;
                        sym = kmalloc(symlen+1, GFP_NOFS);
@@ -689,7 +691,7 @@ static int fill_inode(struct inode *inode,
                        memcpy(sym, iinfo->symlink, symlen);
                        sym[symlen] = 0;
 
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        if (!ci->i_symlink)
                                ci->i_symlink = sym;
                        else
@@ -715,7 +717,7 @@ static int fill_inode(struct inode *inode,
        }
 
 no_change:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        /* queue truncate if we saw i_size decrease */
        if (queue_trunc)
@@ -750,13 +752,13 @@ no_change:
                                     info->cap.flags,
                                     caps_reservation);
                } else {
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        dout(" %p got snap_caps %s\n", inode,
                             ceph_cap_string(le32_to_cpu(info->cap.caps)));
                        ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
                        if (cap_fmode >= 0)
                                __ceph_get_fmode(ci, cap_fmode);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                }
        } else if (cap_fmode >= 0) {
                pr_warning("mds issued no caps on %llx.%llx\n",
@@ -849,19 +851,20 @@ static void ceph_set_dentry_offset(struct dentry *dn)
 {
        struct dentry *dir = dn->d_parent;
        struct inode *inode = dir->d_inode;
+       struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_dentry_info *di;
 
        BUG_ON(!inode);
 
        di = ceph_dentry(dn);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (!ceph_dir_test_complete(inode)) {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                return;
        }
        di->offset = ceph_inode(inode)->i_max_offset++;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        spin_lock(&dir->d_lock);
        spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
@@ -1308,7 +1311,7 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
        inode->i_size = size;
        inode->i_blocks = (size + (1 << 9) - 1) >> 9;
@@ -1318,7 +1321,7 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
            (ci->i_reported_size << 1) < ci->i_max_size)
                ret = 1;
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return ret;
 }
 
@@ -1376,20 +1379,20 @@ static void ceph_invalidate_work(struct work_struct *work)
        u32 orig_gen;
        int check = 0;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        dout("invalidate_pages %p gen %d revoking %d\n", inode,
             ci->i_rdcache_gen, ci->i_rdcache_revoking);
        if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
                /* nevermind! */
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                goto out;
        }
        orig_gen = ci->i_rdcache_gen;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        truncate_inode_pages(&inode->i_data, 0);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (orig_gen == ci->i_rdcache_gen &&
            orig_gen == ci->i_rdcache_revoking) {
                dout("invalidate_pages %p gen %d successful\n", inode,
@@ -1401,7 +1404,7 @@ static void ceph_invalidate_work(struct work_struct *work)
                     inode, orig_gen, ci->i_rdcache_gen,
                     ci->i_rdcache_revoking);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (check)
                ceph_check_caps(ci, 0, NULL);
@@ -1460,10 +1463,10 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
        int wrbuffer_refs, wake = 0;
 
 retry:
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        if (ci->i_truncate_pending == 0) {
                dout("__do_pending_vmtruncate %p none pending\n", inode);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                return;
        }
 
@@ -1474,7 +1477,7 @@ retry:
        if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
                dout("__do_pending_vmtruncate %p flushing snaps first\n",
                     inode);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                filemap_write_and_wait_range(&inode->i_data, 0,
                                             inode->i_sb->s_maxbytes);
                goto retry;
@@ -1484,15 +1487,15 @@ retry:
        wrbuffer_refs = ci->i_wrbuffer_ref;
        dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
             ci->i_truncate_pending, to);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        truncate_inode_pages(inode->i_mapping, to);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ci->i_truncate_pending--;
        if (ci->i_truncate_pending == 0)
                wake = 1;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (wrbuffer_refs == 0)
                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
@@ -1547,7 +1550,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
        if (IS_ERR(req))
                return PTR_ERR(req);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        issued = __ceph_caps_issued(ci, NULL);
        dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
 
@@ -1695,7 +1698,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
        }
 
        release &= issued;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (inode_dirty_flags)
                __mark_inode_dirty(inode, inode_dirty_flags);
@@ -1717,7 +1720,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
        __ceph_do_pending_vmtruncate(inode);
        return err;
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        ceph_mdsc_put_request(req);
        return err;
 }
index 5a14c29cbba6f82b00ca42e8ac0327dbc12fd8bb..790914a598dd5d68b8f40b851c2faff2e790e4af 100644 (file)
@@ -241,11 +241,11 @@ static long ceph_ioctl_lazyio(struct file *file)
        struct ceph_inode_info *ci = ceph_inode(inode);
 
        if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                ci->i_nr_by_mode[fi->fmode]--;
                fi->fmode |= CEPH_FILE_MODE_LAZY;
                ci->i_nr_by_mode[fi->fmode]++;
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                dout("ioctl_layzio: file %p marked lazy\n", file);
 
                ceph_check_caps(ci, 0, NULL);
index 264ab701154fead54aec35e0f45152e4709f201a..6203d805eb45061d20b5d8e08222f97aae6cd0a6 100644 (file)
@@ -732,21 +732,21 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
                }
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap = NULL;
        if (mode == USE_AUTH_MDS)
                cap = ci->i_auth_cap;
        if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
                cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
        if (!cap) {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                goto random;
        }
        mds = cap->session->s_mds;
        dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
             inode, ceph_vinop(inode), mds,
             cap == ci->i_auth_cap ? "auth " : "", cap);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return mds;
 
 random:
@@ -951,7 +951,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
 
        dout("removing cap %p, ci is %p, inode is %p\n",
             cap, ci, &ci->vfs_inode);
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        __ceph_remove_cap(cap);
        if (!__ceph_is_any_real_caps(ci)) {
                struct ceph_mds_client *mdsc =
@@ -984,7 +984,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
                }
                spin_unlock(&mdsc->cap_dirty_lock);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        while (drop--)
                iput(inode);
        return 0;
@@ -1015,10 +1015,10 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
 
        wake_up_all(&ci->i_cap_wq);
        if (arg) {
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                ci->i_wanted_max_size = 0;
                ci->i_requested_max_size = 0;
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
        }
        return 0;
 }
@@ -1151,7 +1151,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
        if (session->s_trim_caps <= 0)
                return -1;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        mine = cap->issued | cap->implemented;
        used = __ceph_caps_used(ci);
        oissued = __ceph_caps_issued_other(ci, cap);
@@ -1170,7 +1170,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
                __ceph_remove_cap(cap);
        } else {
                /* try to drop referring dentries */
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                d_prune_aliases(inode);
                dout("trim_caps_cb %p cap %p  pruned, count now %d\n",
                     inode, cap, atomic_read(&inode->i_count));
@@ -1178,7 +1178,7 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
        }
 
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return 0;
 }
 
@@ -1296,7 +1296,7 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
                                           i_flushing_item);
                        struct inode *inode = &ci->vfs_inode;
 
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        if (ci->i_cap_flush_seq <= want_flush_seq) {
                                dout("check_cap_flush still flushing %p "
                                     "seq %lld <= %lld to mds%d\n", inode,
@@ -1304,7 +1304,7 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
                                     session->s_mds);
                                ret = 0;
                        }
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                }
                mutex_unlock(&session->s_mutex);
                ceph_put_mds_session(session);
@@ -1495,6 +1495,7 @@ retry:
                             pos, temp);
                } else if (stop_on_nosnap && inode &&
                           ceph_snap(inode) == CEPH_NOSNAP) {
+                       spin_unlock(&temp->d_lock);
                        break;
                } else {
                        pos -= temp->d_name.len;
@@ -2011,10 +2012,10 @@ void ceph_invalidate_dir_request(struct ceph_mds_request *req)
        struct ceph_inode_info *ci = ceph_inode(inode);
 
        dout("invalidate_dir_request %p (D_COMPLETE, lease(s))\n", inode);
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ceph_dir_clear_complete(inode);
        ci->i_release_count++;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (req->r_dentry)
                ceph_invalidate_dentry_lease(req->r_dentry);
@@ -2422,7 +2423,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
        if (err)
                goto out_free;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        cap->seq = 0;        /* reset cap seq */
        cap->issue_seq = 0;  /* and issue_seq */
 
@@ -2445,7 +2446,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
                rec.v1.pathbase = cpu_to_le64(pathbase);
                reclen = sizeof(rec.v1);
        }
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 
        if (recon_state->flock) {
                int num_fcntl_locks, num_flock_locks;
index 4bb239921dbdf98a945b963d2d05ede9ff434736..a50ca0e39475794018c2350570547bff4f6a7df8 100644 (file)
@@ -20,7 +20,7 @@
  *
  *         mdsc->snap_rwsem
  *
- *         inode->i_lock
+ *         ci->i_ceph_lock
  *                 mdsc->snap_flush_lock
  *                 mdsc->cap_delay_lock
  *
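
The hunk above only renames the second level of the documented lock hierarchy (inode->i_lock becomes ci->i_ceph_lock); the ordering itself is unchanged. As a reading aid, here is a minimal user-space sketch of that kind of fixed acquisition order, with pthread locks standing in for the kernel rwsem and spinlocks; the lock and function names are illustrative only, not ceph code (build with -pthread).

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel locks named in the comment above. */
static pthread_rwlock_t snap_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t i_ceph_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t snap_flush_lock = PTHREAD_MUTEX_INITIALIZER;

static void touch_snap_state(void)
{
	pthread_rwlock_rdlock(&snap_rwsem);      /* outermost */
	pthread_mutex_lock(&i_ceph_lock);        /* then the per-inode lock */
	pthread_mutex_lock(&snap_flush_lock);    /* innermost */

	/* ... every code path touches the shared state only here ... */

	pthread_mutex_unlock(&snap_flush_lock);
	pthread_mutex_unlock(&i_ceph_lock);
	pthread_rwlock_unlock(&snap_rwsem);
}

int main(void)
{
	touch_snap_state();
	puts("locks taken and released in the documented order");
	return 0;
}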
index e2643719133323a07a8e69f0f631c306e80f7532..a559c80f127a04353a488181029744a009165f09 100644 (file)
@@ -446,7 +446,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
                return;
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        used = __ceph_caps_used(ci);
        dirty = __ceph_caps_dirty(ci);
 
@@ -528,7 +528,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
                kfree(capsnap);
        }
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 /*
@@ -537,7 +537,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
  *
  * If capsnap can now be flushed, add to snap_flush list, and return 1.
  *
- * Caller must hold i_lock.
+ * Caller must hold i_ceph_lock.
  */
 int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
                            struct ceph_cap_snap *capsnap)
@@ -739,9 +739,9 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
                inode = &ci->vfs_inode;
                ihold(inode);
                spin_unlock(&mdsc->snap_flush_lock);
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                __ceph_flush_snaps(ci, &session, 0);
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                iput(inode);
                spin_lock(&mdsc->snap_flush_lock);
        }
@@ -847,7 +847,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                                continue;
                        ci = ceph_inode(inode);
 
-                       spin_lock(&inode->i_lock);
+                       spin_lock(&ci->i_ceph_lock);
                        if (!ci->i_snap_realm)
                                goto skip_inode;
                        /*
@@ -876,7 +876,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                        oldrealm = ci->i_snap_realm;
                        ci->i_snap_realm = realm;
                        spin_unlock(&realm->inodes_with_caps_lock);
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
 
                        ceph_get_snap_realm(mdsc, realm);
                        ceph_put_snap_realm(mdsc, oldrealm);
@@ -885,7 +885,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                        continue;
 
 skip_inode:
-                       spin_unlock(&inode->i_lock);
+                       spin_unlock(&ci->i_ceph_lock);
                        iput(inode);
                }
 
index 8dc73a594a90c11e31f20ef925104ea10e5b0b15..b48f15f101a0ed61127bc920b82afa3d61fcdc0b 100644 (file)
@@ -383,7 +383,7 @@ static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
        if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
                seq_printf(m, ",rsize=%d", fsopt->rsize);
        if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
-               seq_printf(m, ",rasize=%d", fsopt->rsize);
+               seq_printf(m, ",rasize=%d", fsopt->rasize);
        if (fsopt->congestion_kb != default_congestion_kb())
                seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
        if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
index 01bf189e08a91387b6f4686b9999fe03ccf8aa32..edcbf3774a56460d377b31a9d51ea543a114a839 100644 (file)
@@ -220,7 +220,7 @@ struct ceph_dentry_info {
  * The locking for D_COMPLETE is a bit odd:
  *  - we can clear it at almost any time (see ceph_d_prune)
  *  - it is only meaningful if:
- *    - we hold dir inode i_lock
+ *    - we hold dir inode i_ceph_lock
  *    - we hold dir FILE_SHARED caps
  *    - the dentry D_COMPLETE is set
  */
@@ -250,6 +250,8 @@ struct ceph_inode_xattrs_info {
 struct ceph_inode_info {
        struct ceph_vino i_vino;   /* ceph ino + snap */
 
+       spinlock_t i_ceph_lock;
+
        u64 i_version;
        u32 i_time_warp_seq;
 
@@ -271,7 +273,7 @@ struct ceph_inode_info {
 
        struct ceph_inode_xattrs_info i_xattrs;
 
-       /* capabilities.  protected _both_ by i_lock and cap->session's
+       /* capabilities.  protected _both_ by i_ceph_lock and cap->session's
         * s_mutex. */
        struct rb_root i_caps;           /* cap list */
        struct ceph_cap *i_auth_cap;     /* authoritative cap, if any */
@@ -437,18 +439,18 @@ static inline void ceph_i_clear(struct inode *inode, unsigned mask)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ci->i_ceph_flags &= ~mask;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 static inline void ceph_i_set(struct inode *inode, unsigned mask)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        ci->i_ceph_flags |= mask;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
 }
 
 static inline bool ceph_i_test(struct inode *inode, unsigned mask)
@@ -456,9 +458,9 @@ static inline bool ceph_i_test(struct inode *inode, unsigned mask)
        struct ceph_inode_info *ci = ceph_inode(inode);
        bool r;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        r = (ci->i_ceph_flags & mask) == mask;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return r;
 }
 
@@ -508,9 +510,9 @@ extern int __ceph_caps_issued_other(struct ceph_inode_info *ci,
 static inline int ceph_caps_issued(struct ceph_inode_info *ci)
 {
        int issued;
-       spin_lock(&ci->vfs_inode.i_lock);
+       spin_lock(&ci->i_ceph_lock);
        issued = __ceph_caps_issued(ci, NULL);
-       spin_unlock(&ci->vfs_inode.i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return issued;
 }
 
@@ -518,9 +520,9 @@ static inline int ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask,
                                        int touch)
 {
        int r;
-       spin_lock(&ci->vfs_inode.i_lock);
+       spin_lock(&ci->i_ceph_lock);
        r = __ceph_caps_issued_mask(ci, mask, touch);
-       spin_unlock(&ci->vfs_inode.i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return r;
 }
 
@@ -743,10 +745,9 @@ extern int ceph_add_cap(struct inode *inode,
 extern void __ceph_remove_cap(struct ceph_cap *cap);
 static inline void ceph_remove_cap(struct ceph_cap *cap)
 {
-       struct inode *inode = &cap->ci->vfs_inode;
-       spin_lock(&inode->i_lock);
+       spin_lock(&cap->ci->i_ceph_lock);
        __ceph_remove_cap(cap);
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&cap->ci->i_ceph_lock);
 }
 extern void ceph_put_cap(struct ceph_mds_client *mdsc,
                         struct ceph_cap *cap);
index 96c6739a02804f081adde4e2972523e1bf34911c..a5e36e4488a7d6e9f6dc0b9a5b9d440b4229e9c1 100644 (file)
@@ -343,8 +343,8 @@ void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
 }
 
 static int __build_xattrs(struct inode *inode)
-       __releases(inode->i_lock)
-       __acquires(inode->i_lock)
+       __releases(ci->i_ceph_lock)
+       __acquires(ci->i_ceph_lock)
 {
        u32 namelen;
        u32 numattr = 0;
@@ -372,7 +372,7 @@ start:
                end = p + ci->i_xattrs.blob->vec.iov_len;
                ceph_decode_32_safe(&p, end, numattr, bad);
                xattr_version = ci->i_xattrs.version;
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
 
                xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),
                                 GFP_NOFS);
@@ -387,7 +387,7 @@ start:
                                goto bad_lock;
                }
 
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                if (ci->i_xattrs.version != xattr_version) {
                        /* lost a race, retry */
                        for (i = 0; i < numattr; i++)
@@ -418,7 +418,7 @@ start:
 
        return err;
 bad_lock:
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 bad:
        if (xattrs) {
                for (i = 0; i < numattr; i++)
@@ -512,7 +512,7 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
        if (vxattrs)
                vxattr = ceph_match_vxattr(vxattrs, name);
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
             ci->i_xattrs.version, ci->i_xattrs.index_version);
 
@@ -520,14 +520,14 @@ ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
            (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
                goto get_xattr;
        } else {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                /* get xattrs from mds (if we don't already have them) */
                err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
                if (err)
                        return err;
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        if (vxattr && vxattr->readonly) {
                err = vxattr->getxattr_cb(ci, value, size);
@@ -558,7 +558,7 @@ get_xattr:
        memcpy(value, xattr->val, xattr->val_len);
 
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return err;
 }
 
@@ -573,7 +573,7 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
        u32 len;
        int i;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
             ci->i_xattrs.version, ci->i_xattrs.index_version);
 
@@ -581,13 +581,13 @@ ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
            (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
                goto list_xattr;
        } else {
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
                if (err)
                        return err;
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 
        err = __build_xattrs(inode);
        if (err < 0)
@@ -619,7 +619,7 @@ list_xattr:
                }
 
 out:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        return err;
 }
 
@@ -739,7 +739,7 @@ int ceph_setxattr(struct dentry *dentry, const char *name,
        if (!xattr)
                goto out;
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
 retry:
        issued = __ceph_caps_issued(ci, NULL);
        if (!(issued & CEPH_CAP_XATTR_EXCL))
@@ -752,12 +752,12 @@ retry:
            required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
                struct ceph_buffer *blob = NULL;
 
-               spin_unlock(&inode->i_lock);
+               spin_unlock(&ci->i_ceph_lock);
                dout(" preaallocating new blob size=%d\n", required_blob_size);
                blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
                if (!blob)
                        goto out;
-               spin_lock(&inode->i_lock);
+               spin_lock(&ci->i_ceph_lock);
                if (ci->i_xattrs.prealloc_blob)
                        ceph_buffer_put(ci->i_xattrs.prealloc_blob);
                ci->i_xattrs.prealloc_blob = blob;
@@ -770,13 +770,13 @@ retry:
        dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
        ci->i_xattrs.dirty = true;
        inode->i_ctime = CURRENT_TIME;
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (dirty)
                __mark_inode_dirty(inode, dirty);
        return err;
 
 do_sync:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        err = ceph_sync_setxattr(dentry, name, value, size, flags);
 out:
        kfree(newname);
@@ -833,7 +833,7 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
                        return -EOPNOTSUPP;
        }
 
-       spin_lock(&inode->i_lock);
+       spin_lock(&ci->i_ceph_lock);
        __build_xattrs(inode);
        issued = __ceph_caps_issued(ci, NULL);
        dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
@@ -846,12 +846,12 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
        ci->i_xattrs.dirty = true;
        inode->i_ctime = CURRENT_TIME;
 
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        if (dirty)
                __mark_inode_dirty(inode, dirty);
        return err;
 do_sync:
-       spin_unlock(&inode->i_lock);
+       spin_unlock(&ci->i_ceph_lock);
        err = ceph_send_removexattr(dentry, name);
        return err;
 }
index d6a972df033800eb0c00f6116b3c4b6bb309bd87..8cd4b52d42174ee0a4d524d3176b59102751c61e 100644 (file)
@@ -441,6 +441,8 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
        smb_msg.msg_controllen = 0;
 
        for (total_read = 0; to_read; total_read += length, to_read -= length) {
+               try_to_freeze();
+
                if (server_unresponsive(server)) {
                        total_read = -EAGAIN;
                        break;
index cf0b1539b321acf1cdd69e4db0f590d8f83e9293..4dd9283885e745bafdd7dce81e5456f98c5635c9 100644 (file)
@@ -702,6 +702,13 @@ cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
                                         lock->type, lock->netfid, conf_lock);
 }
 
+/*
+ * Check if there is another lock that prevents us from setting the lock
+ * (mandatory style). If such a lock exists, update the flock structure with
+ * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
+ * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
+ * contact the server, or 1 otherwise.
+ */
 static int
 cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
               __u8 type, __u16 netfid, struct file_lock *flock)
@@ -739,6 +746,12 @@ cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
        mutex_unlock(&cinode->lock_mutex);
 }
 
+/*
+ * Set the byte-range lock (mandatory style). Returns:
+ * 1) 0, if we set the lock and don't need to contact the server;
+ * 2) 1, if no locks prevent us but we need to ask the server;
+ * 3) -EACCES, if there is a conflicting lock and wait is false.
+ */
 static int
 cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
                 bool wait)
@@ -778,6 +791,13 @@ try_again:
        return rc;
 }
 
+/*
+ * Check if there is another lock that prevents us from setting the lock
+ * (posix style). If such a lock exists, update the flock structure with
+ * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
+ * brlocks, or leave it unchanged if we can't. Returns 0 if we don't need to
+ * contact the server, or 1 otherwise.
+ */
 static int
 cifs_posix_lock_test(struct file *file, struct file_lock *flock)
 {
@@ -800,6 +820,12 @@ cifs_posix_lock_test(struct file *file, struct file_lock *flock)
        return rc;
 }
 
+/*
+ * Set the byte-range lock (posix style). Returns:
+ * 1) 0, if we set the lock and don't need to contact the server;
+ * 2) 1, if we need to ask the server;
+ * 3) <0, if an error occurred while setting the lock.
+ */
 static int
 cifs_posix_lock_set(struct file *file, struct file_lock *flock)
 {
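
The comments added above document a small calling convention for the cifs byte-range lock helpers: 0 means the request was resolved locally, 1 means the server still has to be asked, and a negative errno reports failure. The sketch below is a hypothetical, stand-alone illustration of dispatching on that convention; try_set_lock_locally() and its parameters are made up and are not part of the cifs API.

#include <errno.h>
#include <stdio.h>

/*
 * Hypothetical helper following the convention documented above:
 *   0  - handled locally, nothing to send to the server
 *   1  - no local conflict, but the server must be asked
 *   <0 - error (e.g. -EACCES: a conflicting lock exists and we must not wait)
 */
static int try_set_lock_locally(int have_conflict, int can_cache, int wait)
{
	if (have_conflict)
		return wait ? 1 : -EACCES;
	return can_cache ? 0 : 1;
}

int main(void)
{
	int rc = try_set_lock_locally(0, 1, 0);

	if (rc < 0)
		fprintf(stderr, "lock failed: %d\n", rc);
	else if (rc == 0)
		printf("lock granted from the local cache\n");
	else
		printf("lock must be requested from the server\n");
	return 0;
}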
index 5de03ec20144449c2dea66441b1e66cc577f4a48..a090bbe6ee29e196018867c9f5e4da3efe9d82b9 100644 (file)
@@ -554,7 +554,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
                                 rc);
                        return rc;
                }
-               cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
+               /* FindFirst/Next set last_entry to NULL on malformed reply */
+               if (cifsFile->srch_inf.last_entry)
+                       cifs_save_resume_key(cifsFile->srch_inf.last_entry,
+                                               cifsFile);
        }
 
        while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
@@ -562,7 +565,10 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
                cFYI(1, "calling findnext2");
                rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
                                  &cifsFile->srch_inf);
-               cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
+               /* FindFirst/Next set last_entry to NULL on malformed reply */
+               if (cifsFile->srch_inf.last_entry)
+                       cifs_save_resume_key(cifsFile->srch_inf.last_entry,
+                                               cifsFile);
                if (rc)
                        return -ENOENT;
        }
index 7cacba12b8f114468ef56dab7d58fc5678d0c878..80d850881938d0c0950addc4d97ae4855dadfa4a 100644 (file)
@@ -209,7 +209,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
 {
        int rc;
        int len;
-       __u16 wpwd[129];
+       __le16 wpwd[129];
 
        /* Password cannot be longer than 128 characters */
        if (passwd) /* Password must be converted to NT unicode */
@@ -219,8 +219,8 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
                *wpwd = 0; /* Ensure string is null terminated */
        }
 
-       rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__u16));
-       memset(wpwd, 0, 129 * sizeof(__u16));
+       rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
+       memset(wpwd, 0, 129 * sizeof(__le16));
 
        return rc;
 }
index ca418aaf635254dd85722e791510f330c1a5917d..9d8715c45f2574ae473634feee7b0a07ca607355 100644 (file)
@@ -292,7 +292,7 @@ int __init configfs_inode_init(void)
        return bdi_init(&configfs_backing_dev_info);
 }
 
-void __exit configfs_inode_exit(void)
+void configfs_inode_exit(void)
 {
        bdi_destroy(&configfs_backing_dev_info);
 }
index ecc62178beda98d3975b2796714ab6d3d6b1968e..276e15cafd58e8b5171b8da6a8e503c99631cabd 100644 (file)
@@ -143,28 +143,26 @@ static int __init configfs_init(void)
                goto out;
 
        config_kobj = kobject_create_and_add("config", kernel_kobj);
-       if (!config_kobj) {
-               kmem_cache_destroy(configfs_dir_cachep);
-               configfs_dir_cachep = NULL;
-               goto out;
-       }
+       if (!config_kobj)
+               goto out2;
+
+       err = configfs_inode_init();
+       if (err)
+               goto out3;
 
        err = register_filesystem(&configfs_fs_type);
-       if (err) {
-               printk(KERN_ERR "configfs: Unable to register filesystem!\n");
-               kobject_put(config_kobj);
-               kmem_cache_destroy(configfs_dir_cachep);
-               configfs_dir_cachep = NULL;
-               goto out;
-       }
+       if (err)
+               goto out4;
 
-       err = configfs_inode_init();
-       if (err) {
-               unregister_filesystem(&configfs_fs_type);
-               kobject_put(config_kobj);
-               kmem_cache_destroy(configfs_dir_cachep);
-               configfs_dir_cachep = NULL;
-       }
+       return 0;
+out4:
+       printk(KERN_ERR "configfs: Unable to register filesystem!\n");
+       configfs_inode_exit();
+out3:
+       kobject_put(config_kobj);
+out2:
+       kmem_cache_destroy(configfs_dir_cachep);
+       configfs_dir_cachep = NULL;
 out:
        return err;
 }
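
The configfs_init() rework above replaces the repeated clean-up copies with the usual goto-based unwind: each successfully completed step gets a label, registration of the filesystem is deferred until everything it depends on exists, and errors unwind strictly in reverse order. A self-contained sketch of the idiom, with invented step names, follows.

#include <stdio.h>

/* Invented init/teardown steps standing in for cache creation, kobject
 * setup, backing-dev init and filesystem registration. */
static int step_a(void) { return 0; }
static void undo_a(void) { }
static int step_b(void) { return 0; }
static void undo_b(void) { }
static int step_c(void) { return 0; }
static void undo_c(void) { }
static int step_d(void) { return 0; }

static int subsystem_init(void)
{
	int err;

	err = step_a();
	if (err)
		goto out;
	err = step_b();
	if (err)
		goto out_a;
	err = step_c();
	if (err)
		goto out_b;
	err = step_d();         /* the externally visible step goes last */
	if (err)
		goto out_c;
	return 0;

out_c:                          /* unwind strictly in reverse order */
	undo_c();
out_b:
	undo_b();
out_a:
	undo_a();
out:
	return err;
}

int main(void)
{
	printf("init: %d\n", subsystem_init());
	return 0;
}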
index 10ba92def3f675985871f0fc72e9c6d7126357a3..89509b5a090e27320e45b9c0c2f5480e082b1a37 100644 (file)
@@ -2439,16 +2439,14 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
 /**
  * prepend_path - Prepend path string to a buffer
  * @path: the dentry/vfsmount to report
- * @root: root vfsmnt/dentry (may be modified by this function)
+ * @root: root vfsmnt/dentry
  * @buffer: pointer to the end of the buffer
  * @buflen: pointer to buffer length
  *
  * Caller holds the rename_lock.
- *
- * If path is not reachable from the supplied root, then the value of
- * root is changed (without modifying refcounts).
  */
-static int prepend_path(const struct path *path, struct path *root,
+static int prepend_path(const struct path *path,
+                       const struct path *root,
                        char **buffer, int *buflen)
 {
        struct dentry *dentry = path->dentry;
@@ -2483,10 +2481,10 @@ static int prepend_path(const struct path *path, struct path *root,
                dentry = parent;
        }
 
-out:
        if (!error && !slash)
                error = prepend(buffer, buflen, "/", 1);
 
+out:
        br_read_unlock(vfsmount_lock);
        return error;
 
@@ -2500,15 +2498,17 @@ global_root:
                WARN(1, "Root dentry has weird name <%.*s>\n",
                     (int) dentry->d_name.len, dentry->d_name.name);
        }
-       root->mnt = vfsmnt;
-       root->dentry = dentry;
+       if (!slash)
+               error = prepend(buffer, buflen, "/", 1);
+       if (!error)
+               error = vfsmnt->mnt_ns ? 1 : 2;
        goto out;
 }
 
 /**
  * __d_path - return the path of a dentry
  * @path: the dentry/vfsmount to report
- * @root: root vfsmnt/dentry (may be modified by this function)
+ * @root: root vfsmnt/dentry
  * @buf: buffer to return value in
  * @buflen: buffer length
  *
@@ -2519,10 +2519,10 @@ global_root:
  *
  * "buflen" should be positive.
  *
- * If path is not reachable from the supplied root, then the value of
- * root is changed (without modifying refcounts).
+ * If the path is not reachable from the supplied root, return %NULL.
  */
-char *__d_path(const struct path *path, struct path *root,
+char *__d_path(const struct path *path,
+              const struct path *root,
               char *buf, int buflen)
 {
        char *res = buf + buflen;
@@ -2533,7 +2533,28 @@ char *__d_path(const struct path *path, struct path *root,
        error = prepend_path(path, root, &res, &buflen);
        write_sequnlock(&rename_lock);
 
-       if (error)
+       if (error < 0)
+               return ERR_PTR(error);
+       if (error > 0)
+               return NULL;
+       return res;
+}
+
+char *d_absolute_path(const struct path *path,
+              char *buf, int buflen)
+{
+       struct path root = {};
+       char *res = buf + buflen;
+       int error;
+
+       prepend(&res, &buflen, "\0", 1);
+       write_seqlock(&rename_lock);
+       error = prepend_path(path, &root, &res, &buflen);
+       write_sequnlock(&rename_lock);
+
+       if (error > 1)
+               error = -EINVAL;
+       if (error < 0)
                return ERR_PTR(error);
        return res;
 }
@@ -2541,8 +2562,9 @@ char *__d_path(const struct path *path, struct path *root,
 /*
  * same as __d_path but appends "(deleted)" for unlinked files.
  */
-static int path_with_deleted(const struct path *path, struct path *root,
-                                char **buf, int *buflen)
+static int path_with_deleted(const struct path *path,
+                            const struct path *root,
+                            char **buf, int *buflen)
 {
        prepend(buf, buflen, "\0", 1);
        if (d_unlinked(path->dentry)) {
@@ -2579,7 +2601,6 @@ char *d_path(const struct path *path, char *buf, int buflen)
 {
        char *res = buf + buflen;
        struct path root;
-       struct path tmp;
        int error;
 
        /*
@@ -2594,9 +2615,8 @@ char *d_path(const struct path *path, char *buf, int buflen)
 
        get_fs_root(current->fs, &root);
        write_seqlock(&rename_lock);
-       tmp = root;
-       error = path_with_deleted(path, &tmp, &res, &buflen);
-       if (error)
+       error = path_with_deleted(path, &root, &res, &buflen);
+       if (error < 0)
                res = ERR_PTR(error);
        write_sequnlock(&rename_lock);
        path_put(&root);
@@ -2617,7 +2637,6 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
 {
        char *res = buf + buflen;
        struct path root;
-       struct path tmp;
        int error;
 
        if (path->dentry->d_op && path->dentry->d_op->d_dname)
@@ -2625,9 +2644,8 @@ char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
 
        get_fs_root(current->fs, &root);
        write_seqlock(&rename_lock);
-       tmp = root;
-       error = path_with_deleted(path, &tmp, &res, &buflen);
-       if (!error && !path_equal(&tmp, &root))
+       error = path_with_deleted(path, &root, &res, &buflen);
+       if (error > 0)
                error = prepend_unreachable(&res, &buflen);
        write_sequnlock(&rename_lock);
        path_put(&root);
@@ -2758,19 +2776,18 @@ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
        write_seqlock(&rename_lock);
        if (!d_unlinked(pwd.dentry)) {
                unsigned long len;
-               struct path tmp = root;
                char *cwd = page + PAGE_SIZE;
                int buflen = PAGE_SIZE;
 
                prepend(&cwd, &buflen, "\0", 1);
-               error = prepend_path(&pwd, &tmp, &cwd, &buflen);
+               error = prepend_path(&pwd, &root, &cwd, &buflen);
                write_sequnlock(&rename_lock);
 
-               if (error)
+               if (error < 0)
                        goto out;
 
                /* Unreachable from current root */
-               if (!path_equal(&tmp, &root)) {
+               if (error > 0) {
                        error = prepend_unreachable(&cwd, &buflen);
                        if (error)
                                goto out;
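
As the kernel-doc above now states, prepend_path() no longer rewrites the caller's root; it reports reachability through its return value instead: a negative errno on failure, 0 when the path is reachable from the supplied root, and a positive value when it is not, which callers such as getcwd() or d_path_with_unreachable() translate into "(unreachable)" (and __d_path() into NULL). Below is a hypothetical, stand-alone illustration of that tri-state convention; build_path() and its arguments are made up.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/*
 * Hypothetical path builder following the convention described above:
 * <0 on error, 0 when @path is reachable from @root, >0 when it is not.
 */
static int build_path(const char *path, const char *root,
		      char *buf, size_t buflen)
{
	if (strlen(path) + 1 > buflen)
		return -ENAMETOOLONG;
	strcpy(buf, path);
	if (strncmp(path, root, strlen(root)) != 0)
		return 1;                       /* outside @root */
	return 0;
}

int main(void)
{
	char buf[64];
	int rc = build_path("/proc/self", "/var/chroot", buf, sizeof(buf));

	if (rc < 0)
		fprintf(stderr, "error %d\n", rc);
	else if (rc > 0)
		printf("%s (unreachable)\n", buf);  /* getcwd-style handling */
	else
		printf("%s\n", buf);
	return 0;
}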
index 61fa9e1614afd1922bae4cf5ce0d26dfdbab6b25..607b1557d292d1b24d5a605d28565de17f36bf13 100644 (file)
@@ -1095,7 +1095,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
                  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
                  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
 
-       neh->eh_depth = cpu_to_le16(neh->eh_depth + 1);
+       neh->eh_depth = cpu_to_le16(le16_to_cpu(neh->eh_depth) + 1);
        ext4_mark_inode_dirty(handle, inode);
 out:
        brelse(bh);
@@ -2955,7 +2955,6 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
        /* Pre-conditions */
        BUG_ON(!ext4_ext_is_uninitialized(ex));
        BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
-       BUG_ON(map->m_lblk + map->m_len > ee_block + ee_len);
 
        /*
         * Attempt to transfer newly initialized blocks from the currently
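
The one-line extents.c fix above converts the on-disk __le16 eh_depth to CPU byte order before incrementing it and converts it back afterwards; adding 1 to the raw little-endian value only happens to work on little-endian machines. The decode/modify/encode pattern can be sketched in portable user-space C; get_le16()/put_le16() below are illustrative stand-ins for le16_to_cpu()/cpu_to_le16(), not ext4 code.

#include <stdio.h>

/* Hand-rolled helpers: the on-disk field is kept as two explicit
 * little-endian bytes, independent of the host byte order. */
static unsigned int get_le16(const unsigned char *p)
{
	return (unsigned int)p[0] | ((unsigned int)p[1] << 8);
}

static void put_le16(unsigned char *p, unsigned int v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
}

int main(void)
{
	unsigned char eh_depth[2];

	put_le16(eh_depth, 255);
	/* decode, modify, re-encode -- never arithmetic on the raw bytes */
	put_le16(eh_depth, get_le16(eh_depth) + 1);
	printf("depth is now %u\n", get_le16(eh_depth));
	return 0;
}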
index 848f436df29f6ffec6e8487549764d98c63d5b6b..92655fd8965737bac9aa002f173ed607ecdcfa8a 100644 (file)
@@ -1339,8 +1339,11 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
                                        clear_buffer_unwritten(bh);
                                }
 
-                               /* skip page if block allocation undone */
-                               if (buffer_delay(bh) || buffer_unwritten(bh))
+                               /*
+                                * skip page if block allocation undone and
+                                * block is dirty
+                                */
+                               if (ext4_bh_delay_or_unwritten(NULL, bh))
                                        skip_page = 1;
                                bh = bh->b_this_page;
                                block_start += bh->b_size;
@@ -2387,7 +2390,6 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
        pgoff_t index;
        struct inode *inode = mapping->host;
        handle_t *handle;
-       loff_t page_len;
 
        index = pos >> PAGE_CACHE_SHIFT;
 
@@ -2434,13 +2436,6 @@ retry:
                 */
                if (pos + len > inode->i_size)
                        ext4_truncate_failed_write(inode);
-       } else {
-               page_len = pos & (PAGE_CACHE_SIZE - 1);
-               if (page_len > 0) {
-                       ret = ext4_discard_partial_page_buffers_no_lock(handle,
-                               inode, page, pos - page_len, page_len,
-                               EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
-               }
        }
 
        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -2483,7 +2478,6 @@ static int ext4_da_write_end(struct file *file,
        loff_t new_i_size;
        unsigned long start, end;
        int write_mode = (int)(unsigned long)fsdata;
-       loff_t page_len;
 
        if (write_mode == FALL_BACK_TO_NONDELALLOC) {
                if (ext4_should_order_data(inode)) {
@@ -2508,7 +2502,7 @@ static int ext4_da_write_end(struct file *file,
         */
 
        new_i_size = pos + copied;
-       if (new_i_size > EXT4_I(inode)->i_disksize) {
+       if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
                if (ext4_da_should_update_i_disksize(page, end)) {
                        down_write(&EXT4_I(inode)->i_data_sem);
                        if (new_i_size > EXT4_I(inode)->i_disksize) {
@@ -2532,16 +2526,6 @@ static int ext4_da_write_end(struct file *file,
        }
        ret2 = generic_write_end(file, mapping, pos, len, copied,
                                                        page, fsdata);
-
-       page_len = PAGE_CACHE_SIZE -
-                       ((pos + copied - 1) & (PAGE_CACHE_SIZE - 1));
-
-       if (page_len > 0) {
-               ret = ext4_discard_partial_page_buffers_no_lock(handle,
-                       inode, page, pos + copied - 1, page_len,
-                       EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
-       }
-
        copied = ret2;
        if (ret2 < 0)
                ret = ret2;
@@ -2781,10 +2765,11 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
                  iocb->private, io_end->inode->i_ino, iocb, offset,
                  size);
 
+       iocb->private = NULL;
+
        /* if not aio dio with unwritten extents, just free io and return */
        if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
                ext4_free_io_end(io_end);
-               iocb->private = NULL;
 out:
                if (is_async)
                        aio_complete(iocb, ret, 0);
@@ -2807,7 +2792,6 @@ out:
        spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
 
        /* queue the work to convert unwritten extents to written */
-       iocb->private = NULL;
        queue_work(wq, &io_end->work);
 
        /* XXX: probably should move into the real I/O completion handler */
@@ -3203,26 +3187,8 @@ int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
 
        iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
 
-       if (!page_has_buffers(page)) {
-               /*
-                * If the range to be discarded covers a partial block
-                * we need to get the page buffers.  This is because
-                * partial blocks cannot be released and the page needs
-                * to be updated with the contents of the block before
-                * we write the zeros on top of it.
-                */
-               if ((from & (blocksize - 1)) ||
-                   ((from + length) & (blocksize - 1))) {
-                       create_empty_buffers(page, blocksize, 0);
-               } else {
-                       /*
-                        * If there are no partial blocks,
-                        * there is nothing to update,
-                        * so we can return now
-                        */
-                       return 0;
-               }
-       }
+       if (!page_has_buffers(page))
+               create_empty_buffers(page, blocksize, 0);
 
        /* Find the buffer that contains "offset" */
        bh = page_buffers(page);
index 7ce1d0b19c94576892d0d80f016f7db44110ccb7..7e106c810c62bb18435d9a0fcb6d5066a3813fad 100644 (file)
@@ -385,6 +385,18 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
 
                block_end = block_start + blocksize;
                if (block_start >= len) {
+                       /*
+                        * Comments copied from block_write_full_page_endio:
+                        *
+                        * The page straddles i_size.  It must be zeroed out on
+                        * each and every writepage invocation because it may
+                        * be mmapped.  "A file is mapped in multiples of the
+                        * page size.  For a file that is not a multiple of
+                        * the  page size, the remaining memory is zeroed when
+                        * mapped, and writes to that region are not written
+                        * out to the file."
+                        */
+                       zero_user_segment(page, block_start, block_end);
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
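
The comment added above (copied from block_write_full_page_endio) explains why the tail of a page that straddles i_size must be zeroed before it is written: the page may be mmapped, so whatever userspace left beyond EOF would otherwise reach the disk. A toy illustration of clearing everything past the in-page offset of the file size is sketched below; PAGE_SIZE, the plain buffer and zero_page_tail() are assumptions for the example, not ext4 code.

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096UL

/* Zero the part of the last page that lies beyond @isize, the way a
 * writepage-style routine would before sending the page to disk.
 * @page here is just a byte buffer standing in for the page contents. */
static void zero_page_tail(unsigned char *page, unsigned long isize)
{
	unsigned long in_page = isize & (PAGE_SIZE - 1);

	if (in_page)                    /* the file ends inside this page */
		memset(page + in_page, 0, PAGE_SIZE - in_page);
}

int main(void)
{
	unsigned char page[PAGE_SIZE];

	memset(page, 0xaa, sizeof(page));           /* pretend mmap dirtied it */
	zero_page_tail(page, 3 * PAGE_SIZE + 100);  /* i_size ends at offset 100 */
	printf("byte 99 = %#x, byte 100 = %#x\n", page[99], page[100]);
	return 0;
}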
index 3858767ec672ef214ad5f288f30932ee0b5d933a..3e1329e2f826132d8aec264116deb2c572be9ef7 100644 (file)
@@ -1155,9 +1155,9 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
                seq_puts(seq, ",block_validity");
 
        if (!test_opt(sb, INIT_INODE_TABLE))
-               seq_puts(seq, ",noinit_inode_table");
+               seq_puts(seq, ",noinit_itable");
        else if (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)
-               seq_printf(seq, ",init_inode_table=%u",
+               seq_printf(seq, ",init_itable=%u",
                           (unsigned) sbi->s_li_wait_mult);
 
        ext4_show_quota_options(seq, sb);
@@ -1333,8 +1333,7 @@ enum {
        Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
        Opt_inode_readahead_blks, Opt_journal_ioprio,
        Opt_dioread_nolock, Opt_dioread_lock,
-       Opt_discard, Opt_nodiscard,
-       Opt_init_inode_table, Opt_noinit_inode_table,
+       Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
 };
 
 static const match_table_t tokens = {
@@ -1407,9 +1406,9 @@ static const match_table_t tokens = {
        {Opt_dioread_lock, "dioread_lock"},
        {Opt_discard, "discard"},
        {Opt_nodiscard, "nodiscard"},
-       {Opt_init_inode_table, "init_itable=%u"},
-       {Opt_init_inode_table, "init_itable"},
-       {Opt_noinit_inode_table, "noinit_itable"},
+       {Opt_init_itable, "init_itable=%u"},
+       {Opt_init_itable, "init_itable"},
+       {Opt_noinit_itable, "noinit_itable"},
        {Opt_err, NULL},
 };
 
@@ -1892,7 +1891,7 @@ set_qf_format:
                case Opt_dioread_lock:
                        clear_opt(sb, DIOREAD_NOLOCK);
                        break;
-               case Opt_init_inode_table:
+               case Opt_init_itable:
                        set_opt(sb, INIT_INODE_TABLE);
                        if (args[0].from) {
                                if (match_int(&args[0], &option))
@@ -1903,7 +1902,7 @@ set_qf_format:
                                return 0;
                        sbi->s_li_wait_mult = option;
                        break;
-               case Opt_noinit_inode_table:
+               case Opt_noinit_itable:
                        clear_opt(sb, INIT_INODE_TABLE);
                        break;
                default:
index 73c3992b2bb4aa765d3dc2418e907ca0a7ba78f2..517f211a3bd45c60f607e5da7d957bc739d55d47 100644 (file)
@@ -47,17 +47,6 @@ struct wb_writeback_work {
        struct completion *done;        /* set if the caller waits */
 };
 
-const char *wb_reason_name[] = {
-       [WB_REASON_BACKGROUND]          = "background",
-       [WB_REASON_TRY_TO_FREE_PAGES]   = "try_to_free_pages",
-       [WB_REASON_SYNC]                = "sync",
-       [WB_REASON_PERIODIC]            = "periodic",
-       [WB_REASON_LAPTOP_TIMER]        = "laptop_timer",
-       [WB_REASON_FREE_MORE_MEM]       = "free_more_memory",
-       [WB_REASON_FS_FREE_SPACE]       = "fs_free_space",
-       [WB_REASON_FORKER_THREAD]       = "forker_thread"
-};
-
 /*
  * Include the creation of the trace points after defining the
  * wb_writeback_work structure so that the definition remains local to this
@@ -156,6 +145,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
  * bdi_start_writeback - start writeback
  * @bdi: the backing device to write from
  * @nr_pages: the number of pages to write
+ * @reason: reason why some writeback work was initiated
  *
  * Description:
  *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
@@ -1223,6 +1213,7 @@ static void wait_sb_inodes(struct super_block *sb)
  * writeback_inodes_sb_nr -    writeback dirty inodes from given super_block
  * @sb: the superblock
  * @nr: the number of pages to write
+ * @reason: reason why some writeback work was initiated
  *
  * Start writeback on some inodes on this super_block. No guarantees are made
  * on how many (if any) will be written, and this function does not wait
@@ -1251,6 +1242,7 @@ EXPORT_SYMBOL(writeback_inodes_sb_nr);
 /**
  * writeback_inodes_sb -       writeback dirty inodes from given super_block
  * @sb: the superblock
+ * @reason: reason why some writeback work was initiated
  *
  * Start writeback on some inodes on this super_block. No guarantees are made
  * on how many (if any) will be written, and this function does not wait
@@ -1265,6 +1257,7 @@ EXPORT_SYMBOL(writeback_inodes_sb);
 /**
  * writeback_inodes_sb_if_idle -       start writeback if none underway
  * @sb: the superblock
+ * @reason: reason why some writeback work was initiated
  *
  * Invoke writeback_inodes_sb if no writeback is currently underway.
  * Returns 1 if writeback was started, 0 if not.
@@ -1285,6 +1278,7 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
  * writeback_inodes_sb_if_idle -       start writeback if none underway
  * @sb: the superblock
  * @nr: the number of pages to write
+ * @reason: reason why some writeback work was initiated
  *
  * Invoke writeback_inodes_sb if no writeback is currently underway.
  * Returns 1 if writeback was started, 0 if not.
index 5cb8614508c339fb5e0c18031f3ebf249a4ec6c0..2aaf3eaaf13da03e328ac3523fe16ca09a60c690 100644 (file)
@@ -1512,7 +1512,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
        else if (outarg->offset + num > file_size)
                num = file_size - outarg->offset;
 
-       while (num) {
+       while (num && req->num_pages < FUSE_MAX_PAGES_PER_REQ) {
                struct page *page;
                unsigned int this_num;
 
@@ -1526,6 +1526,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
 
                num -= this_num;
                total_len += this_num;
+               index++;
        }
        req->misc.retrieve_in.offset = outarg->offset;
        req->misc.retrieve_in.size = total_len;
index 594f07a81c2899ba33a173be33cfc818afa0d39b..0c84100acd4492966e2fa48de6fbfacca0f10172 100644 (file)
@@ -1556,7 +1556,7 @@ static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
        struct inode *inode = file->f_path.dentry->d_inode;
 
        mutex_lock(&inode->i_mutex);
-       if (origin != SEEK_CUR || origin != SEEK_SET) {
+       if (origin != SEEK_CUR && origin != SEEK_SET) {
                retval = fuse_update_attributes(inode, NULL, file, NULL);
                if (retval)
                        goto exit;
@@ -1567,6 +1567,10 @@ static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
                offset += i_size_read(inode);
                break;
        case SEEK_CUR:
+               if (offset == 0) {
+                       retval = file->f_pos;
+                       goto exit;
+               }
                offset += file->f_pos;
                break;
        case SEEK_DATA:
index 3e6d727564792edd3b59dd6c509db3173e7a082f..aa83109b94316c9ed86bd0b4b5c30df5d9d9675f 100644 (file)
@@ -1138,28 +1138,28 @@ static int __init fuse_fs_init(void)
 {
        int err;
 
-       err = register_filesystem(&fuse_fs_type);
-       if (err)
-               goto out;
-
-       err = register_fuseblk();
-       if (err)
-               goto out_unreg;
-
        fuse_inode_cachep = kmem_cache_create("fuse_inode",
                                              sizeof(struct fuse_inode),
                                              0, SLAB_HWCACHE_ALIGN,
                                              fuse_inode_init_once);
        err = -ENOMEM;
        if (!fuse_inode_cachep)
-               goto out_unreg2;
+               goto out;
+
+       err = register_fuseblk();
+       if (err)
+               goto out2;
+
+       err = register_filesystem(&fuse_fs_type);
+       if (err)
+               goto out3;
 
        return 0;
 
- out_unreg2:
+ out3:
        unregister_fuseblk();
- out_unreg:
-       unregister_filesystem(&fuse_fs_type);
+ out2:
+       kmem_cache_destroy(fuse_inode_cachep);
  out:
        return err;
 }
index 3b0d05dcd7c1cc8f098205e781536cc2d1605acf..637694bf3a03c5652d780700686a9e719fca2709 100644 (file)
@@ -1205,6 +1205,8 @@ int __break_lease(struct inode *inode, unsigned int mode)
        int want_write = (mode & O_ACCMODE) != O_RDONLY;
 
        new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
+       if (IS_ERR(new_fl))
+               return PTR_ERR(new_fl);
 
        lock_flocks();
 
@@ -1221,12 +1223,6 @@ int __break_lease(struct inode *inode, unsigned int mode)
                if (fl->fl_owner == current->files)
                        i_have_this_lease = 1;
 
-       if (IS_ERR(new_fl) && !i_have_this_lease
-                       && ((mode & O_NONBLOCK) == 0)) {
-               error = PTR_ERR(new_fl);
-               goto out;
-       }
-
        break_time = 0;
        if (lease_break_time > 0) {
                break_time = jiffies + lease_break_time * HZ;
@@ -1284,8 +1280,7 @@ restart:
 
 out:
        unlock_flocks();
-       if (!IS_ERR(new_fl))
-               locks_free_lock(new_fl);
+       locks_free_lock(new_fl);
        return error;
 }
 
index 6d3a1963879b0f13fd4929195a19c958d9deb4bb..cfc6d4448aa54bdc538131b1e53285a92bb20073 100644 (file)
@@ -1048,15 +1048,12 @@ static int show_mountinfo(struct seq_file *m, void *v)
        if (err)
                goto out;
        seq_putc(m, ' ');
-       seq_path_root(m, &mnt_path, &root, " \t\n\\");
-       if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) {
-               /*
-                * Mountpoint is outside root, discard that one.  Ugly,
-                * but less so than trying to do that in iterator in a
-                * race-free way (due to renames).
-                */
-               return SEQ_SKIP;
-       }
+
+       /* mountpoints outside of chroot jail will give SEQ_SKIP on this */
+       err = seq_path_root(m, &mnt_path, &root, " \t\n\\");
+       if (err)
+               goto out;
+
        seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
        show_mnt_opts(m, mnt);
 
@@ -2776,3 +2773,8 @@ void kern_unmount(struct vfsmount *mnt)
        }
 }
 EXPORT_SYMBOL(kern_unmount);
+
+bool our_mnt(struct vfsmount *mnt)
+{
+       return check_mnt(mnt);
+}
index 5b5fa33b6b9dfd0384ca0cd7654363d8e2db560c..cbd1a61c110a38ca7a3e1d1e2de8b237e810b92e 100644 (file)
@@ -548,7 +548,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
 
        error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY);
        if (error)
-               goto out_bdi;
+               goto out_fput;
 
        server->ncp_filp = ncp_filp;
        server->ncp_sock = sock;
@@ -559,7 +559,7 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
                error = -EBADF;
                server->info_filp = fget(data.info_fd);
                if (!server->info_filp)
-                       goto out_fput;
+                       goto out_bdi;
                error = -ENOTSOCK;
                sock_inode = server->info_filp->f_path.dentry->d_inode;
                if (!S_ISSOCK(sock_inode->i_mode))
@@ -746,9 +746,9 @@ out_nls:
 out_fput2:
        if (server->info_filp)
                fput(server->info_filp);
-out_fput:
-       bdi_destroy(&server->bdi);
 out_bdi:
+       bdi_destroy(&server->bdi);
+out_fput:
        /* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>:
         * 
         * The previously used put_filp(ncp_filp); was bogus, since
index eca56d4b39c0ae82caf3abb117bb9ad5163ec1b3..606ef0f20aed58d7e67b33ab2696382ece03c6f5 100644 (file)
@@ -147,7 +147,7 @@ static loff_t nfs_file_llseek(struct file *filp, loff_t offset, int origin)
         * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
         * the cached file length
         */
-       if (origin != SEEK_SET || origin != SEEK_CUR) {
+       if (origin != SEEK_SET && origin != SEEK_CUR) {
                struct inode *inode = filp->f_mapping->host;
 
                int retval = nfs_revalidate_file_size(inode, filp);
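
Both this nfs hunk and the earlier fuse_file_llseek() hunk fix the same slip: the comment says the cached size must be revalidated for SEEK_END, SEEK_DATA and SEEK_HOLE, i.e. whenever the origin is neither SEEK_SET nor SEEK_CUR, and negating that "or" requires "and"; with "||" the condition is always true. A trivial stand-alone version of the corrected predicate follows; needs_size_revalidation() is an invented name.

#include <stdio.h>

/* The cached size only needs revalidating when the new position depends
 * on the current file size, i.e. origin is neither SEEK_SET nor SEEK_CUR. */
static int needs_size_revalidation(int origin)
{
	return origin != SEEK_SET && origin != SEEK_CUR;   /* not "||" */
}

int main(void)
{
	printf("SEEK_SET: %d\n", needs_size_revalidation(SEEK_SET));
	printf("SEEK_CUR: %d\n", needs_size_revalidation(SEEK_CUR));
	printf("SEEK_END: %d\n", needs_size_revalidation(SEEK_END));
	return 0;
}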
index be2bbac13817c7ec624363afb4caf8bdd65a72b4..d9f4d78c34131264ba1b58bd36b1ec3cd35e5d6d 100644 (file)
@@ -39,6 +39,8 @@
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/string.h>
+#include <linux/ratelimit.h>
+#include <linux/printk.h>
 #include <linux/slab.h>
 #include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/gss_api.h>
@@ -894,6 +896,8 @@ out:
 
 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
 {
+       if (delegation == NULL)
+               return 0;
        if ((delegation->type & fmode) != fmode)
                return 0;
        if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
@@ -1036,8 +1040,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
                }
                rcu_read_lock();
                delegation = rcu_dereference(nfsi->delegation);
-               if (delegation == NULL ||
-                   !can_open_delegated(delegation, fmode)) {
+               if (!can_open_delegated(delegation, fmode)) {
                        rcu_read_unlock();
                        break;
                }
@@ -1091,7 +1094,12 @@ static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data
                if (delegation)
                        delegation_flags = delegation->flags;
                rcu_read_unlock();
-               if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
+               if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) {
+                       pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
+                                       "returning a delegation for "
+                                       "OPEN(CLAIM_DELEGATE_CUR)\n",
+                                       NFS_CLIENT(inode)->cl_server);
+               } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
                        nfs_inode_set_delegation(state->inode,
                                        data->owner->so_cred,
                                        &data->o_res);
@@ -1423,11 +1431,9 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
                        goto out_no_action;
                rcu_read_lock();
                delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
-               if (delegation != NULL &&
-                   test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) == 0) {
-                       rcu_read_unlock();
-                       goto out_no_action;
-               }
+               if (data->o_arg.claim != NFS4_OPEN_CLAIM_DELEGATE_CUR &&
+                   can_open_delegated(delegation, data->o_arg.fmode))
+                       goto unlock_no_action;
                rcu_read_unlock();
        }
        /* Update sequence id. */
@@ -1444,6 +1450,8 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
                return;
        rpc_call_start(task);
        return;
+unlock_no_action:
+       rcu_read_unlock();
 out_no_action:
        task->tk_action = NULL;
 
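The new unlock_no_action label exists because the early exit above is now taken
while the RCU read lock is still held; splitting the exit labels keeps
rcu_read_unlock() on exactly the paths that took the lock. A sketch of the same
pattern with demo_* stand-ins rather than the NFS structures:

#include <linux/rcupdate.h>
#include <linux/types.h>

struct demo_state {
        bool skip;
};

struct demo {
        struct demo_state __rcu *state;
        void (*action)(struct demo *);
};

static void demo_prepare(struct demo *d)
{
        struct demo_state *st;

        rcu_read_lock();
        st = rcu_dereference(d->state);
        if (st && st->skip)
                goto unlock_no_action;  /* still inside the read-side section */
        rcu_read_unlock();

        /* the normal path would continue here */
        return;

unlock_no_action:
        rcu_read_unlock();
        d->action = NULL;               /* shared "take no action" handling */
}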
index 39914be40b03694008ada2c56af6aaf5fb3a7f97..6a7107ae6b72d407bf95c4953bd1017e4704f63f 100644 (file)
@@ -1156,11 +1156,13 @@ restart:
                if (status >= 0) {
                        status = nfs4_reclaim_locks(state, ops);
                        if (status >= 0) {
+                               spin_lock(&state->state_lock);
                                list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                        if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
                                                printk("%s: Lock reclaim failed!\n",
                                                        __func__);
                                }
+                               spin_unlock(&state->state_lock);
                                nfs4_put_open_state(state);
                                goto restart;
                        }
@@ -1224,10 +1226,12 @@ static void nfs4_clear_open_state(struct nfs4_state *state)
        clear_bit(NFS_O_RDONLY_STATE, &state->flags);
        clear_bit(NFS_O_WRONLY_STATE, &state->flags);
        clear_bit(NFS_O_RDWR_STATE, &state->flags);
+       spin_lock(&state->state_lock);
        list_for_each_entry(lock, &state->lock_states, ls_locks) {
                lock->ls_seqid.flags = 0;
                lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
        }
+       spin_unlock(&state->state_lock);
 }
 
 static void nfs4_reset_seqids(struct nfs_server *server,
@@ -1350,12 +1354,14 @@ static void nfs4_warn_keyexpired(const char *s)
 static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
 {
        switch (error) {
+               case 0:
+                       break;
                case -NFS4ERR_CB_PATH_DOWN:
                        nfs_handle_cb_pathdown(clp);
-                       return 0;
+                       break;
                case -NFS4ERR_NO_GRACE:
                        nfs4_state_end_reclaim_reboot(clp);
-                       return 0;
+                       break;
                case -NFS4ERR_STALE_CLIENTID:
                case -NFS4ERR_LEASE_MOVED:
                        set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
@@ -1375,13 +1381,15 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
                case -NFS4ERR_SEQ_MISORDERED:
                        set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
                        /* Zero session reset errors */
-                       return 0;
+                       break;
                case -EKEYEXPIRED:
                        /* Nothing we can do */
                        nfs4_warn_keyexpired(clp->cl_hostname);
-                       return 0;
+                       break;
+               default:
+                       return error;
        }
-       return error;
+       return 0;
 }
 
 static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
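After the rewrite every handled case leaves the switch with break and the
function returns 0 once at the bottom, while anything unrecognized is
propagated from the new default branch; the explicit case 0 documents that
success simply passes through. A stand-alone sketch of the same shape, with
invented error values:

#include <stdio.h>

static int handle_error(int error)
{
        switch (error) {
        case 0:
                break;
        case -1:        /* stand-ins for the handled -NFS4ERR_* cases */
        case -2:
                printf("recovered from %d\n", error);
                break;
        default:
                return error;   /* unknown: let the caller see it */
        }
        return 0;
}

int main(void)
{
        printf("%d %d %d\n",
               handle_error(0), handle_error(-2), handle_error(-99));
        return 0;
}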
@@ -1428,7 +1436,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
        struct rpc_cred *cred;
        const struct nfs4_state_maintenance_ops *ops =
                clp->cl_mvops->state_renewal_ops;
-       int status = -NFS4ERR_EXPIRED;
+       int status;
 
        /* Is the client already known to have an expired lease? */
        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
@@ -1438,6 +1446,7 @@ static int nfs4_check_lease(struct nfs_client *clp)
        spin_unlock(&clp->cl_lock);
        if (cred == NULL) {
                cred = nfs4_get_setclientid_cred(clp);
+               status = -ENOKEY;
                if (cred == NULL)
                        goto out;
        }
@@ -1525,16 +1534,16 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
 {
        if (!flags)
                return;
-       else if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
+       if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
                nfs41_handle_server_reboot(clp);
-       else if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
+       if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
                            SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
                            SEQ4_STATUS_ADMIN_STATE_REVOKED |
                            SEQ4_STATUS_LEASE_MOVED))
                nfs41_handle_state_revoked(clp);
-       else if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
+       if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
                nfs41_handle_recallable_state_revoked(clp);
-       else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
+       if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
                            SEQ4_STATUS_BACKCHANNEL_FAULT |
                            SEQ4_STATUS_CB_PATH_DOWN_SESSION))
                nfs41_handle_cb_path_down(clp);
@@ -1662,10 +1671,10 @@ static void nfs4_state_manager(struct nfs_client *clp)
 
                if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
                        status = nfs4_check_lease(clp);
+                       if (status < 0)
+                               goto out_error;
                        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
                                continue;
-                       if (status < 0 && status != -NFS4ERR_CB_PATH_DOWN)
-                               goto out_error;
                }
 
                /* Initialize or reset the session */
index 41d6743d303c2a7923e5d09bc1df1fcf41514c29..ac258beeda3c4e3e9a3a36a375619838c8c7a350 100644 (file)
@@ -625,6 +625,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
                if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
                        goto out_free;
 
+               if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
+                       goto out_free;
+
                len = argv[n].v_size * argv[n].v_nmembs;
                base = (void __user *)(unsigned long)argv[n].v_base;
                if (len == 0) {
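The added test rejects any request whose v_nmembs * v_size product would not
fit in the 32-bit length used for the copy below; dividing UINT_MAX by the
element size avoids performing the overflowing multiplication itself. A small
userspace illustration of the same guard (the values are arbitrary, and the
sketch also rejects a zero element size to keep the division safe):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static int check_vector(uint32_t nmembs, uint32_t size)
{
        if (size == 0)
                return -1;              /* nothing sensible to copy */
        if (nmembs >= UINT_MAX / size)
                return -1;              /* nmembs * size would wrap */
        printf("ok: %u bytes\n", nmembs * size);
        return 0;
}

int main(void)
{
        check_vector(1000, 24);         /* accepted */
        check_vector(400000000, 24);    /* rejected: product exceeds UINT_MAX */
        return 0;
}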
@@ -842,6 +845,19 @@ long nilfs_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        case FS_IOC32_GETVERSION:
                cmd = FS_IOC_GETVERSION;
                break;
+       case NILFS_IOCTL_CHANGE_CPMODE:
+       case NILFS_IOCTL_DELETE_CHECKPOINT:
+       case NILFS_IOCTL_GET_CPINFO:
+       case NILFS_IOCTL_GET_CPSTAT:
+       case NILFS_IOCTL_GET_SUINFO:
+       case NILFS_IOCTL_GET_SUSTAT:
+       case NILFS_IOCTL_GET_VINFO:
+       case NILFS_IOCTL_GET_BDESCS:
+       case NILFS_IOCTL_CLEAN_SEGMENTS:
+       case NILFS_IOCTL_SYNC:
+       case NILFS_IOCTL_RESIZE:
+       case NILFS_IOCTL_SET_ALLOC_RANGE:
+               break;
        default:
                return -ENOIOCTLCMD;
        }
index 586174168e2ac8818fc51397223fe683b4afa189..80e4645f7990cf4cc0e636a7c19f9840503cdd42 100644 (file)
@@ -131,12 +131,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                K(i.freeswap),
                K(global_page_state(NR_FILE_DIRTY)),
                K(global_page_state(NR_WRITEBACK)),
-               K(global_page_state(NR_ANON_PAGES)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+               K(global_page_state(NR_ANON_PAGES)
                  + global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
-                 HPAGE_PMD_NR
+                 HPAGE_PMD_NR),
+#else
+               K(global_page_state(NR_ANON_PAGES)),
 #endif
-                 ),
                K(global_page_state(NR_FILE_MAPPED)),
                K(global_page_state(NR_SHMEM)),
                K(global_page_state(NR_SLAB_RECLAIMABLE) +
index 9a8a2b77b87479621838ee00c928a43edbe938e9..03102d978180eba68469ef01d13ad25465f5b796 100644 (file)
@@ -91,20 +91,18 @@ static struct file_system_type proc_fs_type = {
 
 void __init proc_root_init(void)
 {
-       struct vfsmount *mnt;
        int err;
 
        proc_init_inodecache();
        err = register_filesystem(&proc_fs_type);
        if (err)
                return;
-       mnt = kern_mount_data(&proc_fs_type, &init_pid_ns);
-       if (IS_ERR(mnt)) {
+       err = pid_ns_prepare_proc(&init_pid_ns);
+       if (err) {
                unregister_filesystem(&proc_fs_type);
                return;
        }
 
-       init_pid_ns.proc_mnt = mnt;
        proc_symlink("mounts", NULL, "self/mounts");
 
        proc_net_init();
@@ -209,5 +207,5 @@ int pid_ns_prepare_proc(struct pid_namespace *ns)
 
 void pid_ns_release_proc(struct pid_namespace *ns)
 {
-       mntput(ns->proc_mnt);
+       kern_unmount(ns->proc_mnt);
 }
index 42b274da92c39d539c7c2f0f82a2f825c1a20383..0855e6f20391715c945c32487bb2346f11f0e8a8 100644 (file)
@@ -32,7 +32,7 @@ static cputime64_t get_idle_time(int cpu)
                idle = kstat_cpu(cpu).cpustat.idle;
                idle = cputime64_add(idle, arch_idle_time(cpu));
        } else
-               idle = usecs_to_cputime(idle_time);
+               idle = usecs_to_cputime64(idle_time);
 
        return idle;
 }
@@ -46,7 +46,7 @@ static cputime64_t get_iowait_time(int cpu)
                /* !NO_HZ so we can rely on cpustat.iowait */
                iowait = kstat_cpu(cpu).cpustat.iowait;
        else
-               iowait = usecs_to_cputime(iowait_time);
+               iowait = usecs_to_cputime64(iowait_time);
 
        return iowait;
 }
index 05d6b0e78c959a341137c97fbb2ea2fa89b25197..dba43c3ea3afb6605972d3a0e3eca3ac5248876e 100644 (file)
@@ -449,8 +449,6 @@ EXPORT_SYMBOL(seq_path);
 
 /*
  * Same as seq_path, but relative to supplied root.
- *
- * root may be changed, see __d_path().
  */
 int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
                  char *esc)
@@ -463,6 +461,8 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
                char *p;
 
                p = __d_path(path, root, buf, size);
+               if (!p)
+                       return SEQ_SKIP;
                res = PTR_ERR(p);
                if (!IS_ERR(p)) {
                        char *end = mangle_path(buf, p, esc);
@@ -474,7 +474,7 @@ int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
        }
        seq_commit(m, res);
 
-       return res < 0 ? res : 0;
+       return res < 0 && res != -ENAMETOOLONG ? res : 0;
 }
 
 /*
index 20403dc5d4378da7a6e962601a8e60740cb6d5db..ae0e76bb6ebf44d5219693e27c08f9e9c8715da2 100644 (file)
@@ -2264,19 +2264,12 @@ static int __init ubifs_init(void)
                return -EINVAL;
        }
 
-       err = register_filesystem(&ubifs_fs_type);
-       if (err) {
-               ubifs_err("cannot register file system, error %d", err);
-               return err;
-       }
-
-       err = -ENOMEM;
        ubifs_inode_slab = kmem_cache_create("ubifs_inode_slab",
                                sizeof(struct ubifs_inode), 0,
                                SLAB_MEM_SPREAD | SLAB_RECLAIM_ACCOUNT,
                                &inode_slab_ctor);
        if (!ubifs_inode_slab)
-               goto out_reg;
+               return -ENOMEM;
 
        register_shrinker(&ubifs_shrinker_info);
 
@@ -2288,15 +2281,20 @@ static int __init ubifs_init(void)
        if (err)
                goto out_compr;
 
+       err = register_filesystem(&ubifs_fs_type);
+       if (err) {
+               ubifs_err("cannot register file system, error %d", err);
+               goto out_dbg;
+       }
        return 0;
 
+out_dbg:
+       dbg_debugfs_exit();
 out_compr:
        ubifs_compressors_exit();
 out_shrinker:
        unregister_shrinker(&ubifs_shrinker_info);
        kmem_cache_destroy(ubifs_inode_slab);
-out_reg:
-       unregister_filesystem(&ubifs_fs_type);
        return err;
 }
 /* late_initcall to let compressors initialize first */
index b6c4b3795c4a000ce27ac3799b5c39ae6c8044e9..76e4266d2e7e4a8fc3bfc609016e4de467b7e5cf 100644 (file)
@@ -42,6 +42,8 @@ xfs_acl_from_disk(struct xfs_acl *aclp)
        int count, i;
 
        count = be32_to_cpu(aclp->acl_cnt);
+       if (count > XFS_ACL_MAX_ENTRIES)
+               return ERR_PTR(-EFSCORRUPTED);
 
        acl = posix_acl_alloc(count, GFP_KERNEL);
        if (!acl)
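The new check treats acl_cnt as what it is, an untrusted value read from disk,
and refuses to size an allocation from it once it exceeds the on-disk format's
limit. A hypothetical userspace version of the idea, with MAX_ENTRIES standing
in for XFS_ACL_MAX_ENTRIES and a NULL return standing in for -EFSCORRUPTED:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ENTRIES 25          /* placeholder for the real format limit */

struct entry { uint32_t tag, id, perm; };

static struct entry *entries_from_disk(uint32_t ondisk_count)
{
        if (ondisk_count > MAX_ENTRIES)
                return NULL;    /* corrupt count: refuse to allocate */
        return calloc(ondisk_count, sizeof(struct entry));
}

int main(void)
{
        struct entry *ok = entries_from_disk(3);

        if (ok)
                puts("3 entries: accepted");
        free(ok);

        if (!entries_from_disk(0x7fffffff))
                puts("bogus on-disk count: rejected");
        return 0;
}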
index d4906e7c97873b302201cddf442bdc5eeb54a29d..c1b55e5965517a9407f678610b62f29fdabf33b3 100644 (file)
@@ -110,6 +110,7 @@ xfs_attr_namesp_match(int arg_flags, int ondisk_flags)
 /*
  * Query whether the requested number of additional bytes of extended
  * attribute space will be able to fit inline.
+ *
  * Returns zero if not, else the di_forkoff fork offset to be used in the
  * literal area for attribute data once the new bytes have been added.
  *
@@ -122,7 +123,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
        int offset;
        int minforkoff; /* lower limit on valid forkoff locations */
        int maxforkoff; /* upper limit on valid forkoff locations */
-       int dsize;      
+       int dsize;
        xfs_mount_t *mp = dp->i_mount;
 
        offset = (XFS_LITINO(mp) - bytes) >> 3; /* rounded down */
@@ -136,47 +137,60 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
                return (offset >= minforkoff) ? minforkoff : 0;
        }
 
-       if (!(mp->m_flags & XFS_MOUNT_ATTR2)) {
-               if (bytes <= XFS_IFORK_ASIZE(dp))
-                       return dp->i_d.di_forkoff;
+       /*
+        * If the requested number of bytes is smaller than or equal to the
+        * current attribute fork size we can always proceed.
+        *
+        * Note that if_bytes in the data fork might actually be larger than
+        * the current data fork size due to delalloc extents. In that
+        * case either the extent count will go down when they are converted
+        * to real extents, or the delalloc conversion will take care of the
+        * literal area rebalancing.
+        */
+       if (bytes <= XFS_IFORK_ASIZE(dp))
+               return dp->i_d.di_forkoff;
+
+       /*
+        * For attr2 we can try to move the forkoff if there is space in the
+        * literal area, but for the old format we are done if there is no
+        * space in the fixed attribute fork.
+        */
+       if (!(mp->m_flags & XFS_MOUNT_ATTR2))
                return 0;
-       }
 
        dsize = dp->i_df.if_bytes;
-       
+
        switch (dp->i_d.di_format) {
        case XFS_DINODE_FMT_EXTENTS:
-               /* 
+               /*
                 * If there is no attr fork and the data fork is extents, 
-                * determine if creating the default attr fork will result 
-                * in the extents form migrating to btree. If so, the 
-                * minimum offset only needs to be the space required for 
+                * determine if creating the default attr fork will result
+                * in the extents form migrating to btree. If so, the
+                * minimum offset only needs to be the space required for
                 * the btree root.
-                */ 
+                */
                if (!dp->i_d.di_forkoff && dp->i_df.if_bytes >
                    xfs_default_attroffset(dp))
                        dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
                break;
-               
        case XFS_DINODE_FMT_BTREE:
                /*
-                * If have data btree then keep forkoff if we have one,
-                * otherwise we are adding a new attr, so then we set 
-                * minforkoff to where the btree root can finish so we have 
+                * If we have a data btree then keep forkoff if we have one,
+                * otherwise we are adding a new attr, so then we set
+                * minforkoff to where the btree root can finish so we have
                 * plenty of room for attrs
                 */
                if (dp->i_d.di_forkoff) {
-                       if (offset < dp->i_d.di_forkoff) 
+                       if (offset < dp->i_d.di_forkoff)
                                return 0;
-                       else 
-                               return dp->i_d.di_forkoff;
-               } else
-                       dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
+                       return dp->i_d.di_forkoff;
+               }
+               dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
                break;
        }
-       
-       /* 
-        * A data fork btree root must have space for at least 
+
+       /*
+        * A data fork btree root must have space for at least
         * MINDBTPTRS key/ptr pairs if the data fork is small or empty.
         */
        minforkoff = MAX(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
@@ -186,10 +200,10 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
        maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
        maxforkoff = maxforkoff >> 3;   /* rounded down */
 
-       if (offset >= minforkoff && offset < maxforkoff)
-               return offset;
        if (offset >= maxforkoff)
                return maxforkoff;
+       if (offset >= minforkoff)
+               return offset;
        return 0;
 }
 
index c68baeb0974adb2e57f690496fa8957c94c92e0d..d0ab78837057815f17605150d31a633c2eeb2739 100644 (file)
@@ -2383,6 +2383,8 @@ xfs_bmap_btalloc(
        int             tryagain;
        int             error;
 
+       ASSERT(ap->length);
+
        mp = ap->ip->i_mount;
        align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
        if (unlikely(align)) {
@@ -4629,6 +4631,8 @@ xfs_bmapi_allocate(
        int                     error;
        int                     rt;
 
+       ASSERT(bma->length > 0);
+
        rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip);
 
        /*
@@ -4849,6 +4853,7 @@ xfs_bmapi_write(
        ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
        ASSERT(!(flags & XFS_BMAPI_IGSTATE));
        ASSERT(tp != NULL);
+       ASSERT(len > 0);
 
        whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
                XFS_ATTR_FORK : XFS_DATA_FORK;
@@ -4918,9 +4923,22 @@ xfs_bmapi_write(
                        bma.eof = eof;
                        bma.conv = !!(flags & XFS_BMAPI_CONVERT);
                        bma.wasdel = wasdelay;
-                       bma.length = len;
                        bma.offset = bno;
 
+                       /*
+                        * There's a 32/64 bit type mismatch between the
+                        * allocation length request (which can be 64 bits in
+                        * length) and the bma length request, which is
+                        * xfs_extlen_t and therefore 32 bits. Hence we have to
+                        * check for 32-bit overflows and handle them here.
+                        */
+                       if (len > (xfs_filblks_t)MAXEXTLEN)
+                               bma.length = MAXEXTLEN;
+                       else
+                               bma.length = len;
+
+                       ASSERT(len > 0);
+                       ASSERT(bma.length > 0);
                        error = xfs_bmapi_allocate(&bma, flags);
                        if (error)
                                goto error0;
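As the new comment explains, the caller's length is 64-bit while bma.length is
a 32-bit xfs_extlen_t, so each pass caps the request at MAXEXTLEN and the
surrounding loop picks up the remainder later. A standalone sketch of that
clamp, with MAX_EXT as a stand-in value for MAXEXTLEN:

#include <stdint.h>
#include <stdio.h>

#define MAX_EXT ((uint32_t)0x001fffff)  /* stand-in 32-bit extent limit */

static uint32_t clamp_len(uint64_t len)
{
        return len > (uint64_t)MAX_EXT ? MAX_EXT : (uint32_t)len;
}

int main(void)
{
        uint64_t want = 0x100000000ULL;         /* more than 32 bits of blocks */
        uint32_t got  = clamp_len(want);

        printf("requested %llu blocks, allocating %u on this pass\n",
               (unsigned long long)want, got);
        return 0;
}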
index da108977b21f8582c7af92e2d4edcdbca8ba07b7..558910f5e3c06a3451558a486a7b982b0f8ee19d 100644 (file)
@@ -98,22 +98,22 @@ xfs_fs_encode_fh(
        switch (fileid_type) {
        case FILEID_INO32_GEN_PARENT:
                spin_lock(&dentry->d_lock);
-               fid->i32.parent_ino = dentry->d_parent->d_inode->i_ino;
+               fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
                fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
                spin_unlock(&dentry->d_lock);
                /*FALLTHRU*/
        case FILEID_INO32_GEN:
-               fid->i32.ino = inode->i_ino;
+               fid->i32.ino = XFS_I(inode)->i_ino;
                fid->i32.gen = inode->i_generation;
                break;
        case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
                spin_lock(&dentry->d_lock);
-               fid64->parent_ino = dentry->d_parent->d_inode->i_ino;
+               fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
                fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
                spin_unlock(&dentry->d_lock);
                /*FALLTHRU*/
        case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
-               fid64->ino = inode->i_ino;
+               fid64->ino = XFS_I(inode)->i_ino;
                fid64->gen = inode->i_generation;
                break;
        }
index c0237c602f11deb92fa5f533f74a647005fbf1b8..755ee8164880fe4122bac9de94119f1c7086a9b7 100644 (file)
@@ -2835,6 +2835,27 @@ corrupt_out:
        return XFS_ERROR(EFSCORRUPTED);
 }
 
+void
+xfs_promote_inode(
+       struct xfs_inode        *ip)
+{
+       struct xfs_buf          *bp;
+
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
+
+       bp = xfs_incore(ip->i_mount->m_ddev_targp, ip->i_imap.im_blkno,
+                       ip->i_imap.im_len, XBF_TRYLOCK);
+       if (!bp)
+               return;
+
+       if (XFS_BUF_ISDELAYWRITE(bp)) {
+               xfs_buf_delwri_promote(bp);
+               wake_up_process(ip->i_mount->m_ddev_targp->bt_task);
+       }
+
+       xfs_buf_relse(bp);
+}
+
 /*
  * Return a pointer to the extent record at file index idx.
  */
index 760140d1dd661f42e653576a7c947b11eb8b6fbd..b4cd4739f98e74b2e256295b48fe64e285b320b0 100644 (file)
@@ -498,6 +498,7 @@ int         xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
 void           xfs_iext_realloc(xfs_inode_t *, int, int);
 void           xfs_iunpin_wait(xfs_inode_t *);
 int            xfs_iflush(xfs_inode_t *, uint);
+void           xfs_promote_inode(struct xfs_inode *);
 void           xfs_lock_inodes(xfs_inode_t **, int, uint);
 void           xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
 
index a14cd89fe4655e2647d92d2191c1ffb8e6588787..34817adf4b9ed837da47d6f9ccfa977829fdd33c 100644 (file)
@@ -150,6 +150,117 @@ xlog_grant_add_space(
        } while (head_val != old);
 }
 
+STATIC bool
+xlog_reserveq_wake(
+       struct log              *log,
+       int                     *free_bytes)
+{
+       struct xlog_ticket      *tic;
+       int                     need_bytes;
+
+       list_for_each_entry(tic, &log->l_reserveq, t_queue) {
+               if (tic->t_flags & XLOG_TIC_PERM_RESERV)
+                       need_bytes = tic->t_unit_res * tic->t_cnt;
+               else
+                       need_bytes = tic->t_unit_res;
+
+               if (*free_bytes < need_bytes)
+                       return false;
+               *free_bytes -= need_bytes;
+
+               trace_xfs_log_grant_wake_up(log, tic);
+               wake_up(&tic->t_wait);
+       }
+
+       return true;
+}
+
+STATIC bool
+xlog_writeq_wake(
+       struct log              *log,
+       int                     *free_bytes)
+{
+       struct xlog_ticket      *tic;
+       int                     need_bytes;
+
+       list_for_each_entry(tic, &log->l_writeq, t_queue) {
+               ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
+
+               need_bytes = tic->t_unit_res;
+
+               if (*free_bytes < need_bytes)
+                       return false;
+               *free_bytes -= need_bytes;
+
+               trace_xfs_log_regrant_write_wake_up(log, tic);
+               wake_up(&tic->t_wait);
+       }
+
+       return true;
+}
+
+STATIC int
+xlog_reserveq_wait(
+       struct log              *log,
+       struct xlog_ticket      *tic,
+       int                     need_bytes)
+{
+       list_add_tail(&tic->t_queue, &log->l_reserveq);
+
+       do {
+               if (XLOG_FORCED_SHUTDOWN(log))
+                       goto shutdown;
+               xlog_grant_push_ail(log, need_bytes);
+
+               XFS_STATS_INC(xs_sleep_logspace);
+               trace_xfs_log_grant_sleep(log, tic);
+
+               xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
+               trace_xfs_log_grant_wake(log, tic);
+
+               spin_lock(&log->l_grant_reserve_lock);
+               if (XLOG_FORCED_SHUTDOWN(log))
+                       goto shutdown;
+       } while (xlog_space_left(log, &log->l_grant_reserve_head) < need_bytes);
+
+       list_del_init(&tic->t_queue);
+       return 0;
+shutdown:
+       list_del_init(&tic->t_queue);
+       return XFS_ERROR(EIO);
+}
+
+STATIC int
+xlog_writeq_wait(
+       struct log              *log,
+       struct xlog_ticket      *tic,
+       int                     need_bytes)
+{
+       list_add_tail(&tic->t_queue, &log->l_writeq);
+
+       do {
+               if (XLOG_FORCED_SHUTDOWN(log))
+                       goto shutdown;
+               xlog_grant_push_ail(log, need_bytes);
+
+               XFS_STATS_INC(xs_sleep_logspace);
+               trace_xfs_log_regrant_write_sleep(log, tic);
+
+               xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
+               trace_xfs_log_regrant_write_wake(log, tic);
+
+               spin_lock(&log->l_grant_write_lock);
+               if (XLOG_FORCED_SHUTDOWN(log))
+                       goto shutdown;
+       } while (xlog_space_left(log, &log->l_grant_write_head) < need_bytes);
+
+       list_del_init(&tic->t_queue);
+       return 0;
+shutdown:
+       list_del_init(&tic->t_queue);
+       return XFS_ERROR(EIO);
+}
+
 static void
 xlog_tic_reset_res(xlog_ticket_t *tic)
 {
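Both new helpers walk their FIFO of waiting tickets and hand out space in queue
order, stopping at the first ticket whose reservation no longer fits, so the
caller learns whether every waiter could be woken or whether it must sleep as
well. A compact userspace sketch of that budget walk (the queue contents and
numbers are invented):

#include <stdbool.h>
#include <stdio.h>

struct waiter {
        int need;
};

static bool wake_waiters(struct waiter *q, int n, int *free_bytes)
{
        for (int i = 0; i < n; i++) {
                if (*free_bytes < q[i].need)
                        return false;   /* not everyone fits: caller must wait */
                *free_bytes -= q[i].need;
                printf("woke waiter %d (need %d)\n", i, q[i].need);
        }
        return true;
}

int main(void)
{
        struct waiter q[] = { { 100 }, { 300 }, { 250 } };
        int free_bytes = 450;

        if (!wake_waiters(q, 3, &free_bytes))
                printf("ran out of space with %d bytes left\n", free_bytes);
        return 0;
}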
@@ -350,8 +461,19 @@ xfs_log_reserve(
                retval = xlog_grant_log_space(log, internal_ticket);
        }
 
+       if (unlikely(retval)) {
+               /*
+                * If we are failing, make sure the ticket doesn't have any
+                * current reservations.  We don't want to add this back
+                * when the ticket/ transaction gets cancelled.
+                * when the ticket/transaction gets cancelled.
+               internal_ticket->t_curr_res = 0;
+               /* ungrant will give back unit_res * t_cnt. */
+               internal_ticket->t_cnt = 0;
+       }
+
        return retval;
-}      /* xfs_log_reserve */
+}
 
 
 /*
@@ -2481,8 +2603,8 @@ restart:
 /*
  * Atomically get the log space required for a log ticket.
  *
- * Once a ticket gets put onto the reserveq, it will only return after
- * the needed reservation is satisfied.
+ * Once a ticket gets put onto the reserveq, it will only return after the
+ * needed reservation is satisfied.
  *
  * This function is structured so that it has a lock free fast path. This is
  * necessary because every new transaction reservation will come through this
@@ -2490,113 +2612,53 @@ restart:
  * every pass.
  *
  * As tickets are only ever moved on and off the reserveq under the
- * l_grant_reserve_lock, we only need to take that lock if we are going
- * to add the ticket to the queue and sleep. We can avoid taking the lock if the
- * ticket was never added to the reserveq because the t_queue list head will be
- * empty and we hold the only reference to it so it can safely be checked
- * unlocked.
+ * l_grant_reserve_lock, we only need to take that lock if we are going to add
+ * the ticket to the queue and sleep. We can avoid taking the lock if the ticket
+ * was never added to the reserveq because the t_queue list head will be empty
+ * and we hold the only reference to it so it can safely be checked unlocked.
  */
 STATIC int
-xlog_grant_log_space(xlog_t       *log,
-                    xlog_ticket_t *tic)
+xlog_grant_log_space(
+       struct log              *log,
+       struct xlog_ticket      *tic)
 {
-       int              free_bytes;
-       int              need_bytes;
+       int                     free_bytes, need_bytes;
+       int                     error = 0;
 
-#ifdef DEBUG
-       if (log->l_flags & XLOG_ACTIVE_RECOVERY)
-               panic("grant Recovery problem");
-#endif
+       ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
 
        trace_xfs_log_grant_enter(log, tic);
 
+       /*
+        * If there are other waiters on the queue then give them a chance at
+        * logspace before us.  Wake up the first waiters, if we do not wake
+        * up all the waiters then go to sleep waiting for more free space,
+        * otherwise try to get some space for this transaction.
+        */
        need_bytes = tic->t_unit_res;
        if (tic->t_flags & XFS_LOG_PERM_RESERV)
                need_bytes *= tic->t_ocnt;
-
-       /* something is already sleeping; insert new transaction at end */
-       if (!list_empty_careful(&log->l_reserveq)) {
-               spin_lock(&log->l_grant_reserve_lock);
-               /* recheck the queue now we are locked */
-               if (list_empty(&log->l_reserveq)) {
-                       spin_unlock(&log->l_grant_reserve_lock);
-                       goto redo;
-               }
-               list_add_tail(&tic->t_queue, &log->l_reserveq);
-
-               trace_xfs_log_grant_sleep1(log, tic);
-
-               /*
-                * Gotta check this before going to sleep, while we're
-                * holding the grant lock.
-                */
-               if (XLOG_FORCED_SHUTDOWN(log))
-                       goto error_return;
-
-               XFS_STATS_INC(xs_sleep_logspace);
-               xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
-
-               /*
-                * If we got an error, and the filesystem is shutting down,
-                * we'll catch it down below. So just continue...
-                */
-               trace_xfs_log_grant_wake1(log, tic);
-       }
-
-redo:
-       if (XLOG_FORCED_SHUTDOWN(log))
-               goto error_return_unlocked;
-
        free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
-       if (free_bytes < need_bytes) {
+       if (!list_empty_careful(&log->l_reserveq)) {
                spin_lock(&log->l_grant_reserve_lock);
-               if (list_empty(&tic->t_queue))
-                       list_add_tail(&tic->t_queue, &log->l_reserveq);
-
-               trace_xfs_log_grant_sleep2(log, tic);
-
-               if (XLOG_FORCED_SHUTDOWN(log))
-                       goto error_return;
-
-               xlog_grant_push_ail(log, need_bytes);
-
-               XFS_STATS_INC(xs_sleep_logspace);
-               xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
-
-               trace_xfs_log_grant_wake2(log, tic);
-               goto redo;
-       }
-
-       if (!list_empty(&tic->t_queue)) {
+               if (!xlog_reserveq_wake(log, &free_bytes) ||
+                   free_bytes < need_bytes)
+                       error = xlog_reserveq_wait(log, tic, need_bytes);
+               spin_unlock(&log->l_grant_reserve_lock);
+       } else if (free_bytes < need_bytes) {
                spin_lock(&log->l_grant_reserve_lock);
-               list_del_init(&tic->t_queue);
+               error = xlog_reserveq_wait(log, tic, need_bytes);
                spin_unlock(&log->l_grant_reserve_lock);
        }
+       if (error)
+               return error;
 
-       /* we've got enough space */
        xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
        xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
        trace_xfs_log_grant_exit(log, tic);
        xlog_verify_grant_tail(log);
        return 0;
-
-error_return_unlocked:
-       spin_lock(&log->l_grant_reserve_lock);
-error_return:
-       list_del_init(&tic->t_queue);
-       spin_unlock(&log->l_grant_reserve_lock);
-       trace_xfs_log_grant_error(log, tic);
-
-       /*
-        * If we are failing, make sure the ticket doesn't have any
-        * current reservations. We don't want to add this back when
-        * the ticket/transaction gets cancelled.
-        */
-       tic->t_curr_res = 0;
-       tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
-       return XFS_ERROR(EIO);
-}      /* xlog_grant_log_space */
-
+}
 
 /*
  * Replenish the byte reservation required by moving the grant write head.
@@ -2605,10 +2667,12 @@ error_return:
  * free fast path.
  */
 STATIC int
-xlog_regrant_write_log_space(xlog_t       *log,
-                            xlog_ticket_t *tic)
+xlog_regrant_write_log_space(
+       struct log              *log,
+       struct xlog_ticket      *tic)
 {
-       int             free_bytes, need_bytes;
+       int                     free_bytes, need_bytes;
+       int                     error = 0;
 
        tic->t_curr_res = tic->t_unit_res;
        xlog_tic_reset_res(tic);
@@ -2616,104 +2680,38 @@ xlog_regrant_write_log_space(xlog_t       *log,
        if (tic->t_cnt > 0)
                return 0;
 
-#ifdef DEBUG
-       if (log->l_flags & XLOG_ACTIVE_RECOVERY)
-               panic("regrant Recovery problem");
-#endif
+       ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
 
        trace_xfs_log_regrant_write_enter(log, tic);
-       if (XLOG_FORCED_SHUTDOWN(log))
-               goto error_return_unlocked;
 
-       /* If there are other waiters on the queue then give them a
-        * chance at logspace before us. Wake up the first waiters,
-        * if we do not wake up all the waiters then go to sleep waiting
-        * for more free space, otherwise try to get some space for
-        * this transaction.
+       /*
+        * If there are other waiters on the queue then give them a chance at
+        * logspace before us.  Wake up the first waiters, if we do not wake
+        * up all the waiters then go to sleep waiting for more free space,
+        * otherwise try to get some space for this transaction.
         */
        need_bytes = tic->t_unit_res;
-       if (!list_empty_careful(&log->l_writeq)) {
-               struct xlog_ticket *ntic;
-
-               spin_lock(&log->l_grant_write_lock);
-               free_bytes = xlog_space_left(log, &log->l_grant_write_head);
-               list_for_each_entry(ntic, &log->l_writeq, t_queue) {
-                       ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV);
-
-                       if (free_bytes < ntic->t_unit_res)
-                               break;
-                       free_bytes -= ntic->t_unit_res;
-                       wake_up(&ntic->t_wait);
-               }
-
-               if (ntic != list_first_entry(&log->l_writeq,
-                                               struct xlog_ticket, t_queue)) {
-                       if (list_empty(&tic->t_queue))
-                               list_add_tail(&tic->t_queue, &log->l_writeq);
-                       trace_xfs_log_regrant_write_sleep1(log, tic);
-
-                       xlog_grant_push_ail(log, need_bytes);
-
-                       XFS_STATS_INC(xs_sleep_logspace);
-                       xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
-                       trace_xfs_log_regrant_write_wake1(log, tic);
-               } else
-                       spin_unlock(&log->l_grant_write_lock);
-       }
-
-redo:
-       if (XLOG_FORCED_SHUTDOWN(log))
-               goto error_return_unlocked;
-
        free_bytes = xlog_space_left(log, &log->l_grant_write_head);
-       if (free_bytes < need_bytes) {
+       if (!list_empty_careful(&log->l_writeq)) {
                spin_lock(&log->l_grant_write_lock);
-               if (list_empty(&tic->t_queue))
-                       list_add_tail(&tic->t_queue, &log->l_writeq);
-
-               if (XLOG_FORCED_SHUTDOWN(log))
-                       goto error_return;
-
-               xlog_grant_push_ail(log, need_bytes);
-
-               XFS_STATS_INC(xs_sleep_logspace);
-               trace_xfs_log_regrant_write_sleep2(log, tic);
-               xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
-
-               trace_xfs_log_regrant_write_wake2(log, tic);
-               goto redo;
-       }
-
-       if (!list_empty(&tic->t_queue)) {
+               if (!xlog_writeq_wake(log, &free_bytes) ||
+                   free_bytes < need_bytes)
+                       error = xlog_writeq_wait(log, tic, need_bytes);
+               spin_unlock(&log->l_grant_write_lock);
+       } else if (free_bytes < need_bytes) {
                spin_lock(&log->l_grant_write_lock);
-               list_del_init(&tic->t_queue);
+               error = xlog_writeq_wait(log, tic, need_bytes);
                spin_unlock(&log->l_grant_write_lock);
        }
 
-       /* we've got enough space */
+       if (error)
+               return error;
+
        xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
        trace_xfs_log_regrant_write_exit(log, tic);
        xlog_verify_grant_tail(log);
        return 0;
-
-
- error_return_unlocked:
-       spin_lock(&log->l_grant_write_lock);
- error_return:
-       list_del_init(&tic->t_queue);
-       spin_unlock(&log->l_grant_write_lock);
-       trace_xfs_log_regrant_write_error(log, tic);
-
-       /*
-        * If we are failing, make sure the ticket doesn't have any
-        * current reservations. We don't want to add this back when
-        * the ticket/transaction gets cancelled.
-        */
-       tic->t_curr_res = 0;
-       tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
-       return XFS_ERROR(EIO);
-}      /* xlog_regrant_write_log_space */
-
+}
 
 /* The first cnt-1 times through here we don't need to
  * move the grant write head because the permanent
index 3eca58f51ae9040b0fcc5fe6b809df38dbf0b4ac..8a899496fd5fe55ef50f4cfdcfcce20e7875a60c 100644 (file)
@@ -868,27 +868,6 @@ xfs_fs_dirty_inode(
        XFS_I(inode)->i_update_core = 1;
 }
 
-STATIC int
-xfs_log_inode(
-       struct xfs_inode        *ip)
-{
-       struct xfs_mount        *mp = ip->i_mount;
-       struct xfs_trans        *tp;
-       int                     error;
-
-       tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
-       error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
-       if (error) {
-               xfs_trans_cancel(tp, 0);
-               return error;
-       }
-
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-       return xfs_trans_commit(tp, 0);
-}
-
 STATIC int
 xfs_fs_write_inode(
        struct inode            *inode,
@@ -902,10 +881,8 @@ xfs_fs_write_inode(
 
        if (XFS_FORCED_SHUTDOWN(mp))
                return -XFS_ERROR(EIO);
-       if (!ip->i_update_core)
-               return 0;
 
-       if (wbc->sync_mode == WB_SYNC_ALL) {
+       if (wbc->sync_mode == WB_SYNC_ALL || wbc->for_kupdate) {
                /*
                 * Make sure the inode has made it into the log.  Instead
                 * of forcing it all the way to stable storage using a
                 * synchronous transaction we let the log force inside the
                 * ->sync_fs call do that for us, which reduces the number
                 * of synchronous log forces dramatically.
                 */
-               error = xfs_log_inode(ip);
+               error = xfs_log_dirty_inode(ip, NULL, 0);
                if (error)
                        goto out;
                return 0;
        } else {
+               if (!ip->i_update_core)
+                       return 0;
+
                /*
                 * We make this non-blocking if the inode is contended, return
                 * EAGAIN to indicate to the caller that they did not succeed.
index aa3dc1a4d53d4f85f97a38f0db217bfcc4e3c953..f0994aedcd158c2db3d6f9b2bf4d21a4f819bec6 100644 (file)
@@ -336,6 +336,32 @@ xfs_sync_fsdata(
        return error;
 }
 
+int
+xfs_log_dirty_inode(
+       struct xfs_inode        *ip,
+       struct xfs_perag        *pag,
+       int                     flags)
+{
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_trans        *tp;
+       int                     error;
+
+       if (!ip->i_update_core)
+               return 0;
+
+       tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+       error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
+       if (error) {
+               xfs_trans_cancel(tp, 0);
+               return error;
+       }
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+       return xfs_trans_commit(tp, 0);
+}
+
 /*
  * When remounting a filesystem read-only or freezing the filesystem, we have
  * two phases to execute. This first phase is syncing the data before we
@@ -359,6 +385,16 @@ xfs_quiesce_data(
 {
        int                     error, error2 = 0;
 
+       /*
+        * Log all pending size and timestamp updates.  The vfs writeback
+        * code is supposed to do this, but due to its overaggressive
+        * livelock detection it will skip inodes where appending writes
+        * were written out in the first non-blocking sync phase if their
+        * completion took long enough that it happened after taking the
+        * timestamp for the cut-off in the blocking phase.
+        */
+       xfs_inode_ag_iterator(mp, xfs_log_dirty_inode, 0);
+
        xfs_qm_sync(mp, SYNC_TRYLOCK);
        xfs_qm_sync(mp, SYNC_WAIT);
 
@@ -770,6 +806,17 @@ restart:
        if (!xfs_iflock_nowait(ip)) {
                if (!(sync_mode & SYNC_WAIT))
                        goto out;
+
+               /*
+                * If we only have a single dirty inode in a cluster there is
+                * a fair chance that the AIL push may have pushed it into
+                * the buffer, but xfsbufd won't touch it until 30 seconds
+                * from now, and thus we will lock up here.
+                *
+                * Promote the inode buffer to the front of the delwri list
+                * and wake up xfsbufd now.
+                */
+               xfs_promote_inode(ip);
                xfs_iflock(ip);
        }
 
index 941202e7ac6e594e2c423c19bc89248397e39516..fa965479d788d29da66b0e85bd59123c1fe08c65 100644 (file)
@@ -34,6 +34,8 @@ void xfs_quiesce_attr(struct xfs_mount *mp);
 
 void xfs_flush_inodes(struct xfs_inode *ip);
 
+int xfs_log_dirty_inode(struct xfs_inode *ip, struct xfs_perag *pag, int flags);
+
 int xfs_reclaim_inodes(struct xfs_mount *mp, int mode);
 int xfs_reclaim_inodes_count(struct xfs_mount *mp);
 void xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan);
index f1d2802b2f0782130954248f11237c10cf09e92f..49403579887324b87c821bc7eb62b0114314ce20 100644 (file)
@@ -834,18 +834,14 @@ DEFINE_LOGGRANT_EVENT(xfs_log_umount_write);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_error);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake);
 DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
 DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
index 62ce6823c0f2ac82ccdf285cf99dc8d36a9bb80b..12a1764f612b2709360eb90f9c084b2dff3a8bb9 100644 (file)
@@ -40,6 +40,7 @@ typedef u64 cputime64_t;
  */
 #define cputime_to_usecs(__ct)         jiffies_to_usecs(__ct)
 #define usecs_to_cputime(__msecs)      usecs_to_jiffies(__msecs)
+#define usecs_to_cputime64(__msecs)    nsecs_to_jiffies64((__msecs) * 1000)
 
 /*
  * Convert cputime to seconds and back.
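The 64-bit variant exists because get_idle_time() and get_iowait_time() in the
/proc/stat hunk earlier in this series feed 64-bit microsecond totals into the
conversion; plain usecs_to_cputime() maps to usecs_to_jiffies(), which takes a
32-bit argument, so the total silently wraps once it passes 2^32 microseconds,
roughly 71.6 minutes of accumulated time. A hypothetical userspace illustration
of that truncation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t idle_us = 5000000000ULL;       /* ~83 minutes of idle time */
        uint32_t truncated = (uint32_t)idle_us; /* what a 32-bit path keeps */

        printf("full: %llu us, truncated: %u us\n",
               (unsigned long long)idle_us, (unsigned)truncated);
        /* 5000000000 mod 2^32 = 705032704, so the reported idle time would
         * appear to jump backwards after ~71.6 minutes. */
        return 0;
}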
index f4c38d8c6674a3dd71ea08a47dc68e89e81e0d92..2292d1af9d705f129ae523ce00a6b7794fb1648c 100644 (file)
@@ -685,9 +685,15 @@ __SYSCALL(__NR_syncfs, sys_syncfs)
 __SYSCALL(__NR_setns, sys_setns)
 #define __NR_sendmmsg 269
 __SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
+#define __NR_process_vm_readv 270
+__SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
+          compat_sys_process_vm_readv)
+#define __NR_process_vm_writev 271
+__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
+          compat_sys_process_vm_writev)
 
 #undef __NR_syscalls
-#define __NR_syscalls 270
+#define __NR_syscalls 272
 
 /*
  * All syscalls below here should go away really,
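The two new numbers wire up process_vm_readv() and process_vm_writev(), which
copy data directly between the address spaces of two processes. A small
userspace example of the read side; it targets its own pid so it runs without
extra privileges, assuming a kernel and glibc new enough to provide the
wrapper:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
        char src[] = "hello from the 'remote' process";
        char dst[sizeof(src)];
        struct iovec local  = { .iov_base = dst, .iov_len = sizeof(dst) };
        struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };

        ssize_t n = process_vm_readv(getpid(), &local, 1, &remote, 1, 0);
        if (n < 0) {
                perror("process_vm_readv");
                return 1;
        }
        printf("copied %zd bytes: %s\n", n, dst);
        return 0;
}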
index f81676f1b3105636bde40ebd0c99d5e7265f7600..14b6cd022284dfec490a2119b06de2ecb9386b76 100644 (file)
        {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x675B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x675D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x677B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6843, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6849, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x68fa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
        {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+       {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
        {0, 0, 0}
 
 #define r128_PCI_IDS \
index c7a6d3b5bc7bb010051c887e5e0c64fc7adca113..94acd8172b5bfcb0986014916533501f90ac047d 100644 (file)
@@ -805,9 +805,6 @@ extern void blk_unprep_request(struct request *);
  */
 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
                                        spinlock_t *lock, int node_id);
-extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
-                                                          request_fn_proc *,
-                                                          spinlock_t *, int node_id);
 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
                                                      request_fn_proc *, spinlock_t *);
index c86c940d1de3a58b73fd4027b80524daca997478..081147da05642dbf98d53052e96f109301368910 100644 (file)
@@ -71,7 +71,7 @@ struct timecounter {
 
 /**
  * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
- * @tc:                Pointer to cycle counter.
+ * @cc:                Pointer to cycle counter.
  * @cycles:    Cycles
  *
  * XXX - This could use some mult_lxl_ll() asm optimization. Same code
@@ -114,7 +114,7 @@ extern u64 timecounter_read(struct timecounter *tc);
  *                        time base as values returned by
  *                        timecounter_read()
  * @tc:                Pointer to time counter.
- * @cycle:     a value returned by tc->cc->read()
+ * @cycle_tstamp:      a value returned by tc->cc->read()
  *
  * Cycle counts that are converted correctly as long as they
  * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
@@ -156,11 +156,12 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
  * @mult:              cycle to nanosecond multiplier
  * @shift:             cycle to nanosecond divisor (power of two)
  * @max_idle_ns:       max idle time permitted by the clocksource (nsecs)
- * @maxadj             maximum adjustment value to mult (~11%)
+ * @maxadj:            maximum adjustment value to mult (~11%)
  * @flags:             flags describing special properties
  * @archdata:          arch-specific data
  * @suspend:           suspend function for the clocksource, if necessary
  * @resume:            resume function for the clocksource, if necessary
+ * @cycle_last:                most recent cycle counter value seen by ::read()
  */
 struct clocksource {
        /*
@@ -187,6 +188,7 @@ struct clocksource {
        void (*suspend)(struct clocksource *cs);
        void (*resume)(struct clocksource *cs);
 
+       /* private: */
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
        /* Watchdog related data, used by the framework */
        struct list_head wd_list;
@@ -261,6 +263,9 @@ static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
 
 /**
  * clocksource_cyc2ns - converts clocksource cycles to nanoseconds
+ * @cycles:    cycles
+ * @mult:      cycle to nanosecond multiplier
+ * @shift:     cycle to nanosecond divisor (power of two)
  *
  * Converts cycles to nanoseconds, using the given mult and shift.
  *
index 154bf56830156876d56c1ced7d4c1e6c96973805..66ed067fb7291e89f1548718e581d1ed348c4120 100644 (file)
@@ -552,5 +552,14 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
 
 extern void __user *compat_alloc_user_space(unsigned long len);
 
+asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
+               const struct compat_iovec __user *lvec,
+               unsigned long liovcnt, const struct compat_iovec __user *rvec,
+               unsigned long riovcnt, unsigned long flags);
+asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
+               const struct compat_iovec __user *lvec,
+               unsigned long liovcnt, const struct compat_iovec __user *rvec,
+               unsigned long riovcnt, unsigned long flags);
+
 #endif /* CONFIG_COMPAT */
 #endif /* _LINUX_COMPAT_H */
index 4df926199369622bffed05e3e60eeff8c42e4532..ed9f74f6c519a1f071348d691d69c7ed5795e938 100644 (file)
@@ -339,7 +339,8 @@ extern int d_validate(struct dentry *, struct dentry *);
  */
 extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
 
-extern char *__d_path(const struct path *path, struct path *root, char *, int);
+extern char *__d_path(const struct path *, const struct path *, char *, int);
+extern char *d_absolute_path(const struct path *, char *, int);
 extern char *d_path(const struct path *, char *, int);
 extern char *d_path_with_unreachable(const struct path *, char *, int);
 extern char *dentry_path_raw(struct dentry *, char *, int);
index ef90cbd8e1735c781083a2321760741107c2827b..57c9a8ae4f2df2127dffe7e88aee95b5f2cdce1a 100644 (file)
@@ -31,6 +31,7 @@ extern void free_dmar_iommu(struct intel_iommu *iommu);
 extern int iommu_calculate_agaw(struct intel_iommu *iommu);
 extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
 extern int dmar_disabled;
+extern int intel_iommu_enabled;
 #else
 static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
 {
@@ -44,6 +45,7 @@ static inline void free_dmar_iommu(struct intel_iommu *iommu)
 {
 }
 #define dmar_disabled  (1)
+#define intel_iommu_enabled (0)
 #endif
 
 
index e3130220ce3e3ddf7837c64b5fdadb0342f9f690..e0bc4ffb8e7f0ec42a916219ab02f43a112609d1 100644 (file)
@@ -393,8 +393,8 @@ struct inodes_stat_t {
 #include <linux/semaphore.h>
 #include <linux/fiemap.h>
 #include <linux/rculist_bl.h>
-#include <linux/shrinker.h>
 #include <linux/atomic.h>
+#include <linux/shrinker.h>
 
 #include <asm/byteorder.h>
 
@@ -1942,6 +1942,7 @@ extern int fd_statfs(int, struct kstatfs *);
 extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
 extern int freeze_super(struct super_block *super);
 extern int thaw_super(struct super_block *super);
+extern bool our_mnt(struct vfsmount *mnt);
 
 extern int current_umask(void);
 
index 96efa6794ea5293a59f40638d344005b2497f0bc..c3da42dd22baf17cbf853e6defaed7796bfba950 100644 (file)
@@ -172,6 +172,7 @@ enum {
        TRACE_EVENT_FL_FILTERED_BIT,
        TRACE_EVENT_FL_RECORDED_CMD_BIT,
        TRACE_EVENT_FL_CAP_ANY_BIT,
+       TRACE_EVENT_FL_NO_SET_FILTER_BIT,
 };
 
 enum {
@@ -179,6 +180,7 @@ enum {
        TRACE_EVENT_FL_FILTERED         = (1 << TRACE_EVENT_FL_FILTERED_BIT),
        TRACE_EVENT_FL_RECORDED_CMD     = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
        TRACE_EVENT_FL_CAP_ANY          = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
+       TRACE_EVENT_FL_NO_SET_FILTER    = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
 };
 
 struct ftrace_event_call {
index 94b1e356c02ab4fa808b5dd43d1ed9f57afbe50f..32574eef93941bab73a9b43138cd8a67511101ec 100644 (file)
@@ -126,6 +126,8 @@ extern struct cred init_cred;
 # define INIT_PERF_EVENTS(tsk)
 #endif
 
+#define INIT_TASK_COMM "swapper"
+
 /*
  *  INIT_TASK is used to set up the first task table, touch at
  * your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -162,7 +164,7 @@ extern struct cred init_cred;
        .group_leader   = &tsk,                                         \
        RCU_INIT_POINTER(.real_cred, &init_cred),                       \
        RCU_INIT_POINTER(.cred, &init_cred),                            \
-       .comm           = "swapper",                                    \
+       .comm           = INIT_TASK_COMM,                               \
        .thread         = INIT_THREAD,                                  \
        .fs             = &init_fs,                                     \
        .files          = &init_files,                                  \
index c3892fc1d5389c86c664a43ca57bffa0f008d4b8..68e67e50d028e95681f37e47ba98d2e047b01b8b 100644 (file)
@@ -557,6 +557,7 @@ struct kvm_ppc_pvinfo {
 #define KVM_CAP_MAX_VCPUS 66       /* returns max vcpus per vm */
 #define KVM_CAP_PPC_PAPR 68
 #define KVM_CAP_S390_GMAP 71
+#define KVM_CAP_TSC_DEADLINE_TIMER 72
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index f549056fb20bd5533555918cc1b1f9805c2cdcc3..87f402ccec55567330943ab774ffb12ae21c7da8 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/spinlock.h>
 #include <linux/lockdep.h>
 #include <linux/percpu.h>
+#include <linux/cpu.h>
 
 /* can make br locks by using local lock for read side, global lock for write */
 #define br_lock_init(name)     name##_lock_init()
 
 #define DEFINE_LGLOCK(name)                                            \
                                                                        \
+ DEFINE_SPINLOCK(name##_cpu_lock);                                     \
+ cpumask_t name##_cpus __read_mostly;                                  \
  DEFINE_PER_CPU(arch_spinlock_t, name##_lock);                         \
  DEFINE_LGLOCK_LOCKDEP(name);                                          \
                                                                        \
+ static int                                                            \
+ name##_lg_cpu_callback(struct notifier_block *nb,                     \
+                               unsigned long action, void *hcpu)       \
+ {                                                                     \
+       switch (action & ~CPU_TASKS_FROZEN) {                           \
+       case CPU_UP_PREPARE:                                            \
+               spin_lock(&name##_cpu_lock);                            \
+               cpu_set((unsigned long)hcpu, name##_cpus);              \
+               spin_unlock(&name##_cpu_lock);                          \
+               break;                                                  \
+       case CPU_UP_CANCELED: case CPU_DEAD:                            \
+               spin_lock(&name##_cpu_lock);                            \
+               cpu_clear((unsigned long)hcpu, name##_cpus);            \
+               spin_unlock(&name##_cpu_lock);                          \
+       }                                                               \
+       return NOTIFY_OK;                                               \
+ }                                                                     \
+ static struct notifier_block name##_lg_cpu_notifier = {               \
+       .notifier_call = name##_lg_cpu_callback,                        \
+ };                                                                    \
  void name##_lock_init(void) {                                         \
        int i;                                                          \
        LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
                lock = &per_cpu(name##_lock, i);                        \
                *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;     \
        }                                                               \
+       register_hotcpu_notifier(&name##_lg_cpu_notifier);              \
+       get_online_cpus();                                              \
+       for_each_online_cpu(i)                                          \
+               cpu_set(i, name##_cpus);                                \
+       put_online_cpus();                                              \
  }                                                                     \
  EXPORT_SYMBOL(name##_lock_init);                                      \
                                                                        \
                                                                        \
  void name##_global_lock_online(void) {                                        \
        int i;                                                          \
-       preempt_disable();                                              \
+       spin_lock(&name##_cpu_lock);                                    \
        rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_);           \
-       for_each_online_cpu(i) {                                        \
+       for_each_cpu(i, &name##_cpus) {                                 \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_lock(lock);                                   \
  void name##_global_unlock_online(void) {                              \
        int i;                                                          \
        rwlock_release(&name##_lock_dep_map, 1, _RET_IP_);              \
-       for_each_online_cpu(i) {                                        \
+       for_each_cpu(i, &name##_cpus) {                                 \
                arch_spinlock_t *lock;                                  \
                lock = &per_cpu(name##_lock, i);                        \
                arch_spin_unlock(lock);                                 \
        }                                                               \
-       preempt_enable();                                               \
+       spin_unlock(&name##_cpu_lock);                                  \
  }                                                                     \
  EXPORT_SYMBOL(name##_global_unlock_online);                           \
                                                                        \
index 25b808631cd92c50d10cf6a31b2d9b9942b62ac9..fd7ff3d91e6a920ff084beca09d10b5b9abba981 100644 (file)
@@ -185,7 +185,6 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
 #define rounddown_pow_of_two(n)                        \
 (                                              \
        __builtin_constant_p(n) ? (             \
-               (n == 1) ? 0 :                  \
                (1UL << ilog2(n))) :            \
        __rounddown_pow_of_two(n)               \
  )
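Editor's note: the log2.h hunk above drops the constant-folding special case that made rounddown_pow_of_two(1) evaluate to 0, so the constant path now agrees with __rounddown_pow_of_two() and yields 1. A hedged userspace approximation of the same rounding (using a GCC builtin rather than the kernel macro) behaves as follows:

#include <stdio.h>

/* Round down to the nearest power of two; valid for n >= 1. */
static unsigned long rounddown_pow_of_two_demo(unsigned long n)
{
	return 1UL << (sizeof(unsigned long) * 8 - 1 - __builtin_clzl(n));
}

int main(void)
{
	unsigned long tests[] = { 1, 2, 3, 7, 8, 1000 };

	for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("rounddown_pow_of_two(%lu) = %lu\n",
		       tests[i], rounddown_pow_of_two_demo(tests[i]));
	return 0;
}

The first test case prints 1, matching the post-patch behaviour for a constant argument of 1.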
index 3dc3a8c2c4858a1d3400aa2d5fd029d36a1177c6..4baadd18f4ad3402f47fbd2ac919bafba519bed4 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/mmzone.h>
 #include <linux/rbtree.h>
 #include <linux/prio_tree.h>
+#include <linux/atomic.h>
 #include <linux/debug_locks.h>
 #include <linux/mm_types.h>
 #include <linux/range.h>
index 415f2db414e17cd7c885b84e88b7b806746d3fe2..c8ef9bc54d500d0df052d4bd433e4b24d1855bb1 100644 (file)
@@ -218,6 +218,7 @@ struct mmc_card {
 #define MMC_QUIRK_INAND_CMD38  (1<<6)          /* iNAND devices have broken CMD38 */
 #define MMC_QUIRK_BLK_NO_CMD23 (1<<7)          /* Avoid CMD23 for regular multiblock */
 #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8)  /* Avoid sending 512 bytes in */
+#define MMC_QUIRK_LONG_READ_TIME (1<<9)                /* Data read time > CSD says */
                                                /* byte mode */
        unsigned int    poweroff_notify_state;  /* eMMC4.5 notify feature */
 #define MMC_NO_POWER_NOTIFICATION      0
@@ -433,6 +434,11 @@ static inline int mmc_card_broken_byte_mode_512(const struct mmc_card *c)
        return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512;
 }
 
+static inline int mmc_card_long_read_time(const struct mmc_card *c)
+{
+       return c->quirks & MMC_QUIRK_LONG_READ_TIME;
+}
+
 #define mmc_card_name(c)       ((c)->cid.prod_name)
 #define mmc_card_id(c)         (dev_name(&(c)->dev))
 
index cbeb5867cff79d7d70952cc6285063c3feb92e7c..a82ad4dd306a657565f5c9987f4b888468325ba2 100644 (file)
@@ -2536,6 +2536,8 @@ extern void               net_disable_timestamp(void);
 extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
 extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
 extern void dev_seq_stop(struct seq_file *seq, void *v);
+extern int dev_seq_open_ops(struct inode *inode, struct file *file,
+                           const struct seq_operations *ops);
 #endif
 
 extern int netdev_class_create_file(struct class_attribute *class_attr);
index 172ba70306d1e77a4591a0474ad58c1df9460796..2aaee0ca9da847ec447abc59df56c916f18bc6ac 100644 (file)
 #define PCI_DEVICE_ID_AMD_11H_NB_DRAM  0x1302
 #define PCI_DEVICE_ID_AMD_11H_NB_MISC  0x1303
 #define PCI_DEVICE_ID_AMD_11H_NB_LINK  0x1304
+#define PCI_DEVICE_ID_AMD_15H_NB_F0    0x1600
+#define PCI_DEVICE_ID_AMD_15H_NB_F1    0x1601
+#define PCI_DEVICE_ID_AMD_15H_NB_F2    0x1602
 #define PCI_DEVICE_ID_AMD_15H_NB_F3    0x1603
 #define PCI_DEVICE_ID_AMD_15H_NB_F4    0x1604
+#define PCI_DEVICE_ID_AMD_15H_NB_F5    0x1605
 #define PCI_DEVICE_ID_AMD_CNB17H_F3    0x1703
 #define PCI_DEVICE_ID_AMD_LANCE                0x2000
 #define PCI_DEVICE_ID_AMD_LANCE_HOME   0x2001
index 1e9ebe5e0091e7fa1b1fabb72f72af979f35104c..b1f89122bf6a820102f43714fd42246d1dadd207 100644 (file)
@@ -822,6 +822,7 @@ struct perf_event {
        int                             mmap_locked;
        struct user_struct              *mmap_user;
        struct ring_buffer              *rb;
+       struct list_head                rb_entry;
 
        /* poll related */
        wait_queue_head_t               waitq;
index c5336705921fdae6a3feb8ea99dfa252ae012c52..7281d5acf2f971a2fbae880b7935ac37cdea01df 100644 (file)
@@ -30,7 +30,7 @@
  */
 
 struct tc_stats {
-       __u64   bytes;                  /* NUmber of enqueues bytes */
+       __u64   bytes;                  /* Number of enqueued bytes */
        __u32   packets;                /* Number of enqueued packets   */
        __u32   drops;                  /* Packets dropped because of lack of resources */
        __u32   overlimits;             /* Number of throttle events when this
@@ -297,7 +297,7 @@ struct tc_htb_glob {
        __u32 debug;            /* debug flags */
 
        /* stats */
-       __u32 direct_pkts; /* count of non shapped packets */
+       __u32 direct_pkts; /* count of non shaped packets */
 };
 enum {
        TCA_HTB_UNSPEC,
@@ -503,7 +503,7 @@ enum {
 };
 #define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
 
-/* State transition probablities for 4 state model */
+/* State transition probabilities for 4 state model */
 struct tc_netem_gimodel {
        __u32   p13;
        __u32   p31;
index a83833a1f7a26f589a7584252ff413333286f5ea..07ceb97d53facc505bae4b489d3cae96bd7d4021 100644 (file)
@@ -35,7 +35,7 @@ struct shrinker {
 
        /* These are for internal use */
        struct list_head list;
-       long nr;        /* objs pending delete */
+       atomic_long_t nr_in_batch; /* objs pending delete */
 };
 #define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
 extern void register_shrinker(struct shrinker *);
index e2accb3164d8d969245e4567b0f308d4a35cb152..d0de882c0d96d5277f23306bf1d2884c40807292 100644 (file)
@@ -24,7 +24,7 @@ struct sigma_firmware {
 struct sigma_firmware_header {
        unsigned char magic[7];
        u8 version;
-       u32 crc;
+       __le32 crc;
 };
 
 enum {
@@ -40,19 +40,14 @@ enum {
 struct sigma_action {
        u8 instr;
        u8 len_hi;
-       u16 len;
-       u16 addr;
+       __le16 len;
+       __be16 addr;
        unsigned char payload[];
 };
 
 static inline u32 sigma_action_len(struct sigma_action *sa)
 {
-       return (sa->len_hi << 16) | sa->len;
-}
-
-static inline size_t sigma_action_size(struct sigma_action *sa, u32 payload_len)
-{
-       return sizeof(*sa) + payload_len + (payload_len % 2);
+       return (sa->len_hi << 16) | le16_to_cpu(sa->len);
 }
 
 extern int process_sigma_firmware(struct i2c_client *client, const char *name);
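Editor's note: the sigma.h hunk above marks the on-disk firmware fields with explicit endianness (__le32 crc, __le16 len, __be16 addr) and converts with le16_to_cpu(), so parsing no longer depends on the host byte order. A small userspace sketch of the same idea (helper names get_le16/get_be16 are mine, and the byte layout is only assumed to mirror struct sigma_action above):

#include <stdint.h>
#include <stdio.h>

/* Portable decoders: the wire format is fixed, the host byte order is not. */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	/* instr, len_hi, len (little-endian), addr (big-endian) */
	const uint8_t action[] = { 0x01, 0x00, 0x34, 0x12, 0xab, 0xcd };
	uint32_t len  = (action[1] << 16) | get_le16(&action[2]);
	uint16_t addr = get_be16(&action[4]);

	printf("len = 0x%x, addr = 0x%x\n", len, addr); /* 0x1234, 0xabcd */
	return 0;
}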
index b1377b931eb7e4c28b8852936af55d08ef40693c..5fb2c3d10c05047a4c50fb2e4440e7862a6a9908 100644 (file)
@@ -254,7 +254,7 @@ unsigned long soc_camera_apply_board_flags(struct soc_camera_link *icl,
 static inline struct video_device *soc_camera_i2c_to_vdev(const struct i2c_client *client)
 {
        struct v4l2_subdev *sd = i2c_get_clientdata(client);
-       struct soc_camera_device *icd = (struct soc_camera_device *)sd->grp_id;
+       struct soc_camera_device *icd = v4l2_get_subdev_hostdata(sd);
        return icd ? icd->vdev : NULL;
 }
 
@@ -279,6 +279,11 @@ static inline struct soc_camera_device *soc_camera_from_vbq(const struct videobu
        return container_of(vq, struct soc_camera_device, vb_vidq);
 }
 
+static inline u32 soc_camera_grp_id(const struct soc_camera_device *icd)
+{
+       return (icd->iface << 8) | (icd->devnum + 1);
+}
+
 void soc_camera_lock(struct vb2_queue *vq);
 void soc_camera_unlock(struct vb2_queue *vq);
 
index 4fb6c43817918992f8334c49022d5184ea45c8e4..75766b42660e2dd385b26a0144c96dc90b49cbaf 100644 (file)
@@ -53,6 +53,7 @@ struct dst_entry {
 #define DST_NOHASH             0x0008
 #define DST_NOCACHE            0x0010
 #define DST_NOCOUNT            0x0020
+#define DST_NOPEER             0x0040
 
        short                   error;
        short                   obsolete;
@@ -205,12 +206,7 @@ dst_feature(const struct dst_entry *dst, u32 feature)
 
 static inline u32 dst_mtu(const struct dst_entry *dst)
 {
-       u32 mtu = dst_metric_raw(dst, RTAX_MTU);
-
-       if (!mtu)
-               mtu = dst->ops->default_mtu(dst);
-
-       return mtu;
+       return dst->ops->mtu(dst);
 }
 
 /* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
index 9adb99845a5695b8159cc6c208bc6004ebf7b25c..e1c2ee0eef47506020f743230e923d840e64e7a9 100644 (file)
@@ -17,7 +17,7 @@ struct dst_ops {
        int                     (*gc)(struct dst_ops *ops);
        struct dst_entry *      (*check)(struct dst_entry *, __u32 cookie);
        unsigned int            (*default_advmss)(const struct dst_entry *);
-       unsigned int            (*default_mtu)(const struct dst_entry *);
+       unsigned int            (*mtu)(const struct dst_entry *);
        u32 *                   (*cow_metrics)(struct dst_entry *, unsigned long);
        void                    (*destroy)(struct dst_entry *);
        void                    (*ifdown)(struct dst_entry *,
index a09447749e2d59a467c51cde514b35cef79c9b1b..57f15a7f1cddcb4a254cd52f46c914cce176caea 100644 (file)
@@ -207,6 +207,7 @@ extern struct flow_cache_object *flow_cache_lookup(
                u8 dir, flow_resolve_t resolver, void *ctx);
 
 extern void flow_cache_flush(void);
+extern void flow_cache_flush_deferred(void);
 extern atomic_t flow_cache_genid;
 
 #endif
index b897d6e6d0a5a35f6a19b5403bbd7b5cefa47a39..f941964a9931978002b7ab4a137408b0d69c09d9 100644 (file)
@@ -31,6 +31,7 @@
 /** struct ip_options - IP Options
  *
  * @faddr - Saved first hop address
+ * @nexthop - Saved nexthop address in LSRR and SSRR
  * @is_data - Options in __data, rather than skb
  * @is_strictroute - Strict source route
  * @srr_is_hit - Packet destination addr was our one
@@ -41,6 +42,7 @@
  */
 struct ip_options {
        __be32          faddr;
+       __be32          nexthop;
        unsigned char   optlen;
        unsigned char   srr;
        unsigned char   rr;
index 78c83e62218fbfff60506e4323f72343aed2b83a..e9ff3fc5e688615b6b4df99b0aef82af0621ab57 100644 (file)
@@ -35,6 +35,7 @@ struct inet_peer {
 
        u32                     metrics[RTAX_MAX];
        u32                     rate_tokens;    /* rate limiting for ICMP */
+       int                     redirect_genid;
        unsigned long           rate_last;
        unsigned long           pmtu_expires;
        u32                     pmtu_orig;
index 873d5be7926c1bef17c83ea7c79eecafede9149e..e5a7b9aaf5526b160d747f1ef321477a9f2679cb 100644 (file)
@@ -1207,7 +1207,7 @@ extern void ip_vs_control_cleanup(void);
 extern struct ip_vs_dest *
 ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
                __be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
-               __u16 protocol, __u32 fwmark);
+               __u16 protocol, __u32 fwmark, __u32 flags);
 extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp);
 
 
index 4283508b3e185882bff18fe267f205df4eda39cc..a88fb6939387f228ac5826949f68151e0fceaf16 100644 (file)
@@ -67,18 +67,18 @@ struct nf_ct_event_notifier {
        int (*fcn)(unsigned int events, struct nf_ct_event *item);
 };
 
-extern struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
-extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb);
-extern void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb);
+extern int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *nb);
+extern void nf_conntrack_unregister_notifier(struct net *net, struct nf_ct_event_notifier *nb);
 
 extern void nf_ct_deliver_cached_events(struct nf_conn *ct);
 
 static inline void
 nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
 {
+       struct net *net = nf_ct_net(ct);
        struct nf_conntrack_ecache *e;
 
-       if (nf_conntrack_event_cb == NULL)
+       if (net->ct.nf_conntrack_event_cb == NULL)
                return;
 
        e = nf_ct_ecache_find(ct);
@@ -95,11 +95,12 @@ nf_conntrack_eventmask_report(unsigned int eventmask,
                              int report)
 {
        int ret = 0;
+       struct net *net = nf_ct_net(ct);
        struct nf_ct_event_notifier *notify;
        struct nf_conntrack_ecache *e;
 
        rcu_read_lock();
-       notify = rcu_dereference(nf_conntrack_event_cb);
+       notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
        if (notify == NULL)
                goto out_unlock;
 
@@ -164,9 +165,8 @@ struct nf_exp_event_notifier {
        int (*fcn)(unsigned int events, struct nf_exp_event *item);
 };
 
-extern struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
-extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb);
-extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb);
+extern int nf_ct_expect_register_notifier(struct net *net, struct nf_exp_event_notifier *nb);
+extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_event_notifier *nb);
 
 static inline void
 nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
@@ -174,11 +174,12 @@ nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
                          u32 pid,
                          int report)
 {
+       struct net *net = nf_ct_exp_net(exp);
        struct nf_exp_event_notifier *notify;
        struct nf_conntrack_ecache *e;
 
        rcu_read_lock();
-       notify = rcu_dereference(nf_expect_event_cb);
+       notify = rcu_dereference(net->ct.nf_expect_event_cb);
        if (notify == NULL)
                goto out_unlock;
 
index 0249399e51a773608814e6cabaeb36915d7c6533..7a911eca0f18b4a751b3410b75223080d5da4dbe 100644 (file)
@@ -18,6 +18,8 @@ struct netns_ct {
        struct hlist_nulls_head unconfirmed;
        struct hlist_nulls_head dying;
        struct ip_conntrack_stat __percpu *stat;
+       struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
+       struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
        int                     sysctl_events;
        unsigned int            sysctl_events_retry_timeout;
        int                     sysctl_acct;
index 3319f16b3beb899727c7a434e75fb1010d7ee139..b72a3b83393604b29845514942a5cb8ad8729970 100644 (file)
@@ -116,7 +116,7 @@ struct red_parms {
        u32             qR;             /* Cached random number */
 
        unsigned long   qavg;           /* Average queue length: A scaled */
-       psched_time_t   qidlestart;     /* Start of current idle period */
+       ktime_t         qidlestart;     /* Start of current idle period */
 };
 
 static inline u32 red_rmask(u8 Plog)
@@ -148,17 +148,17 @@ static inline void red_set_parms(struct red_parms *p,
 
 static inline int red_is_idling(struct red_parms *p)
 {
-       return p->qidlestart != PSCHED_PASTPERFECT;
+       return p->qidlestart.tv64 != 0;
 }
 
 static inline void red_start_of_idle_period(struct red_parms *p)
 {
-       p->qidlestart = psched_get_time();
+       p->qidlestart = ktime_get();
 }
 
 static inline void red_end_of_idle_period(struct red_parms *p)
 {
-       p->qidlestart = PSCHED_PASTPERFECT;
+       p->qidlestart.tv64 = 0;
 }
 
 static inline void red_restart(struct red_parms *p)
@@ -170,13 +170,10 @@ static inline void red_restart(struct red_parms *p)
 
 static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
 {
-       psched_time_t now;
-       long us_idle;
+       s64 delta = ktime_us_delta(ktime_get(), p->qidlestart);
+       long us_idle = min_t(s64, delta, p->Scell_max);
        int  shift;
 
-       now = psched_get_time();
-       us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max);
-
        /*
         * The problem: ideally, average length queue recalcultion should
         * be done over constant clock intervals. This is too expensive, so
index db7b3432f07c41ce124c9d2a792035cead8b048d..91855d185b537f96fc0ea09134c96a93b63aa3d3 100644 (file)
@@ -71,12 +71,12 @@ struct rtable {
        struct fib_info         *fi; /* for client ref to shared metrics */
 };
 
-static inline bool rt_is_input_route(struct rtable *rt)
+static inline bool rt_is_input_route(const struct rtable *rt)
 {
        return rt->rt_route_iif != 0;
 }
 
-static inline bool rt_is_output_route(struct rtable *rt)
+static inline bool rt_is_output_route(const struct rtable *rt)
 {
        return rt->rt_route_iif == 0;
 }
index e90e7a9935ddc5c70c8e920487bc1196c2cc49e2..a15432da27c3a911ef2669c736e43cdd18b2daa0 100644 (file)
@@ -241,6 +241,9 @@ extern struct sctp_globals {
         * bits is an indicator of when to send and window update SACK.
         */
        int rwnd_update_shift;
+
+       /* Threshold for autoclose timeout, in seconds. */
+       unsigned long max_autoclose;
 } sctp_globals;
 
 #define sctp_rto_initial               (sctp_globals.rto_initial)
@@ -281,6 +284,7 @@ extern struct sctp_globals {
 #define sctp_auth_enable               (sctp_globals.auth_enable)
 #define sctp_checksum_disable          (sctp_globals.checksum_disable)
 #define sctp_rwnd_upd_shift            (sctp_globals.rwnd_update_shift)
+#define sctp_max_autoclose             (sctp_globals.max_autoclose)
 
 /* SCTP Socket type: UDP or TCP style. */
 typedef enum {
index abb6e0f0c3c3e59b79f23fc72d2e9d8d389e3774..32e39371fba627ebfaab66713e645889ccb7ba47 100644 (file)
@@ -637,12 +637,14 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 
 /*
  * Take into account size of receive queue and backlog queue
+ * Do not take into account this skb truesize,
+ * to allow even a single big packet to come.
  */
 static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
 {
        unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-       return qsize + skb->truesize > sk->sk_rcvbuf;
+       return qsize > sk->sk_rcvbuf;
 }
 
 /* The per-socket spinlock must be held here. */
index d1e95c6ac7769b2d7e328eefddf8c6f4a3077010..5a35a2a2d3c514bef92ef7bed3d32216cc08c206 100644 (file)
@@ -147,6 +147,7 @@ struct fcoe_ctlr {
        u8 map_dest;
        u8 spma;
        u8 probe_tries;
+       u8 priority;
        u8 dest_addr[ETH_ALEN];
        u8 ctl_src_addr[ETH_ALEN];
 
@@ -301,6 +302,7 @@ struct fcoe_percpu_s {
  * @lport:                    The associated local port
  * @fcoe_pending_queue:               The pending Rx queue of skbs
  * @fcoe_pending_queue_active: Indicates if the pending queue is active
+ * @priority:                 Packet priority (DCB)
  * @max_queue_depth:          Max queue depth of pending queue
  * @min_queue_depth:          Min queue depth of pending queue
  * @timer:                    The queue timer
@@ -316,6 +318,7 @@ struct fcoe_port {
        struct fc_lport       *lport;
        struct sk_buff_head   fcoe_pending_queue;
        u8                    fcoe_pending_queue_active;
+       u8                    priority;
        u32                   max_queue_depth;
        u32                   min_queue_depth;
        struct timer_list     timer;
index 7f5fed3c89e1808f6f2535ea06bb20cdd772b7f6..6873c7dd9145d2a23f682d9e8d2c695a7571d656 100644 (file)
@@ -103,9 +103,10 @@ enum se_cmd_flags_table {
        SCF_SCSI_NON_DATA_CDB           = 0x00000040,
        SCF_SCSI_CDB_EXCEPTION          = 0x00000080,
        SCF_SCSI_RESERVATION_CONFLICT   = 0x00000100,
-       SCF_SE_CMD_FAILED               = 0x00000400,
+       SCF_FUA                         = 0x00000200,
        SCF_SE_LUN_CMD                  = 0x00000800,
        SCF_SE_ALLOW_EOO                = 0x00001000,
+       SCF_BIDI                        = 0x00002000,
        SCF_SENT_CHECK_CONDITION        = 0x00004000,
        SCF_OVERFLOW_BIT                = 0x00008000,
        SCF_UNDERFLOW_BIT               = 0x00010000,
@@ -154,6 +155,7 @@ enum tcm_sense_reason_table {
        TCM_CHECK_CONDITION_ABORT_CMD           = 0x0d,
        TCM_CHECK_CONDITION_UNIT_ATTENTION      = 0x0e,
        TCM_CHECK_CONDITION_NOT_READY           = 0x0f,
+       TCM_RESERVATION_CONFLICT                = 0x10,
 };
 
 struct se_obj {
@@ -211,7 +213,6 @@ struct t10_alua_lu_gp {
        u16     lu_gp_id;
        int     lu_gp_valid_id;
        u32     lu_gp_members;
-       atomic_t lu_gp_shutdown;
        atomic_t lu_gp_ref_cnt;
        spinlock_t lu_gp_lock;
        struct config_group lu_gp_group;
@@ -422,11 +423,9 @@ struct se_cmd {
        int                     sam_task_attr;
        /* Transport protocol dependent state, see transport_state_table */
        enum transport_state_table t_state;
-       /* Transport specific error status */
-       int                     transport_error_status;
        /* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
-       int                     check_release:1;
-       int                     cmd_wait_set:1;
+       unsigned                check_release:1;
+       unsigned                cmd_wait_set:1;
        /* See se_cmd_flags_table */
        u32                     se_cmd_flags;
        u32                     se_ordered_id;
@@ -441,13 +440,10 @@ struct se_cmd {
        /* Used for sense data */
        void                    *sense_buffer;
        struct list_head        se_delayed_node;
-       struct list_head        se_ordered_node;
        struct list_head        se_lun_node;
        struct list_head        se_qf_node;
        struct se_device      *se_dev;
        struct se_dev_entry   *se_deve;
-       struct se_device        *se_obj_ptr;
-       struct se_device        *se_orig_obj_ptr;
        struct se_lun           *se_lun;
        /* Only used for internal passthrough and legacy TCM fabric modules */
        struct se_session       *se_sess;
@@ -463,8 +459,6 @@ struct se_cmd {
        unsigned char           __t_task_cdb[TCM_MAX_COMMAND_SIZE];
        unsigned long long      t_task_lba;
        int                     t_tasks_failed;
-       int                     t_tasks_fua;
-       bool                    t_tasks_bidi;
        u32                     t_tasks_sg_chained_no;
        atomic_t                t_fe_count;
        atomic_t                t_se_count;
@@ -489,14 +483,6 @@ struct se_cmd {
 
        struct work_struct      work;
 
-       /*
-        * Used for pre-registered fabric SGL passthrough WRITE and READ
-        * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
-        * and other HW target mode fabric modules.
-        */
-       struct scatterlist      *t_task_pt_sgl;
-       u32                     t_task_pt_sgl_num;
-
        struct scatterlist      *t_data_sg;
        unsigned int            t_data_nents;
        struct scatterlist      *t_bidi_data_sg;
@@ -562,7 +548,7 @@ struct se_node_acl {
 } ____cacheline_aligned;
 
 struct se_session {
-       int                     sess_tearing_down:1;
+       unsigned                sess_tearing_down:1;
        u64                     sess_bin_isid;
        struct se_node_acl      *se_node_acl;
        struct se_portal_group *se_tpg;
@@ -683,7 +669,6 @@ struct se_subsystem_dev {
        struct t10_reservation t10_pr;
        spinlock_t      se_dev_lock;
        void            *se_dev_su_ptr;
-       struct list_head se_dev_node;
        struct config_group se_dev_group;
        /* For T10 Reservations */
        struct config_group se_dev_pr_group;
@@ -692,9 +677,6 @@ struct se_subsystem_dev {
 } ____cacheline_aligned;
 
 struct se_device {
-       /* Set to 1 if thread is NOT sleeping on thread_sem */
-       u8                      thread_active;
-       u8                      dev_status_timer_flags;
        /* RELATIVE TARGET PORT IDENTIFER Counter */
        u16                     dev_rpti_counter;
        /* Used for SAM Task Attribute ordering */
@@ -719,14 +701,10 @@ struct se_device {
        u64                     write_bytes;
        spinlock_t              stats_lock;
        /* Active commands on this virtual SE device */
-       atomic_t                active_cmds;
        atomic_t                simple_cmds;
        atomic_t                depth_left;
        atomic_t                dev_ordered_id;
-       atomic_t                dev_tur_active;
        atomic_t                execute_tasks;
-       atomic_t                dev_status_thr_count;
-       atomic_t                dev_hoq_count;
        atomic_t                dev_ordered_sync;
        atomic_t                dev_qf_count;
        struct se_obj           dev_obj;
@@ -734,14 +712,9 @@ struct se_device {
        struct se_obj           dev_export_obj;
        struct se_queue_obj     dev_queue_obj;
        spinlock_t              delayed_cmd_lock;
-       spinlock_t              ordered_cmd_lock;
        spinlock_t              execute_task_lock;
-       spinlock_t              state_task_lock;
-       spinlock_t              dev_alua_lock;
        spinlock_t              dev_reservation_lock;
-       spinlock_t              dev_state_lock;
        spinlock_t              dev_status_lock;
-       spinlock_t              dev_status_thr_lock;
        spinlock_t              se_port_lock;
        spinlock_t              se_tmr_lock;
        spinlock_t              qf_cmd_lock;
@@ -753,14 +726,10 @@ struct se_device {
        struct t10_pr_registration *dev_pr_res_holder;
        struct list_head        dev_sep_list;
        struct list_head        dev_tmr_list;
-       struct timer_list       dev_status_timer;
        /* Pointer to descriptor for processing thread */
        struct task_struct      *process_thread;
-       pid_t                   process_thread_pid;
-       struct task_struct              *dev_mgmt_thread;
        struct work_struct      qf_work_queue;
        struct list_head        delayed_cmd_list;
-       struct list_head        ordered_cmd_list;
        struct list_head        execute_task_list;
        struct list_head        state_task_list;
        struct list_head        qf_cmd_list;
@@ -771,8 +740,6 @@ struct se_device {
        struct se_subsystem_api *transport;
        /* Linked list for struct se_hba struct se_device list */
        struct list_head        dev_list;
-       /* Linked list for struct se_global->g_se_dev_list */
-       struct list_head        g_se_dev_list;
 }  ____cacheline_aligned;
 
 struct se_hba {
@@ -834,7 +801,6 @@ struct se_port {
        u32             sep_index;
        struct scsi_port_stats sep_stats;
        /* Used for ALUA Target Port Groups membership */
-       atomic_t        sep_tg_pt_gp_active;
        atomic_t        sep_tg_pt_secondary_offline;
        /* Used for PR ALL_TG_PT=1 */
        atomic_t        sep_tg_pt_ref_cnt;
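Editor's note: among the target-core cleanups above, check_release:1, cmd_wait_set:1 and sess_tearing_down:1 switch from int to unsigned bit-fields. With common compilers (GCC included) a plain int bit-field of width 1 is effectively signed and can only hold 0 and -1, which makes comparisons against 1 surprising. A standalone illustration with hypothetical field names, not the TCM structures themselves:

#include <stdio.h>

struct flags {
	int		s_flag:1;	/* typically signed: stores 0 or -1 */
	unsigned	u_flag:1;	/* stores 0 or 1 */
};

int main(void)
{
	struct flags f = { .s_flag = 1, .u_flag = 1 };

	printf("s_flag = %d, u_flag = %u\n", f.s_flag, f.u_flag);
	printf("s_flag == 1? %s\n", f.s_flag == 1 ? "yes" : "no");
	printf("u_flag == 1? %s\n", f.u_flag == 1 ? "yes" : "no");
	return 0;
}

On most toolchains this prints s_flag = -1 and "s_flag == 1? no", which is the class of bug the unsigned bit-fields avoid. Whether a plain int bit-field is signed is implementation-defined, hence the hedging.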
index c16e9431dd01bb40d748fc315d04d103d77a351d..dac4f2d859fd72734fbc54ffda839216fe554079 100644 (file)
 
 #define PYX_TRANSPORT_STATUS_INTERVAL          5 /* In seconds */
 
-#define PYX_TRANSPORT_SENT_TO_TRANSPORT                0
-#define PYX_TRANSPORT_WRITE_PENDING            1
-
-#define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE       -1
-#define PYX_TRANSPORT_HBA_QUEUE_FULL           -2
-#define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS     -3
-#define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES  -4
-#define PYX_TRANSPORT_INVALID_CDB_FIELD                -5
-#define PYX_TRANSPORT_INVALID_PARAMETER_LIST   -6
-#define PYX_TRANSPORT_LU_COMM_FAILURE          -7
-#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE                -8
-#define PYX_TRANSPORT_WRITE_PROTECTED          -9
-#define PYX_TRANSPORT_RESERVATION_CONFLICT     -10
-#define PYX_TRANSPORT_ILLEGAL_REQUEST          -11
-#define PYX_TRANSPORT_USE_SENSE_REASON         -12
-
-#ifndef SAM_STAT_RESERVATION_CONFLICT
-#define SAM_STAT_RESERVATION_CONFLICT          0x18
-#endif
-
-#define TRANSPORT_PLUGIN_FREE                  0
-#define TRANSPORT_PLUGIN_REGISTERED            1
-
 #define TRANSPORT_PLUGIN_PHBA_PDEV             1
 #define TRANSPORT_PLUGIN_VHBA_PDEV             2
 #define TRANSPORT_PLUGIN_VHBA_VDEV             3
@@ -158,7 +135,6 @@ extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
 extern int transport_handle_cdb_direct(struct se_cmd *);
 extern int transport_generic_handle_cdb_map(struct se_cmd *);
 extern int transport_generic_handle_data(struct se_cmd *);
-extern void transport_new_cmd_failure(struct se_cmd *);
 extern int transport_generic_handle_tmr(struct se_cmd *);
 extern bool target_stop_task(struct se_task *task, unsigned long *flags);
 extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
index b99caa8b780c624af834caeab4130549479e3844..99d1d0decf88e41a7c0c038d463330e8351046b1 100644 (file)
                {I_REFERENCED,          "I_REFERENCED"}         \
        )
 
+#define WB_WORK_REASON                                                 \
+               {WB_REASON_BACKGROUND,          "background"},          \
+               {WB_REASON_TRY_TO_FREE_PAGES,   "try_to_free_pages"},   \
+               {WB_REASON_SYNC,                "sync"},                \
+               {WB_REASON_PERIODIC,            "periodic"},            \
+               {WB_REASON_LAPTOP_TIMER,        "laptop_timer"},        \
+               {WB_REASON_FREE_MORE_MEM,       "free_more_memory"},    \
+               {WB_REASON_FS_FREE_SPACE,       "fs_free_space"},       \
+               {WB_REASON_FORKER_THREAD,       "forker_thread"}
+
 struct wb_writeback_work;
 
 DECLARE_EVENT_CLASS(writeback_work_class,
@@ -55,7 +65,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
                  __entry->for_kupdate,
                  __entry->range_cyclic,
                  __entry->for_background,
-                 wb_reason_name[__entry->reason]
+                 __print_symbolic(__entry->reason, WB_WORK_REASON)
        )
 );
 #define DEFINE_WRITEBACK_WORK_EVENT(name) \
@@ -184,7 +194,8 @@ TRACE_EVENT(writeback_queue_io,
                __entry->older, /* older_than_this in jiffies */
                __entry->age,   /* older_than_this in relative milliseconds */
                __entry->moved,
-               wb_reason_name[__entry->reason])
+               __print_symbolic(__entry->reason, WB_WORK_REASON)
+       )
 );
 
 TRACE_EVENT(global_dirty_state,
index f0b6890370be053e25feadfd7bf0eeeacb78f800..f6f07aa35af5f8b0e91347d9707ffd08eccd577f 100644 (file)
@@ -29,8 +29,7 @@ enum xsd_sockmsg_type
     XS_IS_DOMAIN_INTRODUCED,
     XS_RESUME,
     XS_SET_TARGET,
-    XS_RESTRICT,
-    XS_RESET_WATCHES
+    XS_RESTRICT
 };
 
 #define XS_WRITE_NONE "NONE"
index 2e0ecfcc881dd124e3b2a2d868971fd1e6dea6f8..5b4293d9819d87b636d09d63224df43535e44fc4 100644 (file)
@@ -1269,7 +1269,7 @@ void mq_clear_sbinfo(struct ipc_namespace *ns)
 
 void mq_put_mnt(struct ipc_namespace *ns)
 {
-       mntput(ns->mq_mnt);
+       kern_unmount(ns->mq_mnt);
 }
 
 static int __init init_mqueue_fs(void)
@@ -1291,11 +1291,9 @@ static int __init init_mqueue_fs(void)
 
        spin_lock_init(&mq_lock);
 
-       init_ipc_ns.mq_mnt = kern_mount_data(&mqueue_fs_type, &init_ipc_ns);
-       if (IS_ERR(init_ipc_ns.mq_mnt)) {
-               error = PTR_ERR(init_ipc_ns.mq_mnt);
+       error = mq_init_ns(&init_ipc_ns);
+       if (error)
                goto out_filesystem;
-       }
 
        return 0;
 
index 8b5ce5d3f3ef3e4f468d5afc4175cd22518fc029..5652101cdac03d42ca9645017cc2a4f8df6537cb 100644 (file)
@@ -27,11 +27,6 @@ DEFINE_SPINLOCK(mq_lock);
  */
 struct ipc_namespace init_ipc_ns = {
        .count          = ATOMIC_INIT(1),
-#ifdef CONFIG_POSIX_MQUEUE
-       .mq_queues_max   = DFLT_QUEUESMAX,
-       .mq_msg_max      = DFLT_MSGMAX,
-       .mq_msgsize_max  = DFLT_MSGSIZEMAX,
-#endif
        .user_ns = &init_user_ns,
 };
 
index d9d5648f3cdcc4bcde3a7a67afe845aa89dc2a1f..a184470cf9b51c896826986deca941dd5dcf3d89 100644 (file)
@@ -2098,11 +2098,6 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
                        continue;
                /* get old css_set pointer */
                task_lock(tsk);
-               if (tsk->flags & PF_EXITING) {
-                       /* ignore this task if it's going away */
-                       task_unlock(tsk);
-                       continue;
-               }
                oldcg = tsk->cgroups;
                get_css_set(oldcg);
                task_unlock(tsk);
index 9fe58c46a426de719da9636d8c74d8c9f6bb312d..0b1712dba587fdee93709798b14389f3f1d85b34 100644 (file)
@@ -123,6 +123,19 @@ static inline struct cpuset *task_cs(struct task_struct *task)
                            struct cpuset, css);
 }
 
+#ifdef CONFIG_NUMA
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+       return task->mempolicy;
+}
+#else
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+       return false;
+}
+#endif
+
+
 /* bits in struct cpuset flags field */
 typedef enum {
        CS_CPU_EXCLUSIVE,
@@ -949,7 +962,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 static void cpuset_change_task_nodemask(struct task_struct *tsk,
                                        nodemask_t *newmems)
 {
-       bool masks_disjoint = !nodes_intersects(*newmems, tsk->mems_allowed);
+       bool need_loop;
 
 repeat:
        /*
@@ -962,6 +975,14 @@ repeat:
                return;
 
        task_lock(tsk);
+       /*
+        * Determine if a loop is necessary if another thread is doing
+        * get_mems_allowed().  If at least one node remains unchanged and
+        * tsk does not have a mempolicy, then an empty nodemask will not be
+        * possible when mems_allowed is larger than a word.
+        */
+       need_loop = task_has_mempolicy(tsk) ||
+                       !nodes_intersects(*newmems, tsk->mems_allowed);
        nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
        mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
 
@@ -981,11 +1002,9 @@ repeat:
 
        /*
         * Allocation of memory is very fast, we needn't sleep when waiting
-        * for the read-side.  No wait is necessary, however, if at least one
-        * node remains unchanged.
+        * for the read-side.
         */
-       while (masks_disjoint &&
-                       ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
+       while (need_loop && ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
                task_unlock(tsk);
                if (!task_curr(tsk))
                        yield();
index 0e8457da6f9551c3eae667a6ac09a735a341b4bf..58690af323e469213db42bce2c0d1a772db12519 100644 (file)
@@ -185,6 +185,9 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
 static void update_context_time(struct perf_event_context *ctx);
 static u64 perf_event_time(struct perf_event *event);
 
+static void ring_buffer_attach(struct perf_event *event,
+                              struct ring_buffer *rb);
+
 void __weak perf_event_print_debug(void)       { }
 
 extern __weak const char *perf_pmu_name(void)
@@ -2171,9 +2174,10 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
         */
        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-       perf_event_sched_in(cpuctx, ctx, task);
+       if (ctx->nr_events)
+               cpuctx->task_ctx = ctx;
 
-       cpuctx->task_ctx = ctx;
+       perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
 
        perf_pmu_enable(ctx->pmu);
        perf_ctx_unlock(cpuctx, ctx);
@@ -3190,12 +3194,33 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
        struct ring_buffer *rb;
        unsigned int events = POLL_HUP;
 
+       /*
+        * Race between perf_event_set_output() and perf_poll(): perf_poll()
+        * grabs the rb reference but perf_event_set_output() overrides it.
+        * Here is the timeline for two threads T1, T2:
+        * t0: T1, rb = rcu_dereference(event->rb)
+        * t1: T2, old_rb = event->rb
+        * t2: T2, event->rb = new rb
+        * t3: T2, ring_buffer_detach(old_rb)
+        * t4: T1, ring_buffer_attach(rb1)
+        * t5: T1, poll_wait(event->waitq)
+        *
+        * To avoid this problem, we grab mmap_mutex in perf_poll()
+        * thereby ensuring that the assignment of the new ring buffer
+        * and the detachment of the old buffer appear atomic to perf_poll()
+        */
+       mutex_lock(&event->mmap_mutex);
+
        rcu_read_lock();
        rb = rcu_dereference(event->rb);
-       if (rb)
+       if (rb) {
+               ring_buffer_attach(event, rb);
                events = atomic_xchg(&rb->poll, 0);
+       }
        rcu_read_unlock();
 
+       mutex_unlock(&event->mmap_mutex);
+
        poll_wait(file, &event->waitq, wait);
 
        return events;
@@ -3496,6 +3521,53 @@ unlock:
        return ret;
 }
 
+static void ring_buffer_attach(struct perf_event *event,
+                              struct ring_buffer *rb)
+{
+       unsigned long flags;
+
+       if (!list_empty(&event->rb_entry))
+               return;
+
+       spin_lock_irqsave(&rb->event_lock, flags);
+       if (!list_empty(&event->rb_entry))
+               goto unlock;
+
+       list_add(&event->rb_entry, &rb->event_list);
+unlock:
+       spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_detach(struct perf_event *event,
+                              struct ring_buffer *rb)
+{
+       unsigned long flags;
+
+       if (list_empty(&event->rb_entry))
+               return;
+
+       spin_lock_irqsave(&rb->event_lock, flags);
+       list_del_init(&event->rb_entry);
+       wake_up_all(&event->waitq);
+       spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_wakeup(struct perf_event *event)
+{
+       struct ring_buffer *rb;
+
+       rcu_read_lock();
+       rb = rcu_dereference(event->rb);
+       if (!rb)
+               goto unlock;
+
+       list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
+               wake_up_all(&event->waitq);
+
+unlock:
+       rcu_read_unlock();
+}
+
 static void rb_free_rcu(struct rcu_head *rcu_head)
 {
        struct ring_buffer *rb;
@@ -3521,9 +3593,19 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
 
 static void ring_buffer_put(struct ring_buffer *rb)
 {
+       struct perf_event *event, *n;
+       unsigned long flags;
+
        if (!atomic_dec_and_test(&rb->refcount))
                return;
 
+       spin_lock_irqsave(&rb->event_lock, flags);
+       list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
+               list_del_init(&event->rb_entry);
+               wake_up_all(&event->waitq);
+       }
+       spin_unlock_irqrestore(&rb->event_lock, flags);
+
        call_rcu(&rb->rcu_head, rb_free_rcu);
 }
 
@@ -3546,6 +3628,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
                atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
                vma->vm_mm->pinned_vm -= event->mmap_locked;
                rcu_assign_pointer(event->rb, NULL);
+               ring_buffer_detach(event, rb);
                mutex_unlock(&event->mmap_mutex);
 
                ring_buffer_put(rb);
@@ -3700,7 +3783,7 @@ static const struct file_operations perf_fops = {
 
 void perf_event_wakeup(struct perf_event *event)
 {
-       wake_up_all(&event->waitq);
+       ring_buffer_wakeup(event);
 
        if (event->pending_kill) {
                kill_fasync(&event->fasync, SIGIO, event->pending_kill);
@@ -5822,6 +5905,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        INIT_LIST_HEAD(&event->group_entry);
        INIT_LIST_HEAD(&event->event_entry);
        INIT_LIST_HEAD(&event->sibling_list);
+       INIT_LIST_HEAD(&event->rb_entry);
+
        init_waitqueue_head(&event->waitq);
        init_irq_work(&event->pending, perf_pending_event);
 
@@ -6028,6 +6113,8 @@ set:
 
        old_rb = event->rb;
        rcu_assign_pointer(event->rb, rb);
+       if (old_rb)
+               ring_buffer_detach(event, old_rb);
        ret = 0;
 unlock:
        mutex_unlock(&event->mmap_mutex);
index 09097dd8116c0e0bf5120d4da26c5f539a7f600a..64568a699375f105232eb588963da8707f926295 100644 (file)
@@ -22,6 +22,9 @@ struct ring_buffer {
        local_t                         lost;           /* nr records lost   */
 
        long                            watermark;      /* wakeup watermark  */
+       /* poll crap */
+       spinlock_t                      event_lock;
+       struct list_head                event_list;
 
        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0];
index a2a29205cc0fc10913277132162e0a515a944552..7f3011c6b57fa3288c7e4c46a555736763765a84 100644 (file)
@@ -209,6 +209,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
                rb->writable = 1;
 
        atomic_set(&rb->refcount, 1);
+
+       INIT_LIST_HEAD(&rb->event_list);
+       spin_lock_init(&rb->event_lock);
 }
 
 #ifndef CONFIG_PERF_USE_VMALLOC
index ea87f4d2f455c8c99164cb555287ad1c9fb165e3..1614be20173dcb19ef45e65fa25d07f8d3c65194 100644 (file)
@@ -314,17 +314,29 @@ again:
 #endif
 
        lock_page(page_head);
+
+       /*
+        * If page_head->mapping is NULL, then it cannot be a PageAnon
+        * page; but it might be the ZERO_PAGE or in the gate area or
+        * in a special mapping (all cases which we are happy to fail);
+        * or it may have been a good file page when get_user_pages_fast
+        * found it, but truncated or holepunched or subjected to
+        * invalidate_complete_page2 before we got the page lock (also
+        * cases which we are happy to fail).  And we hold a reference,
+        * so refcount care in invalidate_complete_page's remove_mapping
+        * prevents drop_caches from setting mapping to NULL beneath us.
+        *
+        * The case we do have to guard against is when memory pressure made
+        * shmem_writepage move it from filecache to swapcache beneath us:
+        * an unlikely race, but we do need to retry for page_head->mapping.
+        */
        if (!page_head->mapping) {
+               int shmem_swizzled = PageSwapCache(page_head);
                unlock_page(page_head);
                put_page(page_head);
-               /*
-               * ZERO_PAGE pages don't have a mapping. Avoid a busy loop
-               * trying to find one. RW mapping would have COW'd (and thus
-               * have a mapping) so this page is RO and won't ever change.
-               */
-               if ((page_head == ZERO_PAGE(address)))
-                       return -EFAULT;
-               goto again;
+               if (shmem_swizzled)
+                       goto again;
+               return -EFAULT;
        }
 
        /*
index 0e2b179bc7b371ea8aa60f52899ae60ff1cb5ec2..1da999f5e746caedacb8b40d0015e7c3f273ad04 100644 (file)
@@ -623,8 +623,9 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
 
 static int irq_wait_for_interrupt(struct irqaction *action)
 {
+       set_current_state(TASK_INTERRUPTIBLE);
+
        while (!kthread_should_stop()) {
-               set_current_state(TASK_INTERRUPTIBLE);
 
                if (test_and_clear_bit(IRQTF_RUNTHREAD,
                                       &action->thread_flags)) {
@@ -632,7 +633,9 @@ static int irq_wait_for_interrupt(struct irqaction *action)
                        return 0;
                }
                schedule();
+               set_current_state(TASK_INTERRUPTIBLE);
        }
+       __set_current_state(TASK_RUNNING);
        return -1;
 }
 
index bbdfe2a462a088b210d5792c674b215274b1b39b..66ff7109f6970ca63cb4aa9bf6b4800d69ef2f3a 100644 (file)
@@ -66,8 +66,9 @@ void jump_label_inc(struct jump_label_key *key)
                return;
 
        jump_label_lock();
-       if (atomic_add_return(1, &key->enabled) == 1)
+       if (atomic_read(&key->enabled) == 0)
                jump_label_update(key, JUMP_LABEL_ENABLE);
+       atomic_inc(&key->enabled);
        jump_label_unlock();
 }
 
index e69434b070da3f922909ece9417627e11234dcd6..b2e08c932d91c6507f07d637f61f0df1205778b8 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/stringify.h>
 #include <linux/bitops.h>
 #include <linux/gfp.h>
+#include <linux/kmemcheck.h>
 
 #include <asm/sections.h>
 
@@ -2948,7 +2949,12 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
                      struct lock_class_key *key, int subclass)
 {
-       memset(lock, 0, sizeof(*lock));
+       int i;
+
+       kmemcheck_mark_initialized(lock, sizeof(*lock));
+
+       for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+               lock->class_cache[i] = NULL;
 
 #ifdef CONFIG_LOCK_STAT
        lock->cpu = raw_smp_processor_id();
index 1455a0d4eedd4b386c759d689f939ba5d7a9007a..7982a0a841eaf082fe929e24a1f4bd5aefd4f015 100644 (file)
@@ -1293,10 +1293,11 @@ again:
        raw_spin_lock(&logbuf_lock);
        if (con_start != log_end)
                retry = 1;
+       raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
        if (retry && console_trylock())
                goto again;
 
-       raw_spin_unlock_irqrestore(&logbuf_lock, flags);
        if (wake_klogd)
                wake_up_klogd();
 }
index 0e9344a71be33f6335bf55fd3a7bfe0f52418162..d6b149ccf925c320841e8a42f31fd23b6ee64dc6 100644 (file)
@@ -71,6 +71,7 @@
 #include <linux/ctype.h>
 #include <linux/ftrace.h>
 #include <linux/slab.h>
+#include <linux/init_task.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -4810,6 +4811,9 @@ EXPORT_SYMBOL(wait_for_completion);
  * This waits for either a completion of a specific task to be signaled or for a
  * specified timeout to expire. The timeout is in jiffies. It is not
  * interruptible.
+ *
+ * The return value is 0 if timed out, and positive (at least 1, or number of
+ * jiffies left till timeout) if completed.
  */
 unsigned long __sched
 wait_for_completion_timeout(struct completion *x, unsigned long timeout)
@@ -4824,6 +4828,8 @@ EXPORT_SYMBOL(wait_for_completion_timeout);
  *
  * This waits for completion of a specific task to be signaled. It is
  * interruptible.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
  */
 int __sched wait_for_completion_interruptible(struct completion *x)
 {
@@ -4841,6 +4847,9 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
  *
  * This waits for either a completion of a specific task to be signaled or for a
  * specified timeout to expire. It is interruptible. The timeout is in jiffies.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
  */
 long __sched
 wait_for_completion_interruptible_timeout(struct completion *x,
@@ -4856,6 +4865,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
  *
  * This waits to be signaled for completion of a specific task. It can be
  * interrupted by a kill signal.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
  */
 int __sched wait_for_completion_killable(struct completion *x)
 {
@@ -4874,6 +4885,9 @@ EXPORT_SYMBOL(wait_for_completion_killable);
  * This waits for either a completion of a specific task to be
  * signaled or for a specified timeout to expire. It can be
  * interrupted by a kill signal. The timeout is in jiffies.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
  */
 long __sched
 wait_for_completion_killable_timeout(struct completion *x,
@@ -6099,6 +6113,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         */
        idle->sched_class = &idle_sched_class;
        ftrace_graph_init_idle_task(idle, cpu);
+#if defined(CONFIG_SMP)
+       sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
+#endif
 }
 
 /*
index 5c9e67923b7cfd7826903c17322c3f0c55de5d74..8a39fa3e3c6c7bafe368bc2e27c607801e8afb87 100644 (file)
@@ -772,19 +772,32 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
                list_del_leaf_cfs_rq(cfs_rq);
 }
 
+static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+{
+       long tg_weight;
+
+       /*
+        * Use this CPU's actual weight instead of the last load_contribution
+        * to gain a more accurate current total weight. See
+        * update_cfs_rq_load_contribution().
+        */
+       tg_weight = atomic_read(&tg->load_weight);
+       tg_weight -= cfs_rq->load_contribution;
+       tg_weight += cfs_rq->load.weight;
+
+       return tg_weight;
+}
+
 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
-       long load_weight, load, shares;
+       long tg_weight, load, shares;
 
+       tg_weight = calc_tg_weight(tg, cfs_rq);
        load = cfs_rq->load.weight;
 
-       load_weight = atomic_read(&tg->load_weight);
-       load_weight += load;
-       load_weight -= cfs_rq->load_contribution;
-
        shares = (tg->shares * load);
-       if (load_weight)
-               shares /= load_weight;
+       if (tg_weight)
+               shares /= tg_weight;
 
        if (shares < MIN_SHARES)
                shares = MIN_SHARES;
@@ -1743,7 +1756,7 @@ static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
-       if (!cfs_rq->runtime_enabled || !cfs_rq->nr_running)
+       if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
                return;
 
        __return_cfs_rq_runtime(cfs_rq);
@@ -2036,36 +2049,100 @@ static void task_waking_fair(struct task_struct *p)
  * Adding load to a group doesn't make a group heavier, but can cause movement
  * of group shares between cpus. Assuming the shares were perfectly aligned one
  * can calculate the shift in shares.
+ *
+ * Calculate the effective load difference if @wl is added (subtracted) to @tg
+ * on this @cpu and results in a total addition (subtraction) of @wg to the
+ * total group weight.
+ *
+ * Given a runqueue weight distribution (rw_i) we can compute a shares
+ * distribution (s_i) using:
+ *
+ *   s_i = rw_i / \Sum rw_j                                            (1)
+ *
+ * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
+ * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
+ * shares distribution (s_i):
+ *
+ *   rw_i = {   2,   4,   1,   0 }
+ *   s_i  = { 2/7, 4/7, 1/7,   0 }
+ *
+ * As per wake_affine() we're interested in the load of two CPUs (the CPU the
+ * task used to run on and the CPU the waker is running on), we need to
+ * compute the effect of waking a task on either CPU and, in case of a sync
+ * wakeup, compute the effect of the current task going to sleep.
+ *
+ * So for a change of @wl to the local @cpu with an overall group weight change
+ * of @wl we can compute the new shares distribution (s'_i) using:
+ *
+ *   s'_i = (rw_i + @wl) / (@wg + \Sum rw_j)                           (2)
+ *
+ * Suppose we're interested in CPUs 0 and 1, and want to compute the load
+ * differences in waking a task to CPU 0. The additional task changes the
+ * weight and shares distributions like:
+ *
+ *   rw'_i = {   3,   4,   1,   0 }
+ *   s'_i  = { 3/8, 4/8, 1/8,   0 }
+ *
+ * We can then compute the difference in effective weight by using:
+ *
+ *   dw_i = S * (s'_i - s_i)                                           (3)
+ *
+ * Where 'S' is the group weight as seen by its parent.
+ *
+ * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
+ * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
+ * 4/7) times the weight of the group.
  */
 static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 {
        struct sched_entity *se = tg->se[cpu];
 
-       if (!tg->parent)
+       if (!tg->parent)        /* the trivial, non-cgroup case */
                return wl;
 
        for_each_sched_entity(se) {
-               long lw, w;
+               long w, W;
 
                tg = se->my_q->tg;
-               w = se->my_q->load.weight;
 
-               /* use this cpu's instantaneous contribution */
-               lw = atomic_read(&tg->load_weight);
-               lw -= se->my_q->load_contribution;
-               lw += w + wg;
+               /*
+                * W = @wg + \Sum rw_j
+                */
+               W = wg + calc_tg_weight(tg, se->my_q);
 
-               wl += w;
+               /*
+                * w = rw_i + @wl
+                */
+               w = se->my_q->load.weight + wl;
 
-               if (lw > 0 && wl < lw)
-                       wl = (wl * tg->shares) / lw;
+               /*
+                * wl = S * s'_i; see (2)
+                */
+               if (W > 0 && w < W)
+                       wl = (w * tg->shares) / W;
                else
                        wl = tg->shares;
 
-               /* zero point is MIN_SHARES */
+               /*
+                * Per the above, wl is the new se->load.weight value; since
+                * those are clipped to [MIN_SHARES, ...) do so now. See
+                * calc_cfs_shares().
+                */
                if (wl < MIN_SHARES)
                        wl = MIN_SHARES;
+
+               /*
+                * wl = dw_i = S * (s'_i - s_i); see (3)
+                */
                wl -= se->load.weight;
+
+               /*
+                * Recursively apply this logic to all parent groups to compute
+                * the final effective load change on the root group. Since
+                * only the @tg group gets extra weight, all parent groups can
+                * only redistribute existing shares. @wl is the shift in shares
+                * resulting from this level per the above.
+                */
                wg = 0;
        }
 
@@ -2249,7 +2326,8 @@ static int select_idle_sibling(struct task_struct *p, int target)
        int cpu = smp_processor_id();
        int prev_cpu = task_cpu(p);
        struct sched_domain *sd;
-       int i;
+       struct sched_group *sg;
+       int i, smt = 0;
 
        /*
         * If the task is going to be woken-up on this cpu and if it is
@@ -2269,25 +2347,40 @@ static int select_idle_sibling(struct task_struct *p, int target)
         * Otherwise, iterate the domains and find an eligible idle cpu.
         */
        rcu_read_lock();
+again:
        for_each_domain(target, sd) {
+               if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
+                       continue;
+
+               if (smt && !(sd->flags & SD_SHARE_CPUPOWER))
+                       break;
+
                if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
                        break;
 
-               for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
-                       if (idle_cpu(i)) {
-                               target = i;
-                               break;
+               sg = sd->groups;
+               do {
+                       if (!cpumask_intersects(sched_group_cpus(sg),
+                                               tsk_cpus_allowed(p)))
+                               goto next;
+
+                       for_each_cpu(i, sched_group_cpus(sg)) {
+                               if (!idle_cpu(i))
+                                       goto next;
                        }
-               }
 
-               /*
-                * Lets stop looking for an idle sibling when we reached
-                * the domain that spans the current cpu and prev_cpu.
-                */
-               if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
-                   cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
-                       break;
+                       target = cpumask_first_and(sched_group_cpus(sg),
+                                       tsk_cpus_allowed(p));
+                       goto done;
+next:
+                       sg = sg->next;
+               } while (sg != sd->groups);
+       }
+       if (!smt) {
+               smt = 1;
+               goto again;
        }
+done:
        rcu_read_unlock();
 
        return target;
@@ -3511,7 +3604,7 @@ static bool update_sd_pick_busiest(struct sched_domain *sd,
 }
 
 /**
- * update_sd_lb_stats - Update sched_group's statistics for load balancing.
+ * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
  * @sd: sched_domain whose statistics are to be updated.
  * @this_cpu: Cpu for which load balance is currently performed.
  * @idle: Idle status of this_cpu
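
The shares arithmetic spelled out in the effective_load() comment above (equations (1)-(3) and the 7-task example) can be reproduced with a small standalone sketch. This is illustrative userspace C, not kernel code; the group weight S of 1024 and the per-cpu runqueue weights are the hypothetical values from the comment.

/* Standalone sketch of equations (1)-(3) from the effective_load() comment.
 * Mirrors the worked example: rw = {2, 4, 1, 0}, one task of weight 1 woken
 * on CPU 0 (wl = wg = 1); S is the group weight as seen by the parent.
 */
#include <stdio.h>

int main(void)
{
        const double rw[4] = { 2, 4, 1, 0 };    /* per-cpu runqueue weights */
        const double S = 1024;                  /* assumed parent-visible group weight */
        const double wl = 1, wg = 1;            /* weight added on CPU 0 / to the group */
        double sum = 0;
        int i;

        for (i = 0; i < 4; i++)
                sum += rw[i];

        for (i = 0; i < 4; i++) {
                double s  = rw[i] / sum;                                /* (1) */
                double sp = (rw[i] + (i == 0 ? wl : 0)) / (sum + wg);   /* (2) */
                double dw = S * (sp - s);                               /* (3) */

                printf("cpu%d: s=%.4f s'=%.4f dw=%+.2f\n", i, s, sp, dw);
        }
        return 0;
}

Compiled and run, it prints dw_0 = 5/56 * S and dw_1 = -4/56 * S, matching the figures quoted in the comment.
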
index efa0a7b75dde7408e89bd07e5b1a490c4f68ea95..84802245abd2562acad3c0eb734fe48cc5213b8e 100644 (file)
@@ -67,3 +67,4 @@ SCHED_FEAT(NONTASK_POWER, 1)
 SCHED_FEAT(TTWU_QUEUE, 1)
 
 SCHED_FEAT(FORCE_SD_OVERLAP, 0)
+SCHED_FEAT(RT_RUNTIME_SHARE, 1)
index 056cbd2e2a27fea8cb15e76bfc711fe32de03303..583a1368afe6ed7d96879d762f73553b2068e27c 100644 (file)
@@ -560,6 +560,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
 {
        int more = 0;
 
+       if (!sched_feat(RT_RUNTIME_SHARE))
+               return more;
+
        if (rt_rq->rt_time > rt_rq->rt_runtime) {
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                more = do_balance_runtime(rt_rq);
index 6318b511afa10b3044c7bbd1eb49b28f7c490fcc..a650694883a180e93c5ec1d6414e45ba904fcff3 100644 (file)
@@ -1354,7 +1354,7 @@ static ssize_t binary_sysctl(const int *name, int nlen,
 
        fput(file);
 out_putname:
-       putname(pathname);
+       __putname(pathname);
 out:
        return result;
 }
index c436e790b21bf7cd89878eb9d92146e1b1c511d6..8a46f5d64504f15dcaf31ec4f5fcee7ea15a8bdf 100644 (file)
@@ -195,7 +195,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
                struct alarm *alarm;
                ktime_t expired = next->expires;
 
-               if (expired.tv64 >= now.tv64)
+               if (expired.tv64 > now.tv64)
                        break;
 
                alarm = container_of(next, struct alarm, node);
index cfc65e1eb9fbba411fad95fabee1b78f0e6517f3..d3ad022136e56b97f7b51ec1a3fa018889a4944b 100644 (file)
@@ -548,7 +548,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
         * note a margin of 12.5% is used because this can be computed with
         * a shift, versus say 10% which would require division.
         */
-       return max_nsecs - (max_nsecs >> 5);
+       return max_nsecs - (max_nsecs >> 3);
 }
 
 #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
@@ -647,7 +647,7 @@ static void clocksource_enqueue(struct clocksource *cs)
 
 /**
  * __clocksource_updatefreq_scale - Used to update clocksource with new freq
- * @t:         clocksource to be registered
+ * @cs:                clocksource to be registered
  * @scale:     Scale factor multiplied against freq to get clocksource hz
  * @freq:      clocksource frequency (cycles per second) divided by scale
  *
@@ -669,7 +669,7 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
         * ~ 0.06ppm granularity for NTP. We apply the same 12.5%
         * margin as we do in clocksource_max_deferment()
         */
-       sec = (cs->mask - (cs->mask >> 5));
+       sec = (cs->mask - (cs->mask >> 3));
        do_div(sec, freq);
        do_div(sec, scale);
        if (!sec)
@@ -699,7 +699,7 @@ EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
 
 /**
  * __clocksource_register_scale - Used to install new clocksources
- * @t:         clocksource to be registered
+ * @cs:                clocksource to be registered
  * @scale:     Scale factor multiplied against freq to get clocksource hz
  * @freq:      clocksource frequency (cycles per second) divided by scale
  *
@@ -727,7 +727,7 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale);
 
 /**
  * clocksource_register - Used to install new clocksources
- * @t:         clocksource to be registered
+ * @cs:                clocksource to be registered
  *
  * Returns -EBUSY if registration fails, zero otherwise.
  */
@@ -761,6 +761,8 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
 
 /**
  * clocksource_change_rating - Change the rating of a registered clocksource
+ * @cs:                clocksource to be changed
+ * @rating:    new rating
  */
 void clocksource_change_rating(struct clocksource *cs, int rating)
 {
@@ -772,6 +774,7 @@ EXPORT_SYMBOL(clocksource_change_rating);
 
 /**
  * clocksource_unregister - remove a registered clocksource
+ * @cs:        clocksource to be unregistered
  */
 void clocksource_unregister(struct clocksource *cs)
 {
@@ -787,6 +790,7 @@ EXPORT_SYMBOL(clocksource_unregister);
 /**
  * sysfs_show_current_clocksources - sysfs interface for current clocksource
  * @dev:       unused
+ * @attr:      unused
  * @buf:       char buffer to be filled with clocksource list
  *
  * Provides sysfs interface for listing current clocksource.
@@ -807,6 +811,7 @@ sysfs_show_current_clocksources(struct sys_device *dev,
 /**
  * sysfs_override_clocksource - interface for manually overriding clocksource
  * @dev:       unused
+ * @attr:      unused
  * @buf:       name of override clocksource
  * @count:     length of buffer
  *
@@ -842,6 +847,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
 /**
  * sysfs_show_available_clocksources - sysfs interface for listing clocksource
  * @dev:       unused
+ * @attr:      unused
  * @buf:       char buffer to be filled with clocksource list
  *
  * Provides sysfs interface for listing registered clocksources
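
Both clocksource hunks above widen the safety margin from max_nsecs >> 5 (about 3.1%) to max_nsecs >> 3 (12.5%), while keeping the comment's point that the margin stays a cheap shift rather than a division. A minimal userspace sketch of that computation, with a made-up deferment budget:

/* Illustration only: subtracting (x >> 3) keeps 7/8 of the budget, i.e. a
 * 12.5% safety margin, computed with a shift instead of a division.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t max_deferment_with_margin(uint64_t max_nsecs)
{
        return max_nsecs - (max_nsecs >> 3);
}

int main(void)
{
        uint64_t max_nsecs = 600000000000ULL;   /* hypothetical ~600s budget */

        printf("usable deferment: %llu ns\n",
               (unsigned long long)max_deferment_with_margin(max_nsecs));
        return 0;
}
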
index f954282d9a82758acf951960392e37b5b07c6014..fd4a7b1625a20b35fad917817e136fdbd2d48e62 100644 (file)
@@ -71,7 +71,7 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
             (dev->features & CLOCK_EVT_FEAT_C3STOP))
                return 0;
 
-       clockevents_exchange_device(NULL, dev);
+       clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
        tick_broadcast_device.evtdev = dev;
        if (!cpumask_empty(tick_get_broadcast_mask()))
                tick_broadcast_start_periodic(dev);
index dbaa62422b13c057754252d986f97440ec7dd3af..9c3c62b0c4bc89ebd307ff79950039021d157861 100644 (file)
@@ -1368,7 +1368,7 @@ SYSCALL_DEFINE0(getppid)
        int pid;
 
        rcu_read_lock();
-       pid = task_tgid_vnr(current->real_parent);
+       pid = task_tgid_vnr(rcu_dereference(current->real_parent));
        rcu_read_unlock();
 
        return pid;
index 900b409543db10cfc46b9da703f463a1cee9b78e..b1e8943fed1d3a9fd61916527c59c70d57af7d2c 100644 (file)
@@ -152,7 +152,6 @@ void clear_ftrace_function(void)
        ftrace_pid_function = ftrace_stub;
 }
 
-#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 /*
  * For those archs that do not test ftrace_trace_stop in their
@@ -1212,7 +1211,9 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
        if (!src->count) {
                free_ftrace_hash_rcu(*dst);
                rcu_assign_pointer(*dst, EMPTY_HASH);
-               return 0;
+               /* still need to update the function records */
+               ret = 0;
+               goto out;
        }
 
        /*
index 581876f9f3872e9103a0110893396652599a1d6a..c212a7f934ec4841d8c9887ecf93f6a0b7526b27 100644 (file)
@@ -1078,7 +1078,6 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
        /* First see if we did not already create this dir */
        list_for_each_entry(system, &event_subsystems, list) {
                if (strcmp(system->name, name) == 0) {
-                       __get_system(system);
                        system->nr_events++;
                        return system->entry;
                }
index 816d3d074979306713836d9447382cb641aecb54..95dc31efd6dd503dbd169a159dac4c5048f5a568 100644 (file)
@@ -1649,7 +1649,9 @@ static int replace_system_preds(struct event_subsystem *system,
                 */
                err = replace_preds(call, NULL, ps, filter_string, true);
                if (err)
-                       goto fail;
+                       call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+               else
+                       call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
        }
 
        list_for_each_entry(call, &ftrace_events, list) {
@@ -1658,6 +1660,9 @@ static int replace_system_preds(struct event_subsystem *system,
                if (strcmp(call->class->system, system->name) != 0)
                        continue;
 
+               if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
+                       continue;
+
                filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
                if (!filter_item)
                        goto fail_mem;
@@ -1686,7 +1691,7 @@ static int replace_system_preds(struct event_subsystem *system,
                 * replace the filter for the call.
                 */
                filter = call->filter;
-               call->filter = filter_item->filter;
+               rcu_assign_pointer(call->filter, filter_item->filter);
                filter_item->filter = filter;
 
                fail = false;
@@ -1741,7 +1746,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
                filter = call->filter;
                if (!filter)
                        goto out_unlock;
-               call->filter = NULL;
+               RCU_INIT_POINTER(call->filter, NULL);
                /* Make sure the filter is not being used */
                synchronize_sched();
                __free_filter(filter);
@@ -1782,7 +1787,7 @@ out:
         * string
         */
        tmp = call->filter;
-       call->filter = filter;
+       rcu_assign_pointer(call->filter, filter);
        if (tmp) {
                /* Make sure the call is done with the filter */
                synchronize_sched();
index 74c6c7fce74900ec9870a15313268c87f9e1ecc3..fea790a2b17659e9b701987101db7929bdfbe8df 100644 (file)
@@ -245,7 +245,7 @@ static void put_hash_bucket(struct hash_bucket *bucket,
 
 static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
 {
-       return ((a->dev_addr == a->dev_addr) &&
+       return ((a->dev_addr == b->dev_addr) &&
                (a->dev == b->dev)) ? true : false;
 }
 
index c0018f2d50e04e2ea03045989b742254be0a8489..5f0a3c91fdac043437392bbe141b45c30cdcf66e 100644 (file)
@@ -1828,7 +1828,7 @@ repeat:
                page = __page_cache_alloc(gfp | __GFP_COLD);
                if (!page)
                        return ERR_PTR(-ENOMEM);
-               err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
+               err = add_to_page_cache_lru(page, mapping, index, gfp);
                if (unlikely(err)) {
                        page_cache_release(page);
                        if (err == -EEXIST)
@@ -1925,10 +1925,7 @@ static struct page *wait_on_page_read(struct page *page)
  * @gfp:       the page allocator flags to use if allocating
  *
  * This is the same as "read_mapping_page(mapping, index, NULL)", but with
- * any new page allocations done using the specified allocation flags. Note
- * that the Radix tree operations will still use GFP_KERNEL, so you can't
- * expect to do this atomically or anything like that - but you can pass in
- * other page requirements.
+ * any new page allocations done using the specified allocation flags.
  *
  * If the page does not get brought uptodate, return -EIO.
  */
@@ -2407,7 +2404,6 @@ static ssize_t generic_perform_write(struct file *file,
                                                iov_iter_count(i));
 
 again:
-
                /*
                 * Bring in the user page that we will copy from _first_.
                 * Otherwise there's a nasty deadlock on copying from the
@@ -2463,7 +2459,10 @@ again:
                written += copied;
 
                balance_dirty_pages_ratelimited(mapping);
-
+               if (fatal_signal_pending(current)) {
+                       status = -EINTR;
+                       break;
+               }
        } while (iov_iter_count(i));
 
        return written ? written : status;
index 4298abaae153033caafe1f8ac641fdef1dee097e..36b3d988b4ef6ac8c263ee0732c1d08513afb04f 100644 (file)
@@ -2259,12 +2259,8 @@ static void khugepaged_do_scan(struct page **hpage)
 
 static void khugepaged_alloc_sleep(void)
 {
-       DEFINE_WAIT(wait);
-       add_wait_queue(&khugepaged_wait, &wait);
-       schedule_timeout_interruptible(
-               msecs_to_jiffies(
-                       khugepaged_alloc_sleep_millisecs));
-       remove_wait_queue(&khugepaged_wait, &wait);
+       wait_event_freezable_timeout(khugepaged_wait, false,
+                       msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
 }
 
 #ifndef CONFIG_NUMA
@@ -2313,14 +2309,10 @@ static void khugepaged_loop(void)
                if (unlikely(kthread_should_stop()))
                        break;
                if (khugepaged_has_work()) {
-                       DEFINE_WAIT(wait);
                        if (!khugepaged_scan_sleep_millisecs)
                                continue;
-                       add_wait_queue(&khugepaged_wait, &wait);
-                       schedule_timeout_interruptible(
-                               msecs_to_jiffies(
-                                       khugepaged_scan_sleep_millisecs));
-                       remove_wait_queue(&khugepaged_wait, &wait);
+                       wait_event_freezable_timeout(khugepaged_wait, false,
+                           msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
                } else if (khugepaged_enabled())
                        wait_event_freezable(khugepaged_wait,
                                             khugepaged_wait_event());
index bb28a5f9db8ddbf2f65391fe9206ca99b46a256b..2316840b337a37447d2d8cbe294f53df71e53575 100644 (file)
@@ -576,6 +576,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                __SetPageTail(p);
+               set_page_count(p, 0);
                p->first_page = page;
        }
 }
@@ -900,7 +901,6 @@ retry:
        h->resv_huge_pages += delta;
        ret = 0;
 
-       spin_unlock(&hugetlb_lock);
        /* Free the needed pages to the hugetlb pool */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
                if ((--needed) < 0)
@@ -914,6 +914,7 @@ retry:
                VM_BUG_ON(page_count(page));
                enqueue_huge_page(h, page);
        }
+       spin_unlock(&hugetlb_lock);
 
        /* Free unnecessary surplus pages to the buddy allocator */
 free:
index 6aff93c98acaf6020eabd37f2e9b49a04135f187..b63f5f7dfa071225487ebf5baa365f5b3baebe3b 100644 (file)
@@ -4907,9 +4907,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
                int cpu;
                enable_swap_cgroup();
                parent = NULL;
-               root_mem_cgroup = memcg;
                if (mem_cgroup_soft_limit_tree_init())
                        goto free_out;
+               root_mem_cgroup = memcg;
                for_each_possible_cpu(cpu) {
                        struct memcg_stock_pcp *stock =
                                                &per_cpu(memcg_stock, cpu);
@@ -4948,7 +4948,6 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
        return &memcg->css;
 free_out:
        __mem_cgroup_free(memcg);
-       root_mem_cgroup = NULL;
        return ERR_PTR(error);
 }
 
index adc395481813532efe82dced222e79115b075b0e..c3fdbcb17658ce405131e5b0310e1857fd6558bc 100644 (file)
@@ -636,6 +636,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
        struct vm_area_struct *prev;
        struct vm_area_struct *vma;
        int err = 0;
+       pgoff_t pgoff;
        unsigned long vmstart;
        unsigned long vmend;
 
@@ -643,13 +644,21 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
        if (!vma || vma->vm_start > start)
                return -EFAULT;
 
+       if (start > vma->vm_start)
+               prev = vma;
+
        for (; vma && vma->vm_start < end; prev = vma, vma = next) {
                next = vma->vm_next;
                vmstart = max(start, vma->vm_start);
                vmend   = min(end, vma->vm_end);
 
+               if (mpol_equal(vma_policy(vma), new_pol))
+                       continue;
+
+               pgoff = vma->vm_pgoff +
+                       ((vmstart - vma->vm_start) >> PAGE_SHIFT);
                prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
-                                 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
+                                 vma->anon_vma, vma->vm_file, pgoff,
                                  new_pol);
                if (prev) {
                        vma = prev;
index 578e29174fa6a0b84e8cbb10f8bf22ffd9a37b57..177aca424a069ac1ae1b44d48a8e6d992cd42a4d 100644 (file)
@@ -871,9 +871,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 
        if (anon_vma)
                put_anon_vma(anon_vma);
-out:
        unlock_page(hpage);
 
+out:
        if (rc != -EAGAIN) {
                list_del(&hpage->lru);
                put_page(hpage);
index 76f2c5ae908e85a858c006d932edcdf9128e0cc2..069b64e521fccf2725199550653bfdb7055d6f81 100644 (file)
@@ -176,7 +176,7 @@ static bool oom_unkillable_task(struct task_struct *p,
 unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
                      const nodemask_t *nodemask, unsigned long totalpages)
 {
-       int points;
+       long points;
 
        if (oom_unkillable_task(p, mem, nodemask))
                return 0;
index 71252486bc6f1f161b87592ded6e8b7e6c0dc8ee..50f08241f9815668d73b1adfcd5c9585f11bbc15 100644 (file)
@@ -411,8 +411,13 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
  *
  * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
  * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
- * And the "limit" in the name is not seriously taken as hard limit in
- * balance_dirty_pages().
+ *
+ * Note that balance_dirty_pages() will only seriously take it as a hard limit
+ * when sleeping max_pause per page is not enough to keep the dirty pages under
+ * control. For example, when the device is completely stalled due to some error
+ * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
+ * In other, normal situations it acts more gently by throttling the tasks
+ * more (rather than completely blocking them) when the bdi dirty pages go high.
  *
  * It allocates high/low dirty limits to fast/slow devices, in order to prevent
  * - starving fast devices
@@ -594,6 +599,13 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
         */
        if (unlikely(bdi_thresh > thresh))
                bdi_thresh = thresh;
+       /*
+        * It's very possible that bdi_thresh is close to 0 not because the
+        * device is slow, but that it has remained inactive for a long time.
+        * Honour such devices with a reasonably good (hopefully IO-efficient)
+        * threshold, so that occasional writes won't be blocked and active
+        * writes can ramp up the threshold quickly.
+        */
        bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
        /*
         * scale global setpoint to bdi's:
@@ -977,8 +989,7 @@ static unsigned long bdi_max_pause(struct backing_dev_info *bdi,
         *
         * 8 serves as the safety ratio.
         */
-       if (bdi_dirty)
-               t = min(t, bdi_dirty * HZ / (8 * bw + 1));
+       t = min(t, bdi_dirty * HZ / (8 * bw + 1));
 
        /*
         * The pause time will be settled within range (max_pause/4, max_pause).
@@ -1136,6 +1147,19 @@ pause:
                if (task_ratelimit)
                        break;
 
+               /*
+                * In the case of an unresponsive NFS server whose NFS dirty
+                * pages exceed dirty_thresh, give the other good bdi's a pipe
+                * to go through, so that tasks on them still remain responsive.
+                *
+                * In theory 1 page is enough to keep the consumer-producer
+                * pipe going: the flusher cleans 1 page => the task dirties 1
+                * more page. However bdi_dirty has accounting errors.  So use
+                * the larger and more IO friendly bdi_stat_error.
+                */
+               if (bdi_dirty <= bdi_stat_error(bdi))
+                       break;
+
                if (fatal_signal_pending(current))
                        break;
        }
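
The bdi_max_pause() hunk above drops the bdi_dirty guard and always bounds the pause by bdi_dirty * HZ / (8 * bw + 1), i.e. roughly an eighth of the time the device would need to write back its current dirty pages, with the +1 avoiding a division by zero. A rough userspace sketch of that bound, using invented values for HZ, dirty pages and bandwidth:

/* Rough sketch of the max-pause bound: never sleep longer than ~1/8 of the
 * time needed to write back the bdi's dirty pages at bandwidth bw.
 * HZ and the sample values are userspace stand-ins, not kernel state.
 */
#include <stdio.h>

#define HZ 100UL

static unsigned long max_pause(unsigned long bdi_dirty, unsigned long bw)
{
        unsigned long t = HZ;   /* assumed upper cap of one second */
        unsigned long bound = bdi_dirty * HZ / (8 * bw + 1);

        return t < bound ? t : bound;
}

int main(void)
{
        printf("pause: %lu jiffies\n", max_pause(1200, 1000 /* pages/s */));
        return 0;
}
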
index 9dd443d89d8be665813bbeb4e17e54fafde46428..2b8ba3aebf6e2c6b46b0d12dfea058ee3ab022fe 100644 (file)
@@ -356,8 +356,8 @@ void prep_compound_page(struct page *page, unsigned long order)
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;
-
                __SetPageTail(p);
+               set_page_count(p, 0);
                p->first_page = page;
        }
 }
@@ -3377,9 +3377,15 @@ static void setup_zone_migrate_reserve(struct zone *zone)
        unsigned long block_migratetype;
        int reserve;
 
-       /* Get the start pfn, end pfn and the number of blocks to reserve */
+       /*
+        * Get the start pfn, end pfn and the number of blocks to reserve.
+        * We have to be careful to be aligned to pageblock_nr_pages to
+        * make sure that we always check pfn_valid for the first page in
+        * the block.
+        */
        start_pfn = zone->zone_start_pfn;
        end_pfn = start_pfn + zone->spanned_pages;
+       start_pfn = roundup(start_pfn, pageblock_nr_pages);
        reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
                                                        pageblock_order;
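
The setup_zone_migrate_reserve() hunk rounds start_pfn up to a pageblock boundary so every block it inspects begins on a pfn that can be validated. The rounding itself is plain roundup(); a tiny sketch with an arbitrary pageblock size:

/* Tiny illustration of roundup(start_pfn, pageblock_nr_pages); the
 * pageblock size used here is an arbitrary stand-in.
 */
#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL

static unsigned long roundup_ul(unsigned long x, unsigned long align)
{
        return ((x + align - 1) / align) * align;
}

int main(void)
{
        unsigned long start_pfn = 1000;

        printf("%lu rounds up to %lu\n", start_pfn,
               roundup_ul(start_pfn, PAGEBLOCK_NR_PAGES));
        return 0;
}
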
 
index 3bb810a72006cd65e345e16fd124b61e029964db..716eb4acf2fc29cf9500ad3972eb00b5413ab446 100644 (file)
@@ -1023,9 +1023,11 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
                if (!is_vmalloc_addr(addr))
                        return __pa(addr);
                else
-                       return page_to_phys(vmalloc_to_page(addr));
+                       return page_to_phys(vmalloc_to_page(addr)) +
+                              offset_in_page(addr);
        } else
-               return page_to_phys(pcpu_addr_to_page(addr));
+               return page_to_phys(pcpu_addr_to_page(addr)) +
+                      offset_in_page(addr);
 }
 
 /**
index 708efe886154da626cfe01f4a2b6eaf055ada914..83311c9aaf9de0ad8494f04703c248f155cfc2c1 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -595,6 +595,7 @@ static enum {
        PARTIAL_AC,
        PARTIAL_L3,
        EARLY,
+       LATE,
        FULL
 } g_cpucache_up;
 
@@ -671,7 +672,7 @@ static void init_node_lock_keys(int q)
 {
        struct cache_sizes *s = malloc_sizes;
 
-       if (g_cpucache_up != FULL)
+       if (g_cpucache_up < LATE)
                return;
 
        for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
@@ -1666,6 +1667,8 @@ void __init kmem_cache_init_late(void)
 {
        struct kmem_cache *cachep;
 
+       g_cpucache_up = LATE;
+
        /* Annotate slab for lockdep -- annotate the malloc caches */
        init_lock_keys();
 
index e583f770dfee5228e0e6a71b4bef574e8d88c552..21fdf46ad5aac727111834d0955af07472daaec2 100644 (file)
@@ -1315,7 +1315,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
                unsigned long align, unsigned long flags, unsigned long start,
                unsigned long end, int node, gfp_t gfp_mask, void *caller)
 {
-       static struct vmap_area *va;
+       struct vmap_area *va;
        struct vm_struct *area;
 
        BUG_ON(in_interrupt());
@@ -1658,6 +1658,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
                goto fail;
 
        addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
+       if (!addr)
+               return NULL;
 
        /*
         * In this function, newly allocated vm_struct is not added
index a1893c050795b6e92f36a71cd0f5dd985f334a2c..f54a05b7a61d9eb658562b191996beb9d4cea397 100644 (file)
@@ -183,7 +183,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
  */
 void register_shrinker(struct shrinker *shrinker)
 {
-       shrinker->nr = 0;
+       atomic_long_set(&shrinker->nr_in_batch, 0);
        down_write(&shrinker_rwsem);
        list_add_tail(&shrinker->list, &shrinker_list);
        up_write(&shrinker_rwsem);
@@ -247,25 +247,26 @@ unsigned long shrink_slab(struct shrink_control *shrink,
 
        list_for_each_entry(shrinker, &shrinker_list, list) {
                unsigned long long delta;
-               unsigned long total_scan;
-               unsigned long max_pass;
+               long total_scan;
+               long max_pass;
                int shrink_ret = 0;
                long nr;
                long new_nr;
                long batch_size = shrinker->batch ? shrinker->batch
                                                  : SHRINK_BATCH;
 
+               max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+               if (max_pass <= 0)
+                       continue;
+
                /*
                 * copy the current shrinker scan count into a local variable
                 * and zero it so that other concurrent shrinker invocations
                 * don't also do this scanning work.
                 */
-               do {
-                       nr = shrinker->nr;
-               } while (cmpxchg(&shrinker->nr, nr, 0) != nr);
+               nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
 
                total_scan = nr;
-               max_pass = do_shrinker_shrink(shrinker, shrink, 0);
                delta = (4 * nr_pages_scanned) / shrinker->seeks;
                delta *= max_pass;
                do_div(delta, lru_pages + 1);
@@ -325,12 +326,11 @@ unsigned long shrink_slab(struct shrink_control *shrink,
                 * manner that handles concurrent updates. If we exhausted the
                 * scan, there is no need to do an update.
                 */
-               do {
-                       nr = shrinker->nr;
-                       new_nr = total_scan + nr;
-                       if (total_scan <= 0)
-                               break;
-               } while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
+               if (total_scan > 0)
+                       new_nr = atomic_long_add_return(total_scan,
+                                       &shrinker->nr_in_batch);
+               else
+                       new_nr = atomic_long_read(&shrinker->nr_in_batch);
 
                trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
        }
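
The shrink_slab() changes above replace the open-coded cmpxchg loops with atomic_long_xchg()/atomic_long_add_return(): one caller claims the whole pending count (so concurrent callers read zero) and whatever it did not scan is added back afterwards. A userspace C11 sketch of that claim-then-return pattern, with invented names and numbers:

/* Claim-then-return counter pattern: xchg grabs the whole pending count so
 * concurrent workers see 0; the unscanned remainder is added back later.
 * Names and values are illustrative only.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long nr_in_batch = 17;    /* pending work units (assumed) */

int main(void)
{
        long nr = atomic_exchange(&nr_in_batch, 0);     /* claim everything */
        long scanned = 10;                              /* pretend progress */
        long leftover = nr - scanned;

        if (leftover > 0)
                atomic_fetch_add(&nr_in_batch, leftover); /* hand back the rest */

        printf("claimed %ld, %ld still pending\n", nr,
               atomic_load(&nr_in_batch));
        return 0;
}
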
index c7aafc7c5ed4854b2a46f6e37b5386ef937fee8e..5f09a578d49d00d86dc78d73af89588c85c79371 100644 (file)
@@ -245,9 +245,11 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        if (tt_global_entry) {
                /* This node is probably going to update its tt table */
                tt_global_entry->orig_node->tt_poss_change = true;
-               /* The global entry has to be marked as PENDING and has to be
+               /* The global entry has to be marked as ROAMING and has to be
                 * kept for consistency purpose */
-               tt_global_entry->flags |= TT_CLIENT_PENDING;
+               tt_global_entry->flags |= TT_CLIENT_ROAM;
+               tt_global_entry->roam_at = jiffies;
+
                send_roam_adv(bat_priv, tt_global_entry->addr,
                              tt_global_entry->orig_node);
        }
@@ -694,6 +696,7 @@ void tt_global_del(struct bat_priv *bat_priv,
                   const char *message, bool roaming)
 {
        struct tt_global_entry *tt_global_entry = NULL;
+       struct tt_local_entry *tt_local_entry = NULL;
 
        tt_global_entry = tt_global_hash_find(bat_priv, addr);
        if (!tt_global_entry)
@@ -701,15 +704,29 @@ void tt_global_del(struct bat_priv *bat_priv,
 
        if (tt_global_entry->orig_node == orig_node) {
                if (roaming) {
-                       tt_global_entry->flags |= TT_CLIENT_ROAM;
-                       tt_global_entry->roam_at = jiffies;
-                       goto out;
+                       /* if we are deleting a global entry due to a roam
+                        * event, there are two possibilities:
+                        * 1) the client roamed from node A to node B => we mark
+                        *    it with TT_CLIENT_ROAM, we start a timer and we
+                        *    wait for node B to claim it. In case of timeout
+                        *    the entry is purged.
+                        * 2) the client roamed to us => we can directly delete
+                        *    the global entry, since it is useless now. */
+                       tt_local_entry = tt_local_hash_find(bat_priv,
+                                                       tt_global_entry->addr);
+                       if (!tt_local_entry) {
+                               tt_global_entry->flags |= TT_CLIENT_ROAM;
+                               tt_global_entry->roam_at = jiffies;
+                               goto out;
+                       }
                }
                _tt_global_del(bat_priv, tt_global_entry, message);
        }
 out:
        if (tt_global_entry)
                tt_global_entry_free_ref(tt_global_entry);
+       if (tt_local_entry)
+               tt_local_entry_free_ref(tt_local_entry);
 }
 
 void tt_global_del_orig(struct bat_priv *bat_priv,
index 91bcd3a961ec22c501451ef588fe86b171c55ae3..1eea8208b2cc760fc36bd56b8531ba81faccc481 100644 (file)
@@ -79,17 +79,12 @@ static struct bnep_session *__bnep_get_session(u8 *dst)
 
 static void __bnep_link_session(struct bnep_session *s)
 {
-       /* It's safe to call __module_get() here because sessions are added
-          by the socket layer which has to hold the reference to this module.
-        */
-       __module_get(THIS_MODULE);
        list_add(&s->list, &bnep_session_list);
 }
 
 static void __bnep_unlink_session(struct bnep_session *s)
 {
        list_del(&s->list);
-       module_put(THIS_MODULE);
 }
 
 static int bnep_send(struct bnep_session *s, void *data, size_t len)
@@ -530,6 +525,7 @@ static int bnep_session(void *arg)
 
        up_write(&bnep_session_sem);
        free_netdev(dev);
+       module_put_and_exit(0);
        return 0;
 }
 
@@ -616,9 +612,11 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
 
        __bnep_link_session(s);
 
+       __module_get(THIS_MODULE);
        s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name);
        if (IS_ERR(s->task)) {
                /* Session thread start failed, gotta cleanup. */
+               module_put(THIS_MODULE);
                unregister_netdev(dev);
                __bnep_unlink_session(s);
                err = PTR_ERR(s->task);
index 7d00ddf9e9dcb55ab74cf768db31a3e15af647ac..5a6e634f7fca53015c51c9d8fcdbad1dbd41b3a1 100644 (file)
@@ -67,14 +67,12 @@ static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr)
 
 static void __cmtp_link_session(struct cmtp_session *session)
 {
-       __module_get(THIS_MODULE);
        list_add(&session->list, &cmtp_session_list);
 }
 
 static void __cmtp_unlink_session(struct cmtp_session *session)
 {
        list_del(&session->list);
-       module_put(THIS_MODULE);
 }
 
 static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci)
@@ -327,6 +325,7 @@ static int cmtp_session(void *arg)
        up_write(&cmtp_session_sem);
 
        kfree(session);
+       module_put_and_exit(0);
        return 0;
 }
 
@@ -376,9 +375,11 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
 
        __cmtp_link_session(session);
 
+       __module_get(THIS_MODULE);
        session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d",
                                                                session->num);
        if (IS_ERR(session->task)) {
+               module_put(THIS_MODULE);
                err = PTR_ERR(session->task);
                goto unlink;
        }
index e0af7237cd9245fedfc8886ec446f670ddc48500..c1c597e3e198e9ffba26dd4dcf2e59e2ffc923c3 100644 (file)
@@ -673,7 +673,7 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
                goto encrypt;
 
 auth:
-       if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
+       if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
                return 0;
 
        if (!hci_conn_auth(conn, sec_level, auth_type))
index d7d96b6b1f0d63b338e5ed58fb125fbc6290aff5..643a41b76e2eadf1f8f783eba1881392b563beda 100644 (file)
@@ -545,7 +545,7 @@ static void hci_setup(struct hci_dev *hdev)
 {
        hci_setup_event_mask(hdev);
 
-       if (hdev->lmp_ver > 1)
+       if (hdev->hci_ver > 1)
                hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
 
        if (hdev->features[6] & LMP_SIMPLE_PAIR) {
index 5ea94a1eecf2f9a4338aa116d645b03b77fa7bd6..17b5b1cd96579e352b2f3bff0bb5d9b7d9406ff2 100644 (file)
@@ -2152,7 +2152,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi
        void *ptr = req->data;
        int type, olen;
        unsigned long val;
-       struct l2cap_conf_rfc rfc;
+       struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
 
        BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
 
@@ -2271,6 +2271,16 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
                }
        }
 
+       /* Use sane default values in case a misbehaving remote device
+        * did not send an RFC option.
+        */
+       rfc.mode = chan->mode;
+       rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
+       rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
+       rfc.max_pdu_size = cpu_to_le16(chan->imtu);
+
+       BT_ERR("Expected RFC option was not found, using defaults");
+
 done:
        switch (rfc.mode) {
        case L2CAP_MODE_ERTM:
index 4e32e18211f9187d8a98a27772a9ba97f5cf7112..2d28dfe983890fc74a2a8ebf5bc5f7e5716058c5 100644 (file)
@@ -1146,6 +1146,7 @@ static int rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci)
                        if (list_empty(&s->dlcs)) {
                                s->state = BT_DISCONN;
                                rfcomm_send_disc(s, 0);
+                               rfcomm_session_clear_timer(s);
                        }
 
                        break;
index d6ec3720c77e448c4fd014f78397dfe5fb52e592..fa8b8f763580a0fcf82cf9afe5c6fb91d1f64f8a 100644 (file)
@@ -114,12 +114,18 @@ static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const vo
        return NULL;
 }
 
+static unsigned int fake_mtu(const struct dst_entry *dst)
+{
+       return dst->dev->mtu;
+}
+
 static struct dst_ops fake_dst_ops = {
        .family =               AF_INET,
        .protocol =             cpu_to_be16(ETH_P_IP),
        .update_pmtu =          fake_update_pmtu,
        .cow_metrics =          fake_cow_metrics,
        .neigh_lookup =         fake_neigh_lookup,
+       .mtu =                  fake_mtu,
 };
 
 /*
@@ -141,7 +147,7 @@ void br_netfilter_rtable_init(struct net_bridge *br)
        rt->dst.dev = br->dev;
        rt->dst.path = &rt->dst;
        dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
-       rt->dst.flags   = DST_NOXFRM;
+       rt->dst.flags   = DST_NOXFRM | DST_NOPEER;
        rt->dst.ops = &fake_dst_ops;
 }
 
index e5f9ece3c9a0f9637c8ad98382b88705b27e1d30..a1daf8227ed11c1a1853a8fb5a246919e9deb5f5 100644 (file)
@@ -18,6 +18,7 @@
 #include <net/sock.h>
 
 #include "br_private.h"
+#include "br_private_stp.h"
 
 static inline size_t br_nlmsg_size(void)
 {
@@ -188,6 +189,11 @@ static int br_rtm_setlink(struct sk_buff *skb,  struct nlmsghdr *nlh, void *arg)
 
        p->state = new_state;
        br_log_state(p);
+
+       spin_lock_bh(&p->br->lock);
+       br_port_state_selection(p->br);
+       spin_unlock_bh(&p->br->lock);
+
        br_ifinfo_notify(RTM_NEWLINK, p);
 
        return 0;
index ad0a3f7cf6cc73081ca600b241060ce5ef867e44..dd147d78a5889ab6c2139712199c3672f9e087c6 100644 (file)
@@ -399,25 +399,24 @@ void br_port_state_selection(struct net_bridge *br)
        struct net_bridge_port *p;
        unsigned int liveports = 0;
 
-       /* Don't change port states if userspace is handling STP */
-       if (br->stp_enabled == BR_USER_STP)
-               return;
-
        list_for_each_entry(p, &br->port_list, list) {
                if (p->state == BR_STATE_DISABLED)
                        continue;
 
-               if (p->port_no == br->root_port) {
-                       p->config_pending = 0;
-                       p->topology_change_ack = 0;
-                       br_make_forwarding(p);
-               } else if (br_is_designated_port(p)) {
-                       del_timer(&p->message_age_timer);
-                       br_make_forwarding(p);
-               } else {
-                       p->config_pending = 0;
-                       p->topology_change_ack = 0;
-                       br_make_blocking(p);
+               /* Don't change port states if userspace is handling STP */
+               if (br->stp_enabled != BR_USER_STP) {
+                       if (p->port_no == br->root_port) {
+                               p->config_pending = 0;
+                               p->topology_change_ack = 0;
+                               br_make_forwarding(p);
+                       } else if (br_is_designated_port(p)) {
+                               del_timer(&p->message_age_timer);
+                               br_make_forwarding(p);
+                       } else {
+                               p->config_pending = 0;
+                               p->topology_change_ack = 0;
+                               br_make_blocking(p);
+                       }
                }
 
                if (p->state == BR_STATE_FORWARDING)
index f39921171d0d94e51e1270891a6703be598d01e9..d3ca87bf23b7ff952a428b367f6ad525382e6740 100644 (file)
@@ -136,20 +136,21 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
 
 static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
 {
-       int tmp;
        u16 chks;
        u16 len;
+       __le16 data;
+
        struct cffrml *this = container_obj(layr);
        if (this->dofcs) {
                chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
-               tmp = cpu_to_le16(chks);
-               cfpkt_add_trail(pkt, &tmp, 2);
+               data = cpu_to_le16(chks);
+               cfpkt_add_trail(pkt, &data, 2);
        } else {
                cfpkt_pad_trail(pkt, 2);
        }
        len = cfpkt_getlen(pkt);
-       tmp = cpu_to_le16(len);
-       cfpkt_add_head(pkt, &tmp, 2);
+       data = cpu_to_le16(len);
+       cfpkt_add_head(pkt, &data, 2);
        cfpkt_info(pkt)->hdr_len += 2;
        if (cfpkt_erroneous(pkt)) {
                pr_err("Packet is erroneous!\n");
index 42599e31dcad8a6ceb1f3b29d9171f6a249e56a3..3a94eae7abe924d8d73d454b5a9330ab50bda968 100644 (file)
@@ -477,7 +477,6 @@ int crush_do_rule(struct crush_map *map,
        int i, j;
        int numrep;
        int firstn;
-       int rc = -1;
 
        BUG_ON(ruleno >= map->max_rules);
 
@@ -491,23 +490,18 @@ int crush_do_rule(struct crush_map *map,
         * that this may or may not correspond to the specific types
         * referenced by the crush rule.
         */
-       if (force >= 0) {
-               if (force >= map->max_devices ||
-                   map->device_parents[force] == 0) {
-                       /*dprintk("CRUSH: forcefed device dne\n");*/
-                       rc = -1;  /* force fed device dne */
-                       goto out;
-               }
-               if (!is_out(map, weight, force, x)) {
-                       while (1) {
-                               force_context[++force_pos] = force;
-                               if (force >= 0)
-                                       force = map->device_parents[force];
-                               else
-                                       force = map->bucket_parents[-1-force];
-                               if (force == 0)
-                                       break;
-                       }
+       if (force >= 0 &&
+           force < map->max_devices &&
+           map->device_parents[force] != 0 &&
+           !is_out(map, weight, force, x)) {
+               while (1) {
+                       force_context[++force_pos] = force;
+                       if (force >= 0)
+                               force = map->device_parents[force];
+                       else
+                               force = map->bucket_parents[-1-force];
+                       if (force == 0)
+                               break;
                }
        }
 
@@ -600,10 +594,7 @@ int crush_do_rule(struct crush_map *map,
                        BUG_ON(1);
                }
        }
-       rc = result_len;
-
-out:
-       return rc;
+       return result_len;
 }
 
 
index 6ba50a1e404c4bac04cc7d56718865d5a1749c2f..5a13edfc9f73411f688920b51400489eb3e89ee2 100644 (file)
@@ -1396,7 +1396,7 @@ rollback:
        for_each_net(net) {
                for_each_netdev(net, dev) {
                        if (dev == last)
-                               break;
+                               goto outroll;
 
                        if (dev->flags & IFF_UP) {
                                nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
@@ -1407,6 +1407,7 @@ rollback:
                }
        }
 
+outroll:
        raw_notifier_chain_unregister(&netdev_chain, nb);
        goto unlock;
 }
@@ -4282,6 +4283,12 @@ static int dev_seq_open(struct inode *inode, struct file *file)
                            sizeof(struct dev_iter_state));
 }
 
+int dev_seq_open_ops(struct inode *inode, struct file *file,
+                    const struct seq_operations *ops)
+{
+       return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
+}
+
 static const struct file_operations dev_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = dev_seq_open,
index 277faef9148d0053bc6987ae67e73571d49a4d99..febba516db6274c83d56577e7dc4d7fd61a15b72 100644 (file)
@@ -696,8 +696,7 @@ static const struct seq_operations dev_mc_seq_ops = {
 
 static int dev_mc_seq_open(struct inode *inode, struct file *file)
 {
-       return seq_open_net(inode, file, &dev_mc_seq_ops,
-                           sizeof(struct seq_net_private));
+       return dev_seq_open_ops(inode, file, &dev_mc_seq_ops);
 }
 
 static const struct file_operations dev_mc_seq_fops = {
index 8ae42de9c79e78379691f20207987c5b2ea338b6..e318c7e98042ffcfd1fc43a1d04b5cf816d651a8 100644 (file)
@@ -358,6 +358,18 @@ void flow_cache_flush(void)
        put_online_cpus();
 }
 
+static void flow_cache_flush_task(struct work_struct *work)
+{
+       flow_cache_flush();
+}
+
+static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
+
+void flow_cache_flush_deferred(void)
+{
+       schedule_work(&flow_cache_flush_work);
+}
+
 static int __cpuinit flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
 {
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
index 039d51e6c284e7ab655319d399b9d40060357dcf..5ac07d31fbc9e395abf71ae46c897b5db3c7d6f8 100644 (file)
@@ -2397,7 +2397,10 @@ static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
        struct net *net = seq_file_net(seq);
        struct neigh_table *tbl = state->tbl;
 
-       pn = pn->next;
+       do {
+               pn = pn->next;
+       } while (pn && !net_eq(pneigh_net(pn), net));
+
        while (!pn) {
                if (++state->bucket > PNEIGH_HASHMASK)
                        break;
index c71c434a4c053e440dc816682d944c521e05c50f..385aefe536489548ed3f53e61094faaac0d08afd 100644 (file)
@@ -665,11 +665,14 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
        if (count) {
                int i;
 
-               if (count > 1<<30) {
+               if (count > INT_MAX)
+                       return -EINVAL;
+               count = roundup_pow_of_two(count);
+               if (count > (ULONG_MAX - sizeof(struct rps_dev_flow_table))
+                               / sizeof(struct rps_dev_flow)) {
                        /* Enforce a limit to prevent overflow */
                        return -EINVAL;
                }
-               count = roundup_pow_of_two(count);
                table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
                if (!table)
                        return -ENOMEM;
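
The store_rps_dev_flow_table_cnt() fix above tightens the sanity checks: cap the requested count at INT_MAX, round it up to a power of two, and only then make sure the table size computation cannot wrap an unsigned long. A simplified sketch of that style of overflow guard, with stand-in structures rather than the real rps layout:

/* Simplified sketch of the overflow guard: bound count so that
 * sizeof(header) + count * sizeof(entry) cannot wrap an unsigned long.
 * The structures are stand-ins, not the kernel's rps tables.
 */
#include <limits.h>
#include <stdio.h>

struct flow_entry { unsigned int cpu; };
struct flow_table { unsigned int mask; struct flow_entry entries[]; };

static int checked_count(unsigned long count)
{
        unsigned long rounded = 1;

        if (count > INT_MAX)
                return -1;
        while (rounded < count)         /* round up to a power of two */
                rounded <<= 1;
        if (rounded > (ULONG_MAX - sizeof(struct flow_table)) /
                      sizeof(struct flow_entry))
                return -1;              /* allocation size would overflow */
        return 0;
}

int main(void)
{
        printf("%d %d\n", checked_count(3000), checked_count(ULONG_MAX));
        return 0;
}
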
index 182236b2510aeb16e6e0e2026264d683edf93e7e..9b570a6a33c5d8c52d777e160742dc31ec350c16 100644 (file)
  * but then some measure against one socket starving all other sockets
  * would be needed.
  *
- * It was 128 by default. Experiments with real servers show, that
+ * Its minimum value is 128. Experiments with real servers show that
  * it is absolutely not enough even at 100conn/sec. 256 cures most
- * of problems. This value is adjusted to 128 for very small machines
- * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
+ * of problems.
+ * This value is adjusted to 128 for low memory machines,
+ * and it will increase in proportion to the memory of the machine.
  * Note: Don't forget somaxconn that may limit backlog too.
  */
 int sysctl_max_syn_backlog = 256;
index 025233de25f969cb67e075d6ea5c4a9bba746bd9..925991ae6f52e67e2221f1ed7490a4583bf26b9d 100644 (file)
@@ -19,6 +19,7 @@ static int __init net_secret_init(void)
 }
 late_initcall(net_secret_init);
 
+#ifdef CONFIG_INET
 static u32 seq_scale(u32 seq)
 {
        /*
@@ -33,6 +34,7 @@ static u32 seq_scale(u32 seq)
         */
        return seq + (ktime_to_ns(ktime_get_real()) >> 6);
 }
+#endif
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
index 18a3cebb753d39d57b2962f991f32fb048d76c29..3c30ee4a57105a2746b3e1b2bfab68af8ef0c917 100644 (file)
@@ -2230,7 +2230,7 @@ static int skb_prepare_for_shift(struct sk_buff *skb)
  * @shiftlen: shift up to this many bytes
  *
  * Attempts to shift up to shiftlen worth of bytes, which may be less than
- * the length of the skb, from tgt to skb. Returns number bytes shifted.
+ * the length of the skb, from skb to tgt. Returns number bytes shifted.
  * It's up to caller to free skb if everything was shifted.
  *
  * If @tgt runs out of frags, the whole operation is aborted.
index 4ed7b1d12f5ecde5b8c2119c0d4cfaaa765ff470..b23f174ab84c3b6c72302834d8e13695d22a1c6c 100644 (file)
@@ -288,11 +288,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        unsigned long flags;
        struct sk_buff_head *list = &sk->sk_receive_queue;
 
-       /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
-          number of warnings when compiling with -W --ANK
-        */
-       if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-           (unsigned)sk->sk_rcvbuf) {
+       if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
                atomic_inc(&sk->sk_drops);
                trace_sock_rcvqueue_full(sk, skb);
                return -ENOMEM;
index 90a919afbed79ee5998f510badfaaa199a441086..3f4e5414c8e5200711839e8847437809047cb778 100644 (file)
@@ -111,6 +111,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
                               inet->inet_sport, inet->inet_dport, sk);
        if (IS_ERR(rt)) {
+               err = PTR_ERR(rt);
                rt = NULL;
                goto failure;
        }
index a77d16158eb6fa2eec5b1890b81ce12b92d88851..94f4ec036669cc152af1a9ff9639889016235228 100644 (file)
@@ -112,7 +112,7 @@ static unsigned long dn_rt_deadline;
 static int dn_dst_gc(struct dst_ops *ops);
 static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
 static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
-static unsigned int dn_dst_default_mtu(const struct dst_entry *dst);
+static unsigned int dn_dst_mtu(const struct dst_entry *dst);
 static void dn_dst_destroy(struct dst_entry *);
 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
 static void dn_dst_link_failure(struct sk_buff *);
@@ -135,7 +135,7 @@ static struct dst_ops dn_dst_ops = {
        .gc =                   dn_dst_gc,
        .check =                dn_dst_check,
        .default_advmss =       dn_dst_default_advmss,
-       .default_mtu =          dn_dst_default_mtu,
+       .mtu =                  dn_dst_mtu,
        .cow_metrics =          dst_cow_metrics_generic,
        .destroy =              dn_dst_destroy,
        .negative_advice =      dn_dst_negative_advice,
@@ -825,9 +825,11 @@ static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)
        return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
 }
 
-static unsigned int dn_dst_default_mtu(const struct dst_entry *dst)
+static unsigned int dn_dst_mtu(const struct dst_entry *dst)
 {
-       return dst->dev->mtu;
+       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+       return mtu ? : dst->dev->mtu;
 }
 
 static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
index 67f691bd4acfee77b2b3402e2e9e4e5092bab8c0..d9c150cc59a952ac86585b6c600acc398b80bc14 100644 (file)
@@ -36,16 +36,13 @@ static void dn_slow_timer(unsigned long arg);
 
 void dn_start_slow_timer(struct sock *sk)
 {
-       sk->sk_timer.expires    = jiffies + SLOW_INTERVAL;
-       sk->sk_timer.function   = dn_slow_timer;
-       sk->sk_timer.data       = (unsigned long)sk;
-
-       add_timer(&sk->sk_timer);
+       setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk);
+       sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
 }
 
 void dn_stop_slow_timer(struct sock *sk)
 {
-       del_timer(&sk->sk_timer);
+       sk_stop_timer(sk, &sk->sk_timer);
 }
 
 static void dn_slow_timer(unsigned long arg)
@@ -53,12 +50,10 @@ static void dn_slow_timer(unsigned long arg)
        struct sock *sk = (struct sock *)arg;
        struct dn_scp *scp = DN_SK(sk);
 
-       sock_hold(sk);
        bh_lock_sock(sk);
 
        if (sock_owned_by_user(sk)) {
-               sk->sk_timer.expires = jiffies + HZ / 10;
-               add_timer(&sk->sk_timer);
+               sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
                goto out;
        }
 
@@ -100,9 +95,7 @@ static void dn_slow_timer(unsigned long arg)
                        scp->keepalive_fxn(sk);
        }
 
-       sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
-
-       add_timer(&sk->sk_timer);
+       sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
 out:
        bh_unlock_sock(sk);
        sock_put(sk);
index c6b5092f29a15511bb7c69f37a86dca2c538a164..65f01dc47565bcc26282d4472bbd9c3fb2d3712c 100644 (file)
@@ -1490,7 +1490,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
                             void __user *buffer,
                             size_t *lenp, loff_t *ppos)
 {
+       int old_value = *(int *)ctl->data;
        int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+       int new_value = *(int *)ctl->data;
 
        if (write) {
                struct ipv4_devconf *cnf = ctl->extra1;
@@ -1501,6 +1503,9 @@ static int devinet_conf_proc(ctl_table *ctl, int write,
 
                if (cnf == net->ipv4.devconf_dflt)
                        devinet_copy_dflt_conf(net, i);
+               if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1)
+                       if ((new_value == 0) && (old_value != 0))
+                               rt_cache_flush(net, 0);
        }
 
        return ret;
index c7472eff2d514b475579d1a3e5a89269d04fce0e..b2ca095cb9dab36a4eb6e8da7308c4e55a6437cc 100644 (file)
@@ -1716,7 +1716,8 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
        if (err) {
                int j;
 
-               pmc->sfcount[sfmode]--;
+               if (!delta)
+                       pmc->sfcount[sfmode]--;
                for (j=0; j<i; j++)
                        (void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
        } else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
index 68e8ac5143834b80e09cc4bbbbdaecf05a3a2710..ccee270a9b6587efe63e74abcd298bae34fea779 100644 (file)
@@ -108,9 +108,6 @@ static int inet_csk_diag_fill(struct sock *sk,
                       icsk->icsk_ca_ops->name);
        }
 
-       if ((ext & (1 << (INET_DIAG_TOS - 1))) && (sk->sk_family != AF_INET6))
-               RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
-
        r->idiag_family = sk->sk_family;
        r->idiag_state = sk->sk_state;
        r->idiag_timer = 0;
@@ -125,16 +122,23 @@ static int inet_csk_diag_fill(struct sock *sk,
        r->id.idiag_src[0] = inet->inet_rcv_saddr;
        r->id.idiag_dst[0] = inet->inet_daddr;
 
+       /* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
+        * hence this needs to be included regardless of socket family.
+        */
+       if (ext & (1 << (INET_DIAG_TOS - 1)))
+               RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
+
 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
        if (r->idiag_family == AF_INET6) {
                const struct ipv6_pinfo *np = inet6_sk(sk);
 
+               if (ext & (1 << (INET_DIAG_TCLASS - 1)))
+                       RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
+
                ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
                               &np->rcv_saddr);
                ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
                               &np->daddr);
-               if (ext & (1 << (INET_DIAG_TCLASS - 1)))
-                       RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
        }
 #endif
 
index 3b34d1c862709e7bde3cd3665fad1e191692506d..29a07b6c7168f7369b13e25d8c96011c6118ec56 100644 (file)
@@ -84,7 +84,7 @@ int ip_forward(struct sk_buff *skb)
 
        rt = skb_rtable(skb);
 
-       if (opt->is_strictroute && ip_hdr(skb)->daddr != rt->rt_gateway)
+       if (opt->is_strictroute && opt->nexthop != rt->rt_gateway)
                goto sr_failed;
 
        if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
index 05d20cca9d66efd2500268a3d0ba4a31117739e7..1e60f7679075b660f489bcd1a9f462c832c5e77e 100644 (file)
@@ -568,12 +568,13 @@ void ip_forward_options(struct sk_buff *skb)
                     ) {
                        if (srrptr + 3 > srrspace)
                                break;
-                       if (memcmp(&ip_hdr(skb)->daddr, &optptr[srrptr-1], 4) == 0)
+                       if (memcmp(&opt->nexthop, &optptr[srrptr-1], 4) == 0)
                                break;
                }
                if (srrptr + 3 <= srrspace) {
                        opt->is_changed = 1;
                        ip_rt_get_source(&optptr[srrptr-1], skb, rt);
+                       ip_hdr(skb)->daddr = opt->nexthop;
                        optptr[2] = srrptr+4;
                } else if (net_ratelimit())
                        printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
@@ -640,7 +641,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
        }
        if (srrptr <= srrspace) {
                opt->srr_is_hit = 1;
-               iph->daddr = nexthop;
+               opt->nexthop = nexthop;
                opt->is_changed = 1;
        }
        return 0;
index 0da2afc97f32ffae2773098391aba0dcbd004903..99ec116bef145e1ac0432918894b447b2abf8da3 100644 (file)
@@ -253,6 +253,10 @@ static int __init ic_open_devs(void)
                }
        }
 
+       /* no point in waiting if we could not bring up at least one device */
+       if (!ic_first_dev)
+               goto have_carrier;
+
        /* wait for a carrier on at least one device */
        start = jiffies;
        while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
index 065effd8349a81689828927c84142a778aebb650..0b2e7329abdadaafd112e736bc6f19bdb21373f6 100644 (file)
@@ -285,6 +285,8 @@ static struct ip_tunnel * ipip_tunnel_locate(struct net *net,
        if (register_netdevice(dev) < 0)
                goto failed_free;
 
+       strcpy(nt->parms.name, dev->name);
+
        dev_hold(dev);
        ipip_tunnel_link(ipn, nt);
        return nt;
@@ -759,7 +761,6 @@ static int ipip_tunnel_init(struct net_device *dev)
        struct ip_tunnel *tunnel = netdev_priv(dev);
 
        tunnel->dev = dev;
-       strcpy(tunnel->parms.name, dev->name);
 
        memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
        memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -825,6 +826,7 @@ static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
 static int __net_init ipip_init_net(struct net *net)
 {
        struct ipip_net *ipn = net_generic(net, ipip_net_id);
+       struct ip_tunnel *t;
        int err;
 
        ipn->tunnels[0] = ipn->tunnels_wc;
@@ -848,6 +850,9 @@ static int __net_init ipip_init_net(struct net *net)
        if ((err = register_netdev(ipn->fb_tunnel_dev)))
                goto err_reg_dev;
 
+       t = netdev_priv(ipn->fb_tunnel_dev);
+
+       strcpy(t->parms.name, ipn->fb_tunnel_dev->name);
        return 0;
 
 err_reg_dev:
index 9899619ab9b8db0f9d8d02c8005c0e6bb01fda94..4f47e064e262c2f24e7cb13eacfcebff0fad86a3 100644 (file)
@@ -64,7 +64,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
        /* Change in oif may mean change in hh_len. */
        hh_len = skb_dst(skb)->dev->hard_header_len;
        if (skb_headroom(skb) < hh_len &&
-           pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
+           pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
+                               0, GFP_ATOMIC))
                return -1;
 
        return 0;
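
The ip_route_me_harder() hunk grows the headroom by HH_DATA_ALIGN(hh_len - skb_headroom(skb)) instead of the raw shortfall, so the reserved hard-header area keeps the alignment the link layer expects. A user-space sketch of that round-up, assuming the usual power-of-two boundary (align_up is an illustrative helper, not the kernel macro):

    #include <stdio.h>

    /* Round x up to the next multiple of a power-of-two boundary. */
    static unsigned int align_up(unsigned int x, unsigned int align)
    {
            return (x + align - 1) & ~(align - 1);
    }

    int main(void)
    {
            unsigned int hh_len = 18, headroom = 3;

            /* Raw shortfall vs. the aligned amount actually reserved
             * (16 is just an example boundary here). */
            printf("need %u, reserve %u\n",
                   hh_len - headroom,
                   align_up(hh_len - headroom, 16));
            return 0;
    }
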
index 1dfc18a03fd4219fe9dd419011877d0a7ce3dfa7..f19f2182894c1c1549d27eb79cb409914e5b5bf0 100644 (file)
@@ -325,7 +325,6 @@ config IP_NF_TARGET_TTL
 # raw + specific targets
 config IP_NF_RAW
        tristate  'raw table support (required for NOTRACK/TRACE)'
-       depends on NETFILTER_ADVANCED
        help
          This option adds a `raw' table to iptables. This table is the very
          first in the netfilter framework and hooks in at the PREROUTING
index 0c74da8a04732ce9702e9c1c8d80496f1c8df423..94cdbc55ca7ead13879d563608f8a191ca3b424c 100644 (file)
@@ -91,6 +91,7 @@
 #include <linux/rcupdate.h>
 #include <linux/times.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include <net/dst.h>
 #include <net/net_namespace.h>
 #include <net/protocol.h>
 #include <net/secure_seq.h>
 
 #define RT_FL_TOS(oldflp4) \
-    ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
+       ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
 
 #define IP_MAX_MTU     0xFFF0
 
 
 static int ip_rt_max_size;
 static int ip_rt_gc_timeout __read_mostly      = RT_GC_TIMEOUT;
+static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
 static int ip_rt_redirect_number __read_mostly = 9;
 static int ip_rt_redirect_load __read_mostly   = HZ / 50;
@@ -131,6 +133,10 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
 static int ip_rt_min_pmtu __read_mostly                = 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly      = 256;
 static int rt_chain_length_max __read_mostly   = 20;
+static int redirect_genid;
+
+static struct delayed_work expires_work;
+static unsigned long expires_ljiffies;
 
 /*
  *     Interface to generic destination cache.
@@ -138,7 +144,7 @@ static int rt_chain_length_max __read_mostly        = 20;
 
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int     ipv4_default_advmss(const struct dst_entry *dst);
-static unsigned int     ipv4_default_mtu(const struct dst_entry *dst);
+static unsigned int     ipv4_mtu(const struct dst_entry *dst);
 static void             ipv4_dst_destroy(struct dst_entry *dst);
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
 static void             ipv4_link_failure(struct sk_buff *skb);
@@ -193,7 +199,7 @@ static struct dst_ops ipv4_dst_ops = {
        .gc =                   rt_garbage_collect,
        .check =                ipv4_dst_check,
        .default_advmss =       ipv4_default_advmss,
-       .default_mtu =          ipv4_default_mtu,
+       .mtu =                  ipv4_mtu,
        .cow_metrics =          ipv4_cow_metrics,
        .destroy =              ipv4_dst_destroy,
        .ifdown =               ipv4_dst_ifdown,
@@ -416,9 +422,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
        else {
                struct rtable *r = v;
                struct neighbour *n;
-               int len;
+               int len, HHUptod;
 
+               rcu_read_lock();
                n = dst_get_neighbour(&r->dst);
+               HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
+               rcu_read_unlock();
+
                seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
                              "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
                        r->dst.dev ? r->dst.dev->name : "*",
@@ -432,7 +442,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
                              dst_metric(&r->dst, RTAX_RTTVAR)),
                        r->rt_key_tos,
                        -1,
-                       (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0,
+                       HHUptod,
                        r->rt_spec_dst, &len);
 
                seq_printf(seq, "%*s\n", 127 - len, "");
@@ -825,6 +835,97 @@ static int has_noalias(const struct rtable *head, const struct rtable *rth)
        return ONE;
 }
 
+static void rt_check_expire(void)
+{
+       static unsigned int rover;
+       unsigned int i = rover, goal;
+       struct rtable *rth;
+       struct rtable __rcu **rthp;
+       unsigned long samples = 0;
+       unsigned long sum = 0, sum2 = 0;
+       unsigned long delta;
+       u64 mult;
+
+       delta = jiffies - expires_ljiffies;
+       expires_ljiffies = jiffies;
+       mult = ((u64)delta) << rt_hash_log;
+       if (ip_rt_gc_timeout > 1)
+               do_div(mult, ip_rt_gc_timeout);
+       goal = (unsigned int)mult;
+       if (goal > rt_hash_mask)
+               goal = rt_hash_mask + 1;
+       for (; goal > 0; goal--) {
+               unsigned long tmo = ip_rt_gc_timeout;
+               unsigned long length;
+
+               i = (i + 1) & rt_hash_mask;
+               rthp = &rt_hash_table[i].chain;
+
+               if (need_resched())
+                       cond_resched();
+
+               samples++;
+
+               if (rcu_dereference_raw(*rthp) == NULL)
+                       continue;
+               length = 0;
+               spin_lock_bh(rt_hash_lock_addr(i));
+               while ((rth = rcu_dereference_protected(*rthp,
+                                       lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
+                       prefetch(rth->dst.rt_next);
+                       if (rt_is_expired(rth)) {
+                               *rthp = rth->dst.rt_next;
+                               rt_free(rth);
+                               continue;
+                       }
+                       if (rth->dst.expires) {
+                               /* Entry is expired even if it is in use */
+                               if (time_before_eq(jiffies, rth->dst.expires)) {
+nofree:
+                                       tmo >>= 1;
+                                       rthp = &rth->dst.rt_next;
+                                       /*
+                                        * We only count entries on
+                                        * a chain with equal hash inputs once
+                                        * so that entries for different QOS
+                                        * levels, and other non-hash input
+                                        * attributes don't unfairly skew
+                                        * the length computation
+                                        */
+                                       length += has_noalias(rt_hash_table[i].chain, rth);
+                                       continue;
+                               }
+                       } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
+                               goto nofree;
+
+                       /* Cleanup aged off entries. */
+                       *rthp = rth->dst.rt_next;
+                       rt_free(rth);
+               }
+               spin_unlock_bh(rt_hash_lock_addr(i));
+               sum += length;
+               sum2 += length*length;
+       }
+       if (samples) {
+               unsigned long avg = sum / samples;
+               unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
+               rt_chain_length_max = max_t(unsigned long,
+                                       ip_rt_gc_elasticity,
+                                       (avg + 4*sd) >> FRACT_BITS);
+       }
+       rover = i;
+}
+
+/*
+ * rt_worker_func() is run in process context.
+ * we call rt_check_expire() to scan part of the hash table
+ */
+static void rt_worker_func(struct work_struct *work)
+{
+       rt_check_expire();
+       schedule_delayed_work(&expires_work, ip_rt_gc_interval);
+}
+
 /*
  * Perturbation of rt_genid by a small quantity [1..256]
  * Using 8 bits of shuffling ensure we can call rt_cache_invalidate()
@@ -837,6 +938,7 @@ static void rt_cache_invalidate(struct net *net)
 
        get_random_bytes(&shuffle, sizeof(shuffle));
        atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
+       redirect_genid++;
 }
 
 /*
@@ -1265,7 +1367,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
 {
        struct rtable *rt = (struct rtable *) dst;
 
-       if (rt) {
+       if (rt && !(rt->dst.flags & DST_NOPEER)) {
                if (rt->peer == NULL)
                        rt_bind_peer(rt, rt->rt_dst, 1);
 
@@ -1276,7 +1378,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
                        iph->id = htons(inet_getid(rt->peer, more));
                        return;
                }
-       } else
+       } else if (!rt)
                printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
                       __builtin_return_address(0));
 
@@ -1304,7 +1406,7 @@ static void rt_del(unsigned hash, struct rtable *rt)
        spin_unlock_bh(rt_hash_lock_addr(hash));
 }
 
-static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
+static void check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
 {
        struct rtable *rt = (struct rtable *) dst;
        __be32 orig_gw = rt->rt_gateway;
@@ -1315,21 +1417,19 @@ static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
        rt->rt_gateway = peer->redirect_learned.a4;
 
        n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
-       if (IS_ERR(n))
-               return PTR_ERR(n);
+       if (IS_ERR(n)) {
+               rt->rt_gateway = orig_gw;
+               return;
+       }
        old_n = xchg(&rt->dst._neighbour, n);
        if (old_n)
                neigh_release(old_n);
-       if (!n || !(n->nud_state & NUD_VALID)) {
-               if (n)
-                       neigh_event_send(n, NULL);
-               rt->rt_gateway = orig_gw;
-               return -EAGAIN;
+       if (!(n->nud_state & NUD_VALID)) {
+               neigh_event_send(n, NULL);
        } else {
                rt->rt_flags |= RTCF_REDIRECTED;
                call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
        }
-       return 0;
 }
 
 /* called in rcu_read_lock() section */
@@ -1391,8 +1491,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 
                                peer = rt->peer;
                                if (peer) {
-                                       if (peer->redirect_learned.a4 != new_gw) {
+                                       if (peer->redirect_learned.a4 != new_gw ||
+                                           peer->redirect_genid != redirect_genid) {
                                                peer->redirect_learned.a4 = new_gw;
+                                               peer->redirect_genid = redirect_genid;
                                                atomic_inc(&__rt_peer_genid);
                                        }
                                        check_peer_redir(&rt->dst, peer);
@@ -1685,12 +1787,8 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 }
 
 
-static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+static void ipv4_validate_peer(struct rtable *rt)
 {
-       struct rtable *rt = (struct rtable *) dst;
-
-       if (rt_is_expired(rt))
-               return NULL;
        if (rt->rt_peer_genid != rt_peer_genid()) {
                struct inet_peer *peer;
 
@@ -1699,17 +1797,26 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 
                peer = rt->peer;
                if (peer) {
-                       check_peer_pmtu(dst, peer);
+                       check_peer_pmtu(&rt->dst, peer);
 
+                       if (peer->redirect_genid != redirect_genid)
+                               peer->redirect_learned.a4 = 0;
                        if (peer->redirect_learned.a4 &&
-                           peer->redirect_learned.a4 != rt->rt_gateway) {
-                               if (check_peer_redir(dst, peer))
-                                       return NULL;
-                       }
+                           peer->redirect_learned.a4 != rt->rt_gateway)
+                               check_peer_redir(&rt->dst, peer);
                }
 
                rt->rt_peer_genid = rt_peer_genid();
        }
+}
+
+static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+{
+       struct rtable *rt = (struct rtable *) dst;
+
+       if (rt_is_expired(rt))
+               return NULL;
+       ipv4_validate_peer(rt);
        return dst;
 }
 
@@ -1814,12 +1921,17 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
        return advmss;
 }
 
-static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
+static unsigned int ipv4_mtu(const struct dst_entry *dst)
 {
-       unsigned int mtu = dst->dev->mtu;
+       const struct rtable *rt = (const struct rtable *) dst;
+       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+       if (mtu && rt_is_output_route(rt))
+               return mtu;
+
+       mtu = dst->dev->mtu;
 
        if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
-               const struct rtable *rt = (const struct rtable *) dst;
 
                if (rt->rt_gateway != rt->rt_dst && mtu > 576)
                        mtu = 576;
@@ -1852,6 +1964,8 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
                dst_init_metrics(&rt->dst, peer->metrics, false);
 
                check_peer_pmtu(&rt->dst, peer);
+               if (peer->redirect_genid != redirect_genid)
+                       peer->redirect_learned.a4 = 0;
                if (peer->redirect_learned.a4 &&
                    peer->redirect_learned.a4 != rt->rt_gateway) {
                        rt->rt_gateway = peer->redirect_learned.a4;
@@ -2357,6 +2471,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
                    rth->rt_mark == skb->mark &&
                    net_eq(dev_net(rth->dst.dev), net) &&
                    !rt_is_expired(rth)) {
+                       ipv4_validate_peer(rth);
                        if (noref) {
                                dst_use_noref(&rth->dst, jiffies);
                                skb_dst_set_noref(skb, &rth->dst);
@@ -2415,11 +2530,11 @@ EXPORT_SYMBOL(ip_route_input_common);
 static struct rtable *__mkroute_output(const struct fib_result *res,
                                       const struct flowi4 *fl4,
                                       __be32 orig_daddr, __be32 orig_saddr,
-                                      int orig_oif, struct net_device *dev_out,
+                                      int orig_oif, __u8 orig_rtos,
+                                      struct net_device *dev_out,
                                       unsigned int flags)
 {
        struct fib_info *fi = res->fi;
-       u32 tos = RT_FL_TOS(fl4);
        struct in_device *in_dev;
        u16 type = res->type;
        struct rtable *rth;
@@ -2470,7 +2585,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
        rth->rt_genid = rt_genid(dev_net(dev_out));
        rth->rt_flags   = flags;
        rth->rt_type    = type;
-       rth->rt_key_tos = tos;
+       rth->rt_key_tos = orig_rtos;
        rth->rt_dst     = fl4->daddr;
        rth->rt_src     = fl4->saddr;
        rth->rt_route_iif = 0;
@@ -2520,7 +2635,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
 static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
 {
        struct net_device *dev_out = NULL;
-       u32 tos = RT_FL_TOS(fl4);
+       __u8 tos = RT_FL_TOS(fl4);
        unsigned int flags = 0;
        struct fib_result res;
        struct rtable *rth;
@@ -2696,7 +2811,7 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
 
 make_route:
        rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
-                              dev_out, flags);
+                              tos, dev_out, flags);
        if (!IS_ERR(rth)) {
                unsigned int hash;
 
@@ -2732,6 +2847,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
                            (IPTOS_RT_MASK | RTO_ONLINK)) &&
                    net_eq(dev_net(rth->dst.dev), net) &&
                    !rt_is_expired(rth)) {
+                       ipv4_validate_peer(rth);
                        dst_use(&rth->dst, jiffies);
                        RT_CACHE_STAT_INC(out_hit);
                        rcu_read_unlock_bh();
@@ -2755,9 +2871,11 @@ static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 coo
        return NULL;
 }
 
-static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
+static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
 {
-       return 0;
+       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+       return mtu ? : dst->dev->mtu;
 }
 
 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -2775,7 +2893,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
        .protocol               =       cpu_to_be16(ETH_P_IP),
        .destroy                =       ipv4_dst_destroy,
        .check                  =       ipv4_blackhole_dst_check,
-       .default_mtu            =       ipv4_blackhole_default_mtu,
+       .mtu                    =       ipv4_blackhole_mtu,
        .default_advmss         =       ipv4_default_advmss,
        .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
        .cow_metrics            =       ipv4_rt_blackhole_cow_metrics,
@@ -3156,6 +3274,13 @@ static ctl_table ipv4_route_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
+       {
+               .procname       = "gc_interval",
+               .data           = &ip_rt_gc_interval,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
        {
                .procname       = "redirect_load",
                .data           = &ip_rt_redirect_load,
@@ -3366,6 +3491,11 @@ int __init ip_rt_init(void)
        devinet_init();
        ip_fib_init();
 
+       INIT_DELAYED_WORK_DEFERRABLE(&expires_work, rt_worker_func);
+       expires_ljiffies = jiffies;
+       schedule_delayed_work(&expires_work,
+               net_random() % ip_rt_gc_interval + ip_rt_gc_interval);
+
        if (ip_rt_proc_init())
                printk(KERN_ERR "Unable to create route proc files\n");
 #ifdef CONFIG_XFRM
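
The route.c changes above reinstate rt_check_expire() as a deferrable delayed work: rt_worker_func() scans part of the hash table every ip_rt_gc_interval (now also exported as the gc_interval sysctl), frees expired entries, and re-derives rt_chain_length_max from the observed chain lengths as roughly mean plus four standard deviations, computed from a running sum and sum of squares. A user-space sketch of that statistic; isqrt stands in for the kernel's int_sqrt() and the sample lengths are made up:

    #include <stdio.h>

    /* Integer square root by bisection (stand-in for int_sqrt()). */
    static unsigned long isqrt(unsigned long x)
    {
            unsigned long lo = 0, hi = x / 2 + 1;

            while (lo < hi) {
                    unsigned long mid = (lo + hi + 1) / 2;

                    if (mid * mid <= x)
                            lo = mid;
                    else
                            hi = mid - 1;
            }
            return lo;
    }

    int main(void)
    {
            unsigned long lengths[] = { 3, 5, 4, 9, 2, 4, 3, 6 };
            unsigned long sum = 0, sum2 = 0, samples = 0;
            unsigned long i;

            for (i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
                    sum += lengths[i];
                    sum2 += lengths[i] * lengths[i];
                    samples++;
            }

            if (samples) {
                    unsigned long avg = sum / samples;
                    unsigned long sd = isqrt(sum2 / samples - avg * avg);

                    /* The patch clamps chains at about avg + 4*sd (the
                     * kernel additionally scales by FRACT_BITS and never
                     * goes below ip_rt_gc_elasticity). */
                    printf("avg=%lu sd=%lu limit=%lu\n",
                           avg, sd, avg + 4 * sd);
            }
            return 0;
    }
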
index ab0966df1e2a8aec9e4ecb40e77332cbebbde466..5a65eeac1d29b8f4035314fb1db1dc403af1a428 100644 (file)
@@ -1164,7 +1164,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        struct inet_sock *inet = inet_sk(sk);
        struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
        struct sk_buff *skb;
-       unsigned int ulen;
+       unsigned int ulen, copied;
        int peeked;
        int err;
        int is_udplite = IS_UDPLITE(sk);
@@ -1186,9 +1186,10 @@ try_again:
                goto out;
 
        ulen = skb->len - sizeof(struct udphdr);
-       if (len > ulen)
-               len = ulen;
-       else if (len < ulen)
+       copied = len;
+       if (copied > ulen)
+               copied = ulen;
+       else if (copied < ulen)
                msg->msg_flags |= MSG_TRUNC;
 
        /*
@@ -1197,14 +1198,14 @@ try_again:
         * coverage checksum (UDP-Lite), do it before the copy.
         */
 
-       if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
+       if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
                if (udp_lib_checksum_complete(skb))
                        goto csum_copy_err;
        }
 
        if (skb_csum_unnecessary(skb))
                err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
-                                             msg->msg_iov, len);
+                                             msg->msg_iov, copied);
        else {
                err = skb_copy_and_csum_datagram_iovec(skb,
                                                       sizeof(struct udphdr),
@@ -1233,7 +1234,7 @@ try_again:
        if (inet->cmsg_flags)
                ip_cmsg_recv(msg, skb);
 
-       err = len;
+       err = copied;
        if (flags & MSG_TRUNC)
                err = ulen;
 
index cf88df82e2c21ca21da8184f0370096b094dffba..36806def8cfd5c1b185fc6ebfe06bf91fb6403c9 100644 (file)
@@ -1805,7 +1805,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
                return ERR_PTR(-EACCES);
 
        /* Add default multicast route */
-       addrconf_add_mroute(dev);
+       if (!(dev->flags & IFF_LOOPBACK))
+               addrconf_add_mroute(dev);
 
        /* Add link local route */
        addrconf_add_lroute(dev);
index fee46d5a2f125f54451f5115dbddd050138af3a7..1567fb120392a0231dcf47f5c72ac950f952ae48 100644 (file)
@@ -85,7 +85,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk,
  * request_sock (formerly open request) hash tables.
  */
 static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
-                          const u32 rnd, const u16 synq_hsize)
+                          const u32 rnd, const u32 synq_hsize)
 {
        u32 c;
 
index 84d0bd5cac939814edaed4379f09464a958d61bf..ec562713db9b16e3ee6a3b03ca17a13af2024e77 100644 (file)
@@ -603,7 +603,7 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
        static atomic_t ipv6_fragmentation_id;
        int old, new;
 
-       if (rt) {
+       if (rt && !(rt->dst.flags & DST_NOPEER)) {
                struct inet_peer *peer;
 
                if (!rt->rt6i_peer)
index c99e3ee9781f246f82185e8a0e246e02a258cf82..26cb08c84b7488d533eafcccf44b3c134b8b9b2d 100644 (file)
@@ -503,7 +503,7 @@ done:
                        goto e_inval;
                if (val > 255 || val < -1)
                        goto e_inval;
-               np->mcast_hops = val;
+               np->mcast_hops = (val == -1 ? IPV6_DEFAULT_MCASTHOPS : val);
                retv = 0;
                break;
 
index 44e5b7f2a6c1badcbf4dbb5ed2a844ae2daa199c..0cb78d7ddaf5f74237bd6f9e871ae255e9175072 100644 (file)
@@ -1571,7 +1571,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
        }
        if (!rt->rt6i_peer)
                rt6_bind_peer(rt, 1);
-       if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
+       if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
                goto release;
 
        if (dev->addr_len) {
index 448464844a253474fed5624f75c68c9bb33cd98e..f792b34cbe9cb2710e072c33e4127b4c0d416210 100644 (file)
@@ -186,7 +186,6 @@ config IP6_NF_MANGLE
 
 config IP6_NF_RAW
        tristate  'raw table support (required for TRACE)'
-       depends on NETFILTER_ADVANCED
        help
          This option adds a `raw' table to ip6tables. This table is the very
          first in the netfilter framework and hooks in at the PREROUTING
index 8473016bba4a8cd6ae4dfc420c7827524fc3a84b..b582a0a0f1c5a31ba926f70cf1947db00f7168ff 100644 (file)
@@ -77,7 +77,7 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
                                    const struct in6_addr *dest);
 static struct dst_entry        *ip6_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int     ip6_default_advmss(const struct dst_entry *dst);
-static unsigned int     ip6_default_mtu(const struct dst_entry *dst);
+static unsigned int     ip6_mtu(const struct dst_entry *dst);
 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
 static void            ip6_dst_destroy(struct dst_entry *);
 static void            ip6_dst_ifdown(struct dst_entry *,
@@ -144,7 +144,7 @@ static struct dst_ops ip6_dst_ops_template = {
        .gc_thresh              =       1024,
        .check                  =       ip6_dst_check,
        .default_advmss         =       ip6_default_advmss,
-       .default_mtu            =       ip6_default_mtu,
+       .mtu                    =       ip6_mtu,
        .cow_metrics            =       ipv6_cow_metrics,
        .destroy                =       ip6_dst_destroy,
        .ifdown                 =       ip6_dst_ifdown,
@@ -155,9 +155,11 @@ static struct dst_ops ip6_dst_ops_template = {
        .neigh_lookup           =       ip6_neigh_lookup,
 };
 
-static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst)
+static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
 {
-       return 0;
+       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+       return mtu ? : dst->dev->mtu;
 }
 
 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
@@ -175,7 +177,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
        .protocol               =       cpu_to_be16(ETH_P_IPV6),
        .destroy                =       ip6_dst_destroy,
        .check                  =       ip6_dst_check,
-       .default_mtu            =       ip6_blackhole_default_mtu,
+       .mtu                    =       ip6_blackhole_mtu,
        .default_advmss         =       ip6_default_advmss,
        .update_pmtu            =       ip6_rt_blackhole_update_pmtu,
        .cow_metrics            =       ip6_rt_blackhole_cow_metrics,
@@ -726,7 +728,7 @@ static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
                int attempts = !in_softirq();
 
                if (!(rt->rt6i_flags&RTF_GATEWAY)) {
-                       if (rt->rt6i_dst.plen != 128 &&
+                       if (ort->rt6i_dst.plen != 128 &&
                            ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
                                rt->rt6i_flags |= RTF_ANYCAST;
                        ipv6_addr_copy(&rt->rt6i_gateway, daddr);
@@ -1041,10 +1043,15 @@ static unsigned int ip6_default_advmss(const struct dst_entry *dst)
        return mtu;
 }
 
-static unsigned int ip6_default_mtu(const struct dst_entry *dst)
+static unsigned int ip6_mtu(const struct dst_entry *dst)
 {
-       unsigned int mtu = IPV6_MIN_MTU;
        struct inet6_dev *idev;
+       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+       if (mtu)
+               return mtu;
+
+       mtu = IPV6_MIN_MTU;
 
        rcu_read_lock();
        idev = __in6_dev_get(dst->dev);
index a7a18602a046e1ffe5f0f00883844459802f4a25..96f3623618e31a648e765eb68fd506942420f55b 100644 (file)
@@ -263,6 +263,8 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
        if (register_netdevice(dev) < 0)
                goto failed_free;
 
+       strcpy(nt->parms.name, dev->name);
+
        dev_hold(dev);
 
        ipip6_tunnel_link(sitn, nt);
@@ -1144,7 +1146,6 @@ static int ipip6_tunnel_init(struct net_device *dev)
        struct ip_tunnel *tunnel = netdev_priv(dev);
 
        tunnel->dev = dev;
-       strcpy(tunnel->parms.name, dev->name);
 
        memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
        memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
@@ -1207,6 +1208,7 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
 static int __net_init sit_init_net(struct net *net)
 {
        struct sit_net *sitn = net_generic(net, sit_net_id);
+       struct ip_tunnel *t;
        int err;
 
        sitn->tunnels[0] = sitn->tunnels_wc;
@@ -1231,6 +1233,9 @@ static int __net_init sit_init_net(struct net *net)
        if ((err = register_netdev(sitn->fb_tunnel_dev)))
                goto err_reg_dev;
 
+       t = netdev_priv(sitn->fb_tunnel_dev);
+
+       strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
        return 0;
 
 err_reg_dev:
index 36131d122a6f3f9007776ff343a9197bd0430b0a..2dea4bb7b54a3381a7c50e60c2ade383eabe477b 100644 (file)
@@ -1255,6 +1255,13 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        if (!want_cookie || tmp_opt.tstamp_ok)
                TCP_ECN_create_request(req, tcp_hdr(skb));
 
+       treq->iif = sk->sk_bound_dev_if;
+
+       /* So that link locals have meaning */
+       if (!sk->sk_bound_dev_if &&
+           ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
+               treq->iif = inet6_iif(skb);
+
        if (!isn) {
                struct inet_peer *peer = NULL;
 
@@ -1264,12 +1271,6 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
                        atomic_inc(&skb->users);
                        treq->pktopts = skb;
                }
-               treq->iif = sk->sk_bound_dev_if;
-
-               /* So that link locals have meaning */
-               if (!sk->sk_bound_dev_if &&
-                   ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
-                       treq->iif = inet6_iif(skb);
 
                if (want_cookie) {
                        isn = cookie_v6_init_sequence(sk, skb, &req->mss);
index 846f4757eb8d46394a604595be0698d485ae1ab0..8c25419151839cc2e7a8be6940de19d62a505690 100644 (file)
@@ -340,7 +340,7 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;
-       unsigned int ulen;
+       unsigned int ulen, copied;
        int peeked;
        int err;
        int is_udplite = IS_UDPLITE(sk);
@@ -363,9 +363,10 @@ try_again:
                goto out;
 
        ulen = skb->len - sizeof(struct udphdr);
-       if (len > ulen)
-               len = ulen;
-       else if (len < ulen)
+       copied = len;
+       if (copied > ulen)
+               copied = ulen;
+       else if (copied < ulen)
                msg->msg_flags |= MSG_TRUNC;
 
        is_udp4 = (skb->protocol == htons(ETH_P_IP));
@@ -376,14 +377,14 @@ try_again:
         * coverage checksum (UDP-Lite), do it before the copy.
         */
 
-       if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
+       if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
                if (udp_lib_checksum_complete(skb))
                        goto csum_copy_err;
        }
 
        if (skb_csum_unnecessary(skb))
                err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
-                                             msg->msg_iov,len);
+                                             msg->msg_iov, copied       );
        else {
                err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
                if (err == -EINVAL)
@@ -432,7 +433,7 @@ try_again:
                        datagram_recv_ctl(sk, msg, skb);
        }
 
-       err = len;
+       err = copied;
        if (flags & MSG_TRUNC)
                err = ulen;
 
index cf0f308abf5e7324aa05a40cfffd4eee166b0d51..89ff8c67943e8af26efe1bac80b306e297921ebd 100644 (file)
@@ -1072,7 +1072,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
 
        /* Get routing info from the tunnel socket */
        skb_dst_drop(skb);
-       skb_dst_set(skb, dst_clone(__sk_dst_get(sk)));
+       skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
 
        inet = inet_sk(sk);
        fl = &inet->cork.fl;
index dfd3a648a55107bda2ff14adb6f9e91c06449240..a18e6c3d36e37e699089ed5e0910c857da073d1c 100644 (file)
@@ -833,15 +833,15 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
                copied += used;
                len -= used;
 
+               /* For non stream protcols we get one packet per recvmsg call */
+               if (sk->sk_type != SOCK_STREAM)
+                       goto copy_uaddr;
+
                if (!(flags & MSG_PEEK)) {
                        sk_eat_skb(sk, skb, 0);
                        *seq = 0;
                }
 
-               /* For non stream protcols we get one packet per recvmsg call */
-               if (sk->sk_type != SOCK_STREAM)
-                       goto copy_uaddr;
-
                /* Partial read */
                if (used + offset < skb->len)
                        continue;
@@ -857,6 +857,12 @@ copy_uaddr:
        }
        if (llc_sk(sk)->cmsg_flags)
                llc_cmsg_rcv(msg, skb);
+
+       if (!(flags & MSG_PEEK)) {
+                       sk_eat_skb(sk, skb, 0);
+                       *seq = 0;
+       }
+
        goto out;
 }
 
index b3f65520e7a716312adc51d48d8fa9d8e2993c3d..2e4b961648d4352f46594291065ec5ec7af1ab0c 100644 (file)
@@ -161,6 +161,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                return -ENOENT;
        }
 
+       /* if we're already stopping ignore any new requests to stop */
+       if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+               spin_unlock_bh(&sta->lock);
+               return -EALREADY;
+       }
+
        if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
                /* not even started yet! */
                ieee80211_assign_tid_tx(sta, tid, NULL);
@@ -169,6 +175,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                return 0;
        }
 
+       set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
+
        spin_unlock_bh(&sta->lock);
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
@@ -176,8 +184,6 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
               sta->sta.addr, tid);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 
-       set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
-
        del_timer_sync(&tid_tx->addba_resp_timer);
 
        /*
@@ -187,6 +193,20 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
         */
        clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
 
+       /*
+        * There might be a few packets being processed right now (on
+        * another CPU) that have already gotten past the aggregation
+        * check when it was still OPERATIONAL and consequently have
+        * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
+        * call into the driver at the same time or even before the
+        * TX paths calls into it, which could confuse the driver.
+        *
+        * Wait for all currently running TX paths to finish before
+        * telling the driver. New packets will not go through since
+        * the aggregation session is no longer OPERATIONAL.
+        */
+       synchronize_net();
+
        tid_tx->stop_initiator = initiator;
        tid_tx->tx_stop = tx;
 
@@ -283,6 +303,38 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
        __release(agg_queue);
 }
 
+/*
+ * splice packets from the STA's pending to the local pending,
+ * requires a call to ieee80211_agg_splice_finish later
+ */
+static void __acquires(agg_queue)
+ieee80211_agg_splice_packets(struct ieee80211_local *local,
+                            struct tid_ampdu_tx *tid_tx, u16 tid)
+{
+       int queue = ieee80211_ac_from_tid(tid);
+       unsigned long flags;
+
+       ieee80211_stop_queue_agg(local, tid);
+
+       if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
+                         " from the pending queue\n", tid))
+               return;
+
+       if (!skb_queue_empty(&tid_tx->pending)) {
+               spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+               /* copy over remaining packets */
+               skb_queue_splice_tail_init(&tid_tx->pending,
+                                          &local->pending[queue]);
+               spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+       }
+}
+
+static void __releases(agg_queue)
+ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
+{
+       ieee80211_wake_queue_agg(local, tid);
+}
+
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 {
        struct tid_ampdu_tx *tid_tx;
@@ -294,19 +346,17 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
        /*
-        * While we're asking the driver about the aggregation,
-        * stop the AC queue so that we don't have to worry
-        * about frames that came in while we were doing that,
-        * which would require us to put them to the AC pending
-        * afterwards which just makes the code more complex.
+        * Start queuing up packets for this aggregation session.
+        * We're going to release them once the driver is OK with
+        * that.
         */
-       ieee80211_stop_queue_agg(local, tid);
-
        clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
 
        /*
-        * make sure no packets are being processed to get
-        * valid starting sequence number
+        * Make sure no packets are being processed. This ensures that
+        * we have a valid starting sequence number and that in-flight
+        * packets have been flushed out and no packets for this TID
+        * will go into the driver during the ampdu_action call.
         */
        synchronize_net();
 
@@ -320,17 +370,15 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
                                        " tid %d\n", tid);
 #endif
                spin_lock_bh(&sta->lock);
+               ieee80211_agg_splice_packets(local, tid_tx, tid);
                ieee80211_assign_tid_tx(sta, tid, NULL);
+               ieee80211_agg_splice_finish(local, tid);
                spin_unlock_bh(&sta->lock);
 
-               ieee80211_wake_queue_agg(local, tid);
                kfree_rcu(tid_tx, rcu_head);
                return;
        }
 
-       /* we can take packets again now */
-       ieee80211_wake_queue_agg(local, tid);
-
        /* activate the timer for the recipient's addBA response */
        mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
 #ifdef CONFIG_MAC80211_HT_DEBUG
@@ -446,38 +494,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 }
 EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
 
-/*
- * splice packets from the STA's pending to the local pending,
- * requires a call to ieee80211_agg_splice_finish later
- */
-static void __acquires(agg_queue)
-ieee80211_agg_splice_packets(struct ieee80211_local *local,
-                            struct tid_ampdu_tx *tid_tx, u16 tid)
-{
-       int queue = ieee80211_ac_from_tid(tid);
-       unsigned long flags;
-
-       ieee80211_stop_queue_agg(local, tid);
-
-       if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
-                         " from the pending queue\n", tid))
-               return;
-
-       if (!skb_queue_empty(&tid_tx->pending)) {
-               spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-               /* copy over remaining packets */
-               skb_queue_splice_tail_init(&tid_tx->pending,
-                                          &local->pending[queue]);
-               spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
-       }
-}
-
-static void __releases(agg_queue)
-ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
-{
-       ieee80211_wake_queue_agg(local, tid);
-}
-
 static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
                                         struct sta_info *sta, u16 tid)
 {
@@ -757,11 +773,27 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
                goto out;
        }
 
-       del_timer(&tid_tx->addba_resp_timer);
+       del_timer_sync(&tid_tx->addba_resp_timer);
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
 #endif
+
+       /*
+        * addba_resp_timer may have fired before we got here, and
+        * caused WANT_STOP to be set. If the stop then was already
+        * processed further, STOPPING might be set.
+        */
+       if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
+           test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+               printk(KERN_DEBUG
+                      "got addBA resp for tid %d but we already gave up\n",
+                      tid);
+#endif
+               goto out;
+       }
+
        /*
         * IEEE 802.11-2007 7.3.1.14:
         * In an ADDBA Response frame, when the Status Code field
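
The agg-tx.c hunks above make stopping a TX aggregation session idempotent (a second stop request sees HT_AGG_STATE_STOPPING, which is now set before the lock is dropped, and returns -EALREADY) and, on the start path, queue outgoing frames on the session's pending queue instead of stopping the whole AC queue while the driver is consulted. A toy user-space model of just the idempotent-stop guard; the state flags, lock and function names are mine, not mac80211's:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    #define AGG_OPERATIONAL 0x1
    #define AGG_STOPPING    0x2

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int agg_state = AGG_OPERATIONAL;

    /* The STOPPING flag is set while still holding the lock, so a
     * concurrent (or repeated) stop sees it and backs off instead of
     * tearing the session down twice. */
    static int stop_session(void)
    {
            pthread_mutex_lock(&lock);
            if (agg_state & AGG_STOPPING) {
                    pthread_mutex_unlock(&lock);
                    return -EALREADY;
            }
            agg_state |= AGG_STOPPING;
            pthread_mutex_unlock(&lock);

            /* ... wait for in-flight transmissions, tell the driver ... */
            agg_state &= ~AGG_OPERATIONAL;
            return 0;
    }

    int main(void)
    {
            printf("first stop:  %d\n", stop_session());    /* 0 */
            printf("second stop: %d\n", stop_session());    /* -EALREADY */
            return 0;
    }
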
index c5f341798c16769a1e5c5b7f48937e1a15d47f80..3110cbdc501b83d3650c978d26c51c2ef7c47568 100644 (file)
@@ -274,9 +274,9 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
 
                PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack");
 
-               PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
-                            "3839 bytes");
                PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: "
+                            "3839 bytes");
+               PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
                             "7935 bytes");
 
                /*
index d999bf3b84e1b27d0d79c91d44b7771756d92a3e..cae443563ec9d98a8230de9fae394b960fa66621 100644 (file)
@@ -757,6 +757,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
        if (!local->int_scan_req)
                return -ENOMEM;
 
+       for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+               if (!local->hw.wiphy->bands[band])
+                       continue;
+               local->int_scan_req->rates[band] = (u32) -1;
+       }
+
        /* if low-level driver supports AP, we also support VLAN */
        if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
                hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
index 80de436eae20eed9aa58f2a99fc7366b4f840649..16518f386117ad8f35dc075bd7ee4f244ab298c4 100644 (file)
@@ -260,7 +260,7 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        struct ieee80211_radiotap_header *rthdr;
        unsigned char *pos;
-       __le16 txflags;
+       u16 txflags;
 
        rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len);
 
@@ -290,13 +290,13 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
        txflags = 0;
        if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
            !is_multicast_ether_addr(hdr->addr1))
-               txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
+               txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;
 
        if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
            (info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
-               txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
+               txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
        else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
-               txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
+               txflags |= IEEE80211_RADIOTAP_F_TX_RTS;
 
        put_unaligned_le16(txflags, pos);
        pos += 2;
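
The status.c hunk keeps the radiotap TX-flags field in host byte order while the bits are OR-ed together and converts it exactly once with put_unaligned_le16(). The previous code converted each flag with cpu_to_le16() and then passed the already little-endian value to put_unaligned_le16(), which as far as I can tell amounts to a double conversion that would misorder the bytes on big-endian hosts. A user-space sketch of storing a host-order 16-bit value little-endian at an unaligned offset (store_le16 and the flag values are illustrative, not the radiotap constants):

    #include <stdint.h>
    #include <stdio.h>

    /* Write a host-order 16-bit value as little-endian bytes at an
     * arbitrary (possibly unaligned) position in a packet buffer. */
    static void store_le16(uint8_t *pos, uint16_t val)
    {
            pos[0] = val & 0xff;
            pos[1] = val >> 8;
    }

    int main(void)
    {
            uint8_t buf[4] = { 0 };
            uint16_t txflags = 0;

            txflags |= 0x0001;      /* arbitrary example flag bit */
            txflags |= 0x0008;      /* arbitrary example flag bit */

            store_le16(buf + 1, txflags);   /* deliberately unaligned */
            printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
            return 0;
    }
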
index eca0fad09709518266d9aed37c61f75e2f05afcf..d5230ecc784d2702feb580da4d80e9ef1d3ec0bd 100644 (file)
@@ -1039,7 +1039,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                                             struct ieee80211_sub_if_data,
                                             u.ap);
 
-                       memset(&sta->sta.drv_priv, 0, hw->sta_data_size);
                        WARN_ON(drv_sta_add(local, sdata, &sta->sta));
                }
        }
index 8260b13d93c926cf9964d7a4f4e225be0123a68c..d5597b759ba39724e7bc3fe5376b7b5bb02eb13c 100644 (file)
@@ -201,7 +201,6 @@ config NF_CONNTRACK_BROADCAST
 
 config NF_CONNTRACK_NETBIOS_NS
        tristate "NetBIOS name service protocol support"
-       depends on NETFILTER_ADVANCED
        select NF_CONNTRACK_BROADCAST
        help
          NetBIOS name service requests are sent as broadcast messages from an
@@ -542,7 +541,6 @@ config NETFILTER_XT_TARGET_NOTRACK
        tristate  '"NOTRACK" target support'
        depends on IP_NF_RAW || IP6_NF_RAW
        depends on NF_CONNTRACK
-       depends on NETFILTER_ADVANCED
        help
          The NOTRACK target allows a select rule to specify
          which packets *not* to enter the conntrack/NAT
index 6ee10f5d59bd8a4fde8ad54447c5552d11863303..37d667e3f6f82d82e442b66d4c38dd592cba06d3 100644 (file)
@@ -158,7 +158,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipport4_elem data = { };
-       u32 ip, ip_to, p = 0, port, port_to;
+       u32 ip, ip_to = 0, p = 0, port, port_to;
        u32 timeout = h->timeout;
        bool with_ports = false;
        int ret;
index fb90e344e90709f205bef4c2a61b3855be1c7f56..e69e2718fbe162343eaf97defd30153155ee2ef1 100644 (file)
@@ -162,7 +162,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportip4_elem data = { };
-       u32 ip, ip_to, p = 0, port, port_to;
+       u32 ip, ip_to = 0, p = 0, port, port_to;
        u32 timeout = h->timeout;
        bool with_ports = false;
        int ret;
index deb3e3dfa5fcb13ba9f125f0a9dbcc7042d0735a..64199b4e93c952e24ca8c5508af1d3788b1286f8 100644 (file)
@@ -184,7 +184,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
-       u32 ip, ip_to, p = 0, port, port_to;
+       u32 ip, ip_to = 0, p = 0, port, port_to;
        u32 ip2_from = 0, ip2_to, ip2_last, ip2;
        u32 timeout = h->timeout;
        bool with_ports = false;
index 12571fb2881c2c3670aab3a765f7a6e0cf3afcf7..29fa5badde757d6de0ac2fc0359cc27ba37ff83b 100644 (file)
@@ -616,7 +616,7 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
        if ((cp) && (!cp->dest)) {
                dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
                                       cp->dport, &cp->vaddr, cp->vport,
-                                      cp->protocol, cp->fwmark);
+                                      cp->protocol, cp->fwmark, cp->flags);
                ip_vs_bind_dest(cp, dest);
                return dest;
        } else
index 008bf97cc91a58b14a0ef9fd6924eb6f71063f11..e1a66cf37f9a6fb1af5714fffa2199cd9f9bf27b 100644 (file)
@@ -619,15 +619,21 @@ struct ip_vs_dest *ip_vs_find_dest(struct net  *net, int af,
                                   const union nf_inet_addr *daddr,
                                   __be16 dport,
                                   const union nf_inet_addr *vaddr,
-                                  __be16 vport, __u16 protocol, __u32 fwmark)
+                                  __be16 vport, __u16 protocol, __u32 fwmark,
+                                  __u32 flags)
 {
        struct ip_vs_dest *dest;
        struct ip_vs_service *svc;
+       __be16 port = dport;
 
        svc = ip_vs_service_get(net, af, fwmark, protocol, vaddr, vport);
        if (!svc)
                return NULL;
-       dest = ip_vs_lookup_dest(svc, daddr, dport);
+       if (fwmark && (flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ)
+               port = 0;
+       dest = ip_vs_lookup_dest(svc, daddr, port);
+       if (!dest)
+               dest = ip_vs_lookup_dest(svc, daddr, port ^ dport);
        if (dest)
                atomic_inc(&dest->refcnt);
        ip_vs_service_put(svc);
index 3cdd479f9b5d179292182bb2711ec243b57736ee..2b6678c0ce147117de1a5b384eea4540ef4e79ee 100644 (file)
@@ -740,7 +740,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
                 * but still handled.
                 */
                dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr,
-                                      param->vport, protocol, fwmark);
+                                      param->vport, protocol, fwmark, flags);
 
                /*  Set the approprite ativity flag */
                if (protocol == IPPROTO_TCP) {
index 6b368be937c615610a7fa7b90d26b58a2fa041d5..b62c4148b92131444f6e132cb55a058991d68379 100644 (file)
 
 static DEFINE_MUTEX(nf_ct_ecache_mutex);
 
-struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb __read_mostly;
-EXPORT_SYMBOL_GPL(nf_conntrack_event_cb);
-
-struct nf_exp_event_notifier __rcu *nf_expect_event_cb __read_mostly;
-EXPORT_SYMBOL_GPL(nf_expect_event_cb);
-
 /* deliver cached events and clear cache entry - must be called with locally
  * disabled softirqs */
 void nf_ct_deliver_cached_events(struct nf_conn *ct)
 {
+       struct net *net = nf_ct_net(ct);
        unsigned long events;
        struct nf_ct_event_notifier *notify;
        struct nf_conntrack_ecache *e;
 
        rcu_read_lock();
-       notify = rcu_dereference(nf_conntrack_event_cb);
+       notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
        if (notify == NULL)
                goto out_unlock;
 
@@ -83,19 +78,20 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
 
-int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
+int nf_conntrack_register_notifier(struct net *net,
+                                  struct nf_ct_event_notifier *new)
 {
        int ret = 0;
        struct nf_ct_event_notifier *notify;
 
        mutex_lock(&nf_ct_ecache_mutex);
-       notify = rcu_dereference_protected(nf_conntrack_event_cb,
+       notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
                                           lockdep_is_held(&nf_ct_ecache_mutex));
        if (notify != NULL) {
                ret = -EBUSY;
                goto out_unlock;
        }
-       RCU_INIT_POINTER(nf_conntrack_event_cb, new);
+       RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, new);
        mutex_unlock(&nf_ct_ecache_mutex);
        return ret;
 
@@ -105,32 +101,34 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
 
-void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
+void nf_conntrack_unregister_notifier(struct net *net,
+                                     struct nf_ct_event_notifier *new)
 {
        struct nf_ct_event_notifier *notify;
 
        mutex_lock(&nf_ct_ecache_mutex);
-       notify = rcu_dereference_protected(nf_conntrack_event_cb,
+       notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
                                           lockdep_is_held(&nf_ct_ecache_mutex));
        BUG_ON(notify != new);
-       RCU_INIT_POINTER(nf_conntrack_event_cb, NULL);
+       RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
        mutex_unlock(&nf_ct_ecache_mutex);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
 
-int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
+int nf_ct_expect_register_notifier(struct net *net,
+                                  struct nf_exp_event_notifier *new)
 {
        int ret = 0;
        struct nf_exp_event_notifier *notify;
 
        mutex_lock(&nf_ct_ecache_mutex);
-       notify = rcu_dereference_protected(nf_expect_event_cb,
+       notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
                                           lockdep_is_held(&nf_ct_ecache_mutex));
        if (notify != NULL) {
                ret = -EBUSY;
                goto out_unlock;
        }
-       RCU_INIT_POINTER(nf_expect_event_cb, new);
+       RCU_INIT_POINTER(net->ct.nf_expect_event_cb, new);
        mutex_unlock(&nf_ct_ecache_mutex);
        return ret;
 
@@ -140,15 +138,16 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);
 
-void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
+void nf_ct_expect_unregister_notifier(struct net *net,
+                                     struct nf_exp_event_notifier *new)
 {
        struct nf_exp_event_notifier *notify;
 
        mutex_lock(&nf_ct_ecache_mutex);
-       notify = rcu_dereference_protected(nf_expect_event_cb,
+       notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
                                           lockdep_is_held(&nf_ct_ecache_mutex));
        BUG_ON(notify != new);
-       RCU_INIT_POINTER(nf_expect_event_cb, NULL);
+       RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
        mutex_unlock(&nf_ct_ecache_mutex);
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
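
[Editor's note: the hunk above moves the conntrack and expectation event callbacks from global pointers into per-namespace storage (net->ct). Below is a hedged, userspace-only sketch of the registration discipline that survives the conversion: one callback slot per namespace, claimed under a mutex, refused with -EBUSY when already taken. All names (event_notifier, netns_ct_demo, register_notifier) are invented for the sketch, and a plain pointer store stands in for RCU_INIT_POINTER().]

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

struct event_notifier {
        void (*fcn)(int event);          /* callback, never invoked here */
};

struct netns_ct_demo {                   /* per-namespace slot, like net->ct */
        struct event_notifier *event_cb;
};

static pthread_mutex_t ecache_mutex = PTHREAD_MUTEX_INITIALIZER;

static int register_notifier(struct netns_ct_demo *ct,
                             struct event_notifier *new_cb)
{
        int ret = 0;

        pthread_mutex_lock(&ecache_mutex);
        if (ct->event_cb != NULL)
                ret = -EBUSY;            /* slot already claimed */
        else
                ct->event_cb = new_cb;   /* RCU_INIT_POINTER() in the kernel */
        pthread_mutex_unlock(&ecache_mutex);
        return ret;
}

static void unregister_notifier(struct netns_ct_demo *ct)
{
        pthread_mutex_lock(&ecache_mutex);
        ct->event_cb = NULL;
        pthread_mutex_unlock(&ecache_mutex);
}

int main(void)
{
        struct netns_ct_demo ns = { NULL };
        struct event_notifier cb = { NULL };

        int first  = register_notifier(&ns, &cb);   /* 0 */
        int second = register_notifier(&ns, &cb);   /* -EBUSY */

        unregister_notifier(&ns);
        return (first == 0 && second == -EBUSY) ? 0 : 1;
}
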
index e58aa9b1fe8a043226c4bc7ee90f5145a7adc316..257e77256c5cda4dc9d4038e6996f50a4ea7dbe9 100644 (file)
@@ -4,7 +4,7 @@
  * (C) 2001 by Jay Schulist <jschlst@samba.org>
  * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
  * (C) 2003 by Patrick Mchardy <kaber@trash.net>
- * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2005-2011 by Pablo Neira Ayuso <pablo@netfilter.org>
  *
  * Initial connection tracking via netlink development funded and
  * generally made possible by Network Robots, Inc. (www.networkrobots.com)
@@ -135,7 +135,7 @@ nla_put_failure:
 static inline int
 ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
 {
-       long timeout = (ct->timeout.expires - jiffies) / HZ;
+       long timeout = ((long)ct->timeout.expires - (long)jiffies) / HZ;
 
        if (timeout < 0)
                timeout = 0;
@@ -1358,12 +1358,15 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
                                                    nf_ct_protonum(ct));
                if (helper == NULL) {
                        rcu_read_unlock();
+                       spin_unlock_bh(&nf_conntrack_lock);
 #ifdef CONFIG_MODULES
                        if (request_module("nfct-helper-%s", helpname) < 0) {
+                               spin_lock_bh(&nf_conntrack_lock);
                                err = -EOPNOTSUPP;
                                goto err1;
                        }
 
+                       spin_lock_bh(&nf_conntrack_lock);
                        rcu_read_lock();
                        helper = __nf_conntrack_helper_find(helpname,
                                                            nf_ct_l3num(ct),
@@ -1638,7 +1641,7 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
                          const struct nf_conntrack_expect *exp)
 {
        struct nf_conn *master = exp->master;
-       long timeout = (exp->timeout.expires - jiffies) / HZ;
+       long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
        struct nf_conn_help *help;
 
        if (timeout < 0)
@@ -1869,25 +1872,30 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
 
        err = -ENOMEM;
        skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-       if (skb2 == NULL)
+       if (skb2 == NULL) {
+               nf_ct_expect_put(exp);
                goto out;
+       }
 
        rcu_read_lock();
        err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid,
                                      nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
        rcu_read_unlock();
+       nf_ct_expect_put(exp);
        if (err <= 0)
                goto free;
 
-       nf_ct_expect_put(exp);
+       err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       if (err < 0)
+               goto out;
 
-       return netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
+       return 0;
 
 free:
        kfree_skb(skb2);
 out:
-       nf_ct_expect_put(exp);
-       return err;
+       /* this avoids a loop in nfnetlink. */
+       return err == -EAGAIN ? -ENOBUFS : err;
 }
 
 static int
@@ -2163,6 +2171,54 @@ MODULE_ALIAS("ip_conntrack_netlink");
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
 
+static int __net_init ctnetlink_net_init(struct net *net)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+       int ret;
+
+       ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
+       if (ret < 0) {
+               pr_err("ctnetlink_init: cannot register notifier.\n");
+               goto err_out;
+       }
+
+       ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
+       if (ret < 0) {
+               pr_err("ctnetlink_init: cannot expect register notifier.\n");
+               goto err_unreg_notifier;
+       }
+#endif
+       return 0;
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+err_unreg_notifier:
+       nf_conntrack_unregister_notifier(net, &ctnl_notifier);
+err_out:
+       return ret;
+#endif
+}
+
+static void ctnetlink_net_exit(struct net *net)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+       nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
+       nf_conntrack_unregister_notifier(net, &ctnl_notifier);
+#endif
+}
+
+static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
+{
+       struct net *net;
+
+       list_for_each_entry(net, net_exit_list, exit_list)
+               ctnetlink_net_exit(net);
+}
+
+static struct pernet_operations ctnetlink_net_ops = {
+       .init           = ctnetlink_net_init,
+       .exit_batch     = ctnetlink_net_exit_batch,
+};
+
 static int __init ctnetlink_init(void)
 {
        int ret;
@@ -2180,28 +2236,15 @@ static int __init ctnetlink_init(void)
                goto err_unreg_subsys;
        }
 
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-       ret = nf_conntrack_register_notifier(&ctnl_notifier);
-       if (ret < 0) {
-               pr_err("ctnetlink_init: cannot register notifier.\n");
+       if (register_pernet_subsys(&ctnetlink_net_ops)) {
+               pr_err("ctnetlink_init: cannot register pernet operations\n");
                goto err_unreg_exp_subsys;
        }
 
-       ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp);
-       if (ret < 0) {
-               pr_err("ctnetlink_init: cannot expect register notifier.\n");
-               goto err_unreg_notifier;
-       }
-#endif
-
        return 0;
 
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-err_unreg_notifier:
-       nf_conntrack_unregister_notifier(&ctnl_notifier);
 err_unreg_exp_subsys:
        nfnetlink_subsys_unregister(&ctnl_exp_subsys);
-#endif
 err_unreg_subsys:
        nfnetlink_subsys_unregister(&ctnl_subsys);
 err_out:
@@ -2213,11 +2256,7 @@ static void __exit ctnetlink_exit(void)
        pr_info("ctnetlink: unregistering from nfnetlink.\n");
 
        nf_ct_remove_userspace_expectations();
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-       nf_ct_expect_unregister_notifier(&ctnl_notifier_exp);
-       nf_conntrack_unregister_notifier(&ctnl_notifier);
-#endif
-
+       unregister_pernet_subsys(&ctnetlink_net_ops);
        nfnetlink_subsys_unregister(&ctnl_exp_subsys);
        nfnetlink_subsys_unregister(&ctnl_subsys);
 }
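
[Editor's note: two hunks above replace "(expires - jiffies) / HZ" with a cast-to-long form. An already-expired timer must yield a negative remainder so the following "if (timeout < 0)" clamp works; with unsigned arithmetic the subtraction wraps to a huge positive value instead. A minimal userspace demonstration, with made-up tick values and a hypothetical HZ of 100:]

#include <stdio.h>

#define HZ 100                           /* hypothetical tick rate */

int main(void)
{
        unsigned long jiffies = 1000;    /* pretend current tick count */
        unsigned long expires = 900;     /* timer that has already fired */

        /* old form: unsigned subtraction wraps, giving a huge "timeout" */
        long bad  = (expires - jiffies) / HZ;

        /* fixed form from the hunk: signed subtraction goes negative and
         * is then clamped to zero by the caller */
        long good = ((long)expires - (long)jiffies) / HZ;
        if (good < 0)
                good = 0;

        printf("unsigned math: %ld, signed math: %ld\n", bad, good);
        return 0;
}
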
index 5b138506690ec578105911ffb64309b5297d6458..9ddf1c3bfb39c5f67cb8f8c24ccdd22d03c38f10 100644 (file)
@@ -87,10 +87,10 @@ connbytes_mt(const struct sk_buff *skb, struct xt_action_param *par)
                break;
        }
 
-       if (sinfo->count.to)
+       if (sinfo->count.to >= sinfo->count.from)
                return what <= sinfo->count.to && what >= sinfo->count.from;
-       else
-               return what >= sinfo->count.from;
+       else /* inverted */
+               return what < sinfo->count.to || what > sinfo->count.from;
 }
 
 static int connbytes_mt_check(const struct xt_mtchk_param *par)
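
[Editor's note: the xt_connbytes hunk treats "count.to < count.from" as an inverted range, i.e. match everything outside the band, instead of an open-ended range. A hedged, self-contained restatement of that predicate with invented helper names:]

#include <stdbool.h>
#include <stdio.h>

/* count.from / count.to from the xt_connbytes match, renamed for clarity */
static bool connbytes_in_range(unsigned long long what,
                               unsigned long long from,
                               unsigned long long to)
{
        if (to >= from)                          /* ordinary range */
                return what <= to && what >= from;
        else                                     /* inverted: outside the band */
                return what < to || what > from;
}

int main(void)
{
        printf("%d\n", connbytes_in_range(50, 10, 100));  /* 1: inside 10..100  */
        printf("%d\n", connbytes_in_range(50, 100, 10));  /* 0: inside the hole */
        printf("%d\n", connbytes_in_range(5, 100, 10));   /* 1: below the hole  */
        return 0;
}
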
index 9c24de10a6579b78e452e47912c56e5d894e58ea..824f184f7a9bbecbfd9217e86fd474485b135ade 100644 (file)
@@ -111,8 +111,6 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
        struct netlbl_domaddr_map *addrmap = NULL;
        struct netlbl_domaddr4_map *map4 = NULL;
        struct netlbl_domaddr6_map *map6 = NULL;
-       const struct in_addr *addr4, *mask4;
-       const struct in6_addr *addr6, *mask6;
 
        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (entry == NULL)
@@ -133,9 +131,9 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
                INIT_LIST_HEAD(&addrmap->list6);
 
                switch (family) {
-               case AF_INET:
-                       addr4 = addr;
-                       mask4 = mask;
+               case AF_INET: {
+                       const struct in_addr *addr4 = addr;
+                       const struct in_addr *mask4 = mask;
                        map4 = kzalloc(sizeof(*map4), GFP_ATOMIC);
                        if (map4 == NULL)
                                goto cfg_unlbl_map_add_failure;
@@ -148,9 +146,11 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
                        if (ret_val != 0)
                                goto cfg_unlbl_map_add_failure;
                        break;
-               case AF_INET6:
-                       addr6 = addr;
-                       mask6 = mask;
+                       }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+               case AF_INET6: {
+                       const struct in6_addr *addr6 = addr;
+                       const struct in6_addr *mask6 = mask;
                        map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
                        if (map6 == NULL)
                                goto cfg_unlbl_map_add_failure;
@@ -162,11 +162,13 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
                        map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3];
                        ipv6_addr_copy(&map6->list.mask, mask6);
                        map6->list.valid = 1;
-                       ret_val = netlbl_af4list_add(&map4->list,
-                                                    &addrmap->list4);
+                       ret_val = netlbl_af6list_add(&map6->list,
+                                                    &addrmap->list6);
                        if (ret_val != 0)
                                goto cfg_unlbl_map_add_failure;
                        break;
+                       }
+#endif /* IPv6 */
                default:
                        goto cfg_unlbl_map_add_failure;
                        break;
@@ -225,9 +227,11 @@ int netlbl_cfg_unlbl_static_add(struct net *net,
        case AF_INET:
                addr_len = sizeof(struct in_addr);
                break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        case AF_INET6:
                addr_len = sizeof(struct in6_addr);
                break;
+#endif /* IPv6 */
        default:
                return -EPFNOSUPPORT;
        }
@@ -266,9 +270,11 @@ int netlbl_cfg_unlbl_static_del(struct net *net,
        case AF_INET:
                addr_len = sizeof(struct in_addr);
                break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
        case AF_INET6:
                addr_len = sizeof(struct in6_addr);
                break;
+#endif /* IPv6 */
        default:
                return -EPFNOSUPPORT;
        }
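
[Editor's note: the netlabel hunks wrap every AF_INET6 branch in "#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)" so IPv6-only code is compiled out on IPv4-only kernels. A small userspace analogue, where HAVE_IPV6 merely stands in for the kernel config symbols:]

#include <stdio.h>
#include <netinet/in.h>

#define HAVE_IPV6 1          /* stand-in for CONFIG_IPV6 / CONFIG_IPV6_MODULE */

static int addr_len_for_family(int family)
{
        switch (family) {
        case AF_INET:
                return sizeof(struct in_addr);
#if HAVE_IPV6
        case AF_INET6:
                return sizeof(struct in6_addr);
#endif
        default:
                return -1;   /* -EPFNOSUPPORT in the kernel */
        }
}

int main(void)
{
        printf("v4: %d, v6: %d\n",
               addr_len_for_family(AF_INET), addr_len_for_family(AF_INET6));
        return 0;
}
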
index 3925c6578767ea61be8cc66933d38a6c500cd1d2..ea66034499ce6bf54c3be5f02ec66ff2140f69a8 100644 (file)
@@ -69,7 +69,7 @@ static int __nci_request(struct nci_dev *ndev,
        __u32 timeout)
 {
        int rc = 0;
-       unsigned long completion_rc;
+       long completion_rc;
 
        ndev->req_status = NCI_REQ_PEND;
 
index 82a6f34d39d012fb35d9a0d490503fcc2048e6e2..d9d4970b9b07c0da22f97e29a8ac0f38ec4caff1 100644 (file)
@@ -1630,8 +1630,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
        if (snaplen > res)
                snaplen = res;
 
-       if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-           (unsigned)sk->sk_rcvbuf)
+       if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                goto drop_n_acct;
 
        if (skb_shared(skb)) {
@@ -1762,8 +1761,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        if (po->tp_version <= TPACKET_V2) {
                if (macoff + snaplen > po->rx_ring.frame_size) {
                        if (po->copy_thresh &&
-                               atomic_read(&sk->sk_rmem_alloc) + skb->truesize
-                               < (unsigned)sk->sk_rcvbuf) {
+                           atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
                                if (skb_shared(skb)) {
                                        copy_skb = skb_clone(skb, GFP_ATOMIC);
                                } else {
@@ -2450,8 +2448,12 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
 {
        struct packet_sock *po = pkt_sk(sk);
 
-       if (po->fanout)
+       if (po->fanout) {
+               if (dev)
+                       dev_put(dev);
+
                return -EINVAL;
+       }
 
        lock_sock(sk);
 
index b9493a09a870343fe90444bea4b1fac547d42e46..6cd8ddfb512d78ac0b77f24f5a2e1ceb3330968c 100644 (file)
@@ -385,7 +385,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
        struct gred_sched_data *q;
 
        if (table->tab[dp] == NULL) {
-               table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
+               table->tab[dp] = kzalloc(sizeof(*q), GFP_ATOMIC);
                if (table->tab[dp] == NULL)
                        return -ENOMEM;
        }
index f88256cbacbfe4b89c202d591514fb28feaac11b..28de43092330abc125423d5328babc709b1f986f 100644 (file)
@@ -107,7 +107,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
        if (!netif_is_multiqueue(dev))
                return -EOPNOTSUPP;
 
-       if (nla_len(opt) < sizeof(*qopt))
+       if (!opt || nla_len(opt) < sizeof(*qopt))
                return -EINVAL;
 
        qopt = nla_data(opt);
index eb3b9a86c6ed93d502a4629f241a0d2496b01385..a4ab207cdc5986f2ed1421d7981ecad01382f3b3 100644 (file)
@@ -488,7 +488,7 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
                return -EINVAL;
 
        s = sizeof(struct disttable) + n * sizeof(s16);
-       d = kmalloc(s, GFP_KERNEL);
+       d = kmalloc(s, GFP_KERNEL | __GFP_NOWARN);
        if (!d)
                d = vmalloc(s);
        if (!d)
@@ -501,9 +501,10 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
        root_lock = qdisc_root_sleeping_lock(sch);
 
        spin_lock_bh(root_lock);
-       dist_free(q->delay_dist);
-       q->delay_dist = d;
+       swap(q->delay_dist, d);
        spin_unlock_bh(root_lock);
+
+       dist_free(d);
        return 0;
 }
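
[Editor's note: the sch_netem hunk swaps the new distribution table into place while holding the qdisc root lock and defers the possibly large, vmalloc'd free until after the lock is released. A hedged pthread sketch of that swap-under-lock, free-outside pattern; every name below is invented for the illustration:]

#include <pthread.h>
#include <stdlib.h>

struct dist_table {
        size_t size;
        short table[];                    /* possibly very large payload */
};

static struct dist_table *current_table;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static void install_table(struct dist_table *new_table)
{
        struct dist_table *old;

        pthread_mutex_lock(&table_lock);
        old = current_table;              /* swap while the lock is held */
        current_table = new_table;
        pthread_mutex_unlock(&table_lock);

        free(old);                        /* slow free happens unlocked */
}

int main(void)
{
        install_table(malloc(sizeof(struct dist_table)));
        install_table(malloc(sizeof(struct dist_table)));  /* frees the first */
        free(current_table);
        return 0;
}
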
 
index 6649463da1b68e6e59e6709c7e1d0b527a60ab63..d617161f8dd3904ddda7ac23da1178ccd03a79c4 100644 (file)
@@ -209,8 +209,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
                                 ctl->Plog, ctl->Scell_log,
                                 nla_data(tb[TCA_RED_STAB]));
 
-       if (skb_queue_empty(&sch->q))
-               red_end_of_idle_period(&q->parms);
+       if (!q->qdisc->q.qlen)
+               red_start_of_idle_period(&q->parms);
 
        sch_tree_unlock(sch);
        return 0;
index a3b7120fcc74c45cb642a7edc3813bde893240f1..4f4c52c0eeb3b28459d1abad498531a777c598bc 100644 (file)
@@ -225,11 +225,11 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 
 
 static int
-__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
+__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
+              struct net_device *dev, struct netdev_queue *txq,
+              struct neighbour *mn)
 {
-       struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
-       struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
-       struct neighbour *mn = dst_get_neighbour(skb_dst(skb));
+       struct teql_sched_data *q = qdisc_priv(txq->qdisc);
        struct neighbour *n = q->ncache;
 
        if (mn->tbl == NULL)
@@ -262,17 +262,26 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
 }
 
 static inline int teql_resolve(struct sk_buff *skb,
-                              struct sk_buff *skb_res, struct net_device *dev)
+                              struct sk_buff *skb_res,
+                              struct net_device *dev,
+                              struct netdev_queue *txq)
 {
-       struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+       struct dst_entry *dst = skb_dst(skb);
+       struct neighbour *mn;
+       int res;
+
        if (txq->qdisc == &noop_qdisc)
                return -ENODEV;
 
-       if (dev->header_ops == NULL ||
-           skb_dst(skb) == NULL ||
-           dst_get_neighbour(skb_dst(skb)) == NULL)
+       if (!dev->header_ops || !dst)
                return 0;
-       return __teql_resolve(skb, skb_res, dev);
+
+       rcu_read_lock();
+       mn = dst_get_neighbour(dst);
+       res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
+       rcu_read_unlock();
+
+       return res;
 }
 
 static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -307,7 +316,7 @@ restart:
                        continue;
                }
 
-               switch (teql_resolve(skb, skb_res, slave)) {
+               switch (teql_resolve(skb, skb_res, slave, slave_txq)) {
                case 0:
                        if (__netif_tx_trylock(slave_txq)) {
                                unsigned int length = qdisc_pkt_len(skb);
index 152b5b3c3fffa978ae0d725d182f9aa54c8a9cf6..acd2edbc073ebf4ad334a4b0a16ff7b45413fac5 100644 (file)
@@ -173,7 +173,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
        asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
        asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
-               (unsigned long)sp->autoclose * HZ;
+               min_t(unsigned long, sp->autoclose, sctp_max_autoclose) * HZ;
 
        /* Initializes the timers */
        for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
index 865e68fef21c326c631183c7c7d5ded4ad842647..bf812048cf6f7a244c547e0cd31a731351abfab3 100644 (file)
@@ -82,7 +82,7 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
        struct sctp_auth_bytes *key;
 
        /* Verify that we are not going to overflow INT_MAX */
-       if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
+       if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
                return NULL;
 
        /* Allocate the shared key */
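
[Editor's note: the sctp_auth_create_key() hunk rewrites the overflow guard so the comparison itself cannot wrap: the key length is checked against the headroom left below INT_MAX instead of being subtracted from it first. A hedged restatement with a stand-in struct:]

#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct auth_bytes {                       /* stand-in for sctp_auth_bytes */
        int len;
        unsigned char data[];
};

static bool key_len_ok(size_t key_len)
{
        /* key_len + sizeof(struct auth_bytes) must stay at or below INT_MAX;
         * checking the headroom first means the test itself cannot wrap */
        return key_len <= INT_MAX - sizeof(struct auth_bytes);
}

int main(void)
{
        printf("%d %d\n", key_len_ok(100), key_len_ok((size_t)INT_MAX));
        return 0;
}
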
index 08b3cead6503c62f91dc8e97d9b817de7a79ffb9..817174eb5f41a50147dddded99bf222e421001b6 100644 (file)
@@ -697,13 +697,7 @@ static void sctp_packet_append_data(struct sctp_packet *packet,
        /* Keep track of how many bytes are in flight to the receiver. */
        asoc->outqueue.outstanding_bytes += datasize;
 
-       /* Update our view of the receiver's rwnd. Include sk_buff overhead
-        * while updating peer.rwnd so that it reduces the chances of a
-        * receiver running out of receive buffer space even when receive
-        * window is still open. This can happen when a sender is sending
-        * sending small messages.
-        */
-       datasize += sizeof(struct sk_buff);
+       /* Update our view of the receiver's rwnd. */
        if (datasize < rwnd)
                rwnd -= datasize;
        else
index 14c2b06028ffb1bea3acde6243ff9386d053616e..cfeb1d4a1ee6ca730595959946ced911b7442baa 100644 (file)
@@ -411,8 +411,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                                        chunk->transport->flight_size -=
                                                        sctp_data_size(chunk);
                                q->outstanding_bytes -= sctp_data_size(chunk);
-                               q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-                                                       sizeof(struct sk_buff));
+                               q->asoc->peer.rwnd += sctp_data_size(chunk);
                        }
                        continue;
                }
@@ -432,8 +431,7 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                         * (Section 7.2.4)), add the data size of those
                         * chunks to the rwnd.
                         */
-                       q->asoc->peer.rwnd += (sctp_data_size(chunk) +
-                                               sizeof(struct sk_buff));
+                       q->asoc->peer.rwnd += sctp_data_size(chunk);
                        q->outstanding_bytes -= sctp_data_size(chunk);
                        if (chunk->transport)
                                transport->flight_size -= sctp_data_size(chunk);
index 61b9fca5a173bba9057f9a09dc2ac6cf45f34bc9..6f6ad8686833920fee313ad2dbd4b0cb4a23cade 100644 (file)
@@ -1285,6 +1285,9 @@ SCTP_STATIC __init int sctp_init(void)
        sctp_max_instreams              = SCTP_DEFAULT_INSTREAMS;
        sctp_max_outstreams             = SCTP_DEFAULT_OUTSTREAMS;
 
+       /* Initialize maximum autoclose timeout. */
+       sctp_max_autoclose              = INT_MAX / HZ;
+
        /* Initialize handle used for association ids. */
        idr_init(&sctp_assocs_id);
 
index 13bf5fcdbff1b9f80d2d0c6288ab98762d499e74..54a7cd2fdd7af5c96d9a0354bfb6bfd56b27f09d 100644 (file)
@@ -2200,8 +2200,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
                return -EINVAL;
        if (copy_from_user(&sp->autoclose, optval, optlen))
                return -EFAULT;
-       /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
-       sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
 
        return 0;
 }
index 6b3952961b858369d8a63b908b0911357a9e28e7..60ffbd067ff75643ac3f5cc61e4ba20c2b8ef3b9 100644 (file)
@@ -53,6 +53,10 @@ static int sack_timer_min = 1;
 static int sack_timer_max = 500;
 static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
 static int rwnd_scale_max = 16;
+static unsigned long max_autoclose_min = 0;
+static unsigned long max_autoclose_max =
+       (MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)
+       ? UINT_MAX : MAX_SCHEDULE_TIMEOUT / HZ;
 
 extern long sysctl_sctp_mem[3];
 extern int sysctl_sctp_rmem[3];
@@ -258,6 +262,15 @@ static ctl_table sctp_table[] = {
                .extra1         = &one,
                .extra2         = &rwnd_scale_max,
        },
+       {
+               .procname       = "max_autoclose",
+               .data           = &sctp_max_autoclose,
+               .maxlen         = sizeof(unsigned long),
+               .mode           = 0644,
+               .proc_handler   = &proc_doulongvec_minmax,
+               .extra1         = &max_autoclose_min,
+               .extra2         = &max_autoclose_max,
+       },
 
        { /* sentinel */ }
 };
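
[Editor's note: taken together, the sctp hunks above drop the hard MAX_SCHEDULE_TIMEOUT clamp in setsockopt and instead apply a tunable cap (sctp_max_autoclose, exposed through the new max_autoclose sysctl) when the association timeout is computed, so autoclose * HZ can no longer overflow. A hedged userspace illustration; HZ and the cap value are made up:]

#include <stdio.h>

#define HZ 250UL                                   /* illustrative tick rate */

/* cap applied by the kernel's sctp_max_autoclose; value here is made up */
static unsigned long max_autoclose = (unsigned long)(~0U) / HZ;

static unsigned long autoclose_jiffies(unsigned int autoclose_secs)
{
        unsigned long capped = autoclose_secs;

        if (capped > max_autoclose)                /* min_t() in the kernel */
                capped = max_autoclose;
        return capped * HZ;                        /* cannot overflow once capped */
}

int main(void)
{
        printf("%lu\n", autoclose_jiffies(30));          /* ordinary value     */
        printf("%lu\n", autoclose_jiffies(0xffffffffU)); /* clamped to the cap */
        return 0;
}
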
index d12ffa5458115e3912f8b7f22a337963d8d3213c..00a1a2acd587681adf4e904d9cfddbd05f0251e1 100644 (file)
@@ -590,6 +590,27 @@ void rpc_prepare_task(struct rpc_task *task)
        task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
 }
 
+static void
+rpc_init_task_statistics(struct rpc_task *task)
+{
+       /* Initialize retry counters */
+       task->tk_garb_retry = 2;
+       task->tk_cred_retry = 2;
+       task->tk_rebind_retry = 2;
+
+       /* starting timestamp */
+       task->tk_start = ktime_get();
+}
+
+static void
+rpc_reset_task_statistics(struct rpc_task *task)
+{
+       task->tk_timeouts = 0;
+       task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_KILLED|RPC_TASK_SENT);
+
+       rpc_init_task_statistics(task);
+}
+
 /*
  * Helper that calls task->tk_ops->rpc_call_done if it exists
  */
@@ -602,6 +623,7 @@ void rpc_exit_task(struct rpc_task *task)
                        WARN_ON(RPC_ASSASSINATED(task));
                        /* Always release the RPC slot and buffer memory */
                        xprt_release(task);
+                       rpc_reset_task_statistics(task);
                }
        }
 }
@@ -804,11 +826,6 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
        task->tk_calldata = task_setup_data->callback_data;
        INIT_LIST_HEAD(&task->tk_task);
 
-       /* Initialize retry counters */
-       task->tk_garb_retry = 2;
-       task->tk_cred_retry = 2;
-       task->tk_rebind_retry = 2;
-
        task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
        task->tk_owner = current->tgid;
 
@@ -818,8 +835,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
        if (task->tk_ops->rpc_call_prepare != NULL)
                task->tk_action = rpc_prepare_task;
 
-       /* starting timestamp */
-       task->tk_start = ktime_get();
+       rpc_init_task_statistics(task);
 
        dprintk("RPC:       new task initialized, procpid %u\n",
                                task_pid_nr(current));
index f4385e45a5fcb398086c6c8c5c8ab4daff7ee139..c64c0ef519b594320ff688f3881579d2926be21d 100644 (file)
@@ -995,13 +995,11 @@ out_init_req:
 
 static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
-       if (xprt_dynamic_free_slot(xprt, req))
-               return;
-
-       memset(req, 0, sizeof(*req));   /* mark unused */
-
        spin_lock(&xprt->reserve_lock);
-       list_add(&req->rq_list, &xprt->free);
+       if (!xprt_dynamic_free_slot(xprt, req)) {
+               memset(req, 0, sizeof(*req));   /* mark unused */
+               list_add(&req->rq_list, &xprt->free);
+       }
        rpc_wake_up_next(&xprt->backlog);
        spin_unlock(&xprt->reserve_lock);
 }
index 466fbcc5cf77a92ef491be50eb836f652165da16..b595a3d8679f016a7b0936683170a69cc86c7905 100644 (file)
@@ -1957,6 +1957,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                        if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
                            (UNIXCB(skb).cred != siocb->scm->cred)) {
                                skb_queue_head(&sk->sk_receive_queue, skb);
+                               sk->sk_data_ready(sk, skb->len);
                                break;
                        }
                } else {
@@ -1974,6 +1975,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                chunk = min_t(unsigned int, skb->len, size);
                if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
                        skb_queue_head(&sk->sk_receive_queue, skb);
+                       sk->sk_data_ready(sk, skb->len);
                        if (copied == 0)
                                copied = -EFAULT;
                        break;
@@ -1991,6 +1993,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                        /* put the skb back if we didn't use it up.. */
                        if (skb->len) {
                                skb_queue_head(&sk->sk_receive_queue, skb);
+                               sk->sk_data_ready(sk, skb->len);
                                break;
                        }
 
@@ -2006,6 +2009,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 
                        /* put message back and return */
                        skb_queue_head(&sk->sk_receive_queue, skb);
+                       sk->sk_data_ready(sk, skb->len);
                        break;
                }
        } while (size);
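
[Editor's note: each af_unix hunk adds a sk_data_ready() call wherever an skb is put back at the head of the receive queue, so other sleepers are woken again for the data that was returned. A hedged pthread sketch of the same rule, with a condition variable standing in for sk_data_ready() and all names invented:]

#include <pthread.h>

struct rx_queue {
        pthread_mutex_t lock;
        pthread_cond_t  data_ready;
        int             queued;          /* crude stand-in for queued bytes */
};

static void requeue_head(struct rx_queue *q, int len)
{
        pthread_mutex_lock(&q->lock);
        q->queued += len;                          /* skb_queue_head() here */
        pthread_cond_broadcast(&q->data_ready);    /* the added sk_data_ready() */
        pthread_mutex_unlock(&q->lock);
}

int main(void)
{
        struct rx_queue q = {
                PTHREAD_MUTEX_INITIALIZER,
                PTHREAD_COND_INITIALIZER,
                0
        };

        requeue_head(&q, 128);
        return q.queued == 128 ? 0 : 1;
}
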
index b3a476fe82725f738f5215b32cdf2296970451d2..ffafda5022c2b72d8f45714458e5fdd16203c4ee 100644 (file)
@@ -89,8 +89,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
        [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
        [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },
 
-       [NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN },
-       [NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN },
+       [NL80211_ATTR_MAC] = { .len = ETH_ALEN },
+       [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN },
 
        [NL80211_ATTR_KEY] = { .type = NLA_NESTED, },
        [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY,
index e71f5a66574e31cb1d4e44429402ddcb5a61a529..3302c56f60d1511d292a2e9b4f71ad87b6d6f722 100644 (file)
 #define REG_DBG_PRINT(args...)
 #endif
 
+static struct regulatory_request core_request_world = {
+       .initiator = NL80211_REGDOM_SET_BY_CORE,
+       .alpha2[0] = '0',
+       .alpha2[1] = '0',
+       .intersect = false,
+       .processed = true,
+       .country_ie_env = ENVIRON_ANY,
+};
+
 /* Receipt of information from last regulatory request */
-static struct regulatory_request *last_request;
+static struct regulatory_request *last_request = &core_request_world;
 
 /* To trigger userspace events */
 static struct platform_device *reg_pdev;
@@ -150,7 +159,7 @@ static char user_alpha2[2];
 module_param(ieee80211_regdom, charp, 0444);
 MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
 
-static void reset_regdomains(void)
+static void reset_regdomains(bool full_reset)
 {
        /* avoid freeing static information or freeing something twice */
        if (cfg80211_regdomain == cfg80211_world_regdom)
@@ -165,6 +174,13 @@ static void reset_regdomains(void)
 
        cfg80211_world_regdom = &world_regdom;
        cfg80211_regdomain = NULL;
+
+       if (!full_reset)
+               return;
+
+       if (last_request != &core_request_world)
+               kfree(last_request);
+       last_request = &core_request_world;
 }
 
 /*
@@ -175,7 +191,7 @@ static void update_world_regdomain(const struct ieee80211_regdomain *rd)
 {
        BUG_ON(!last_request);
 
-       reset_regdomains();
+       reset_regdomains(false);
 
        cfg80211_world_regdom = rd;
        cfg80211_regdomain = rd;
@@ -1407,7 +1423,8 @@ static int __regulatory_hint(struct wiphy *wiphy,
        }
 
 new_request:
-       kfree(last_request);
+       if (last_request != &core_request_world)
+               kfree(last_request);
 
        last_request = pending_request;
        last_request->intersect = intersect;
@@ -1577,9 +1594,6 @@ static int regulatory_hint_core(const char *alpha2)
 {
        struct regulatory_request *request;
 
-       kfree(last_request);
-       last_request = NULL;
-
        request = kzalloc(sizeof(struct regulatory_request),
                          GFP_KERNEL);
        if (!request)
@@ -1777,7 +1791,7 @@ static void restore_regulatory_settings(bool reset_user)
        mutex_lock(&cfg80211_mutex);
        mutex_lock(&reg_mutex);
 
-       reset_regdomains();
+       reset_regdomains(true);
        restore_alpha2(alpha2, reset_user);
 
        /*
@@ -2037,12 +2051,18 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
        }
 
        request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
+       if (!request_wiphy &&
+           (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
+            last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)) {
+               schedule_delayed_work(&reg_timeout, 0);
+               return -ENODEV;
+       }
 
        if (!last_request->intersect) {
                int r;
 
                if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) {
-                       reset_regdomains();
+                       reset_regdomains(false);
                        cfg80211_regdomain = rd;
                        return 0;
                }
@@ -2063,7 +2083,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
                if (r)
                        return r;
 
-               reset_regdomains();
+               reset_regdomains(false);
                cfg80211_regdomain = rd;
                return 0;
        }
@@ -2088,7 +2108,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
 
                rd = NULL;
 
-               reset_regdomains();
+               reset_regdomains(false);
                cfg80211_regdomain = intersected_rd;
 
                return 0;
@@ -2108,7 +2128,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
        kfree(rd);
        rd = NULL;
 
-       reset_regdomains();
+       reset_regdomains(false);
        cfg80211_regdomain = intersected_rd;
 
        return 0;
@@ -2261,11 +2281,8 @@ void /* __init_or_exit */ regulatory_exit(void)
        mutex_lock(&cfg80211_mutex);
        mutex_lock(&reg_mutex);
 
-       reset_regdomains();
-
-       kfree(last_request);
+       reset_regdomains(true);
 
-       last_request = NULL;
        dev_set_uevent_suppress(&reg_pdev->dev, true);
 
        platform_device_unregister(reg_pdev);
index 552df27dcf53d3388fb2eb9ab3ba7a31c7ffcd64..9049a5caeb257d783db1008224eabea7cefa7cef 100644 (file)
@@ -2276,8 +2276,6 @@ static void __xfrm_garbage_collect(struct net *net)
 {
        struct dst_entry *head, *next;
 
-       flow_cache_flush();
-
        spin_lock_bh(&xfrm_policy_sk_bundle_lock);
        head = xfrm_policy_sk_bundles;
        xfrm_policy_sk_bundles = NULL;
@@ -2290,6 +2288,18 @@ static void __xfrm_garbage_collect(struct net *net)
        }
 }
 
+static void xfrm_garbage_collect(struct net *net)
+{
+       flow_cache_flush();
+       __xfrm_garbage_collect(net);
+}
+
+static void xfrm_garbage_collect_deferred(struct net *net)
+{
+       flow_cache_flush_deferred();
+       __xfrm_garbage_collect(net);
+}
+
 static void xfrm_init_pmtu(struct dst_entry *dst)
 {
        do {
@@ -2382,9 +2392,11 @@ static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
        return dst_metric_advmss(dst->path);
 }
 
-static unsigned int xfrm_default_mtu(const struct dst_entry *dst)
+static unsigned int xfrm_mtu(const struct dst_entry *dst)
 {
-       return dst_mtu(dst->path);
+       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+       return mtu ? : dst_mtu(dst->path);
 }
 
 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr)
@@ -2411,8 +2423,8 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                        dst_ops->check = xfrm_dst_check;
                if (likely(dst_ops->default_advmss == NULL))
                        dst_ops->default_advmss = xfrm_default_advmss;
-               if (likely(dst_ops->default_mtu == NULL))
-                       dst_ops->default_mtu = xfrm_default_mtu;
+               if (likely(dst_ops->mtu == NULL))
+                       dst_ops->mtu = xfrm_mtu;
                if (likely(dst_ops->negative_advice == NULL))
                        dst_ops->negative_advice = xfrm_negative_advice;
                if (likely(dst_ops->link_failure == NULL))
@@ -2420,7 +2432,7 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
                if (likely(dst_ops->neigh_lookup == NULL))
                        dst_ops->neigh_lookup = xfrm_neigh_lookup;
                if (likely(afinfo->garbage_collect == NULL))
-                       afinfo->garbage_collect = __xfrm_garbage_collect;
+                       afinfo->garbage_collect = xfrm_garbage_collect_deferred;
                xfrm_policy_afinfo[afinfo->family] = afinfo;
        }
        write_unlock_bh(&xfrm_policy_afinfo_lock);
@@ -2514,7 +2526,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
 
        switch (event) {
        case NETDEV_DOWN:
-               __xfrm_garbage_collect(dev_net(dev));
+               xfrm_garbage_collect(dev_net(dev));
        }
        return NOTIFY_DONE;
 }
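
[Editor's note: the xfrm_policy hunk replaces default_mtu with an mtu method that prefers an explicitly cached RTAX_MTU metric and only falls back to the path MTU when none is set (the kernel spells this with GCC's "?:" shorthand). A hedged userspace equivalent:]

#include <stdio.h>

/* returns the cached RTAX_MTU metric when set, otherwise the path MTU;
 * the kernel writes this as "mtu ? : dst_mtu(dst->path)" */
static unsigned int xfrm_mtu_demo(unsigned int cached_metric,
                                  unsigned int path_mtu)
{
        return cached_metric ? cached_metric : path_mtu;
}

int main(void)
{
        printf("%u\n", xfrm_mtu_demo(0, 1500));     /* no metric: use path MTU */
        printf("%u\n", xfrm_mtu_demo(1400, 1500));  /* explicit metric wins    */
        return 0;
}
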
index ba573fe7c74d5bfe0495372931ebff69406f35c2..914833d99b06f78242fa12584c568ecef6a5e65a 100644 (file)
@@ -60,8 +60,8 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
            --directory=$(srctree) --directory=$(objtree)           \
            --output $(obj)/config.pot
        $(Q)sed -i s/CHARSET/UTF-8/ $(obj)/config.pot
-       $(Q)ln -fs Kconfig.x86 arch/um/Kconfig
-       $(Q)(for i in `ls $(srctree)/arch/*/Kconfig`;    \
+       $(Q)(for i in `ls $(srctree)/arch/*/Kconfig      \
+           $(srctree)/arch/*/um/Kconfig`;               \
            do                                           \
                echo "  GEN $$i";                        \
                $(obj)/kxgettext $$i                     \
@@ -69,7 +69,6 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
            done )
        $(Q)msguniq --sort-by-file --to-code=UTF-8 $(obj)/config.pot \
            --output $(obj)/linux.pot
-       $(Q)rm -f $(srctree)/arch/um/Kconfig
        $(Q)rm -f $(obj)/config.pot
 
 PHONY += allnoconfig allyesconfig allmodconfig alldefconfig randconfig
index 36cc0cc39e78e135630384e6500f4a789fe7aa2a..b566eba4a65cc16128b34152154c3ba26505b692 100644 (file)
@@ -57,23 +57,44 @@ static int prepend(char **buffer, int buflen, const char *str, int namelen)
 static int d_namespace_path(struct path *path, char *buf, int buflen,
                            char **name, int flags)
 {
-       struct path root, tmp;
        char *res;
-       int connected, error = 0;
+       int error = 0;
+       int connected = 1;
+
+       if (path->mnt->mnt_flags & MNT_INTERNAL) {
+               /* it's not mounted anywhere */
+               res = dentry_path(path->dentry, buf, buflen);
+               *name = res;
+               if (IS_ERR(res)) {
+                       *name = buf;
+                       return PTR_ERR(res);
+               }
+               if (path->dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
+                   strncmp(*name, "/sys/", 5) == 0) {
+                       /* TODO: convert over to using a per namespace
+                        * control instead of hard coded /proc
+                        */
+                       return prepend(name, *name - buf, "/proc", 5);
+               }
+               return 0;
+       }
 
-       /* Get the root we want to resolve too, released below */
+       /* resolve paths relative to chroot? */
        if (flags & PATH_CHROOT_REL) {
-               /* resolve paths relative to chroot */
+               struct path root;
                get_fs_root(current->fs, &root);
-       } else {
-               /* resolve paths relative to namespace */
-               root.mnt = current->nsproxy->mnt_ns->root;
-               root.dentry = root.mnt->mnt_root;
-               path_get(&root);
+               res = __d_path(path, &root, buf, buflen);
+               if (res && !IS_ERR(res)) {
+                       /* everything's fine */
+                       *name = res;
+                       path_put(&root);
+                       goto ok;
+               }
+               path_put(&root);
+               connected = 0;
        }
 
-       tmp = root;
-       res = __d_path(path, &tmp, buf, buflen);
+       res = d_absolute_path(path, buf, buflen);
 
        *name = res;
        /* handle error conditions - and still allow a partial path to
@@ -84,7 +105,10 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
                *name = buf;
                goto out;
        }
+       if (!our_mnt(path->mnt))
+               connected = 0;
 
+ok:
        /* Handle two cases:
         * 1. A deleted dentry && profile is not allowing mediation of deleted
         * 2. On some filesystems, newly allocated dentries appear to the
@@ -97,10 +121,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
                        goto out;
        }
 
-       /* Determine if the path is connected to the expected root */
-       connected = tmp.dentry == root.dentry && tmp.mnt == root.mnt;
-
-       /* If the path is not connected,
+       /* If the path is not connected to the expected root,
         * check if it is a sysctl and handle specially else remove any
         * leading / that __d_path may have returned.
         * Unless
@@ -112,17 +133,9 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
         *     namespace root.
         */
        if (!connected) {
-               /* is the disconnect path a sysctl? */
-               if (tmp.dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
-                   strncmp(*name, "/sys/", 5) == 0) {
-                       /* TODO: convert over to using a per namespace
-                        * control instead of hard coded /proc
-                        */
-                       error = prepend(name, *name - buf, "/proc", 5);
-               } else if (!(flags & PATH_CONNECT_PATH) &&
+               if (!(flags & PATH_CONNECT_PATH) &&
                           !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) &&
-                            (tmp.mnt == current->nsproxy->mnt_ns->root &&
-                             tmp.dentry == tmp.mnt->mnt_root))) {
+                            our_mnt(path->mnt))) {
                        /* disconnected path, don't return pathname starting
                         * with '/'
                         */
@@ -133,8 +146,6 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
        }
 
 out:
-       path_put(&root);
-
        return error;
 }
 
index 5dd5b140242cd8872b255c9e88878ab56155d07c..8738deff26fadde6a6bf200972d0422193e4fdc9 100644 (file)
@@ -27,20 +27,35 @@ static int evmkey_len = MAX_KEY_SIZE;
 
 struct crypto_shash *hmac_tfm;
 
+static DEFINE_MUTEX(mutex);
+
 static struct shash_desc *init_desc(void)
 {
        int rc;
        struct shash_desc *desc;
 
        if (hmac_tfm == NULL) {
+               mutex_lock(&mutex);
+               if (hmac_tfm)
+                       goto out;
                hmac_tfm = crypto_alloc_shash(evm_hmac, 0, CRYPTO_ALG_ASYNC);
                if (IS_ERR(hmac_tfm)) {
                        pr_err("Can not allocate %s (reason: %ld)\n",
                               evm_hmac, PTR_ERR(hmac_tfm));
                        rc = PTR_ERR(hmac_tfm);
                        hmac_tfm = NULL;
+                       mutex_unlock(&mutex);
+                       return ERR_PTR(rc);
+               }
+               rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
+               if (rc) {
+                       crypto_free_shash(hmac_tfm);
+                       hmac_tfm = NULL;
+                       mutex_unlock(&mutex);
                        return ERR_PTR(rc);
                }
+out:
+               mutex_unlock(&mutex);
        }
 
        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(hmac_tfm),
@@ -51,11 +66,7 @@ static struct shash_desc *init_desc(void)
        desc->tfm = hmac_tfm;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
-       rc = crypto_shash_setkey(hmac_tfm, evmkey, evmkey_len);
-       if (rc)
-               goto out;
        rc = crypto_shash_init(desc);
-out:
        if (rc) {
                kfree(desc);
                return ERR_PTR(rc);
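
[Editor's note: the EVM hunk moves tfm allocation and keying under a mutex and re-checks the pointer after taking it, so concurrent first callers cannot race. A hedged pthread sketch of that lazy, double-checked initialisation; hmac_tfm here is just a malloc'd stand-in for the crypto handle, and the unlocked fast path mirrors the kernel code above:]

#include <pthread.h>
#include <stdlib.h>

static void *hmac_tfm;                   /* stands in for the crypto handle */
static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;

static void *get_tfm(void)
{
        if (hmac_tfm == NULL) {
                pthread_mutex_lock(&init_lock);
                if (hmac_tfm == NULL)            /* re-check under the lock */
                        hmac_tfm = malloc(64);   /* allocate + key exactly once */
                pthread_mutex_unlock(&init_lock);
        }
        return hmac_tfm;
}

int main(void)
{
        return get_tfm() ? 0 : 1;
}
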
index 0b62bd112461c712cfc9425f9e8ac0f6caf194f0..7b9eb1faf68b5fe435b7f70e36ff21330934d6a2 100644 (file)
@@ -123,7 +123,9 @@ static void sel_netport_insert(struct sel_netport *port)
        if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
                struct sel_netport *tail;
                tail = list_entry(
-                       rcu_dereference(sel_netport_hash[idx].list.prev),
+                       rcu_dereference_protected(
+                               sel_netport_hash[idx].list.prev,
+                               lockdep_is_held(&sel_netport_lock)),
                        struct sel_netport, list);
                list_del_rcu(&tail->list);
                kfree_rcu(tail, rcu);
index 738bbdf8d4c77ceba3ba3abfe733a90dc56cd4fb..d9f3ced8756ec4dc87492b8edecf172375843f5d 100644 (file)
@@ -101,9 +101,8 @@ static char *tomoyo_get_absolute_path(struct path *path, char * const buffer,
 {
        char *pos = ERR_PTR(-ENOMEM);
        if (buflen >= 256) {
-               struct path ns_root = { };
                /* go to whatever namespace root we are under */
-               pos = __d_path(path, &ns_root, buffer, buflen - 1);
+               pos = d_absolute_path(path, buffer, buflen - 1);
                if (!IS_ERR(pos) && *pos == '/' && pos[1]) {
                        struct inode *inode = path->dentry->d_inode;
                        if (inode && S_ISDIR(inode->i_mode)) {
@@ -294,8 +293,16 @@ char *tomoyo_realpath_from_path(struct path *path)
                        pos = tomoyo_get_local_path(path->dentry, buf,
                                                    buf_len - 1);
                /* Get absolute name for the rest. */
-               else
+               else {
                        pos = tomoyo_get_absolute_path(path, buf, buf_len - 1);
+                       /*
+                        * Fall back to local name if absolute name is not
+                        * available.
+                        */
+                       if (pos == ERR_PTR(-EINVAL))
+                               pos = tomoyo_get_local_path(path->dentry, buf,
+                                                           buf_len - 1);
+               }
 encode:
                if (IS_ERR(pos))
                        continue;
index 6e5addeb236b49595ea563e7525d5a262a32e295..73516f69ac7ca8a33244cb300df8958ac2d77e20 100644 (file)
@@ -899,6 +899,10 @@ static void atmel_ac97c_reset(struct atmel_ac97c *chip)
                /* AC97 v2.2 specifications says minimum 1 us. */
                udelay(2);
                gpio_set_value(chip->reset_pin, 1);
+       } else {
+               ac97c_writel(chip, MR, AC97C_MR_WRST | AC97C_MR_ENA);
+               udelay(2);
+               ac97c_writel(chip, MR, AC97C_MR_ENA);
        }
 }
 
index 096507d2ca9a7323c8d8e674ff4da7d921e677e4..c2f79e63124d82cb32ee0a47668f8997bc73e298 100644 (file)
@@ -2507,8 +2507,8 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
        SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
+       SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS 1101HA", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
-       SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
@@ -2971,7 +2971,8 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        /* SCH */
        { PCI_DEVICE(0x8086, 0x811b),
          .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
-         AZX_DCAPS_BUFSIZE},
+         AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_LPIB }, /* Poulsbo */
+       /* ICH */
        { PCI_DEVICE(0x8086, 0x2668),
          .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
          AZX_DCAPS_BUFSIZE },  /* ICH6 */
index cbde019d3d52b985e4c8a95b5f74386ac34eef69..1d07e8fa243360d25236a4942ab5e363d69d1558 100644 (file)
@@ -297,6 +297,8 @@ static int alc_mux_select(struct hda_codec *codec, unsigned int adc_idx,
        imux = &spec->input_mux[mux_idx];
        if (!imux->num_items && mux_idx > 0)
                imux = &spec->input_mux[0];
+       if (!imux->num_items)
+               return 0;
 
        if (idx >= imux->num_items)
                idx = imux->num_items - 1;
@@ -2629,6 +2631,8 @@ static const char *alc_get_line_out_pfx(struct alc_spec *spec, int ch,
        case AUTO_PIN_SPEAKER_OUT:
                if (cfg->line_outs == 1)
                        return "Speaker";
+               if (cfg->line_outs == 2)
+                       return ch ? "Bass Speaker" : "Speaker";
                break;
        case AUTO_PIN_HP_OUT:
                /* for multi-io case, only the primary out */
@@ -2902,7 +2906,7 @@ static hda_nid_t alc_auto_look_for_dac(struct hda_codec *codec, hda_nid_t pin)
                if (!nid)
                        continue;
                if (found_in_nid_list(nid, spec->multiout.dac_nids,
-                                     spec->multiout.num_dacs))
+                                     ARRAY_SIZE(spec->private_dac_nids)))
                        continue;
                if (found_in_nid_list(nid, spec->multiout.hp_out_nid,
                                      ARRAY_SIZE(spec->multiout.hp_out_nid)))
@@ -2923,6 +2927,7 @@ static hda_nid_t get_dac_if_single(struct hda_codec *codec, hda_nid_t pin)
        return 0;
 }
 
+/* return 0 if no possible DAC is found, 1 if one or more found */
 static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs,
                                    const hda_nid_t *pins, hda_nid_t *dacs)
 {
@@ -2940,7 +2945,7 @@ static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs,
                if (!dacs[i])
                        dacs[i] = alc_auto_look_for_dac(codec, pins[i]);
        }
-       return 0;
+       return 1;
 }
 
 static int alc_auto_fill_multi_ios(struct hda_codec *codec,
@@ -2950,7 +2955,7 @@ static int alc_auto_fill_multi_ios(struct hda_codec *codec,
 static int alc_auto_fill_dac_nids(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
-       const struct auto_pin_cfg *cfg = &spec->autocfg;
+       struct auto_pin_cfg *cfg = &spec->autocfg;
        bool redone = false;
        int i;
 
@@ -2961,6 +2966,7 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
        spec->multiout.extra_out_nid[0] = 0;
        memset(spec->private_dac_nids, 0, sizeof(spec->private_dac_nids));
        spec->multiout.dac_nids = spec->private_dac_nids;
+       spec->multi_ios = 0;
 
        /* fill hard-wired DACs first */
        if (!redone) {
@@ -2994,10 +3000,12 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
        for (i = 0; i < cfg->line_outs; i++) {
                if (spec->private_dac_nids[i])
                        spec->multiout.num_dacs++;
-               else
+               else {
                        memmove(spec->private_dac_nids + i,
                                spec->private_dac_nids + i + 1,
                                sizeof(hda_nid_t) * (cfg->line_outs - i - 1));
+                       spec->private_dac_nids[cfg->line_outs - 1] = 0;
+               }
        }
 
        if (cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
@@ -3019,9 +3027,28 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
        if (cfg->line_out_type != AUTO_PIN_HP_OUT)
                alc_auto_fill_extra_dacs(codec, cfg->hp_outs, cfg->hp_pins,
                                 spec->multiout.hp_out_nid);
-       if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT)
-               alc_auto_fill_extra_dacs(codec, cfg->speaker_outs, cfg->speaker_pins,
-                                spec->multiout.extra_out_nid);
+       if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
+               int err = alc_auto_fill_extra_dacs(codec, cfg->speaker_outs,
+                                       cfg->speaker_pins,
+                                       spec->multiout.extra_out_nid);
+               /* if no speaker volume is assigned, try again as the primary
+                * output
+                */
+               if (!err && cfg->speaker_outs > 0 &&
+                   cfg->line_out_type == AUTO_PIN_HP_OUT) {
+                       cfg->hp_outs = cfg->line_outs;
+                       memcpy(cfg->hp_pins, cfg->line_out_pins,
+                              sizeof(cfg->hp_pins));
+                       cfg->line_outs = cfg->speaker_outs;
+                       memcpy(cfg->line_out_pins, cfg->speaker_pins,
+                              sizeof(cfg->speaker_pins));
+                       cfg->speaker_outs = 0;
+                       memset(cfg->speaker_pins, 0, sizeof(cfg->speaker_pins));
+                       cfg->line_out_type = AUTO_PIN_SPEAKER_OUT;
+                       redone = false;
+                       goto again;
+               }
+       }
 
        return 0;
 }
@@ -3171,7 +3198,8 @@ static int alc_auto_create_multi_out_ctls(struct hda_codec *codec,
 }
 
 static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
-                                    hda_nid_t dac, const char *pfx)
+                                    hda_nid_t dac, const char *pfx,
+                                    int cidx)
 {
        struct alc_spec *spec = codec->spec;
        hda_nid_t sw, vol;
@@ -3187,15 +3215,15 @@ static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
                if (is_ctl_used(spec->sw_ctls, val))
                        return 0; /* already created */
                mark_ctl_usage(spec->sw_ctls, val);
-               return add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, val);
+               return __add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, cidx, val);
        }
 
        sw = alc_look_for_out_mute_nid(codec, pin, dac);
        vol = alc_look_for_out_vol_nid(codec, pin, dac);
-       err = alc_auto_add_stereo_vol(codec, pfx, 0, vol);
+       err = alc_auto_add_stereo_vol(codec, pfx, cidx, vol);
        if (err < 0)
                return err;
-       err = alc_auto_add_stereo_sw(codec, pfx, 0, sw);
+       err = alc_auto_add_stereo_sw(codec, pfx, cidx, sw);
        if (err < 0)
                return err;
        return 0;
@@ -3236,16 +3264,21 @@ static int alc_auto_create_extra_outs(struct hda_codec *codec, int num_pins,
                hda_nid_t dac = *dacs;
                if (!dac)
                        dac = spec->multiout.dac_nids[0];
-               return alc_auto_create_extra_out(codec, *pins, dac, pfx);
+               return alc_auto_create_extra_out(codec, *pins, dac, pfx, 0);
        }
 
        if (dacs[num_pins - 1]) {
                /* OK, we have a multi-output system with individual volumes */
                for (i = 0; i < num_pins; i++) {
-                       snprintf(name, sizeof(name), "%s %s",
-                                pfx, channel_name[i]);
-                       err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
-                                                       name);
+                       if (num_pins >= 3) {
+                               snprintf(name, sizeof(name), "%s %s",
+                                        pfx, channel_name[i]);
+                               err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
+                                                               name, 0);
+                       } else {
+                               err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
+                                                               pfx, i);
+                       }
                        if (err < 0)
                                return err;
                }
index f3658658548e35179c70069cb31a498cabc155f7..616678fde486d7877ca35bd0a0f72b49647eb833 100644 (file)
@@ -215,6 +215,7 @@ struct sigmatel_spec {
        unsigned int gpio_mute;
        unsigned int gpio_led;
        unsigned int gpio_led_polarity;
+       unsigned int vref_mute_led_nid; /* pin NID for mute-LED vref control */
        unsigned int vref_led;
 
        /* stream */
@@ -4318,12 +4319,10 @@ static void stac_store_hints(struct hda_codec *codec)
                spec->eapd_switch = val;
        get_int_hint(codec, "gpio_led_polarity", &spec->gpio_led_polarity);
        if (get_int_hint(codec, "gpio_led", &spec->gpio_led)) {
-               if (spec->gpio_led <= 8) {
-                       spec->gpio_mask |= spec->gpio_led;
-                       spec->gpio_dir |= spec->gpio_led;
-                       if (spec->gpio_led_polarity)
-                               spec->gpio_data |= spec->gpio_led;
-               }
+               spec->gpio_mask |= spec->gpio_led;
+               spec->gpio_dir |= spec->gpio_led;
+               if (spec->gpio_led_polarity)
+                       spec->gpio_data |= spec->gpio_led;
        }
 }
 
@@ -4441,7 +4440,9 @@ static int stac92xx_init(struct hda_codec *codec)
                int pinctl, def_conf;
 
                /* power on when no jack detection is available */
-               if (!spec->hp_detect) {
+               /* or when the VREF is used for controlling LED */
+               if (!spec->hp_detect ||
+                   spec->vref_mute_led_nid == nid) {
                        stac_toggle_power_map(codec, nid, 1);
                        continue;
                }
@@ -4913,8 +4914,14 @@ static int find_mute_led_gpio(struct hda_codec *codec, int default_polarity)
                        if (sscanf(dev->name, "HP_Mute_LED_%d_%x",
                                  &spec->gpio_led_polarity,
                                  &spec->gpio_led) == 2) {
-                               if (spec->gpio_led < 4)
+                               unsigned int max_gpio;
+                               max_gpio = snd_hda_param_read(codec, codec->afg,
+                                                             AC_PAR_GPIO_CAP);
+                               max_gpio &= AC_GPIO_IO_COUNT;
+                               if (spec->gpio_led < max_gpio)
                                        spec->gpio_led = 1 << spec->gpio_led;
+                               else
+                                       spec->vref_mute_led_nid = spec->gpio_led;
                                return 1;
                        }
                        if (sscanf(dev->name, "HP_Mute_LED_%d",
@@ -4922,6 +4929,12 @@ static int find_mute_led_gpio(struct hda_codec *codec, int default_polarity)
                                set_hp_led_gpio(codec);
                                return 1;
                        }
+                       /* BIOS bug: unfilled OEM string */
+                       if (strstr(dev->name, "HP_Mute_LED_P_G")) {
+                               set_hp_led_gpio(codec);
+                               spec->gpio_led_polarity = 1;
+                               return 1;
+                       }
                }
 
                /*
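
The hunk above stops assuming that any parsed value below 4 is a GPIO bit: it reads the codec's actual GPIO count from AC_PAR_GPIO_CAP and treats anything beyond that count as a vref-controlled pin NID, with the unfilled "HP_Mute_LED_P_G" OEM string handled as a BIOS quirk. A small userspace sketch of the same parse-and-classify step, assuming a codec with three GPIOs; the OEM string and the printed wording are illustrative, not taken from the driver:

#include <stdio.h>

int main(void)
{
	/* Hypothetical OEM string: polarity 1, value 0xa (beyond the GPIO range) */
	const char *oem = "HP_Mute_LED_1_a";
	unsigned int polarity, value;
	unsigned int max_gpio = 3;	/* assumed AC_PAR_GPIO_CAP count */

	if (sscanf(oem, "HP_Mute_LED_%u_%x", &polarity, &value) != 2)
		return 1;

	if (value < max_gpio)
		printf("GPIO mute LED: mask 0x%x, polarity %u\n",
		       1u << value, polarity);
	else
		printf("vref mute LED on pin NID 0x%x, polarity %u\n",
		       value, polarity);
	return 0;
}
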
@@ -5043,29 +5056,12 @@ static int stac92xx_pre_resume(struct hda_codec *codec)
        struct sigmatel_spec *spec = codec->spec;
 
        /* sync mute LED */
-       if (spec->gpio_led) {
-               if (spec->gpio_led <= 8) {
-                       stac_gpio_set(codec, spec->gpio_mask,
-                                       spec->gpio_dir, spec->gpio_data);
-               } else {
-                       stac_vrefout_set(codec,
-                                       spec->gpio_led, spec->vref_led);
-               }
-       }
-       return 0;
-}
-
-static int stac92xx_post_suspend(struct hda_codec *codec)
-{
-       struct sigmatel_spec *spec = codec->spec;
-       if (spec->gpio_led > 8) {
-               /* with vref-out pin used for mute led control
-                * codec AFG is prevented from D3 state, but on
-                * system suspend it can (and should) be used
-                */
-               snd_hda_codec_read(codec, codec->afg, 0,
-                               AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-       }
+       if (spec->vref_mute_led_nid)
+               stac_vrefout_set(codec, spec->vref_mute_led_nid,
+                                spec->vref_led);
+       else if (spec->gpio_led)
+               stac_gpio_set(codec, spec->gpio_mask,
+                             spec->gpio_dir, spec->gpio_data);
        return 0;
 }
 
@@ -5076,7 +5072,7 @@ static void stac92xx_set_power_state(struct hda_codec *codec, hda_nid_t fg,
        struct sigmatel_spec *spec = codec->spec;
 
        if (power_state == AC_PWRST_D3) {
-               if (spec->gpio_led > 8) {
+               if (spec->vref_mute_led_nid) {
                        /* with vref-out pin used for mute led control
                         * codec AFG is prevented from D3 state
                         */
@@ -5129,7 +5125,7 @@ static int stac92xx_update_led_status(struct hda_codec *codec)
                }
        }
        /*polarity defines *not* muted state level*/
-       if (spec->gpio_led <= 8) {
+       if (!spec->vref_mute_led_nid) {
                if (muted)
                        spec->gpio_data &= ~spec->gpio_led; /* orange */
                else
@@ -5147,7 +5143,8 @@ static int stac92xx_update_led_status(struct hda_codec *codec)
                muted_lvl = spec->gpio_led_polarity ?
                                AC_PINCTL_VREF_GRD : AC_PINCTL_VREF_HIZ;
                spec->vref_led = muted ? muted_lvl : notmtd_lvl;
-               stac_vrefout_set(codec, spec->gpio_led, spec->vref_led);
+               stac_vrefout_set(codec, spec->vref_mute_led_nid,
+                                spec->vref_led);
        }
        return 0;
 }
@@ -5661,15 +5658,13 @@ again:
 
 #ifdef CONFIG_SND_HDA_POWER_SAVE
        if (spec->gpio_led) {
-               if (spec->gpio_led <= 8) {
+               if (!spec->vref_mute_led_nid) {
                        spec->gpio_mask |= spec->gpio_led;
                        spec->gpio_dir |= spec->gpio_led;
                        spec->gpio_data |= spec->gpio_led;
                } else {
                        codec->patch_ops.set_power_state =
                                        stac92xx_set_power_state;
-                       codec->patch_ops.post_suspend =
-                                       stac92xx_post_suspend;
                }
                codec->patch_ops.pre_resume = stac92xx_pre_resume;
                codec->patch_ops.check_power_status =
@@ -5976,15 +5971,13 @@ again:
 
 #ifdef CONFIG_SND_HDA_POWER_SAVE
        if (spec->gpio_led) {
-               if (spec->gpio_led <= 8) {
+               if (!spec->vref_mute_led_nid) {
                        spec->gpio_mask |= spec->gpio_led;
                        spec->gpio_dir |= spec->gpio_led;
                        spec->gpio_data |= spec->gpio_led;
                } else {
                        codec->patch_ops.set_power_state =
                                        stac92xx_set_power_state;
-                       codec->patch_ops.post_suspend =
-                                       stac92xx_post_suspend;
                }
                codec->patch_ops.pre_resume = stac92xx_pre_resume;
                codec->patch_ops.check_power_status =
index a391e622a19209f535eb0441e85348cda4d085aa..28dfafb56dd1d70a9d95e4ef53f6e7ba22fb4faf 100644 (file)
@@ -41,6 +41,7 @@ MODULE_SUPPORTED_DEVICE("{{SiS,SiS7019 Audio Accelerator}}");
 static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
 static char *id = SNDRV_DEFAULT_STR1;  /* ID for this card */
 static int enable = 1;
+static int codecs = 1;
 
 module_param(index, int, 0444);
 MODULE_PARM_DESC(index, "Index value for SiS7019 Audio Accelerator.");
@@ -48,6 +49,8 @@ module_param(id, charp, 0444);
 MODULE_PARM_DESC(id, "ID string for SiS7019 Audio Accelerator.");
 module_param(enable, bool, 0444);
 MODULE_PARM_DESC(enable, "Enable SiS7019 Audio Accelerator.");
+module_param(codecs, int, 0444);
+MODULE_PARM_DESC(codecs, "Bitmask of codecs expected to be present (default 1, primary only)");
+MODULE_PARM_DESC(codecs, "Bitmask of codecs expected to be present (default 1, primary only)");
 
 static DEFINE_PCI_DEVICE_TABLE(snd_sis7019_ids) = {
        { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x7019) },
@@ -140,6 +143,9 @@ struct sis7019 {
        dma_addr_t silence_dma_addr;
 };
 
+/* These values are also used by the module param 'codecs' to indicate
+ * which codecs should be present.
+ */
 #define SIS_PRIMARY_CODEC_PRESENT      0x0001
 #define SIS_SECONDARY_CODEC_PRESENT    0x0002
 #define SIS_TERTIARY_CODEC_PRESENT     0x0004
@@ -1078,6 +1084,7 @@ static int sis_chip_init(struct sis7019 *sis)
 {
        unsigned long io = sis->ioport;
        void __iomem *ioaddr = sis->ioaddr;
+       unsigned long timeout;
        u16 status;
        int count;
        int i;
@@ -1104,21 +1111,45 @@ static int sis_chip_init(struct sis7019 *sis)
        while ((inw(io + SIS_AC97_STATUS) & SIS_AC97_STATUS_BUSY) && --count)
                udelay(1);
 
+       /* Command complete, we can let go of the semaphore now.
+        */
+       outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
+       if (!count)
+               return -EIO;
+
        /* Now that we've finished the reset, find out what's attached.
+        * There are some codec/board combinations that take an extremely
+        * long time to come up. 350+ ms has been observed in the field,
+        * so we'll give them up to 500ms.
         */
-       status = inl(io + SIS_AC97_STATUS);
-       if (status & SIS_AC97_STATUS_CODEC_READY)
-               sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
-       if (status & SIS_AC97_STATUS_CODEC2_READY)
-               sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
-       if (status & SIS_AC97_STATUS_CODEC3_READY)
-               sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
-
-       /* All done, let go of the semaphore, and check for errors
+       sis->codecs_present = 0;
+       timeout = msecs_to_jiffies(500) + jiffies;
+       while (time_before_eq(jiffies, timeout)) {
+               status = inl(io + SIS_AC97_STATUS);
+               if (status & SIS_AC97_STATUS_CODEC_READY)
+                       sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
+               if (status & SIS_AC97_STATUS_CODEC2_READY)
+                       sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
+               if (status & SIS_AC97_STATUS_CODEC3_READY)
+                       sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
+
+               if (sis->codecs_present == codecs)
+                       break;
+
+               msleep(1);
+       }
+
+       /* All done, check for errors.
         */
-       outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
-       if (!sis->codecs_present || !count)
+       if (!sis->codecs_present) {
+               printk(KERN_ERR "sis7019: could not find any codecs\n");
                return -EIO;
+       }
+
+       if (sis->codecs_present != codecs) {
+               printk(KERN_WARNING "sis7019: missing codecs, found %0x, expected %0x\n",
+                      sis->codecs_present, codecs);
+       }
 
        /* Let the hardware know that the audio driver is alive,
         * and enable PCM slots on the AC-link for L/R playback (3 & 4) and
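
The rework above turns a one-shot status read into a bounded poll: keep sampling the AC97 status register, OR newly ready codecs into codecs_present, and stop as soon as the expected set is complete or the 500ms budget expires. A rough userspace analogue of that wait pattern; read_codec_status() is a hypothetical stand-in for the register read, and the kernel version uses jiffies, time_before_eq() and msleep() rather than clock_gettime() and usleep():

#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Pretend the primary codec reports ready after roughly 50 polls. */
static unsigned int read_codec_status(void)
{
	static int calls;
	return ++calls > 50 ? 0x1 : 0;
}

static long elapsed_ms(const struct timespec *start)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start->tv_sec) * 1000 +
	       (now.tv_nsec - start->tv_nsec) / 1000000;
}

int main(void)
{
	unsigned int expected = 0x1, present = 0;
	struct timespec start;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (elapsed_ms(&start) <= 500) {	/* same 500ms budget as the patch */
		present |= read_codec_status();
		if (present == expected)
			break;
		usleep(1000);			/* roughly msleep(1) */
	}

	if (!present) {
		fprintf(stderr, "could not find any codecs\n");
		return 1;
	}
	if (present != expected)
		fprintf(stderr, "missing codecs, found 0x%x, expected 0x%x\n",
			present, expected);
	return 0;
}
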
@@ -1390,6 +1421,17 @@ static int __devinit snd_sis7019_probe(struct pci_dev *pci,
        if (!enable)
                goto error_out;
 
+       /* The user can specify which codecs should be present so that we
+        * can wait for them to show up if they are slow to recover from
+        * the AC97 cold reset. We default to a single codec, the primary.
+        *
+        * We assume that SIS_PRIMARY_*_PRESENT matches bits 0-2.
+        */
+       codecs &= SIS_PRIMARY_CODEC_PRESENT | SIS_SECONDARY_CODEC_PRESENT |
+                 SIS_TERTIARY_CODEC_PRESENT;
+       if (!codecs)
+               codecs = SIS_PRIMARY_CODEC_PRESENT;
+
        rc = snd_card_create(index, id, THIS_MODULE, sizeof(*sis), &card);
        if (rc < 0)
                goto error_out;
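
The probe-time masking above keeps a user-supplied module parameter from requesting codec bits the hardware can never report, and restores the primary-codec default if the mask ends up empty. The same pattern in isolation, as a sketch of a kernel module; VALID_MASK, DEFAULT_MASK and the function names are placeholders, not the driver's own symbols:

#include <linux/module.h>
#include <linux/moduleparam.h>

#define VALID_MASK	0x7	/* three codec-present bits, like bits 0-2 above */
#define DEFAULT_MASK	0x1	/* primary codec only */

static int codecs = DEFAULT_MASK;
module_param(codecs, int, 0444);
MODULE_PARM_DESC(codecs, "Bitmask of codecs expected to be present");

static int __init codec_mask_example_init(void)
{
	/* Drop bits outside the supported range and fall back to the
	 * default if the user cleared everything.
	 */
	codecs &= VALID_MASK;
	if (!codecs)
		codecs = DEFAULT_MASK;
	pr_info("waiting for codec mask 0x%x\n", codecs);
	return 0;
}

static void __exit codec_mask_example_exit(void)
{
}

module_init(codec_mask_example_init);
module_exit(codec_mask_example_exit);
MODULE_LICENSE("GPL");
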
index bee3c94f58b0736c57f361141e0ed32e58640317..d1fcc816ce9705c5aca82f68eb327d65618301cd 100644 (file)
@@ -1,6 +1,6 @@
 config SND_ATMEL_SOC
        tristate "SoC Audio for the Atmel System-on-Chip"
-       depends on ARCH_AT91 || AVR32
+       depends on ARCH_AT91
        help
          Say Y or M if you want to add support for codecs attached to
          the ATMEL SSC interface. You will also need
@@ -24,25 +24,6 @@ config SND_AT91_SOC_SAM9G20_WM8731
          Say Y if you want to add support for SoC audio on WM8731-based
          AT91sam9g20 evaluation board.
 
-config SND_AT32_SOC_PLAYPAQ
-        tristate "SoC Audio support for PlayPaq with WM8510"
-        depends on SND_ATMEL_SOC && BOARD_PLAYPAQ && AT91_PROGRAMMABLE_CLOCKS
-        select SND_ATMEL_SOC_SSC
-        select SND_SOC_WM8510
-        help
-          Say Y or M here if you want to add support for SoC audio
-          on the LRS PlayPaq.
-
-config SND_AT32_SOC_PLAYPAQ_SLAVE
-        bool "Run CODEC on PlayPaq in slave mode"
-        depends on SND_AT32_SOC_PLAYPAQ
-        default n
-        help
-          Say Y if you want to run with the AT32 SSC generating the BCLK
-          and FRAME signals on the PlayPaq.  Unless you want to play
-          with the AT32 as the SSC master, you probably want to say N here,
-          as this will give you better sound quality.
-
 config SND_AT91_SOC_AFEB9260
        tristate "SoC Audio support for AFEB9260 board"
        depends on ARCH_AT91 && MACH_AFEB9260 && SND_ATMEL_SOC
index e7ea56bd5f82a94de94d4169c464e809d34dfb5e..a5c0bf19da78f01e823fc614c61528c30272a67c 100644 (file)
@@ -8,9 +8,5 @@ obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel_ssc_dai.o
 # AT91 Machine Support
 snd-soc-sam9g20-wm8731-objs := sam9g20_wm8731.o
 
-# AT32 Machine Support
-snd-soc-playpaq-objs := playpaq_wm8510.o
-
 obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o
-obj-$(CONFIG_SND_AT32_SOC_PLAYPAQ) += snd-soc-playpaq.o
 obj-$(CONFIG_SND_AT91_SOC_AFEB9260) += snd-soc-afeb9260.o
diff --git a/sound/soc/atmel/playpaq_wm8510.c b/sound/soc/atmel/playpaq_wm8510.c
deleted file mode 100644 (file)
index 73ae99a..0000000
+++ /dev/null
@@ -1,473 +0,0 @@
-/* sound/soc/at32/playpaq_wm8510.c
- * ASoC machine driver for PlayPaq using WM8510 codec
- *
- * Copyright (C) 2008 Long Range Systems
- *    Geoffrey Wossum <gwossum@acm.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This code is largely inspired by sound/soc/at91/eti_b1_wm8731.c
- *
- * NOTE: If you don't have the AT32 enhanced portmux configured (which
- * isn't currently in the mainline or Atmel patched kernel), you will
- * need to set the MCLK pin (PA30) to peripheral A in your board initialization
- * code.  Something like:
- *     at32_select_periph(GPIO_PIN_PA(30), GPIO_PERIPH_A, 0);
- *
- */
-
-/* #define DEBUG */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/clk.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include <mach/at32ap700x.h>
-#include <mach/portmux.h>
-
-#include "../codecs/wm8510.h"
-#include "atmel-pcm.h"
-#include "atmel_ssc_dai.h"
-
-
-/*-------------------------------------------------------------------------*\
- * constants
-\*-------------------------------------------------------------------------*/
-#define MCLK_PIN               GPIO_PIN_PA(30)
-#define MCLK_PERIPH            GPIO_PERIPH_A
-
-
-/*-------------------------------------------------------------------------*\
- * data types
-\*-------------------------------------------------------------------------*/
-/* SSC clocking data */
-struct ssc_clock_data {
-       /* CMR div */
-       unsigned int cmr_div;
-
-       /* Frame period (as needed by xCMR.PERIOD) */
-       unsigned int period;
-
-       /* The SSC clock rate these settings where calculated for */
-       unsigned long ssc_rate;
-};
-
-
-/*-------------------------------------------------------------------------*\
- * module data
-\*-------------------------------------------------------------------------*/
-static struct clk *_gclk0;
-static struct clk *_pll0;
-
-#define CODEC_CLK (_gclk0)
-
-
-/*-------------------------------------------------------------------------*\
- * Sound SOC operations
-\*-------------------------------------------------------------------------*/
-#if defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
-static struct ssc_clock_data playpaq_wm8510_calc_ssc_clock(
-       struct snd_pcm_hw_params *params,
-       struct snd_soc_dai *cpu_dai)
-{
-       struct at32_ssc_info *ssc_p = snd_soc_dai_get_drvdata(cpu_dai);
-       struct ssc_device *ssc = ssc_p->ssc;
-       struct ssc_clock_data cd;
-       unsigned int rate, width_bits, channels;
-       unsigned int bitrate, ssc_div;
-       unsigned actual_rate;
-
-
-       /*
-        * Figure out required bitrate
-        */
-       rate = params_rate(params);
-       channels = params_channels(params);
-       width_bits = snd_pcm_format_physical_width(params_format(params));
-       bitrate = rate * width_bits * channels;
-
-
-       /*
-        * Figure out required SSC divider and period for required bitrate
-        */
-       cd.ssc_rate = clk_get_rate(ssc->clk);
-       ssc_div = cd.ssc_rate / bitrate;
-       cd.cmr_div = ssc_div / 2;
-       if (ssc_div & 1) {
-               /* round cmr_div up */
-               cd.cmr_div++;
-       }
-       cd.period = width_bits - 1;
-
-
-       /*
-        * Find actual rate, compare to requested rate
-        */
-       actual_rate = (cd.ssc_rate / (cd.cmr_div * 2)) / (2 * (cd.period + 1));
-       pr_debug("playpaq_wm8510: Request rate = %u, actual rate = %u\n",
-                rate, actual_rate);
-
-
-       return cd;
-}
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
-
-static int playpaq_wm8510_hw_params(struct snd_pcm_substream *substream,
-                                   struct snd_pcm_hw_params *params)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_dai *codec_dai = rtd->codec_dai;
-       struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-       struct at32_ssc_info *ssc_p = snd_soc_dai_get_drvdata(cpu_dai);
-       struct ssc_device *ssc = ssc_p->ssc;
-       unsigned int pll_out = 0, bclk = 0, mclk_div = 0;
-       int ret;
-
-
-       /* Due to difficulties with getting the correct clocks from the AT32's
-        * PLL0, we're going to let the CODEC be in charge of all the clocks
-        */
-#if !defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
-       const unsigned int fmt = (SND_SOC_DAIFMT_I2S |
-                                 SND_SOC_DAIFMT_NB_NF |
-                                 SND_SOC_DAIFMT_CBM_CFM);
-#else
-       struct ssc_clock_data cd;
-       const unsigned int fmt = (SND_SOC_DAIFMT_I2S |
-                                 SND_SOC_DAIFMT_NB_NF |
-                                 SND_SOC_DAIFMT_CBS_CFS);
-#endif
-
-       if (ssc == NULL) {
-               pr_warning("playpaq_wm8510_hw_params: ssc is NULL!\n");
-               return -EINVAL;
-       }
-
-
-       /*
-        * Figure out PLL and BCLK dividers for WM8510
-        */
-       switch (params_rate(params)) {
-       case 48000:
-               pll_out = 24576000;
-               mclk_div = WM8510_MCLKDIV_2;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       case 44100:
-               pll_out = 22579200;
-               mclk_div = WM8510_MCLKDIV_2;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       case 22050:
-               pll_out = 22579200;
-               mclk_div = WM8510_MCLKDIV_4;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       case 16000:
-               pll_out = 24576000;
-               mclk_div = WM8510_MCLKDIV_6;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       case 11025:
-               pll_out = 22579200;
-               mclk_div = WM8510_MCLKDIV_8;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       case 8000:
-               pll_out = 24576000;
-               mclk_div = WM8510_MCLKDIV_12;
-               bclk = WM8510_BCLKDIV_8;
-               break;
-
-       default:
-               pr_warning("playpaq_wm8510: Unsupported sample rate %d\n",
-                          params_rate(params));
-               return -EINVAL;
-       }
-
-
-       /*
-        * set CPU and CODEC DAI configuration
-        */
-       ret = snd_soc_dai_set_fmt(codec_dai, fmt);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: "
-                          "Failed to set CODEC DAI format (%d)\n",
-                          ret);
-               return ret;
-       }
-       ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: "
-                          "Failed to set CPU DAI format (%d)\n",
-                          ret);
-               return ret;
-       }
-
-
-       /*
-        * Set CPU clock configuration
-        */
-#if defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
-       cd = playpaq_wm8510_calc_ssc_clock(params, cpu_dai);
-       pr_debug("playpaq_wm8510: cmr_div = %d, period = %d\n",
-                cd.cmr_div, cd.period);
-       ret = snd_soc_dai_set_clkdiv(cpu_dai, AT32_SSC_CMR_DIV, cd.cmr_div);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: Failed to set CPU CMR_DIV (%d)\n",
-                          ret);
-               return ret;
-       }
-       ret = snd_soc_dai_set_clkdiv(cpu_dai, AT32_SSC_TCMR_PERIOD,
-                                         cd.period);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: "
-                          "Failed to set CPU transmit period (%d)\n",
-                          ret);
-               return ret;
-       }
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
-       /*
-        * Set CODEC clock configuration
-        */
-       pr_debug("playpaq_wm8510: "
-                "pll_in = %ld, pll_out = %u, bclk = %x, mclk = %x\n",
-                clk_get_rate(CODEC_CLK), pll_out, bclk, mclk_div);
-
-
-#if !defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
-       ret = snd_soc_dai_set_clkdiv(codec_dai, WM8510_BCLKDIV, bclk);
-       if (ret < 0) {
-               pr_warning
-                   ("playpaq_wm8510: Failed to set CODEC DAI BCLKDIV (%d)\n",
-                    ret);
-               return ret;
-       }
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
-       ret = snd_soc_dai_set_pll(codec_dai, 0, 0,
-                                        clk_get_rate(CODEC_CLK), pll_out);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: Failed to set CODEC DAI PLL (%d)\n",
-                          ret);
-               return ret;
-       }
-
-
-       ret = snd_soc_dai_set_clkdiv(codec_dai, WM8510_MCLKDIV, mclk_div);
-       if (ret < 0) {
-               pr_warning("playpaq_wm8510: Failed to set CODEC MCLKDIV (%d)\n",
-                          ret);
-               return ret;
-       }
-
-
-       return 0;
-}
-
-
-
-static struct snd_soc_ops playpaq_wm8510_ops = {
-       .hw_params = playpaq_wm8510_hw_params,
-};
-
-
-
-static const struct snd_soc_dapm_widget playpaq_dapm_widgets[] = {
-       SND_SOC_DAPM_MIC("Int Mic", NULL),
-       SND_SOC_DAPM_SPK("Ext Spk", NULL),
-};
-
-
-
-static const struct snd_soc_dapm_route intercon[] = {
-       /* speaker connected to SPKOUT */
-       {"Ext Spk", NULL, "SPKOUTP"},
-       {"Ext Spk", NULL, "SPKOUTN"},
-
-       {"Mic Bias", NULL, "Int Mic"},
-       {"MICN", NULL, "Mic Bias"},
-       {"MICP", NULL, "Mic Bias"},
-};
-
-
-
-static int playpaq_wm8510_init(struct snd_soc_pcm_runtime *rtd)
-{
-       struct snd_soc_codec *codec = rtd->codec;
-       struct snd_soc_dapm_context *dapm = &codec->dapm;
-       int i;
-
-       /*
-        * Add DAPM widgets
-        */
-       for (i = 0; i < ARRAY_SIZE(playpaq_dapm_widgets); i++)
-               snd_soc_dapm_new_control(dapm, &playpaq_dapm_widgets[i]);
-
-
-
-       /*
-        * Setup audio path interconnects
-        */
-       snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
-
-
-
-       /* always connected pins */
-       snd_soc_dapm_enable_pin(dapm, "Int Mic");
-       snd_soc_dapm_enable_pin(dapm, "Ext Spk");
-
-
-
-       /* Make CSB show PLL rate */
-       snd_soc_dai_set_clkdiv(rtd->codec_dai, WM8510_OPCLKDIV,
-                                      WM8510_OPCLKDIV_1 | 4);
-
-       return 0;
-}
-
-
-
-static struct snd_soc_dai_link playpaq_wm8510_dai = {
-       .name = "WM8510",
-       .stream_name = "WM8510 PCM",
-       .cpu_dai_name= "atmel-ssc-dai.0",
-       .platform_name = "atmel-pcm-audio",
-       .codec_name = "wm8510-codec.0-0x1a",
-       .codec_dai_name = "wm8510-hifi",
-       .init = playpaq_wm8510_init,
-       .ops = &playpaq_wm8510_ops,
-};
-
-
-
-static struct snd_soc_card snd_soc_playpaq = {
-       .name = "LRS_PlayPaq_WM8510",
-       .dai_link = &playpaq_wm8510_dai,
-       .num_links = 1,
-};
-
-static struct platform_device *playpaq_snd_device;
-
-
-static int __init playpaq_asoc_init(void)
-{
-       int ret = 0;
-
-       /*
-        * Configure MCLK for WM8510
-        */
-       _gclk0 = clk_get(NULL, "gclk0");
-       if (IS_ERR(_gclk0)) {
-               _gclk0 = NULL;
-               ret = PTR_ERR(_gclk0);
-               goto err_gclk0;
-       }
-       _pll0 = clk_get(NULL, "pll0");
-       if (IS_ERR(_pll0)) {
-               _pll0 = NULL;
-               ret = PTR_ERR(_pll0);
-               goto err_pll0;
-       }
-       ret = clk_set_parent(_gclk0, _pll0);
-       if (ret) {
-               pr_warning("snd-soc-playpaq: "
-                          "Failed to set PLL0 as parent for DAC clock\n");
-               goto err_set_clk;
-       }
-       clk_set_rate(CODEC_CLK, 12000000);
-       clk_enable(CODEC_CLK);
-
-#if defined CONFIG_AT32_ENHANCED_PORTMUX
-       at32_select_periph(MCLK_PIN, MCLK_PERIPH, 0);
-#endif
-
-
-       /*
-        * Create and register platform device
-        */
-       playpaq_snd_device = platform_device_alloc("soc-audio", 0);
-       if (playpaq_snd_device == NULL) {
-               ret = -ENOMEM;
-               goto err_device_alloc;
-       }
-
-       platform_set_drvdata(playpaq_snd_device, &snd_soc_playpaq);
-
-       ret = platform_device_add(playpaq_snd_device);
-       if (ret) {
-               pr_warning("playpaq_wm8510: platform_device_add failed (%d)\n",
-                          ret);
-               goto err_device_add;
-       }
-
-       return 0;
-
-
-err_device_add:
-       if (playpaq_snd_device != NULL) {
-               platform_device_put(playpaq_snd_device);
-               playpaq_snd_device = NULL;
-       }
-err_device_alloc:
-err_set_clk:
-       if (_pll0 != NULL) {
-               clk_put(_pll0);
-               _pll0 = NULL;
-       }
-err_pll0:
-       if (_gclk0 != NULL) {
-               clk_put(_gclk0);
-               _gclk0 = NULL;
-       }
-       return ret;
-}
-
-
-static void __exit playpaq_asoc_exit(void)
-{
-       if (_gclk0 != NULL) {
-               clk_put(_gclk0);
-               _gclk0 = NULL;
-       }
-       if (_pll0 != NULL) {
-               clk_put(_pll0);
-               _pll0 = NULL;
-       }
-
-#if defined CONFIG_AT32_ENHANCED_PORTMUX
-       at32_free_pin(MCLK_PIN);
-#endif
-
-       platform_device_unregister(playpaq_snd_device);
-       playpaq_snd_device = NULL;
-}
-
-module_init(playpaq_asoc_init);
-module_exit(playpaq_asoc_exit);
-
-MODULE_AUTHOR("Geoffrey Wossum <gwossum@acm.org>");
-MODULE_DESCRIPTION("ASoC machine driver for LRS PlayPaq");
-MODULE_LICENSE("GPL");
index 4584514d93d4fd21a92de37847d318951e824bf8..fa787d45d74a920a32c4e8327a69c4a8a3544461 100644 (file)
@@ -33,7 +33,7 @@ config SND_SOC_ALL_CODECS
        select SND_SOC_CX20442
        select SND_SOC_DA7210 if I2C
        select SND_SOC_DFBMCS320
-       select SND_SOC_JZ4740_CODEC if SOC_JZ4740
+       select SND_SOC_JZ4740_CODEC
        select SND_SOC_LM4857 if I2C
        select SND_SOC_MAX98088 if I2C
        select SND_SOC_MAX98095 if I2C
index 444747f0db26615992e360b6a162eaf7b9f9ac44..dd7be0dbbc58189ff153b1a7b5724967bd4caadd 100644 (file)
@@ -34,7 +34,7 @@
 
 #define AD1836_ADC_CTRL2               13
 #define AD1836_ADC_WORD_LEN_MASK       0x30
-#define AD1836_ADC_WORD_OFFSET         5
+#define AD1836_ADC_WORD_OFFSET         4
 #define AD1836_ADC_SERFMT_MASK         (7 << 6)
 #define AD1836_ADC_SERFMT_PCK256       (0x4 << 6)
 #define AD1836_ADC_SERFMT_PCK128       (0x5 << 6)
index f1f237ecec2a6c43dcdf9ee31b72da7f0cdecff6..73f46eb459f15fa43c5aadc89c2d5a61346fb351 100644 (file)
@@ -601,7 +601,6 @@ static int cs4270_soc_suspend(struct snd_soc_codec *codec, pm_message_t mesg)
 static int cs4270_soc_resume(struct snd_soc_codec *codec)
 {
        struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec);
-       struct i2c_client *i2c_client = to_i2c_client(codec->dev);
        int reg;
 
        regulator_bulk_enable(ARRAY_SIZE(cs4270->supplies),
@@ -612,14 +611,7 @@ static int cs4270_soc_resume(struct snd_soc_codec *codec)
        ndelay(500);
 
        /* first restore the entire register cache ... */
-       for (reg = CS4270_FIRSTREG; reg <= CS4270_LASTREG; reg++) {
-               u8 val = snd_soc_read(codec, reg);
-
-               if (i2c_smbus_write_byte_data(i2c_client, reg, val)) {
-                       dev_err(codec->dev, "i2c write failed\n");
-                       return -EIO;
-               }
-       }
+       snd_soc_cache_sync(codec);
 
        /* ... then disable the power-down bits */
        reg = snd_soc_read(codec, CS4270_PWRCTL);
index 8c3c8205d19e99016e47b1564aa58cbdf91b0bab..1ee66361f61b946e5738798daf03d61baf2f8ecb 100644 (file)
@@ -555,7 +555,7 @@ static int cs42l51_probe(struct snd_soc_codec *codec)
 
 static struct snd_soc_codec_driver soc_codec_device_cs42l51 = {
        .probe =        cs42l51_probe,
-       .reg_cache_size = CS42L51_NUMREGS,
+       .reg_cache_size = CS42L51_NUMREGS + 1,
        .reg_word_size = sizeof(u8),
 };
 
index e373f8f0690731874d0a153190951ddc564500fe..3e1f4e172bfb90c318e4150d8aebbe782b78d4ab 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/io.h>
 
 #include <linux/delay.h>
 
index 9e7e964a5fa3fd1a8824c17c9dad4bbc60923909..dcf6f2a1600ae5726a60d0a7e11ef6e53d57d466 100644 (file)
@@ -106,13 +106,13 @@ static int max9877_set_2reg(struct snd_kcontrol *kcontrol,
        unsigned int mask = mc->max;
        unsigned int val = (ucontrol->value.integer.value[0] & mask);
        unsigned int val2 = (ucontrol->value.integer.value[1] & mask);
-       unsigned int change = 1;
+       unsigned int change = 0;
 
-       if (((max9877_regs[reg] >> shift) & mask) == val)
-               change = 0;
+       if (((max9877_regs[reg] >> shift) & mask) != val)
+               change = 1;
 
-       if (((max9877_regs[reg2] >> shift) & mask) == val2)
-               change = 0;
+       if (((max9877_regs[reg2] >> shift) & mask) != val2)
+               change = 1;
 
        if (change) {
                max9877_regs[reg] &= ~(mask << shift);
index c5ca8cfea60f80f8de27cc5d12ad55f69bd900f2..0441893e270ed2b5621833fecc997b6b20f85159 100644 (file)
@@ -863,13 +863,13 @@ static struct i2c_driver uda1380_i2c_driver = {
 
 static int __init uda1380_modinit(void)
 {
-       int ret;
+       int ret = 0;
 #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
        ret = i2c_add_driver(&uda1380_i2c_driver);
        if (ret != 0)
                pr_err("Failed to register UDA1380 I2C driver: %d\n", ret);
 #endif
-       return 0;
+       return ret;
 }
 module_init(uda1380_modinit);
 
index bfdc52370ad02de96bd9cf1859614db91e682949..d3b0a20744f1950ee2c0c882f77d72b6ab0a1959 100644 (file)
@@ -235,6 +235,7 @@ static int wm8776_hw_params(struct snd_pcm_substream *substream,
        switch (snd_pcm_format_width(params_format(params))) {
        case 16:
                iface = 0;
+               break;
        case 20:
                iface = 0x10;
                break;
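
Without the added break, a 16-bit stream fell straight through into the 20-bit case and was programmed with the 20-bit interface value. A standalone illustration of the corrected mapping; the 16- and 20-bit values mirror the hunk, the function name is made up and the remaining widths are left out:

#include <stdio.h>

static int width_to_iface(int width)
{
	int iface;

	switch (width) {
	case 16:
		iface = 0x00;
		break;		/* without this break, 16 bits would yield 0x10 */
	case 20:
		iface = 0x10;
		break;
	default:
		return -1;	/* other widths omitted in this sketch */
	}
	return iface;
}

int main(void)
{
	printf("16-bit -> 0x%02x, 20-bit -> 0x%02x\n",
	       width_to_iface(16), width_to_iface(20));
	return 0;
}
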
index 0293763debe5811160fb3b7cc23284a91df2a94a..5a14d5c0e0e1b6dc98fb39140a4ca47f6bd18294 100644 (file)
@@ -60,6 +60,8 @@ static int wm8958_dsp2_fw(struct snd_soc_codec *codec, const char *name,
        }
 
        if (memcmp(fw->data, "WMFW", 4) != 0) {
+               memcpy(&data32, fw->data, sizeof(data32));
+               data32 = be32_to_cpu(data32);
                dev_err(codec->dev, "%s: firmware has bad file magic %08x\n",
                        name, data32);
                goto err;
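
Previously the error path printed data32 without ever assigning it, so the "bad file magic" message showed whatever happened to be on the stack; the fix copies the first four bytes of the firmware and byte-swaps them first. A userspace analogue of that read, using ntohl() in place of be32_to_cpu() and a made-up buffer:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	const unsigned char fw[] = { 'X', 'M', 'F', 'W', 0x01 };	/* bad magic */
	uint32_t magic;

	if (memcmp(fw, "WMFW", 4) != 0) {
		memcpy(&magic, fw, sizeof(magic));
		magic = ntohl(magic);
		fprintf(stderr, "firmware has bad file magic %08x\n", magic);
		return 1;
	}
	return 0;
}
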
index 9c982e47eb99308b377e7143d8024c19863b14c5..d0c545b73d7865c04b9fefd286b85b7fe63fc5b2 100644 (file)
@@ -1325,15 +1325,15 @@ SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
 };
 
 static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = {
-SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
-                  adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
-SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
-                  adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_VIRT_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
+                       adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_VIRT_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
+                       adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
 };
 
 static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = {
-SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
-SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
 };
 
 static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
@@ -2357,6 +2357,11 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
        bclk |= best << WM8994_AIF1_BCLK_DIV_SHIFT;
 
        lrclk = bclk_rate / params_rate(params);
+       if (!lrclk) {
+               dev_err(dai->dev, "Unable to generate LRCLK from %dHz BCLK\n",
+                       bclk_rate);
+               return -EINVAL;
+       }
        dev_dbg(dai->dev, "Using LRCLK rate %d for actual LRCLK %dHz\n",
                lrclk, bclk_rate / lrclk);
 
@@ -3178,6 +3183,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
                switch (wm8994->revision) {
                case 0:
                case 1:
+               case 2:
+               case 3:
                        wm8994->hubs.dcs_codes_l = -9;
                        wm8994->hubs.dcs_codes_r = -5;
                        break;
index 645c980d6b80edd81b1f0886c013c34f884346d6..a33b04d1719537409eb186c02a7e5dedfcbc7683 100644 (file)
@@ -1968,6 +1968,7 @@ static int wm8996_set_sysclk(struct snd_soc_dai *dai,
                break;
        case 24576000:
                ratediv = WM8996_SYSCLK_DIV;
+               wm8996->sysclk /= 2;
        case 12288000:
                snd_soc_update_bits(codec, WM8996_AIF_RATE,
                                    WM8996_SYSCLK_RATE, WM8996_SYSCLK_RATE);
index 31af405bda843cc691e755cc6bb4a0afec78925f..ae49f1c78c6de797bd193946b1d350aa00bd8a40 100644 (file)
@@ -392,7 +392,8 @@ static int mpc8610_hpcd_probe(struct platform_device *pdev)
        }
 
        if (strcasecmp(sprop, "i2s-slave") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_I2S;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
 
@@ -409,31 +410,38 @@ static int mpc8610_hpcd_probe(struct platform_device *pdev)
                }
                machine_data->clk_frequency = be32_to_cpup(iprop);
        } else if (strcasecmp(sprop, "i2s-master") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_I2S;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
        } else if (strcasecmp(sprop, "lj-slave") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_LEFT_J;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBM_CFM;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
        } else if (strcasecmp(sprop, "lj-master") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_LEFT_J;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBS_CFS;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
        } else if (strcasecmp(sprop, "rj-slave") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_RIGHT_J;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBM_CFM;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
        } else if (strcasecmp(sprop, "rj-master") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_RIGHT_J;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBS_CFS;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
        } else if (strcasecmp(sprop, "ac97-slave") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_AC97;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBM_CFM;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
        } else if (strcasecmp(sprop, "ac97-master") == 0) {
-               machine_data->dai_format = SND_SOC_DAIFMT_AC97;
+               machine_data->dai_format =
+                       SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBS_CFS;
                machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
                machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
        } else {
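
Each fsl,mode string now encodes both the serial format and which side is clock/frame master (CBM_CFM for codec master, CBS_CFS for CPU master), rather than leaving the master bits unset. The same mapping can be expressed as a lookup table; this sketch uses symbolic stand-ins for the SND_SOC_DAIFMT_* constants and omits the clock-direction fields the real probe also fills in:

#include <stdio.h>
#include <string.h>
#include <strings.h>

enum { FMT_I2S = 1, FMT_LEFT_J, FMT_RIGHT_J, FMT_AC97 };	/* stand-ins */
enum { CLK_CODEC_MASTER = 0x10, CLK_CPU_MASTER = 0x20 };	/* stand-ins */

static const struct {
	const char *mode;
	unsigned int dai_format;
} modes[] = {
	{ "i2s-slave",   FMT_I2S     | CLK_CODEC_MASTER },
	{ "i2s-master",  FMT_I2S     | CLK_CPU_MASTER },
	{ "lj-slave",    FMT_LEFT_J  | CLK_CODEC_MASTER },
	{ "lj-master",   FMT_LEFT_J  | CLK_CPU_MASTER },
	{ "rj-slave",    FMT_RIGHT_J | CLK_CODEC_MASTER },
	{ "rj-master",   FMT_RIGHT_J | CLK_CPU_MASTER },
	{ "ac97-slave",  FMT_AC97    | CLK_CODEC_MASTER },
	{ "ac97-master", FMT_AC97    | CLK_CPU_MASTER },
};

int main(void)
{
	const char *sprop = "i2s-slave";	/* value read from the device tree */
	size_t i;

	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++) {
		if (strcasecmp(sprop, modes[i].mode) == 0) {
			printf("%s -> dai_format 0x%x\n", sprop,
			       modes[i].dai_format);
			return 0;
		}
	}
	fprintf(stderr, "unrecognised fsl,mode \"%s\"\n", sprop);
	return 1;
}
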
index b133bfcc5848ea8f6ec3c7cab18772ac18bc07b9..738391757f2ccb1a5aa6a8883de0ba127fedea68 100644 (file)
@@ -28,7 +28,7 @@ config SND_MXC_SOC_WM1133_EV1
 
 config SND_SOC_MX27VIS_AIC32X4
        tristate "SoC audio support for Visstrim M10 boards"
-       depends on MACH_IMX27_VISSTRIM_M10
+       depends on MACH_IMX27_VISSTRIM_M10 && I2C
        select SND_SOC_TLV320AIC32X4
        select SND_MXC_SOC_MX2
        help
index 8f49e165f4d1dd40119143b3b971a2b2f964025c..c62d715235e29ac5fa20639271d79958a57fd853 100644 (file)
@@ -12,6 +12,7 @@ config SND_KIRKWOOD_SOC_I2S
 config SND_KIRKWOOD_SOC_OPENRD
        tristate "SoC Audio support for Kirkwood Openrd Client"
        depends on SND_KIRKWOOD_SOC && (MACH_OPENRD_CLIENT || MACH_OPENRD_ULTIMATE)
+       depends on I2C
        select SND_KIRKWOOD_SOC_I2S
        select SND_SOC_CS42L51
        help
@@ -20,7 +21,7 @@ config SND_KIRKWOOD_SOC_OPENRD
 
 config SND_KIRKWOOD_SOC_T5325
        tristate "SoC Audio support for HP t5325"
-       depends on SND_KIRKWOOD_SOC && MACH_T5325
+       depends on SND_KIRKWOOD_SOC && MACH_T5325 && I2C
        select SND_KIRKWOOD_SOC_I2S
        select SND_SOC_ALC5623
        help
index dea5aa4aa6473a03231ff22e416bb61684c80046..f39d7dd9fbcb5956cf55f989ae0a869bfe7bd336 100644 (file)
@@ -357,3 +357,6 @@ static void __exit snd_mxs_pcm_exit(void)
        platform_driver_unregister(&mxs_pcm_driver);
 }
 module_exit(snd_mxs_pcm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-pcm-audio");
index 7fbeaec06eb4d514fdebb8aef5e4e1e568489b4c..1c57f6630a48d8ff1e3a626677114bbe8f024f27 100644 (file)
@@ -171,3 +171,4 @@ module_exit(mxs_sgtl5000_exit);
 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
 MODULE_DESCRIPTION("MXS ALSA SoC Machine driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-sgtl5000");
index ffd2242e305f0827fb742f2c2339041d1859785f..a0f7d3cfa470b0857586b10021290aec5b4fd20f 100644 (file)
@@ -151,6 +151,7 @@ config SND_SOC_ZYLONITE
 config SND_SOC_RAUMFELD
        tristate "SoC Audio support Raumfeld audio adapter"
        depends on SND_PXA2XX_SOC && (MACH_RAUMFELD_SPEAKER || MACH_RAUMFELD_CONNECTOR)
+       depends on I2C && SPI_MASTER
        select SND_PXA_SOC_SSP
        select SND_SOC_CS4270
        select SND_SOC_AK4104
@@ -159,7 +160,7 @@ config SND_SOC_RAUMFELD
 
 config SND_PXA2XX_SOC_HX4700
        tristate "SoC Audio support for HP iPAQ hx4700"
-       depends on SND_PXA2XX_SOC && MACH_H4700
+       depends on SND_PXA2XX_SOC && MACH_H4700 && I2C
        select SND_PXA2XX_SOC_I2S
        select SND_SOC_AK4641
        help
index 65c124831a0063f0b645062a13559348e5af367a..c664e33fb6d732c239e00d115df663a1b4b5908f 100644 (file)
@@ -209,9 +209,10 @@ static int __devinit hx4700_audio_probe(struct platform_device *pdev)
        snd_soc_card_hx4700.dev = &pdev->dev;
        ret = snd_soc_register_card(&snd_soc_card_hx4700);
        if (ret)
-               return ret;
+               gpio_free_array(hx4700_audio_gpios,
+                               ARRAY_SIZE(hx4700_audio_gpios));
 
-       return 0;
+       return ret;
 }
 
 static int __devexit hx4700_audio_remove(struct platform_device *pdev)
index 1826acf20f7c96cf3fdca744d083c29f2bdfd837..8e523fd9189e562557b36b39671d9e12b7deefa2 100644 (file)
@@ -101,7 +101,6 @@ static int jive_wm8750_init(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_soc_codec *codec = rtd->codec;
        struct snd_soc_dapm_context *dapm = &codec->dapm;
-       int err;
 
        /* These endpoints are not being used. */
        snd_soc_dapm_nc_pin(dapm, "LINPUT2");
@@ -131,7 +130,7 @@ static struct snd_soc_card snd_soc_machine_jive = {
        .dai_link       = &jive_dai,
        .num_links      = 1,
 
-       .dapm_widgtets  = wm8750_dapm_widgets,
+       .dapm_widgets   = wm8750_dapm_widgets,
        .num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets),
        .dapm_routes    = audio_map,
        .num_dapm_routes = ARRAY_SIZE(audio_map),
index 3a0dbfc793f0fc5d6c36b9bc924ec11ff07970ff..8bd1dc5706bf1f423f5d0c7d63a7316c5b9191d0 100644 (file)
@@ -12,6 +12,7 @@
  *
  */
 
+#include <linux/module.h>
 #include <sound/soc.h>
 
 static struct snd_soc_card smdk2443;
index f75e43997d5beb5b27a82f8d023eddd33311538e..ad9ac42522e2539faf84c6286cc1614573e441f2 100644 (file)
@@ -9,6 +9,7 @@
 
 #include "../codecs/wm8994.h"
 #include <sound/pcm_params.h>
+#include <linux/module.h>
 
  /*
   * Default CFG switch settings to use this driver:
index 85bf541a771d05226b761d6d274e7affd8cba6e2..4b8e35410eb1962623cc31882967397747e5b3e7 100644 (file)
@@ -191,7 +191,7 @@ static int speyside_late_probe(struct snd_soc_card *card)
        snd_soc_dapm_ignore_suspend(&card->dapm, "Headset Mic");
        snd_soc_dapm_ignore_suspend(&card->dapm, "Main AMIC");
        snd_soc_dapm_ignore_suspend(&card->dapm, "Main DMIC");
-       snd_soc_dapm_ignore_suspend(&card->dapm, "Speaker");
+       snd_soc_dapm_ignore_suspend(&card->dapm, "Main Speaker");
        snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Output");
        snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Input");
 
index a5d3685a5d38049313391ddb8e174edd9c28a21b..a25fa63ce9a27501a4f2d4a6334911076200532e 100644 (file)
@@ -709,6 +709,12 @@ int snd_soc_resume(struct device *dev)
        struct snd_soc_card *card = dev_get_drvdata(dev);
        int i, ac97_control = 0;
 
+       /* If the initialization of this soc device failed, there is no codec
+        * associated with it. Just bail out in this case.
+        */
+       if (list_empty(&card->codec_dev_list))
+               return 0;
+
        /* AC97 devices might have other drivers hanging off them so
         * need to resume immediately.  Other drivers don't have that
         * problem and may take a substantial amount of time to resume
index 0c12b98484bdd8316418358b5cead696e8774c57..4220bb0f27301aa962964b9eb645fd0f5e51e17c 100644 (file)
@@ -58,7 +58,36 @@ int snd_soc_params_to_bclk(struct snd_pcm_hw_params *params)
 }
 EXPORT_SYMBOL_GPL(snd_soc_params_to_bclk);
 
-static struct snd_soc_platform_driver dummy_platform;
+static const struct snd_pcm_hardware dummy_dma_hardware = {
+       .formats                = 0xffffffff,
+       .channels_min           = 1,
+       .channels_max           = UINT_MAX,
+
+       /* Random values to keep userspace happy when checking constraints */
+       .info                   = SNDRV_PCM_INFO_INTERLEAVED |
+                                 SNDRV_PCM_INFO_BLOCK_TRANSFER,
+       .buffer_bytes_max       = 128*1024,
+       .period_bytes_min       = PAGE_SIZE,
+       .period_bytes_max       = PAGE_SIZE*2,
+       .periods_min            = 2,
+       .periods_max            = 128,
+};
+
+static int dummy_dma_open(struct snd_pcm_substream *substream)
+{
+       snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
+
+       return 0;
+}
+
+static struct snd_pcm_ops dummy_dma_ops = {
+       .open           = dummy_dma_open,
+       .ioctl          = snd_pcm_lib_ioctl,
+};
+
+static struct snd_soc_platform_driver dummy_platform = {
+       .ops = &dummy_dma_ops,
+};
 
 static __devinit int snd_soc_dummy_probe(struct platform_device *pdev)
 {
index b61945f3af9e594aa28b2d992239bd4f4a714c6b..32d2a21f2e3b5a401948d24b81c945e239e8fa4e 100644 (file)
@@ -1632,6 +1632,37 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                }
        }
 },
+{
+       /* Roland GAIA SH-01 */
+       USB_DEVICE(0x0582, 0x0111),
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .vendor_name = "Roland",
+               .product_name = "GAIA",
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = (const struct snd_usb_audio_quirk[]) {
+                       {
+                               .ifnum = 0,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 1,
+                               .type = QUIRK_AUDIO_STANDARD_INTERFACE
+                       },
+                       {
+                               .ifnum = 2,
+                               .type = QUIRK_MIDI_FIXED_ENDPOINT,
+                               .data = &(const struct snd_usb_midi_endpoint_info) {
+                               .out_cables = 0x0003,
+                               .in_cables  = 0x0003
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
 {
        USB_DEVICE(0x0582, 0x0113),
        .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
index 7d98676808d8722a39623de8983642f987cea7cc..955930e0a5c34cd0852f92aa39a7c73492c159b5 100644 (file)
@@ -463,7 +463,8 @@ static int run_perf_stat(int argc __used, const char **argv)
 
        list_for_each_entry(counter, &evsel_list->entries, node) {
                if (create_perf_stat_counter(counter, first) < 0) {
-                       if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) {
+                       if (errno == EINVAL || errno == ENOSYS ||
+                           errno == ENOENT || errno == EOPNOTSUPP) {
                                if (verbose)
                                        ui__warning("%s event is not supported by the kernel.\n",
                                                    event_name(counter));
index e42626422587851b9c1b6e755dfdb09858640124..d7915d4e77cb629e4560d499ac2c1c902ecce5db 100644 (file)
@@ -34,6 +34,16 @@ int __perf_evsel__sample_size(u64 sample_type)
        return size;
 }
 
+static void hists__init(struct hists *hists)
+{
+       memset(hists, 0, sizeof(*hists));
+       hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
+       hists->entries_in = &hists->entries_in_array[0];
+       hists->entries_collapsed = RB_ROOT;
+       hists->entries = RB_ROOT;
+       pthread_mutex_init(&hists->lock, NULL);
+}
+
 void perf_evsel__init(struct perf_evsel *evsel,
                      struct perf_event_attr *attr, int idx)
 {
index bcd05d05b4f01969906efe3dbfcc899ccdea554a..33c17a2b2a81e739066991a9a9facd2b38ee0416 100644 (file)
@@ -388,7 +388,7 @@ static int write_event_desc(int fd, struct perf_header *h __used,
                /*
                 * write event string as passed on cmdline
                 */
-               ret = do_write_string(fd, attr->name);
+               ret = do_write_string(fd, event_name(attr));
                if (ret < 0)
                        return ret;
                /*
index a36a3fa81ffba45ea6602145530289c1c326acbc..abef2703cd242eb8b8e5f1763cadd50286bdc8be 100644 (file)
@@ -1211,13 +1211,3 @@ size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp)
 
        return ret;
 }
-
-void hists__init(struct hists *hists)
-{
-       memset(hists, 0, sizeof(*hists));
-       hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
-       hists->entries_in = &hists->entries_in_array[0];
-       hists->entries_collapsed = RB_ROOT;
-       hists->entries = RB_ROOT;
-       pthread_mutex_init(&hists->lock, NULL);
-}
index c86c1d27bd1eca09cef6c00949293a251b345ef9..89289c8e935e78973a906fb96edd164973d20ac9 100644 (file)
@@ -63,8 +63,6 @@ struct hists {
        struct callchain_cursor callchain_cursor;
 };
 
-void hists__init(struct hists *hists);
-
 struct hist_entry *__hists__add_entry(struct hists *self,
                                      struct addr_location *al,
                                      struct symbol *parent, u64 period);
index 85c1e6b76f0a4bbdd3d2c5b9e0d7359733dbfda7..0f4555ce90635a767f4a609b917905b5f58bdd0a 100644 (file)
@@ -1333,6 +1333,10 @@ int perf_session__cpu_bitmap(struct perf_session *session,
        }
 
        map = cpu_map__new(cpu_list);
+       if (map == NULL) {
+               pr_err("Invalid cpu_list\n");
+               return -1;
+       }
 
        for (i = 0; i < map->nr; i++) {
                int cpu = map->map[i];
index 0a7ed5b5e281c88b321de87ced66a3d29ebb003d..6c164dc9ee957dbf3df642f712b2fdc1485d2dc6 100644 (file)
@@ -1537,6 +1537,8 @@ process_flags(struct event *event, struct print_arg *arg, char **tok)
        field = malloc_or_die(sizeof(*field));
 
        type = process_arg(event, field, &token);
+       while (type == EVENT_OP)
+               type = process_op(event, field, &token);
        if (test_type_token(type, token, EVENT_DELIM, ","))
                goto out_free;
 
index 3ad0925d23a9c85e39b508021ef9fcf2bbb1ad81..758e3b36d4cfd525846a1968987d80bea8e4bcce 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/namei.h>
+#include <linux/fs.h>
 #include "irq.h"
 
 static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
@@ -480,12 +482,76 @@ out:
        return r;
 }
 
+/*
+ * We want to test whether the caller has been granted permissions to
+ * use this device.  To be able to configure and control the device,
+ * the user needs access to PCI configuration space and BAR resources.
+ * These are accessed through PCI sysfs.  PCI config space is often
+ * passed to the process calling this ioctl via file descriptor, so we
+ * can't rely on access to that file.  We can check for permissions
+ * on each of the BAR resource files, which is a pretty clear
+ * indicator that the user has been granted access to the device.
+ */
+static int probe_sysfs_permissions(struct pci_dev *dev)
+{
+#ifdef CONFIG_SYSFS
+       int i;
+       bool bar_found = false;
+
+       for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) {
+               char *kpath, *syspath;
+               struct path path;
+               struct inode *inode;
+               int r;
+
+               if (!pci_resource_len(dev, i))
+                       continue;
+
+               kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
+               if (!kpath)
+                       return -ENOMEM;
+
+               /* Per sysfs-rules, sysfs is always at /sys */
+               syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i);
+               kfree(kpath);
+               if (!syspath)
+                       return -ENOMEM;
+
+               r = kern_path(syspath, LOOKUP_FOLLOW, &path);
+               kfree(syspath);
+               if (r)
+                       return r;
+
+               inode = path.dentry->d_inode;
+
+               r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
+               path_put(&path);
+               if (r)
+                       return r;
+
+               bar_found = true;
+       }
+
+       /* If no resources, probably something special */
+       if (!bar_found)
+               return -EPERM;
+
+       return 0;
+#else
+       return -EINVAL; /* No way to control the device without sysfs */
+#endif
+}
+
 static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
                                      struct kvm_assigned_pci_dev *assigned_dev)
 {
        int r = 0, idx;
        struct kvm_assigned_dev_kernel *match;
        struct pci_dev *dev;
+       u8 header_type;
+
+       if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
+               return -EINVAL;
 
        mutex_lock(&kvm->lock);
        idx = srcu_read_lock(&kvm->srcu);
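
The comment block above spells out the idea: instead of trusting the config-space file descriptor, check that the caller has been granted read/write access to each BAR resource file under the device's sysfs node, and refuse assignment if no BAR is accessible. A rough userspace analogue of the same probe, using access() on a hand-built path where the kernel builds it from the kobject and uses kern_path()/inode_permission(); the PCI address is only an example, and access() checks the real UID rather than the caller's full credentials:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *dev = "0000:00:1f.3";	/* example PCI address */
	char path[128];
	int i, bar_found = 0;

	for (i = 0; i <= 5; i++) {		/* the six standard BARs */
		snprintf(path, sizeof(path),
			 "/sys/bus/pci/devices/%s/resource%d", dev, i);
		if (access(path, F_OK))
			continue;		/* BAR not implemented */
		if (access(path, R_OK | W_OK)) {
			fprintf(stderr, "no access to %s\n", path);
			return 1;
		}
		bar_found = 1;
	}

	if (!bar_found) {
		fprintf(stderr, "no BAR resource files found\n");
		return 1;	/* probably something special, refuse */
	}
	printf("caller may control %s\n", dev);
	return 0;
}
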
@@ -513,6 +579,18 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
                r = -EINVAL;
                goto out_free;
        }
+
+       /* Don't allow bridges to be assigned */
+       pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
+       if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
+               r = -EPERM;
+               goto out_put;
+       }
+
+       r = probe_sysfs_permissions(dev);
+       if (r)
+               goto out_put;
+
        if (pci_enable_device(dev)) {
                printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
                r = -EBUSY;
@@ -544,16 +622,14 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 
        list_add(&match->list, &kvm->arch.assigned_dev_head);
 
-       if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
-               if (!kvm->arch.iommu_domain) {
-                       r = kvm_iommu_map_guest(kvm);
-                       if (r)
-                               goto out_list_del;
-               }
-               r = kvm_assign_device(kvm, match);
+       if (!kvm->arch.iommu_domain) {
+               r = kvm_iommu_map_guest(kvm);
                if (r)
                        goto out_list_del;
        }
+       r = kvm_assign_device(kvm, match);
+       if (r)
+               goto out_list_del;
 
 out:
        srcu_read_unlock(&kvm->srcu, idx);
@@ -593,8 +669,7 @@ static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
                goto out;
        }
 
-       if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
-               kvm_deassign_device(kvm, match);
+       kvm_deassign_device(kvm, match);
 
        kvm_free_assigned_device(kvm, match);