Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm
author     Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 28 May 2011 02:51:32 +0000 (19:51 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Sat, 28 May 2011 02:51:32 +0000 (19:51 -0700)
* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm: (45 commits)
  ARM: 6945/1: Add unwinding support for division functions
  ARM: kill pmd_off()
  ARM: 6944/1: mm: allow ASID 0 to be allocated to tasks
  ARM: 6943/1: mm: use TTBR1 instead of reserved context ID
  ARM: 6942/1: mm: make TTBR1 always point to swapper_pg_dir on ARMv6/7
  ARM: 6941/1: cache: ensure MVA is cacheline aligned in flush_kern_dcache_area
  ARM: add sendmmsg syscall
  ARM: 6863/1: allow hotplug on msm
  ARM: 6832/1: mmci: support for ST-Ericsson db8500v2
  ARM: 6830/1: mach-ux500: force PrimeCell revisions
  ARM: 6829/1: amba: make hardcoded periphid override hardware
  ARM: 6828/1: mach-ux500: delete SSP PrimeCell ID
  ARM: 6827/1: mach-netx: delete hardcoded periphid
  ARM: 6940/1: fiq: Briefly document driver responsibilities for suspend/resume
  ARM: 6938/1: fiq: Refactor {get,set}_fiq_regs() for Thumb-2
  ARM: 6914/1: sparsemem: fix highmem detection when using SPARSEMEM
  ARM: 6913/1: sparsemem: allow pfn_valid to be overridden when using SPARSEMEM
  at91: drop at572d940hf support
  at91rm9200: introduce at91rm9200_set_type to specficy cpu package
  at91: drop boot_params and PLAT_PHYS_OFFSET
  ...

823 files changed:
Documentation/00-INDEX
Documentation/ABI/removed/o2cb [moved from Documentation/ABI/obsolete/o2cb with 65% similarity]
Documentation/ABI/testing/sysfs-kernel-mm-cleancache [new file with mode: 0644]
Documentation/DocBook/dvb/dvbproperty.xml
Documentation/DocBook/media-entities.tmpl
Documentation/DocBook/v4l/media-controller.xml
Documentation/DocBook/v4l/pixfmt.xml
Documentation/DocBook/v4l/subdev-formats.xml
Documentation/accounting/getdelays.c
Documentation/atomic_ops.txt
Documentation/cgroups/cgroups.txt
Documentation/feature-removal-schedule.txt
Documentation/filesystems/configfs/configfs_example_explicit.c
Documentation/filesystems/configfs/configfs_example_macros.c
Documentation/filesystems/ext4.txt
Documentation/filesystems/nfs/idmapper.txt
Documentation/filesystems/ocfs2.txt
Documentation/filesystems/xfs.txt
Documentation/networking/dns_resolver.txt
Documentation/power/regulator/machine.txt
Documentation/security/00-INDEX [new file with mode: 0644]
Documentation/security/SELinux.txt [moved from Documentation/SELinux.txt with 100% similarity]
Documentation/security/Smack.txt [moved from Documentation/Smack.txt with 100% similarity]
Documentation/security/apparmor.txt [moved from Documentation/apparmor.txt with 100% similarity]
Documentation/security/credentials.txt [moved from Documentation/credentials.txt with 99% similarity]
Documentation/security/keys-request-key.txt [moved from Documentation/keys-request-key.txt with 98% similarity]
Documentation/security/keys-trusted-encrypted.txt [moved from Documentation/keys-trusted-encrypted.txt with 100% similarity]
Documentation/security/keys.txt [moved from Documentation/keys.txt with 99% similarity]
Documentation/security/tomoyo.txt [moved from Documentation/tomoyo.txt with 100% similarity]
Documentation/sysctl/kernel.txt
Documentation/vm/cleancache.txt [new file with mode: 0644]
MAINTAINERS
arch/alpha/Kconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/include/asm/bitops.h
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/board-2430sdp.c
arch/arm/mach-omap2/board-3430sdp.c
arch/arm/mach-omap2/board-4430sdp.c
arch/arm/mach-omap2/board-am3517crane.c
arch/arm/mach-omap2/board-am3517evm.c
arch/arm/mach-omap2/board-apollon.c
arch/arm/mach-omap2/board-cm-t35.c
arch/arm/mach-omap2/board-cm-t3517.c
arch/arm/mach-omap2/board-devkit8000.c
arch/arm/mach-omap2/board-igep0020.c
arch/arm/mach-omap2/board-igep0030.c [deleted file]
arch/arm/mach-omap2/board-ldp.c
arch/arm/mach-omap2/board-n8x0.c
arch/arm/mach-omap2/board-omap3beagle.c
arch/arm/mach-omap2/board-omap3evm.c
arch/arm/mach-omap2/board-omap3logic.c
arch/arm/mach-omap2/board-omap3pandora.c
arch/arm/mach-omap2/board-omap3stalker.c
arch/arm/mach-omap2/board-omap3touchbook.c
arch/arm/mach-omap2/board-omap4panda.c
arch/arm/mach-omap2/board-overo.c
arch/arm/mach-omap2/board-rm680.c
arch/arm/mach-omap2/board-rx51-peripherals.c
arch/arm/mach-omap2/board-rx51-video.c
arch/arm/mach-omap2/board-rx51.c
arch/arm/mach-omap2/board-zoom-debugboard.c
arch/arm/mach-omap2/board-zoom-display.c
arch/arm/mach-omap2/board-zoom-peripherals.c
arch/arm/mach-omap2/common-board-devices.c [new file with mode: 0644]
arch/arm/mach-omap2/common-board-devices.h [new file with mode: 0644]
arch/arm/mach-omap2/cpuidle34xx.c
arch/arm/mach-omap2/gpmc-smc91x.c
arch/arm/mach-omap2/gpmc-smsc911x.c
arch/arm/mach-omap2/omap_l3_noc.c
arch/arm/mach-omap2/omap_l3_smx.c
arch/arm/mach-omap2/omap_phy_internal.c
arch/arm/mach-omap2/pm.h
arch/arm/mach-omap2/pm34xx.c
arch/arm/mach-omap2/pm44xx.c
arch/arm/mach-omap2/smartreflex.c
arch/arm/mach-omap2/usb-musb.c
arch/arm/mach-omap2/usb-tusb6010.c
arch/arm/mach-omap2/voltage.c
arch/arm/plat-omap/include/plat/gpmc-smsc911x.h
arch/arm/plat-omap/include/plat/uncompress.h
arch/arm/plat-omap/include/plat/usb.h
arch/avr32/include/asm/bitops.h
arch/blackfin/Kconfig
arch/blackfin/include/asm/kgdb.h
arch/blackfin/include/asm/ptrace.h
arch/cris/Kconfig
arch/frv/Kconfig
arch/frv/include/asm/suspend.h [deleted file]
arch/h8300/Kconfig
arch/ia64/Kconfig
arch/ia64/kernel/time.c
arch/m32r/Kconfig
arch/m32r/include/asm/smp.h
arch/m32r/kernel/smp.c
arch/m32r/kernel/smpboot.c
arch/m68k/Kconfig.nommu
arch/m68k/include/asm/bitops_mm.h
arch/m68k/include/asm/bitops_no.h
arch/microblaze/Kconfig
arch/mips/Kconfig
arch/mips/configs/bcm47xx_defconfig
arch/mips/include/asm/suspend.h
arch/mn10300/Kconfig
arch/mn10300/configs/asb2364_defconfig
arch/parisc/Kconfig
arch/powerpc/Kconfig
arch/powerpc/boot/dts/canyonlands.dts
arch/powerpc/boot/dts/katmai.dts
arch/powerpc/boot/dts/kilauea.dts
arch/powerpc/boot/dts/redwood.dts
arch/powerpc/configs/ppc6xx_defconfig
arch/powerpc/configs/pseries_defconfig
arch/powerpc/include/asm/fsl_lbc.h
arch/powerpc/include/asm/ftrace.h
arch/powerpc/include/asm/hvcall.h
arch/powerpc/include/asm/rio.h
arch/powerpc/include/asm/smp.h
arch/powerpc/include/asm/suspend.h [deleted file]
arch/powerpc/include/asm/syscall.h
arch/powerpc/include/asm/thread_info.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/ftrace.c
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/swsusp.c
arch/powerpc/kernel/traps.c
arch/powerpc/oprofile/op_model_power4.c
arch/powerpc/platforms/40x/Kconfig
arch/powerpc/platforms/44x/Kconfig
arch/powerpc/platforms/cell/interrupt.c
arch/powerpc/platforms/cell/interrupt.h
arch/powerpc/platforms/cell/smp.c
arch/powerpc/sysdev/Kconfig
arch/powerpc/sysdev/Makefile
arch/powerpc/sysdev/fsl_lbc.c
arch/powerpc/sysdev/fsl_rio.c
arch/powerpc/sysdev/ppc4xx_msi.c [new file with mode: 0644]
arch/s390/Kconfig
arch/s390/appldata/appldata_mem.c
arch/s390/include/asm/bitops.h
arch/s390/include/asm/delay.h
arch/s390/include/asm/irq.h
arch/s390/include/asm/s390_ext.h [deleted file]
arch/s390/include/asm/suspend.h [deleted file]
arch/s390/include/asm/topology.h
arch/s390/include/asm/uaccess.h
arch/s390/kernel/Makefile
arch/s390/kernel/dis.c
arch/s390/kernel/irq.c
arch/s390/kernel/s390_ext.c [deleted file]
arch/s390/kernel/smp.c
arch/s390/kernel/time.c
arch/s390/kernel/topology.c
arch/s390/kernel/traps.c
arch/s390/kernel/vtime.c
arch/s390/lib/delay.c
arch/s390/mm/fault.c
arch/s390/mm/init.c
arch/s390/oprofile/hwsampler.c
arch/score/Kconfig
arch/sh/Kconfig
arch/sh/configs/apsh4ad0a_defconfig
arch/sh/configs/sdk7786_defconfig
arch/sh/configs/se7206_defconfig
arch/sh/configs/shx3_defconfig
arch/sh/configs/urquell_defconfig
arch/sh/include/asm/kgdb.h
arch/sh/include/asm/ptrace.h
arch/sh/include/asm/suspend.h
arch/sparc/Kconfig
arch/tile/Kconfig
arch/um/Kconfig.x86
arch/unicore32/include/asm/suspend.h
arch/x86/Kconfig
arch/x86/configs/i386_defconfig
arch/x86/configs/x86_64_defconfig
arch/x86/include/asm/kgdb.h
arch/x86/include/asm/ptrace.h
arch/x86/include/asm/suspend_32.h
arch/x86/include/asm/suspend_64.h
arch/x86/include/asm/tsc.h
arch/x86/include/asm/vdso.h
arch/x86/include/asm/vgtod.h
arch/x86/include/asm/vsyscall.h
arch/x86/include/asm/vvar.h [new file with mode: 0644]
arch/x86/include/asm/xen/hypercall.h
arch/x86/kernel/Makefile
arch/x86/kernel/time.c
arch/x86/kernel/tsc.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/kernel/vread_tsc_64.c [new file with mode: 0644]
arch/x86/kernel/vsyscall_64.c
arch/x86/vdso/Makefile
arch/x86/vdso/vclock_gettime.c
arch/x86/vdso/vdso.lds.S
arch/x86/vdso/vextern.h [deleted file]
arch/x86/vdso/vgetcpu.c
arch/x86/vdso/vma.c
arch/x86/vdso/vvar.c [deleted file]
arch/x86/xen/mmu.c
arch/x86/xen/mmu.h
arch/xtensa/Kconfig
block/blk-cgroup.c
block/blk-core.c
block/genhd.c
drivers/bcma/host_pci.c
drivers/block/brd.c
drivers/block/loop.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/mspec.c
drivers/char/ppdev.c
drivers/dma/timb_dma.c
drivers/edac/amd76x_edac.c
drivers/edac/amd8111_edac.c
drivers/edac/amd8131_edac.c
drivers/edac/cpc925_edac.c
drivers/edac/e752x_edac.c
drivers/edac/e7xxx_edac.c
drivers/edac/edac_core.h
drivers/edac/edac_device.c
drivers/edac/edac_mc.c
drivers/edac/edac_module.c
drivers/edac/edac_pci.c
drivers/edac/i5000_edac.c
drivers/edac/i5400_edac.c
drivers/edac/i7300_edac.c
drivers/edac/i7core_edac.c
drivers/edac/i82860_edac.c
drivers/edac/i82875p_edac.c
drivers/edac/i82975x_edac.c
drivers/edac/mpc85xx_edac.h
drivers/edac/mv64x60_edac.h
drivers/edac/ppc4xx_edac.c
drivers/edac/r82600_edac.c
drivers/gpio/Kconfig
drivers/gpio/Makefile
drivers/gpio/basic_mmio_gpio.c
drivers/gpio/gpiolib.c
drivers/gpio/janz-ttl.c
drivers/gpio/pca953x.c
drivers/gpio/rdc321x-gpio.c
drivers/gpio/timbgpio.c
drivers/gpio/tps65910-gpio.c [new file with mode: 0644]
drivers/hwmon/coretemp.c
drivers/hwmon/pmbus_core.c
drivers/i2c/busses/i2c-ocores.c
drivers/i2c/busses/i2c-xiic.c
drivers/infiniband/Kconfig
drivers/infiniband/core/Makefile
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/device.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/netlink.c [new file with mode: 0644]
drivers/infiniband/core/ucma.c
drivers/infiniband/core/user_mad.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/hw/cxgb3/iwch_provider.h
drivers/infiniband/hw/cxgb3/iwch_qp.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/nes/nes.c
drivers/infiniband/hw/qib/Kconfig
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/input/keyboard/Kconfig
drivers/input/keyboard/Makefile
drivers/input/keyboard/pmic8xxx-keypad.c [new file with mode: 0644]
drivers/input/misc/Kconfig
drivers/input/misc/Makefile
drivers/input/misc/pmic8xxx-pwrkey.c [new file with mode: 0644]
drivers/input/misc/twl4030-vibra.c
drivers/isdn/hardware/eicon/divasfunc.c
drivers/leds/Kconfig
drivers/leds/Makefile
drivers/leds/leds-88pm860x.c
drivers/leds/leds-asic3.c [new file with mode: 0644]
drivers/leds/leds-mc13783.c
drivers/media/dvb/dm1105/dm1105.c
drivers/media/dvb/dvb-usb/lmedm04.c
drivers/media/dvb/frontends/stb0899_algo.c
drivers/media/dvb/frontends/tda8261.c
drivers/media/radio/radio-maxiradio.c
drivers/media/radio/radio-timb.c
drivers/media/radio/radio-wl1273.c
drivers/media/radio/wl128x/fmdrv_v4l2.c
drivers/media/rc/Kconfig
drivers/media/rc/Makefile
drivers/media/rc/fintek-cir.c [new file with mode: 0644]
drivers/media/rc/fintek-cir.h [new file with mode: 0644]
drivers/media/rc/keymaps/rc-lme2510.c
drivers/media/video/Kconfig
drivers/media/video/Makefile
drivers/media/video/cpia2/cpia2_v4l.c
drivers/media/video/cx231xx/cx231xx-avcore.c
drivers/media/video/gspca/kinect.c
drivers/media/video/m5mols/Kconfig [new file with mode: 0644]
drivers/media/video/m5mols/Makefile [new file with mode: 0644]
drivers/media/video/m5mols/m5mols.h [new file with mode: 0644]
drivers/media/video/m5mols/m5mols_capture.c [new file with mode: 0644]
drivers/media/video/m5mols/m5mols_controls.c [new file with mode: 0644]
drivers/media/video/m5mols/m5mols_core.c [new file with mode: 0644]
drivers/media/video/m5mols/m5mols_reg.h [new file with mode: 0644]
drivers/media/video/timblogiw.c
drivers/media/video/uvc/Makefile
drivers/media/video/uvc/uvc_driver.c
drivers/media/video/uvc/uvc_entity.c [new file with mode: 0644]
drivers/media/video/uvc/uvcvideo.h
drivers/mfd/88pm860x-core.c
drivers/mfd/Kconfig
drivers/mfd/Makefile
drivers/mfd/ab3100-core.c
drivers/mfd/ab3550-core.c
drivers/mfd/ab8500-core.c
drivers/mfd/ab8500-gpadc.c
drivers/mfd/asic3.c
drivers/mfd/davinci_voicecodec.c
drivers/mfd/htc-pasic3.c
drivers/mfd/janz-cmodio.c
drivers/mfd/max8925-core.c
drivers/mfd/mc13xxx-core.c
drivers/mfd/mfd-core.c
drivers/mfd/omap-usb-host.c
drivers/mfd/pm8921-core.c [new file with mode: 0644]
drivers/mfd/pm8xxx-irq.c [new file with mode: 0644]
drivers/mfd/rdc321x-southbridge.c
drivers/mfd/t7l66xb.c
drivers/mfd/tc6387xb.c
drivers/mfd/tc6393xb.c
drivers/mfd/timberdale.c
drivers/mfd/tps6105x.c
drivers/mfd/tps6586x.c
drivers/mfd/tps65910-irq.c [new file with mode: 0644]
drivers/mfd/tps65910.c [new file with mode: 0644]
drivers/mfd/tps65911-comparator.c [new file with mode: 0644]
drivers/mfd/twl-core.c
drivers/mfd/twl4030-codec.c
drivers/mfd/twl4030-power.c
drivers/mfd/twl6030-irq.c
drivers/mfd/wl1273-core.c
drivers/mfd/wm831x-core.c
drivers/mfd/wm831x-irq.c
drivers/mfd/wm8400-core.c
drivers/misc/kgdbts.c
drivers/mmc/host/tmio_mmc.c
drivers/mtd/nand/tmio_nand.c
drivers/net/bonding/bond_main.c
drivers/net/can/janz-ican3.c
drivers/net/davinci_emac.c
drivers/net/hamradio/baycom_epp.c
drivers/net/hamradio/baycom_par.c
drivers/net/hamradio/baycom_ser_fdx.c
drivers/net/hamradio/baycom_ser_hdx.c
drivers/net/hamradio/hdlcdrv.c
drivers/net/ks8842.c
drivers/net/wan/pc300_drv.c
drivers/parport/parport_ip32.c
drivers/power/Kconfig
drivers/power/Makefile
drivers/power/bq27x00_battery.c
drivers/power/ds2760_battery.c
drivers/power/ds2780_battery.c [new file with mode: 0644]
drivers/power/gpio-charger.c
drivers/power/isp1704_charger.c
drivers/power/max8903_charger.c [new file with mode: 0644]
drivers/power/max8925_power.c
drivers/power/test_power.c
drivers/power/z2_battery.c
drivers/regulator/88pm8607.c
drivers/regulator/Kconfig
drivers/regulator/Makefile
drivers/regulator/ab3100.c
drivers/regulator/core.c
drivers/regulator/db8500-prcmu.c
drivers/regulator/max8925-regulator.c
drivers/regulator/max8997.c
drivers/regulator/max8998.c
drivers/regulator/mc13783-regulator.c
drivers/regulator/mc13892-regulator.c
drivers/regulator/mc13xxx-regulator-core.c
drivers/regulator/tps6105x-regulator.c
drivers/regulator/tps65023-regulator.c
drivers/regulator/tps6507x-regulator.c
drivers/regulator/tps65910-regulator.c [new file with mode: 0644]
drivers/regulator/twl-regulator.c
drivers/regulator/wm831x-dcdc.c
drivers/regulator/wm8400-regulator.c
drivers/rtc/Kconfig
drivers/rtc/Makefile
drivers/rtc/rtc-88pm860x.c [new file with mode: 0644]
drivers/rtc/rtc-em3027.c [new file with mode: 0644]
drivers/rtc/rtc-m41t93.c [new file with mode: 0644]
drivers/rtc/rtc-mrst.c
drivers/rtc/rtc-mxc.c
drivers/rtc/rtc-pcf50633.c
drivers/rtc/rtc-rv3029c2.c [new file with mode: 0644]
drivers/rtc/rtc-spear.c [new file with mode: 0644]
drivers/rtc/rtc-vt8500.c [new file with mode: 0644]
drivers/s390/block/dasd_diag.c
drivers/s390/char/sclp.c
drivers/s390/kvm/kvm_virtio.c
drivers/scsi/aacraid/linit.c
drivers/scsi/in2000.c
drivers/scsi/pmcraid.c
drivers/scsi/pmcraid.h
drivers/scsi/wd33c93.c
drivers/spi/Kconfig
drivers/spi/amba-pl022.c
drivers/spi/dw_spi.c
drivers/spi/dw_spi.h
drivers/spi/spi.c
drivers/spi/spi_nuc900.c
drivers/spi/spi_s3c24xx.c
drivers/spi/spi_sh.c
drivers/spi/spi_tegra.c
drivers/spi/xilinx_spi.c
drivers/staging/generic_serial/rio/rioinit.c
drivers/tty/cyclades.c
drivers/tty/nozomi.c
drivers/tty/serial/m32r_sio.c
drivers/usb/otg/twl6030-usb.c
drivers/video/backlight/88pm860x_bl.c
drivers/video/mb862xx/mb862xxfbdrv.c
drivers/video/omap/Makefile
drivers/video/omap/lcd_omap2evm.c [deleted file]
drivers/video/tmiofb.c
drivers/video/via/via-gpio.c
drivers/w1/masters/Kconfig
drivers/w1/masters/ds1wm.c
drivers/w1/slaves/Kconfig
drivers/w1/slaves/Makefile
drivers/w1/slaves/w1_ds2408.c [new file with mode: 0644]
drivers/w1/slaves/w1_ds2780.c [new file with mode: 0644]
drivers/w1/slaves/w1_ds2780.h [new file with mode: 0644]
drivers/w1/w1.c
drivers/w1/w1.h
drivers/w1/w1_family.h
drivers/w1/w1_io.c
drivers/w1/w1_netlink.c
drivers/watchdog/rdc321x_wdt.c
drivers/xen/Makefile
drivers/xen/tmem.c [new file with mode: 0644]
fs/9p/vfs_inode.c
fs/Kconfig
fs/affs/namei.c
fs/afs/dir.c
fs/autofs4/root.c
fs/bfs/dir.c
fs/btrfs/Makefile
fs/btrfs/acl.c
fs/btrfs/btrfs_inode.h
fs/btrfs/compression.c
fs/btrfs/compression.h
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c [new file with mode: 0644]
fs/btrfs/delayed-inode.h [new file with mode: 0644]
fs/btrfs/delayed-ref.c
fs/btrfs/delayed-ref.h
fs/btrfs/dir-item.c
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/export.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/extent_map.c
fs/btrfs/extent_map.h
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/free-space-cache.h
fs/btrfs/inode-item.c
fs/btrfs/inode-map.c
fs/btrfs/inode-map.h [new file with mode: 0644]
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ioctl.h
fs/btrfs/locking.c
fs/btrfs/locking.h
fs/btrfs/ref-cache.c
fs/btrfs/ref-cache.h
fs/btrfs/relocation.c
fs/btrfs/root-tree.c
fs/btrfs/scrub.c [new file with mode: 0644]
fs/btrfs/super.c
fs/btrfs/sysfs.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/tree-defrag.c
fs/btrfs/tree-log.c
fs/btrfs/tree-log.h
fs/btrfs/version.sh [deleted file]
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/btrfs/xattr.c
fs/buffer.c
fs/cifs/Kconfig
fs/cifs/README
fs/cifs/cache.c
fs/cifs/cifs_debug.c
fs/cifs/cifs_dfs_ref.c
fs/cifs/cifs_fs_sb.h
fs/cifs/cifs_spnego.c
fs/cifs/cifs_spnego.h
fs/cifs/cifsacl.c
fs/cifs/cifsencrypt.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/fscache.c
fs/cifs/fscache.h
fs/cifs/inode.c
fs/cifs/ioctl.c
fs/cifs/link.c
fs/cifs/misc.c
fs/cifs/netmisc.c
fs/cifs/readdir.c
fs/cifs/sess.c
fs/cifs/transport.c
fs/cifs/xattr.c
fs/coda/dir.c
fs/configfs/dir.c
fs/dlm/main.c
fs/ecryptfs/inode.c
fs/ecryptfs/keystore.c
fs/exec.c
fs/ext3/super.c
fs/ext4/Makefile
fs/ext4/balloc.c
fs/ext4/ext4.h
fs/ext4/ext4_jbd2.c
fs/ext4/ext4_jbd2.h
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/fsync.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/mballoc.h
fs/ext4/migrate.c
fs/ext4/mmp.c [new file with mode: 0644]
fs/ext4/move_extent.c
fs/ext4/namei.c
fs/ext4/page-io.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/fat/namei_msdos.c
fs/fat/namei_vfat.c
fs/fuse/dir.c
fs/gfs2/main.c
fs/hfs/dir.c
fs/hfsplus/dir.c
fs/hostfs/hostfs_kern.c
fs/hpfs/namei.c
fs/hugetlbfs/inode.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jbd2/transaction.c
fs/jffs2/dir.c
fs/jfs/namei.c
fs/logfs/dir.c
fs/minix/namei.c
fs/mpage.c
fs/namei.c
fs/namespace.c
fs/ncpfs/dir.c
fs/ncpfs/mmap.c
fs/nilfs2/namei.c
fs/ocfs2/Makefile
fs/ocfs2/alloc.c
fs/ocfs2/alloc.h
fs/ocfs2/cluster/sys.c
fs/ocfs2/dlm/dlmcommon.h
fs/ocfs2/dlm/dlmdebug.c
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/dlmfs/dlmfs.c
fs/ocfs2/file.c
fs/ocfs2/ioctl.c
fs/ocfs2/move_extents.c [new file with mode: 0644]
fs/ocfs2/move_extents.h [new file with mode: 0644]
fs/ocfs2/ocfs2_ioctl.h
fs/ocfs2/ocfs2_trace.h
fs/ocfs2/refcounttree.c
fs/ocfs2/refcounttree.h
fs/ocfs2/super.c
fs/omfs/dir.c
fs/partitions/check.c
fs/partitions/efi.c
fs/proc/array.c
fs/proc/base.c
fs/proc/stat.c
fs/proc/task_mmu.c
fs/proc/vmcore.c
fs/reiserfs/namei.c
fs/reiserfs/xattr.c
fs/squashfs/block.c
fs/squashfs/cache.c
fs/squashfs/decompressor.c
fs/squashfs/decompressor.h
fs/squashfs/dir.c
fs/squashfs/export.c
fs/squashfs/file.c
fs/squashfs/fragment.c
fs/squashfs/id.c
fs/squashfs/inode.c
fs/squashfs/namei.c
fs/squashfs/squashfs.h
fs/squashfs/squashfs_fs.h
fs/squashfs/squashfs_fs_i.h
fs/squashfs/squashfs_fs_sb.h
fs/squashfs/super.c
fs/squashfs/symlink.c
fs/squashfs/xattr.c
fs/squashfs/xattr.h
fs/squashfs/xattr_id.c
fs/squashfs/xz_wrapper.c
fs/squashfs/zlib_wrapper.c
fs/super.c
fs/sysv/namei.c
fs/ubifs/dir.c
fs/udf/namei.c
fs/ufs/balloc.c
fs/ufs/namei.c
fs/ufs/truncate.c
fs/xfs/linux-2.6/xfs_discard.c
fs/xfs/linux-2.6/xfs_discard.h
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/xfs_ag.h
fs/xfs/xfs_alloc.c
fs/xfs/xfs_alloc.h
fs/xfs/xfs_alloc_btree.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_bmap.h
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_log_cil.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_trans.c
include/asm-generic/bitops/find.h
include/asm-generic/bitops/le.h
include/asm-generic/bug.h
include/asm-generic/ptrace.h [new file with mode: 0644]
include/linux/basic_mmio_gpio.h
include/linux/bitops.h
include/linux/buffer_head.h
include/linux/cgroup.h
include/linux/cgroup_subsys.h
include/linux/cleancache.h [new file with mode: 0644]
include/linux/crash_dump.h
include/linux/cred.h
include/linux/flex_array.h
include/linux/fs.h
include/linux/hugetlb.h
include/linux/hugetlb_inline.h
include/linux/i2c/twl.h
include/linux/if_ether.h
include/linux/init_task.h
include/linux/input/pmic8xxx-keypad.h [new file with mode: 0644]
include/linux/input/pmic8xxx-pwrkey.h [new file with mode: 0644]
include/linux/ipmi_smi.h
include/linux/jbd2.h
include/linux/key.h
include/linux/memcontrol.h
include/linux/mfd/88pm860x.h
include/linux/mfd/abx500.h
include/linux/mfd/asic3.h
include/linux/mfd/core.h
include/linux/mfd/max8997-private.h
include/linux/mfd/pm8xxx/core.h [new file with mode: 0644]
include/linux/mfd/pm8xxx/irq.h [new file with mode: 0644]
include/linux/mfd/pm8xxx/pm8921.h [new file with mode: 0644]
include/linux/mfd/tps65910.h [new file with mode: 0644]
include/linux/mfd/twl4030-codec.h
include/linux/mfd/wm831x/core.h
include/linux/mfd/wm831x/pdata.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/mmzone.h
include/linux/net.h
include/linux/netfilter.h
include/linux/netfilter/ipset/ip_set_ahash.h
include/linux/netfilter/ipset/ip_set_timeout.h
include/linux/netlink.h
include/linux/nsproxy.h
include/linux/pid.h
include/linux/power/isp1704_charger.h [new file with mode: 0644]
include/linux/power/max8903_charger.h [new file with mode: 0644]
include/linux/proc_fs.h
include/linux/ratelimit.h
include/linux/regulator/machine.h
include/linux/rtc.h
include/linux/sched.h
include/linux/seqlock.h
include/linux/smp.h
include/linux/spi/spi.h
include/linux/swap.h
include/linux/vm_event_item.h [new file with mode: 0644]
include/linux/vmstat.h
include/media/m5mols.h [new file with mode: 0644]
include/media/videobuf-dvb.h
include/net/ip_vs.h
include/net/net_namespace.h
include/net/net_ratelimit.h [new file with mode: 0644]
include/rdma/Kbuild
include/rdma/ib_user_cm.h
include/rdma/rdma_cm.h
include/rdma/rdma_netlink.h [new file with mode: 0644]
include/trace/events/gpio.h [new file with mode: 0644]
include/xen/interface/xen.h
init/Kconfig
ipc/shm.c
kernel/Makefile
kernel/cgroup.c
kernel/cgroup_freezer.c
kernel/cpuset.c
kernel/cred.c
kernel/fork.c
kernel/irq/proc.c
kernel/ns_cgroup.c [deleted file]
kernel/nsproxy.c
kernel/pm_qos_params.c
kernel/power/hibernate.c
kernel/profile.c
kernel/sched.c
lib/Kconfig
lib/Makefile
lib/find_last_bit.c
lib/find_next_bit.c
lib/flex_array.c
mm/Kconfig
mm/Makefile
mm/cleancache.c [new file with mode: 0644]
mm/filemap.c
mm/fremap.c
mm/hugetlb.c
mm/memcontrol.c
mm/memory.c
mm/mlock.c
mm/mmap.c
mm/page_alloc.c
mm/page_cgroup.c
mm/shmem.c
mm/truncate.c
mm/vmalloc.c
mm/vmscan.c
net/8021q/vlan.c
net/9p/trans_rdma.c
net/atm/atm_sysfs.c
net/atm/lec.c
net/atm/mpc.c
net/bridge/netfilter/ebtables.c
net/can/proc.c
net/core/ethtool.c
net/core/filter.c
net/core/sysctl_net_core.c
net/core/utils.c
net/ipv4/inet_connection_sock.c
net/ipv4/inetpeer.c
net/iucv/iucv.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipvs/ip_vs_ftp.c
net/rds/ib.c
net/rds/ib_cm.c
net/rds/iw.c
net/rds/iw_cm.c
net/rds/rdma_transport.c
net/sunrpc/xprtrdma/svc_rdma_transport.c
net/sunrpc/xprtrdma/verbs.c
scripts/selinux/README
security/apparmor/match.c
security/apparmor/policy_unpack.c
security/device_cgroup.c
security/keys/encrypted.c
security/keys/process_keys.c
security/keys/request_key.c
security/keys/request_key_auth.c
security/keys/trusted.c
security/selinux/avc.c
security/selinux/ss/services.c
sound/core/control.c
sound/core/init.c
sound/core/oss/linear.c
sound/core/pcm_lib.c
sound/core/pcm_native.c
sound/core/seq/seq_queue.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_eld.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_analog.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/soc/atmel/sam9g20_wm8731.c
sound/soc/codecs/cq93vc.c
sound/soc/codecs/twl4030.c
sound/soc/codecs/wl1273.c
sound/soc/codecs/wm1250-ev1.c
sound/soc/codecs/wm8400.c
sound/soc/codecs/wm8731.c
sound/soc/codecs/wm8915.c
sound/soc/davinci/davinci-vcif.c
sound/soc/omap/Kconfig
sound/soc/omap/Makefile
sound/soc/omap/omap2evm.c [deleted file]
sound/soc/pxa/raumfeld.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/usb/card.c
sound/usb/mixer.c
sound/usb/mixer.h
sound/usb/mixer_quirks.c
sound/usb/quirks-table.h
sound/usb/quirks.c
sound/usb/usbaudio.h

diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 1b777b9604921396c47497e4bf919adb3d51c0f4..1f89424c36a6036a9daecd3e0b2d0aca93bae3e9 100644 (file)
@@ -192,10 +192,6 @@ kernel-docs.txt
        - listing of various WWW + books that document kernel internals.
 kernel-parameters.txt
        - summary listing of command line / boot prompt args for the kernel.
-keys-request-key.txt
-       - description of the kernel key request service.
-keys.txt
-       - description of the kernel key retention service.
 kobject.txt
        - info of the kobject infrastructure of the Linux kernel.
 kprobes.txt
@@ -294,6 +290,8 @@ scheduler/
        - directory with info on the scheduler.
 scsi/
        - directory with info on Linux scsi support.
+security/
+       - directory that contains security-related info
 serial/
        - directory with info on the low level serial API.
 serial-console.txt
similarity index 65%
rename from Documentation/ABI/obsolete/o2cb
rename to Documentation/ABI/removed/o2cb
index 9c49d8e6c0ccbe5b49f222fd3f1eb98ce2b1b08e..7f5daa465093b898f123b67fb4bc8ab453ce9bc0 100644 (file)
@@ -1,11 +1,10 @@
 What:          /sys/o2cb symlink
-Date:          Dec 2005
-KernelVersion: 2.6.16
+Date:          May 2011
+KernelVersion: 2.6.40
 Contact:       ocfs2-devel@oss.oracle.com
-Description:   This is a symlink: /sys/o2cb to /sys/fs/o2cb. The symlink will
-               be removed when new versions of ocfs2-tools which know to look
+Description:   This is a symlink: /sys/o2cb to /sys/fs/o2cb. The symlink is
+               removed when new versions of ocfs2-tools which know to look
                in /sys/fs/o2cb are sufficiently prevalent. Don't code new
                software to look here, it should try /sys/fs/o2cb instead.
-               See Documentation/ABI/stable/o2cb for more information on usage.
 Users:         ocfs2-tools. It's sufficient to mail proposed changes to
                ocfs2-devel@oss.oracle.com.
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-cleancache b/Documentation/ABI/testing/sysfs-kernel-mm-cleancache
new file mode 100644 (file)
index 0000000..662ae64
--- /dev/null
@@ -0,0 +1,11 @@
+What:          /sys/kernel/mm/cleancache/
+Date:          April 2011
+Contact:       Dan Magenheimer <dan.magenheimer@oracle.com>
+Description:
+               /sys/kernel/mm/cleancache/ contains a number of files which
+               record a count of various cleancache operations
+               (sum across all filesystems):
+                       succ_gets
+                       failed_gets
+                       puts
+                       flushes
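
[Editor's illustration, not part of this commit: a minimal userspace reader for the four counters listed in the ABI entry above. The sysfs path and file names come from the entry itself; everything else is assumed.]

#include <stdio.h>

/*
 * Sketch only: dump the cleancache counters documented above.  Assumes
 * each file contains a single number; unreadable or missing files
 * (e.g. cleancache disabled) are silently skipped.
 */
int main(void)
{
	static const char *names[] = { "succ_gets", "failed_gets", "puts", "flushes" };
	char path[128];
	unsigned long long val;
	int i;

	for (i = 0; i < 4; i++) {
		FILE *f;

		snprintf(path, sizeof(path), "/sys/kernel/mm/cleancache/%s", names[i]);
		f = fopen(path, "r");
		if (!f)
			continue;	/* counter not present */
		if (fscanf(f, "%llu", &val) == 1)
			printf("%-12s %llu\n", names[i], val);
		fclose(f);
	}
	return 0;
}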
diff --git a/Documentation/DocBook/dvb/dvbproperty.xml b/Documentation/DocBook/dvb/dvbproperty.xml
index 52d5e3c7cf6c0dd96646b43c05aaa8d2dd97561b..b5365f61d69b05e8ee4b33e26857927d5fe6b6aa 100644 (file)
@@ -141,13 +141,15 @@ struct dtv_properties {
  </row></tbody></tgroup></informaltable>
 </section>
 
+<section>
+       <title>Property types</title>
 <para>
 On <link linkend="FE_GET_PROPERTY">FE_GET_PROPERTY</link>/<link linkend="FE_SET_PROPERTY">FE_SET_PROPERTY</link>,
 the actual action is determined by the dtv_property cmd/data pairs. With one single ioctl, is possible to
 get/set up to 64 properties. The actual meaning of each property is described on the next sections.
 </para>
 
-<para>The Available frontend property types are:</para>
+<para>The available frontend property types are:</para>
 <programlisting>
 #define DTV_UNDEFINED          0
 #define DTV_TUNE               1
@@ -193,6 +195,7 @@ get/set up to 64 properties. The actual meaning of each property is described on
 #define DTV_ISDBT_LAYER_ENABLED        41
 #define DTV_ISDBS_TS_ID                42
 </programlisting>
+</section>
 
 <section id="fe_property_common">
        <title>Parameters that are common to all Digital TV standards</title>
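
[Editor's illustration, not part of the patch: the cmd/data pair mechanism described in the hunk above is driven from userspace roughly as below. The adapter path and frequency are made-up values, and a real tuner needs more properties and error handling.]

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dvb/frontend.h>

/*
 * Minimal sketch of the DTV property API: several cmd/data pairs are
 * handed to the frontend in a single FE_SET_PROPERTY ioctl.
 */
int main(void)
{
	struct dtv_property props[2] = {
		{ .cmd = DTV_FREQUENCY, .u.data = 474000000 },	/* 474 MHz, example value */
		{ .cmd = DTV_TUNE },				/* commit the pending parameters */
	};
	struct dtv_properties dtv = { .num = 2, .props = props };
	int fd = open("/dev/dvb/adapter0/frontend0", O_RDWR);	/* hypothetical device node */

	if (fd < 0 || ioctl(fd, FE_SET_PROPERTY, &dtv) < 0) {
		perror("FE_SET_PROPERTY");
		return 1;
	}
	close(fd);
	return 0;
}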
diff --git a/Documentation/DocBook/media-entities.tmpl b/Documentation/DocBook/media-entities.tmpl
index c8abb23ef1e7b063c8babd317e985fb0814db68c..e5fe09430fd905c19fa04f0d1d401f5f76bd9f2c 100644 (file)
 <!ENTITY sub-yuyv SYSTEM "v4l/pixfmt-yuyv.xml">
 <!ENTITY sub-yvyu SYSTEM "v4l/pixfmt-yvyu.xml">
 <!ENTITY sub-srggb10 SYSTEM "v4l/pixfmt-srggb10.xml">
+<!ENTITY sub-srggb12 SYSTEM "v4l/pixfmt-srggb12.xml">
 <!ENTITY sub-srggb8 SYSTEM "v4l/pixfmt-srggb8.xml">
 <!ENTITY sub-y10 SYSTEM "v4l/pixfmt-y10.xml">
 <!ENTITY sub-y12 SYSTEM "v4l/pixfmt-y12.xml">
 <!ENTITY sub-media-indices SYSTEM "media-indices.tmpl">
 
 <!ENTITY sub-media-controller SYSTEM "v4l/media-controller.xml">
-<!ENTITY sub-media-open SYSTEM "v4l/media-func-open.xml">
-<!ENTITY sub-media-close SYSTEM "v4l/media-func-close.xml">
-<!ENTITY sub-media-ioctl SYSTEM "v4l/media-func-ioctl.xml">
+<!ENTITY sub-media-func-open SYSTEM "v4l/media-func-open.xml">
+<!ENTITY sub-media-func-close SYSTEM "v4l/media-func-close.xml">
+<!ENTITY sub-media-func-ioctl SYSTEM "v4l/media-func-ioctl.xml">
 <!ENTITY sub-media-ioc-device-info SYSTEM "v4l/media-ioc-device-info.xml">
 <!ENTITY sub-media-ioc-enum-entities SYSTEM "v4l/media-ioc-enum-entities.xml">
 <!ENTITY sub-media-ioc-enum-links SYSTEM "v4l/media-ioc-enum-links.xml">
diff --git a/Documentation/DocBook/v4l/media-controller.xml b/Documentation/DocBook/v4l/media-controller.xml
index 2dc25e1d4089f59aa1eedb061a2354ab06818d7b..873ac3a621f013a36148bcdf7f878e6a0bf419c2 100644 (file)
@@ -78,9 +78,9 @@
 <appendix id="media-user-func">
   <title>Function Reference</title>
   <!-- Keep this alphabetically sorted. -->
-  &sub-media-open;
-  &sub-media-close;
-  &sub-media-ioctl;
+  &sub-media-func-open;
+  &sub-media-func-close;
+  &sub-media-func-ioctl;
   <!-- All ioctls go here. -->
   &sub-media-ioc-device-info;
   &sub-media-ioc-enum-entities;
diff --git a/Documentation/DocBook/v4l/pixfmt.xml b/Documentation/DocBook/v4l/pixfmt.xml
index dbfe3b08435f9a588c2de93a4e4fd4af202c7d82..deb660207f94700e04c1aa37106c8a567582b44d 100644 (file)
@@ -673,6 +673,7 @@ access the palette, this must be done with ioctls of the Linux framebuffer API.<
     &sub-srggb8;
     &sub-sbggr16;
     &sub-srggb10;
+    &sub-srggb12;
   </section>
 
   <section id="yuv-formats">
diff --git a/Documentation/DocBook/v4l/subdev-formats.xml b/Documentation/DocBook/v4l/subdev-formats.xml
index a26b10c07857d215077b05d957633edcf1c034b6..8d3409d2c6320978186266c00518fc9e30dd888e 100644 (file)
        <constant>_JPEG</constant> prefix the format code is made of
        the following information.
        <itemizedlist>
-         <listitem>The number of bus samples per entropy encoded byte.</listitem>
-         <listitem>The bus width.</listitem>
+         <listitem><para>The number of bus samples per entropy encoded byte.</para></listitem>
+         <listitem><para>The bus width.</para></listitem>
        </itemizedlist>
+      </para>
 
-       <para>For instance, for a JPEG baseline process and an 8-bit bus width
-         the format will be named <constant>V4L2_MBUS_FMT_JPEG_1X8</constant>.
-       </para>
+      <para>For instance, for a JPEG baseline process and an 8-bit bus width
+        the format will be named <constant>V4L2_MBUS_FMT_JPEG_1X8</constant>.
       </para>
 
       <para>The following table lists existing JPEG compressed formats.</para>
diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c
index e9c77788a39d8f2c5c807b59295be6c3f1b3fea6..f6318f6d7bafcdbcceee0f6eb89edc4af26b287b 100644 (file)
@@ -177,6 +177,8 @@ static int get_family_id(int sd)
        rc = send_cmd(sd, GENL_ID_CTRL, getpid(), CTRL_CMD_GETFAMILY,
                        CTRL_ATTR_FAMILY_NAME, (void *)name,
                        strlen(TASKSTATS_GENL_NAME)+1);
+       if (rc < 0)
+               return 0;       /* sendto() failure? */
 
        rep_len = recv(sd, &ans, sizeof(ans), 0);
        if (ans.n.nlmsg_type == NLMSG_ERROR ||
@@ -191,30 +193,37 @@ static int get_family_id(int sd)
        return id;
 }
 
+#define average_ms(t, c) (t / 1000000ULL / (c ? c : 1))
+
 static void print_delayacct(struct taskstats *t)
 {
-       printf("\n\nCPU   %15s%15s%15s%15s\n"
-              "      %15llu%15llu%15llu%15llu\n"
-              "IO    %15s%15s\n"
-              "      %15llu%15llu\n"
-              "SWAP  %15s%15s\n"
-              "      %15llu%15llu\n"
-              "RECLAIM  %12s%15s\n"
-              "      %15llu%15llu\n",
-              "count", "real total", "virtual total", "delay total",
+       printf("\n\nCPU   %15s%15s%15s%15s%15s\n"
+              "      %15llu%15llu%15llu%15llu%15.3fms\n"
+              "IO    %15s%15s%15s\n"
+              "      %15llu%15llu%15llums\n"
+              "SWAP  %15s%15s%15s\n"
+              "      %15llu%15llu%15llums\n"
+              "RECLAIM  %12s%15s%15s\n"
+              "      %15llu%15llu%15llums\n",
+              "count", "real total", "virtual total",
+              "delay total", "delay average",
               (unsigned long long)t->cpu_count,
               (unsigned long long)t->cpu_run_real_total,
               (unsigned long long)t->cpu_run_virtual_total,
               (unsigned long long)t->cpu_delay_total,
-              "count", "delay total",
+              average_ms((double)t->cpu_delay_total, t->cpu_count),
+              "count", "delay total", "delay average",
               (unsigned long long)t->blkio_count,
               (unsigned long long)t->blkio_delay_total,
-              "count", "delay total",
+              average_ms(t->blkio_delay_total, t->blkio_count),
+              "count", "delay total", "delay average",
               (unsigned long long)t->swapin_count,
               (unsigned long long)t->swapin_delay_total,
-              "count", "delay total",
+              average_ms(t->swapin_delay_total, t->swapin_count),
+              "count", "delay total", "delay average",
               (unsigned long long)t->freepages_count,
-              (unsigned long long)t->freepages_delay_total);
+              (unsigned long long)t->freepages_delay_total,
+              average_ms(t->freepages_delay_total, t->freepages_count));
 }
 
 static void task_context_switch_counts(struct taskstats *t)
@@ -433,8 +442,6 @@ int main(int argc, char *argv[])
        }
 
        do {
-               int i;
-
                rep_len = recv(nl_sd, &msg, sizeof(msg), 0);
                PRINTF("received %d bytes\n", rep_len);
 
@@ -459,7 +466,6 @@ int main(int argc, char *argv[])
 
                na = (struct nlattr *) GENLMSG_DATA(&msg);
                len = 0;
-               i = 0;
                while (len < rep_len) {
                        len += NLA_ALIGN(na->nla_len);
                        switch (na->nla_type) {
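
[Editor's illustration, not part of the patch: the average_ms() macro added above just converts a taskstats delay total, which is in nanoseconds, into an average in milliseconds per operation, clamping the count to 1 to avoid dividing by zero. A standalone sketch with made-up numbers:]

#include <stdio.h>

/* Same arithmetic as the average_ms() helper in the getdelays.c hunk above. */
#define average_ms(t, c) (t / 1000000ULL / (c ? c : 1))

int main(void)
{
	unsigned long long blkio_delay_total = 3000000000ULL;	/* 3 s of total delay, hypothetical */
	unsigned long long blkio_count = 4;			/* 4 block I/O operations, hypothetical */

	/* 3000000000 / 1000000 / 4 = 750 -> prints "750ms" */
	printf("average block I/O delay: %llums\n",
	       average_ms(blkio_delay_total, blkio_count));
	return 0;
}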
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index ac4d47187122f93fc860db817a9d8970bbc80002..3bd585b449270afe15e8a439cef612a0a696ddef 100644 (file)
@@ -12,7 +12,7 @@ Also, it should be made opaque such that any kind of cast to a normal
 C integer type will fail.  Something like the following should
 suffice:
 
-       typedef struct { volatile int counter; } atomic_t;
+       typedef struct { int counter; } atomic_t;
 
 Historically, counter has been declared volatile.  This is now discouraged.
 See Documentation/volatile-considered-harmful.txt for the complete rationale.
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index aedf1bd02fddd0a4427902e496949ba337423e3d..0ed99f08f1f39256bd71067059627ddc9a0203ac 100644 (file)
@@ -236,7 +236,8 @@ containing the following files describing that cgroup:
  - cgroup.procs: list of tgids in the cgroup.  This list is not
    guaranteed to be sorted or free of duplicate tgids, and userspace
    should sort/uniquify the list if this property is required.
-   This is a read-only file, for now.
+   Writing a thread group id into this file moves all threads in that
+   group into this cgroup.
  - notify_on_release flag: run the release agent on exit?
  - release_agent: the path to use for release notifications (this file
    exists in the top cgroup only)
@@ -430,6 +431,12 @@ You can attach the current shell task by echoing 0:
 
 # echo 0 > tasks
 
+You can use the cgroup.procs file instead of the tasks file to move all
+threads in a threadgroup at once. Echoing the pid of any task in a
+threadgroup to cgroup.procs causes all tasks in that threadgroup to be
+be attached to the cgroup. Writing 0 to cgroup.procs moves all tasks
+in the writing task's threadgroup.
+
 Note: Since every task is always a member of exactly one cgroup in each
 mounted hierarchy, to remove a task from its current cgroup you must
 move it into a new cgroup (possibly the root cgroup) by writing to the
@@ -575,7 +582,7 @@ rmdir() will fail with it. From this behavior, pre_destroy() can be
 called multiple times against a cgroup.
 
 int can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-              struct task_struct *task, bool threadgroup)
+              struct task_struct *task)
 (cgroup_mutex held by caller)
 
 Called prior to moving a task into a cgroup; if the subsystem
@@ -584,9 +591,14 @@ task is passed, then a successful result indicates that *any*
 unspecified task can be moved into the cgroup. Note that this isn't
 called on a fork. If this method returns 0 (success) then this should
 remain valid while the caller holds cgroup_mutex and it is ensured that either
-attach() or cancel_attach() will be called in future. If threadgroup is
-true, then a successful result indicates that all threads in the given
-thread's threadgroup can be moved together.
+attach() or cancel_attach() will be called in future.
+
+int can_attach_task(struct cgroup *cgrp, struct task_struct *tsk);
+(cgroup_mutex held by caller)
+
+As can_attach, but for operations that must be run once per task to be
+attached (possibly many when using cgroup_attach_proc). Called after
+can_attach.
 
 void cancel_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
               struct task_struct *task, bool threadgroup)
@@ -598,15 +610,24 @@ function, so that the subsystem can implement a rollback. If not, not necessary.
 This will be called only about subsystems whose can_attach() operation have
 succeeded.
 
+void pre_attach(struct cgroup *cgrp);
+(cgroup_mutex held by caller)
+
+For any non-per-thread attachment work that needs to happen before
+attach_task. Needed by cpuset.
+
 void attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-           struct cgroup *old_cgrp, struct task_struct *task,
-           bool threadgroup)
+           struct cgroup *old_cgrp, struct task_struct *task)
 (cgroup_mutex held by caller)
 
 Called after the task has been attached to the cgroup, to allow any
 post-attachment activity that requires memory allocations or blocking.
-If threadgroup is true, the subsystem should take care of all threads
-in the specified thread's threadgroup. Currently does not support any
+
+void attach_task(struct cgroup *cgrp, struct task_struct *tsk);
+(cgroup_mutex held by caller)
+
+As attach, but for operations that must be run once per task to be attached,
+like can_attach_task. Called before attach. Currently does not support any
 subsystem that might need the old_cgrp for every thread in the group.
 
 void fork(struct cgroup_subsy *ss, struct task_struct *task)
@@ -630,7 +651,7 @@ always handled well.
 void post_clone(struct cgroup_subsys *ss, struct cgroup *cgrp)
 (cgroup_mutex held by caller)
 
-Called at the end of cgroup_clone() to do any parameter
+Called during cgroup_create() to do any parameter
 initialization which might be required before a task could attach.  For
 example in cpusets, no task may attach before 'cpus' and 'mems' are set
 up.
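
[Editor's illustration, not part of this commit: the cgroups.txt hunks above document that writing a thread-group id to cgroup.procs moves every thread of that group at once. A userspace caller might do it as below; the cgroup mount point and group name are assumptions and vary by system.]

#include <stdio.h>
#include <unistd.h>

/*
 * Sketch: move the calling process's whole thread group into a cgroup
 * by writing its tgid to cgroup.procs, per the documentation change above.
 */
int main(void)
{
	const char *procs = "/sys/fs/cgroup/cpu/mygroup/cgroup.procs";	/* hypothetical path */
	FILE *f = fopen(procs, "w");

	if (!f) {
		perror("fopen cgroup.procs");
		return 1;
	}
	/* Any tgid works; writing 0 would mean the writing task's own
	 * thread group, mirroring the tasks file. */
	fprintf(f, "%d\n", getpid());
	if (fclose(f) != 0) {
		perror("attach failed");
		return 1;
	}
	return 0;
}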
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 95788ad2506c57575e1ead9af19f6fc3440e59d5..ff31b1cc50aa3ddbf66f12172ec434456b662283 100644 (file)
@@ -262,16 +262,6 @@ Who:       Michael Buesch <mb@bu3sch.de>
 
 ---------------------------
 
-What:  /sys/o2cb symlink
-When:  January 2010
-Why:   /sys/fs/o2cb is the proper location for this information - /sys/o2cb
-       exists as a symlink for backwards compatibility for old versions of
-       ocfs2-tools. 2 years should be sufficient time to phase in new versions
-       which know to look in /sys/fs/o2cb.
-Who:   ocfs2-devel@oss.oracle.com
-
----------------------------
-
 What:  Ability for non root users to shm_get hugetlb pages based on mlock
        resource limits
 When:  2.6.31
diff --git a/Documentation/filesystems/configfs/configfs_example_explicit.c b/Documentation/filesystems/configfs/configfs_example_explicit.c
index fd53869f5633f2994fe06365fe53c1754747dc6a..1420233dfa556906a503f32f2bd010da9ea3e92e 100644 (file)
@@ -464,9 +464,8 @@ static int __init configfs_example_init(void)
        return 0;
 
 out_unregister:
-       for (; i >= 0; i--) {
+       for (i--; i >= 0; i--)
                configfs_unregister_subsystem(example_subsys[i]);
-       }
 
        return ret;
 }
@@ -475,9 +474,8 @@ static void __exit configfs_example_exit(void)
 {
        int i;
 
-       for (i = 0; example_subsys[i]; i++) {
+       for (i = 0; example_subsys[i]; i++)
                configfs_unregister_subsystem(example_subsys[i]);
-       }
 }
 
 module_init(configfs_example_init);
diff --git a/Documentation/filesystems/configfs/configfs_example_macros.c b/Documentation/filesystems/configfs/configfs_example_macros.c
index d8e30a0378aa2bf3540a79da4ff030d402943497..327dfbc640a9087302ce6ea5c6b30c77ad721acf 100644 (file)
@@ -427,9 +427,8 @@ static int __init configfs_example_init(void)
        return 0;
 
 out_unregister:
-       for (; i >= 0; i--) {
+       for (i--; i >= 0; i--)
                configfs_unregister_subsystem(example_subsys[i]);
-       }
 
        return ret;
 }
@@ -438,9 +437,8 @@ static void __exit configfs_example_exit(void)
 {
        int i;
 
-       for (i = 0; example_subsys[i]; i++) {
+       for (i = 0; example_subsys[i]; i++)
                configfs_unregister_subsystem(example_subsys[i]);
-       }
 }
 
 module_init(configfs_example_init);
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index c79ec58fd7f6f3df8ee32d98448cf4acfe5d7705..3ae9bc94352a660f2d3ed9feccc0b3aa8955ffcc 100644 (file)
@@ -226,10 +226,6 @@ acl                        Enables POSIX Access Control Lists support.
 noacl                  This option disables POSIX Access Control List
                        support.
 
-reservation
-
-noreservation
-
 bsddf          (*)     Make 'df' act like BSD.
 minixdf                        Make 'df' act like Minix.
 
diff --git a/Documentation/filesystems/nfs/idmapper.txt b/Documentation/filesystems/nfs/idmapper.txt
index b9b4192ea8b588ebd2d5be15767e69b93c8a7d0a..9c8fd614865642e7b9faf418de164c6088204b50 100644 (file)
@@ -47,8 +47,8 @@ request-key will find the first matching line and corresponding program.  In
 this case, /some/other/program will handle all uid lookups and
 /usr/sbin/nfs.idmap will handle gid, user, and group lookups.
 
-See <file:Documentation/keys-request-keys.txt> for more information about the
-request-key function.
+See <file:Documentation/security/keys-request-keys.txt> for more information
+about the request-key function.
 
 
 =========
diff --git a/Documentation/filesystems/ocfs2.txt b/Documentation/filesystems/ocfs2.txt
index 9ed920a8cd79e92ef871e3fa29201b0777a8ce2a..7618a287aa41f085e8c166393b99ee31672c2a9e 100644 (file)
@@ -46,9 +46,15 @@ errors=panic         Panic and halt the machine if an error occurs.
 intr           (*)     Allow signals to interrupt cluster operations.
 nointr                 Do not allow signals to interrupt cluster
                        operations.
+noatime                        Do not update access time.
+relatime(*)            Update atime if the previous atime is older than
+                       mtime or ctime
+strictatime            Always update atime, but the minimum update interval
+                       is specified by atime_quantum.
 atime_quantum=60(*)    OCFS2 will not update atime unless this number
                        of seconds has passed since the last update.
-                       Set to zero to always update atime.
+                       Set to zero to always update atime. This option need
+                       work with strictatime.
 data=ordered   (*)     All data are forced directly out to the main file
                        system prior to its metadata being committed to the
                        journal.
diff --git a/Documentation/filesystems/xfs.txt b/Documentation/filesystems/xfs.txt
index 7bff3e4f35df84a9def7507badb42ccdeffef419..3fc0c31a6f5dc5f8ee1220d9bcf8062038297aa5 100644 (file)
@@ -39,6 +39,12 @@ When mounting an XFS filesystem, the following options are accepted.
        drive level write caching to be enabled, for devices that
        support write barriers.
 
+  discard
+       Issue command to let the block device reclaim space freed by the
+       filesystem.  This is useful for SSD devices, thinly provisioned
+       LUNs and virtual machine images, but may have a performance
+       impact.  This option is incompatible with the nodelaylog option.
+
   dmapi
        Enable the DMAPI (Data Management API) event callouts.
        Use with the "mtpt" option.
diff --git a/Documentation/networking/dns_resolver.txt b/Documentation/networking/dns_resolver.txt
index 04ca06325b087157b8f21afb3f571350b7e5ef3f..7f531ad83285ced1d2cc24d513d611e0c8156c47 100644 (file)
@@ -139,8 +139,8 @@ the key will be discarded and recreated when the data it holds has expired.
 dns_query() returns a copy of the value attached to the key, or an error if
 that is indicated instead.
 
-See <file:Documentation/keys-request-key.txt> for further information about
-request-key function.
+See <file:Documentation/security/keys-request-key.txt> for further
+information about request-key function.
 
 
 =========
diff --git a/Documentation/power/regulator/machine.txt b/Documentation/power/regulator/machine.txt
index bdec39b9bd757612c937add6fc0fc9fe87322e27..b42419b52e444063841bcd6f0571ea0cde1eca84 100644 (file)
@@ -53,11 +53,11 @@ static struct regulator_init_data regulator1_data = {
 
 Regulator-1 supplies power to Regulator-2. This relationship must be registered
 with the core so that Regulator-1 is also enabled when Consumer A enables its
-supply (Regulator-2). The supply regulator is set by the supply_regulator_dev
+supply (Regulator-2). The supply regulator is set by the supply_regulator
 field below:-
 
 static struct regulator_init_data regulator2_data = {
-       .supply_regulator_dev = &platform_regulator1_device.dev,
+       .supply_regulator = "regulator_name",
        .constraints = {
                .min_uV = 1800000,
                .max_uV = 2000000,
diff --git a/Documentation/security/00-INDEX b/Documentation/security/00-INDEX
new file mode 100644 (file)
index 0000000..19bc494
--- /dev/null
@@ -0,0 +1,18 @@
+00-INDEX
+       - this file.
+SELinux.txt
+       - how to get started with the SELinux security enhancement.
+Smack.txt
+       - documentation on the Smack Linux Security Module.
+apparmor.txt
+       - documentation on the AppArmor security extension.
+credentials.txt
+       - documentation about credentials in Linux.
+keys-request-key.txt
+       - description of the kernel key request service.
+keys-trusted-encrypted.txt
+       - info on the Trusted and Encrypted keys in the kernel key ring service.
+keys.txt
+       - description of the kernel key retention service.
+tomoyo.txt
+       - documentation on the TOMOYO Linux Security Module.
similarity index 99%
rename from Documentation/credentials.txt
rename to Documentation/security/credentials.txt
index 995baf379c076770962a84268fde19b527caa0be..fc0366cbd7ce6af2392d8a124d6c6a9ab8d7b963 100644 (file)
@@ -216,7 +216,7 @@ The Linux kernel supports the following types of credentials:
      When a process accesses a key, if not already present, it will normally be
      cached on one of these keyrings for future accesses to find.
 
-     For more information on using keys, see Documentation/keys.txt.
+     For more information on using keys, see Documentation/security/keys.txt.
 
  (5) LSM
 
similarity index 98%
rename from Documentation/keys-request-key.txt
rename to Documentation/security/keys-request-key.txt
index 69686ad12c66e3a39de20fcf45b91dfcbee52cdf..51987bfecfedffa8e42ae64bc9aad54f4e4dd796 100644 (file)
@@ -3,8 +3,8 @@
                              ===================
 
 The key request service is part of the key retention service (refer to
-Documentation/keys.txt).  This document explains more fully how the requesting
-algorithm works.
+Documentation/security/keys.txt).  This document explains more fully how
+the requesting algorithm works.
 
 The process starts by either the kernel requesting a service by calling
 request_key*():
similarity index 99%
rename from Documentation/keys.txt
rename to Documentation/security/keys.txt
index 6523a9e6f293675a104ce77d78a896d0e9d190e5..4d75931d2d79e7febde59664b005798827df6d26 100644 (file)
@@ -434,7 +434,7 @@ The main syscalls are:
      /sbin/request-key will be invoked in an attempt to obtain a key. The
      callout_info string will be passed as an argument to the program.
 
-     See also Documentation/keys-request-key.txt.
+     See also Documentation/security/keys-request-key.txt.
 
 
 The keyctl syscall functions are:
@@ -864,7 +864,7 @@ payload contents" for more information.
     If successful, the key will have been attached to the default keyring for
     implicitly obtained request-key keys, as set by KEYCTL_SET_REQKEY_KEYRING.
 
-    See also Documentation/keys-request-key.txt.
+    See also Documentation/security/keys-request-key.txt.
 
 
 (*) To search for a key, passing auxiliary data to the upcaller, call:
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 36f007514db35f76cfb34a97c216c7a36a7dc8ff..5e7cb39ad195039dbb12ecacc3c0c2b7fd84da8a 100644 (file)
@@ -161,7 +161,8 @@ core_pattern is used to specify a core dumpfile pattern name.
        %s      signal number
        %t      UNIX time of dump
        %h      hostname
-       %e      executable filename
+       %e      executable filename (may be shortened)
+       %E      executable path
        %<OTHER> both are dropped
 . If the first character of the pattern is a '|', the kernel will treat
   the rest of the pattern as a command to run.  The core dump will be
diff --git a/Documentation/vm/cleancache.txt b/Documentation/vm/cleancache.txt
new file mode 100644 (file)
index 0000000..36c367c
--- /dev/null
@@ -0,0 +1,278 @@
+MOTIVATION
+
+Cleancache is a new optional feature provided by the VFS layer that
+potentially dramatically increases page cache effectiveness for
+many workloads in many environments at a negligible cost.
+
+Cleancache can be thought of as a page-granularity victim cache for clean
+pages that the kernel's pageframe replacement algorithm (PFRA) would like
+to keep around, but can't since there isn't enough memory.  So when the
+PFRA "evicts" a page, it first attempts to use cleancache code to
+put the data contained in that page into "transcendent memory", memory
+that is not directly accessible or addressable by the kernel and is
+of unknown and possibly time-varying size.
+
+Later, when a cleancache-enabled filesystem wishes to access a page
+in a file on disk, it first checks cleancache to see if it already
+contains it; if it does, the page of data is copied into the kernel
+and a disk access is avoided.
+
+Transcendent memory "drivers" for cleancache are currently implemented
+in Xen (using hypervisor memory) and zcache (using in-kernel compressed
+memory) and other implementations are in development.
+
+FAQs are included below.
+
+IMPLEMENTATION OVERVIEW
+
+A cleancache "backend" that provides transcendent memory registers itself
+to the kernel's cleancache "frontend" by calling cleancache_register_ops,
+passing a pointer to a cleancache_ops structure with funcs set appropriately.
+Note that cleancache_register_ops returns the previous settings so that
+chaining can be performed if desired. The functions provided must conform to
+certain semantics as follows:
+
+Most important, cleancache is "ephemeral".  Pages which are copied into
+cleancache have an indefinite lifetime which is completely unknowable
+by the kernel and so may or may not still be in cleancache at any later time.
+Thus, as its name implies, cleancache is not suitable for dirty pages.
+Cleancache has complete discretion over what pages to preserve and what
+pages to discard and when.
+
+Mounting a cleancache-enabled filesystem should call "init_fs" to obtain a
+pool id which, if positive, must be saved in the filesystem's superblock;
+a negative return value indicates failure.  A "put_page" will copy a
+(presumably about-to-be-evicted) page into cleancache and associate it with
+the pool id, a file key, and a page index into the file.  (The combination
+of a pool id, a file key, and an index is sometimes called a "handle".)
+A "get_page" will copy the page, if found, from cleancache into kernel memory.
+A "flush_page" will ensure the page no longer is present in cleancache;
+a "flush_inode" will flush all pages associated with the specified file;
+and, when a filesystem is unmounted, a "flush_fs" will flush all pages in
+all files specified by the given pool id and also surrender the pool id.
+
+An "init_shared_fs", like init_fs, obtains a pool id but tells cleancache
+to treat the pool as shared using a 128-bit UUID as a key.  On systems
+that may run multiple kernels (such as hard partitioned or virtualized
+systems) that may share a clustered filesystem, and where cleancache
+may be shared among those kernels, calls to init_shared_fs that specify the
+same UUID will receive the same pool id, thus allowing the pages to
+be shared.  Note that any security requirements must be imposed outside
+of the kernel (e.g. by "tools" that control cleancache).  Or a
+cleancache implementation can simply disable shared_init by always
+returning a negative value.
+
+If a get_page is successful on a non-shared pool, the page is flushed (thus
+making cleancache an "exclusive" cache).  On a shared pool, the page
+is NOT flushed on a successful get_page so that it remains accessible to
+other sharers.  The kernel is responsible for ensuring coherency between
+cleancache (shared or not), the page cache, and the filesystem, using
+cleancache flush operations as required.
+
+Note that cleancache must enforce put-put-get coherency and get-get
+coherency.  For the former, if two puts are made to the same handle but
+with different data, say AAA by the first put and BBB by the second, a
+subsequent get can never return the stale data (AAA).  For get-get coherency,
+if a get for a given handle fails, subsequent gets for that handle will
+never succeed unless preceded by a successful put with that handle.
+
+Last, cleancache provides no SMP serialization guarantees; if two
+different Linux threads are simultaneously putting and flushing a page
+with the same handle, the results are indeterminate.  Callers must
+lock the page to ensure serial behavior.
+
+CLEANCACHE PERFORMANCE METRICS
+
+Cleancache monitoring is done by sysfs files in the
+/sys/kernel/mm/cleancache directory.  The effectiveness of cleancache
+can be measured (across all filesystems) with:
+
+succ_gets      - number of gets that were successful
+failed_gets    - number of gets that failed
+puts           - number of puts attempted (all "succeed")
+flushes                - number of flushes attempted
+
+A backend implementatation may provide additional metrics.
+
+FAQ
+
+1) Where's the value? (Andrew Morton)
+
+Cleancache provides a significant performance benefit to many workloads
+in many environments with negligible overhead by improving the
+effectiveness of the pagecache.  Clean pagecache pages are
+saved in transcendent memory (RAM that is otherwise not directly
+addressable to the kernel); fetching those pages later avoids "refaults"
+and thus disk reads.
+
+Cleancache (and its sister code "frontswap") provide interfaces for
+this transcendent memory (aka "tmem"), which conceptually lies between
+fast kernel-directly-addressable RAM and slower DMA/asynchronous devices.
+Disallowing direct kernel or userland reads/writes to tmem
+is ideal when data is transformed to a different form and size (such
+as with compression) or secretly moved (as might be useful for write-
+balancing for some RAM-like devices).  Evicted page-cache pages (and
+swap pages) are a great use for this kind of slower-than-RAM-but-much-
+faster-than-disk transcendent memory, and the cleancache (and frontswap)
+"page-object-oriented" specification provides a nice way to read and
+write -- and indirectly "name" -- the pages.
+
+In the virtual case, the whole point of virtualization is to statistically
+multiplex physical resources across the varying demands of multiple
+virtual machines.  This is really hard to do with RAM and efforts to
+do it well with no kernel change have essentially failed (except in some
+well-publicized special-case workloads).  Cleancache -- and frontswap --
+with a fairly small impact on the kernel, provide a huge amount
+of flexibility for more dynamic, flexible RAM multiplexing.
+Specifically, the Xen Transcendent Memory backend allows otherwise
+"fallow" hypervisor-owned RAM to not only be "time-shared" between multiple
+virtual machines, but the pages can be compressed and deduplicated to
+optimize RAM utilization.  And when guest OS's are induced to surrender
+underutilized RAM (e.g. with "self-ballooning"), page cache pages
+are the first to go, and cleancache allows those pages to be
+saved and reclaimed if overall host system memory conditions allow.
+
+And the identical interface used for cleancache can be used in
+physical systems as well.  The zcache driver acts as a memory-hungry
+device that stores pages of data in a compressed state.  And
+the proposed "RAMster" driver shares RAM across multiple physical
+systems.
+
+2) Why does cleancache have its sticky fingers so deep inside the
+   filesystems and VFS? (Andrew Morton and Christoph Hellwig)
+
+The core hooks for cleancache in VFS are in most cases a single line
+and the minimum set is placed precisely where needed to maintain
+coherency (via cleancache_flush operations) between cleancache,
+the page cache, and disk.  All hooks compile into nothingness if
+cleancache is config'ed off and turn into a function-pointer-
+compare-to-NULL if config'ed on but no backend claims the ops
+functions, or into a compare-struct-element-to-negative if a
+backend claims the ops functions but a filesystem doesn't enable
+cleancache.
+
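+A simplified sketch of the wrapper pattern (names approximate, but are
+not guaranteed to match, the real include/linux/cleancache.h):
+
+    static inline int cleancache_get_page(struct page *page)
+    {
+        int ret = -1;
+
+        /* compiles away entirely when cleancache is configured off;
+         * otherwise this is an enabled test plus a per-fs pool-id
+         * ("compare to negative") test before the out-of-line call */
+        if (cleancache_enabled && cleancache_fs_enabled(page))
+            ret = __cleancache_get_page(page);
+        return ret;
+    }
+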
+Some filesystems are built entirely on top of VFS and the hooks
+in VFS are sufficient and so do not require an "init_fs" hook; the
+initial implementation of cleancache didn't provide this hook.
+But for some filesystems (such as btrfs), the VFS hooks are
+incomplete and one or more hooks in fs-specific code are required.
+And for some other filesystems, such as tmpfs, cleancache may
+be counterproductive.  So it seemed prudent to require a filesystem
+to "opt in" to use cleancache, which requires adding a hook in
+each filesystem.  Some filesystems are not yet supported by cleancache
+simply because they haven't been tested.  The existing set should
+be sufficient to validate the concept, the opt-in approach means
+that untested filesystems are not affected, and the hooks in the
+existing filesystems should make it very easy to add more
+filesystems in the future.
+
+The total impact of the hooks to existing fs and mm files is only
+about 40 lines added (not counting comments and blank lines).
+
+3) Why not make cleancache asynchronous and batched so it can
+   more easily interface with real devices with DMA instead
+   of copying each individual page? (Minchan Kim)
+
+The one-page-at-a-time copy semantics simplifies the implementation
+on both the frontend and backend and also allows the backend to
+do fancy things on-the-fly like page compression and
+page deduplication.  And since the data is "gone" (copied into/out
+of the pageframe) before the cleancache get/put call returns,
+a great many race conditions and potential coherency issues
+are avoided.  While the interface seems odd for a "real device"
+or for real kernel-addressable RAM, it makes perfect sense for
+transcendent memory.
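+
+For reference, the page-at-a-time, synchronous shape of the interface
+is visible in the ops structure a backend registers.  The sketch below
+approximates, but is not guaranteed to match, the backend-facing
+struct:
+
+    struct cleancache_ops {
+        /* each op handles exactly one page, and the copy is complete
+         * before the op returns */
+        int  (*init_fs)(size_t pagesize);
+        int  (*init_shared_fs)(char *uuid, size_t pagesize);
+        int  (*get_page)(int pool, struct cleancache_filekey key,
+                         pgoff_t index, struct page *page);
+        void (*put_page)(int pool, struct cleancache_filekey key,
+                         pgoff_t index, struct page *page);
+        void (*flush_page)(int pool, struct cleancache_filekey key,
+                           pgoff_t index);
+        void (*flush_inode)(int pool, struct cleancache_filekey key);
+        void (*flush_fs)(int pool);
+    };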
+
+4) Why is non-shared cleancache "exclusive"?  And where is the
+   page "flushed" after a "get"? (Minchan Kim)
+
+The main reason is to free up space in transcendent memory and
+to avoid unnecessary cleancache_flush calls.  If you want inclusive,
+the page can be "put" immediately following the "get".  If
+put-after-get for inclusive becomes common, the interface could
+be easily extended to add a "get_no_flush" call.
+
+The flush is done by the cleancache backend implementation.
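+
+Until then, a caller that wants inclusive behaviour can emulate it as
+sketched below (assuming the kernel-side wrappers, where a successful
+get returns 0):
+
+    if (cleancache_get_page(page) == 0) {
+        /* the exclusive get consumed the backend copy; put it straight
+         * back, trading an extra copy for inclusive semantics */
+        cleancache_put_page(page);
+    }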
+
+5) What's the performance impact?
+
+Performance analysis has been presented at OLS'09 and LCA'10.
+Briefly, performance gains can be significant on most workloads,
+especially when memory pressure is high (e.g. when RAM is
+overcommitted in a virtual workload); and because the hooks are
+invoked primarily in place of or in addition to a disk read/write,
+overhead is negligible even in worst case workloads.  Basically
+cleancache replaces I/O with memory-copy-CPU-overhead; on older
+single-core systems with slow memory-copy speeds, cleancache
+has little value, but in newer multicore machines, especially
+consolidated/virtualized machines, it has great value.
+
+6) How do I add cleancache support for filesystem X? (Boaz Harrash)
+
+Filesystems that are well-behaved and conform to certain
+restrictions can utilize cleancache simply by making a call to
+cleancache_init_fs at mount time.  Unusual, misbehaving, or
+poorly layered filesystems must either add additional hooks
+and/or undergo extensive additional testing... or should just
+not enable the optional cleancache.
+
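+For a well-behaved filesystem the opt-in can be as small as the sketch
+below, placed in its mount/fill_super path ("myfs" is hypothetical; the
+wrapper takes the filesystem's superblock):
+
+    static int myfs_fill_super(struct super_block *sb, void *data, int silent)
+    {
+        /* ... normal superblock setup ... */
+
+        /* opt in; a registered backend (if any) assigns sb a pool id */
+        cleancache_init_fs(sb);
+        return 0;
+    }
+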
+Some points for a filesystem to consider:
+
+- The FS should be block-device-based (e.g. a ram-based FS such
+  as tmpfs should not enable cleancache)
+- To ensure coherency/correctness, the FS must ensure that all
+  file removal or truncation operations either go through VFS or
+  add hooks to do the equivalent cleancache "flush" operations
+- To ensure coherency/correctness, either inode numbers must
+  be unique across the lifetime of the on-disk file OR the
+  FS must provide an "encode_fh" function.
+- The FS must call the VFS superblock alloc and deactivate routines
+  or add hooks to do the equivalent cleancache calls done there.
+- To maximize performance, all pages fetched from the FS should
+  go through the do_mpage_readpage routine or the FS should add
+  hooks to do the equivalent (cf. btrfs)
+- Currently, the FS blocksize must be the same as PAGESIZE.  This
+  is not an architectural restriction, but no backends currently
+  support anything different.
+- A clustered FS should invoke the "shared_init_fs" cleancache
+  hook to get best performance for some backends.
+
+7) Why not use the KVA of the inode as the key? (Christoph Hellwig)
+
+If cleancache were to use the inode virtual address instead of
+inode/filehandle, the pool id could be eliminated.  But, this
+won't work because cleancache retains pagecache data pages
+persistently even when the inode has been pruned from the
+inode unused list, and only flushes the data page if the file
+gets removed/truncated.  So if cleancache used the inode kva,
+there would be potential coherency issues if/when the inode
+kva is reused for a different file.  Alternately, if cleancache
+flushed the pages when the inode kva was freed, much of the value
+of cleancache would be lost because the cache of pages in cleancache
+is potentially much larger than the kernel pagecache and is most
+useful if the pages survive inode cache removal.
+
+8) Why is a global variable required?
+
+The cleancache_enabled flag is checked in all of the frequently-used
+cleancache hooks.  The alternative is a function call to check a static
+variable. Since cleancache is enabled dynamically at runtime, systems
+that don't enable cleancache would suffer thousands (possibly
+tens-of-thousands) of unnecessary function calls per second.  So the
+global variable allows cleancache to be enabled by default at compile
+time, but have insignificant performance impact when cleancache remains
+disabled at runtime.
+
+9) Does cleancache work with KVM?
+
+The memory model of KVM is sufficiently different that a cleancache
+backend may have less value for KVM.  This remains to be tested,
+especially in an overcommitted system.
+
+10) Does cleancache work in userspace?  It sounds useful for
+   memory-hungry caches like web browsers.  (Jamie Lokier)
+
+No plans yet, though we agree it sounds useful, at least for
+apps that bypass the page cache (e.g. O_DIRECT).
+
+Last updated: Dan Magenheimer, April 13 2011
index 1ab17de642e572298ef32b38b5a69198739b7cf8..a33b11560d3fa47d0f6af7fd3e160f04c2bbda76 100644 (file)
@@ -931,6 +931,8 @@ F:  drivers/mmc/host/msm_sdcc.h
 F:     drivers/tty/serial/msm_serial.h
 F:     drivers/tty/serial/msm_serial.c
 F:     drivers/platform/msm/
+F:     drivers/*/pm8???-*
+F:     include/linux/mfd/pm8xxx/
 T:     git git://codeaurora.org/quic/kernel/davidb/linux-msm.git
 S:     Maintained
 
@@ -2302,7 +2304,7 @@ F:        net/bridge/netfilter/ebt*.c
 ECRYPT FILE SYSTEM
 M:     Tyler Hicks <tyhicks@linux.vnet.ibm.com>
 M:     Dustin Kirkland <kirkland@canonical.com>
-L:     ecryptfs-devel@lists.launchpad.net
+L:     ecryptfs@vger.kernel.org
 W:     https://launchpad.net/ecryptfs
 S:     Supported
 F:     Documentation/filesystems/ecryptfs.txt
@@ -2582,6 +2584,13 @@ S:       Maintained
 F:     drivers/hwmon/f75375s.c
 F:     include/linux/f75375s.h
 
+FIREWIRE AUDIO DRIVERS
+M:     Clemens Ladisch <clemens@ladisch.de>
+L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
+T:     git git://git.alsa-project.org/alsa-kernel.git
+S:     Maintained
+F:     sound/firewire/
+
 FIREWIRE SUBSYSTEM
 M:     Stefan Richter <stefanr@s5r6.in-berlin.de>
 L:     linux1394-devel@lists.sourceforge.net
@@ -3572,9 +3581,16 @@ M:       Andrew Morton <akpm@linux-foundation.org>
 M:     Jan Kara <jack@suse.cz>
 L:     linux-ext4@vger.kernel.org
 S:     Maintained
-F:     fs/jbd*/
-F:     include/linux/ext*jbd*.h
-F:     include/linux/jbd*.h
+F:     fs/jbd/
+F:     include/linux/ext3_jbd.h
+F:     include/linux/jbd.h
+
+JOURNALLING LAYER FOR BLOCK DEVICES (JBD2)
+M:     "Theodore Ts'o" <tytso@mit.edu>
+L:     linux-ext4@vger.kernel.org
+S:     Maintained
+F:     fs/jbd2/
+F:     include/linux/jbd2.h
 
 JSM Neo PCI based serial card
 M:     Breno Leitao <leitao@linux.vnet.ibm.com>
@@ -3710,7 +3726,7 @@ KEYS/KEYRINGS:
 M:     David Howells <dhowells@redhat.com>
 L:     keyrings@linux-nfs.org
 S:     Maintained
-F:     Documentation/keys.txt
+F:     Documentation/security/keys.txt
 F:     include/linux/key.h
 F:     include/linux/key-type.h
 F:     include/keys/
@@ -3722,7 +3738,7 @@ M:        Mimi Zohar <zohar@us.ibm.com>
 L:     linux-security-module@vger.kernel.org
 L:     keyrings@linux-nfs.org
 S:     Supported
-F:     Documentation/keys-trusted-encrypted.txt
+F:     Documentation/security/keys-trusted-encrypted.txt
 F:     include/keys/trusted-type.h
 F:     security/keys/trusted.c
 F:     security/keys/trusted.h
@@ -3733,7 +3749,7 @@ M:        David Safford <safford@watson.ibm.com>
 L:     linux-security-module@vger.kernel.org
 L:     keyrings@linux-nfs.org
 S:     Supported
-F:     Documentation/keys-trusted-encrypted.txt
+F:     Documentation/security/keys-trusted-encrypted.txt
 F:     include/keys/encrypted-type.h
 F:     security/keys/encrypted.c
 F:     security/keys/encrypted.h
@@ -4138,6 +4154,7 @@ M:        KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/memcontrol.c
+F:     mm/page_cgroup.c
 
 MEMORY TECHNOLOGY DEVICES (MTD)
 M:     David Woodhouse <dwmw2@infradead.org>
@@ -5991,7 +6008,7 @@ F:        Documentation/filesystems/spufs.txt
 F:     arch/powerpc/platforms/cell/spufs/
 
 SQUASHFS FILE SYSTEM
-M:     Phillip Lougher <phillip@lougher.demon.co.uk>
+M:     Phillip Lougher <phillip@squashfs.org.uk>
 L:     squashfs-devel@lists.sourceforge.net (subscribers-only)
 W:     http://squashfs.org.uk
 S:     Maintained
index e3a82775f9da7fc17215b07b57f972bb2b417c0d..60219bf9419884587ed0c6172a084cd5555bddbf 100644 (file)
@@ -41,10 +41,6 @@ config ARCH_HAS_ILOG2_U64
        bool
        default n
 
-config GENERIC_FIND_NEXT_BIT
-       bool
-       default y
-
 config GENERIC_CALIBRATE_DELAY
        bool
        default y
index 076db52ff6723deacf4ec0c9485909b886be1484..d5f00d7eb075002f315aab9527c94cc80b4175f0 100644 (file)
@@ -21,58 +21,22 @@ CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_ARCH_OMAP=y
-CONFIG_ARCH_OMAP2=y
-CONFIG_ARCH_OMAP3=y
-CONFIG_ARCH_OMAP4=y
 CONFIG_OMAP_RESET_CLOCKS=y
 CONFIG_OMAP_MUX_DEBUG=y
-CONFIG_OMAP_32K_TIMER=y
-CONFIG_MACH_OMAP_GENERIC=y
-CONFIG_ARCH_OMAP2420=y
-CONFIG_ARCH_OMAP2430=y
-CONFIG_ARCH_OMAP3430=y
-CONFIG_MACH_OMAP_H4=y
-CONFIG_MACH_OMAP_APOLLON=y
-CONFIG_MACH_OMAP_2430SDP=y
-CONFIG_MACH_OMAP3_BEAGLE=y
-CONFIG_MACH_DEVKIT8000=y
-CONFIG_MACH_OMAP_LDP=y
-CONFIG_MACH_OVERO=y
-CONFIG_MACH_OMAP3EVM=y
-CONFIG_MACH_OMAP3517EVM=y
-CONFIG_MACH_OMAP3_PANDORA=y
-CONFIG_MACH_OMAP3_TOUCHBOOK=y
-CONFIG_MACH_OMAP_3430SDP=y
-CONFIG_MACH_NOKIA_N8X0=y
-CONFIG_MACH_NOKIA_RX51=y
-CONFIG_MACH_OMAP_ZOOM2=y
-CONFIG_MACH_OMAP_ZOOM3=y
-CONFIG_MACH_CM_T35=y
-CONFIG_MACH_IGEP0020=y
-CONFIG_MACH_SBC3530=y
-CONFIG_MACH_OMAP_3630SDP=y
-CONFIG_MACH_OMAP_4430SDP=y
 CONFIG_ARM_THUMBEE=y
-CONFIG_ARM_L1_CACHE_SHIFT=5
 CONFIG_ARM_ERRATA_411920=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=2
-# CONFIG_LOCAL_TIMERS is not set
-CONFIG_AEABI=y
 CONFIG_LEDS=y
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyO2,115200"
 CONFIG_KEXEC=y
 CONFIG_FPE_NWFPE=y
-CONFIG_VFP=y
-CONFIG_NEON=y
 CONFIG_BINFMT_MISC=y
-CONFIG_PM=y
 CONFIG_PM_DEBUG=y
-CONFIG_PM_RUNTIME=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -89,14 +53,6 @@ CONFIG_IP_PNP_RARP=y
 # CONFIG_IPV6 is not set
 CONFIG_NETFILTER=y
 CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
-CONFIG_BT_RFCOMM=y
-CONFIG_BT_RFCOMM_TTY=y
-CONFIG_BT_BNEP=m
-CONFIG_BT_BNEP_MC_FILTER=y
-CONFIG_BT_BNEP_PROTO_FILTER=y
-CONFIG_BT_HIDP=m
 CONFIG_BT_HCIUART=m
 CONFIG_BT_HCIUART_H4=y
 CONFIG_BT_HCIUART_BCSP=y
@@ -107,11 +63,9 @@ CONFIG_CFG80211=m
 CONFIG_MAC80211=m
 CONFIG_MAC80211_RC_PID=y
 CONFIG_MAC80211_RC_DEFAULT_PID=y
-CONFIG_MAC80211_LEDS=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_CONNECTOR=y
 CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
 CONFIG_MTD_CMDLINE_PARTS=y
 CONFIG_MTD_CHAR=y
 CONFIG_MTD_BLOCK=y
@@ -127,7 +81,6 @@ CONFIG_MTD_UBI=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=16384
-CONFIG_EEPROM_LEGACY=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_SCSI_MULTI_LUN=y
@@ -158,19 +111,15 @@ CONFIG_TOUCHSCREEN_ADS7846=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_TWL4030_PWRBUTTON=y
 CONFIG_VT_HW_CONSOLE_BINDING=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_LEGACY_PTYS is not set
 CONFIG_SERIAL_8250_NR_UARTS=32
 CONFIG_SERIAL_8250_EXTENDED=y
 CONFIG_SERIAL_8250_MANY_PORTS=y
 CONFIG_SERIAL_8250_SHARE_IRQ=y
 CONFIG_SERIAL_8250_DETECT_IRQ=y
 CONFIG_SERIAL_8250_RSA=y
-# CONFIG_LEGACY_PTYS is not set
 CONFIG_HW_RANDOM=y
-CONFIG_I2C=y
 CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_OMAP=y
 CONFIG_SPI=y
 CONFIG_SPI_OMAP24XX=y
 CONFIG_DEBUG_GPIO=y
@@ -181,10 +130,6 @@ CONFIG_POWER_SUPPLY=y
 CONFIG_WATCHDOG=y
 CONFIG_OMAP_WATCHDOG=y
 CONFIG_TWL4030_WATCHDOG=y
-CONFIG_MENELAUS=y
-CONFIG_TWL4030_CORE=y
-CONFIG_TWL4030_POWER=y
-CONFIG_REGULATOR=y
 CONFIG_REGULATOR_TWL4030=y
 CONFIG_REGULATOR_TPS65023=y
 CONFIG_REGULATOR_TPS6507X=y
@@ -208,7 +153,6 @@ CONFIG_BACKLIGHT_LCD_SUPPORT=y
 CONFIG_LCD_CLASS_DEVICE=y
 CONFIG_LCD_PLATFORM=y
 CONFIG_DISPLAY_SUPPORT=y
-# CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
 CONFIG_FONTS=y
@@ -217,25 +161,20 @@ CONFIG_FONT_8x16=y
 CONFIG_LOGO=y
 CONFIG_SOUND=m
 CONFIG_SND=m
-CONFIG_SND_MIXER_OSS=y
-CONFIG_SND_PCM_OSS=y
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
 CONFIG_SND_VERBOSE_PRINTK=y
 CONFIG_SND_DEBUG=y
-CONFIG_SND_USB_AUDIO=y
-CONFIG_SND_SOC=y
-CONFIG_SND_OMAP_SOC=y
-CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=y
+CONFIG_SND_USB_AUDIO=m
+CONFIG_SND_SOC=m
+CONFIG_SND_OMAP_SOC=m
+CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=m
 CONFIG_USB=y
 CONFIG_USB_DEBUG=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_DEVICEFS=y
 CONFIG_USB_SUSPEND=y
-# CONFIG_USB_OTG_WHITELIST is not set
 CONFIG_USB_MON=y
-# CONFIG_USB_MUSB_HDRC is not set
-# CONFIG_USB_MUSB_OTG is not set
-# CONFIG_USB_GADGET_MUSB_HDRC is not set
-CONFIG_USB_MUSB_DEBUG=y
 CONFIG_USB_WDM=y
 CONFIG_USB_STORAGE=y
 CONFIG_USB_LIBUSUAL=y
@@ -250,18 +189,12 @@ CONFIG_MMC_UNSAFE_RESUME=y
 CONFIG_SDIO_UART=y
 CONFIG_MMC_OMAP=y
 CONFIG_MMC_OMAP_HS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_TWL92330=y
 CONFIG_RTC_DRV_TWL4030=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_FS_XATTR is not set
-CONFIG_INOTIFY=y
 CONFIG_QUOTA=y
 CONFIG_QFMT_V2=y
 CONFIG_MSDOS_FS=y
@@ -285,12 +218,10 @@ CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
 CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_SCHEDSTATS=y
 CONFIG_TIMER_STATS=y
 CONFIG_PROVE_LOCKING=y
-# CONFIG_LOCK_STAT is not set
 CONFIG_DEBUG_SPINLOCK_SLEEP=y
 # CONFIG_DEBUG_BUGVERBOSE is not set
 CONFIG_DEBUG_INFO=y
index 6b7403fd8f54f939e8827b29cc59d6c53a1f9f42..b4892a06442cea0a6b80ed1a99e465d0126db2d0 100644 (file)
@@ -203,8 +203,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 #define find_first_bit(p,sz)           _find_first_bit_le(p,sz)
 #define find_next_bit(p,sz,off)                _find_next_bit_le(p,sz,off)
 
-#define WORD_BITOFF_TO_LE(x)           ((x))
-
 #else
 /*
  * These are the big endian, atomic definitions.
@@ -214,8 +212,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
 #define find_first_bit(p,sz)           _find_first_bit_be(p,sz)
 #define find_next_bit(p,sz,off)                _find_next_bit_be(p,sz,off)
 
-#define WORD_BITOFF_TO_LE(x)           ((x) ^ 0x18)
-
 #endif
 
 #if __LINUX_ARM_ARCH__ < 5
@@ -287,55 +283,29 @@ static inline int fls(int x)
 #include <asm-generic/bitops/hweight.h>
 #include <asm-generic/bitops/lock.h>
 
-static inline void __set_bit_le(int nr, void *addr)
-{
-       __set_bit(WORD_BITOFF_TO_LE(nr), addr);
-}
-
-static inline void __clear_bit_le(int nr, void *addr)
-{
-       __clear_bit(WORD_BITOFF_TO_LE(nr), addr);
-}
-
-static inline int __test_and_set_bit_le(int nr, void *addr)
-{
-       return __test_and_set_bit(WORD_BITOFF_TO_LE(nr), addr);
-}
-
-static inline int test_and_set_bit_le(int nr, void *addr)
-{
-       return test_and_set_bit(WORD_BITOFF_TO_LE(nr), addr);
-}
-
-static inline int __test_and_clear_bit_le(int nr, void *addr)
-{
-       return __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), addr);
-}
-
-static inline int test_and_clear_bit_le(int nr, void *addr)
-{
-       return test_and_clear_bit(WORD_BITOFF_TO_LE(nr), addr);
-}
-
-static inline int test_bit_le(int nr, const void *addr)
-{
-       return test_bit(WORD_BITOFF_TO_LE(nr), addr);
-}
+#ifdef __ARMEB__
 
 static inline int find_first_zero_bit_le(const void *p, unsigned size)
 {
        return _find_first_zero_bit_le(p, size);
 }
+#define find_first_zero_bit_le find_first_zero_bit_le
 
 static inline int find_next_zero_bit_le(const void *p, int size, int offset)
 {
        return _find_next_zero_bit_le(p, size, offset);
 }
+#define find_next_zero_bit_le find_next_zero_bit_le
 
 static inline int find_next_bit_le(const void *p, int size, int offset)
 {
        return _find_next_bit_le(p, size, offset);
 }
+#define find_next_bit_le find_next_bit_le
+
+#endif
+
+#include <asm-generic/bitops/le.h>
 
 /*
  * Ext2 is defined to use little-endian byte ordering.
index b997a35830fce5e36f1624773916a90159a02865..19d5891c48e325f6afe8e1a101dfdfbca3ad0a68 100644 (file)
@@ -288,6 +288,7 @@ config MACH_IGEP0030
        depends on ARCH_OMAP3
        default y
        select OMAP_PACKAGE_CBB
+       select MACH_IGEP0020
 
 config MACH_SBC3530
        bool "OMAP3 SBC STALKER board"
index 66dfbccacd25e0cabfdbbbf802f8cb59526d8fc7..b14807794401243fac61205cfcafdb925398dfbd 100644 (file)
@@ -229,8 +229,6 @@ obj-$(CONFIG_MACH_CM_T35)           += board-cm-t35.o \
 obj-$(CONFIG_MACH_CM_T3517)            += board-cm-t3517.o
 obj-$(CONFIG_MACH_IGEP0020)            += board-igep0020.o \
                                           hsmmc.o
-obj-$(CONFIG_MACH_IGEP0030)            += board-igep0030.o \
-                                          hsmmc.o
 obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK)     += board-omap3touchbook.o \
                                           hsmmc.o
 obj-$(CONFIG_MACH_OMAP_4430SDP)                += board-4430sdp.o \
@@ -270,3 +268,5 @@ obj-$(CONFIG_ARCH_OMAP4)            += hwspinlock.o
 
 disp-$(CONFIG_OMAP2_DSS)               := display.o
 obj-y                                  += $(disp-m) $(disp-y)
+
+obj-y                                  += common-board-devices.o
index 1fa6bb896f419d9fccbdca8f6638a4c426673be2..d54969be0a54e90535a3578f48a85c31144f0b9b 100644 (file)
@@ -41,6 +41,7 @@
 
 #include "mux.h"
 #include "hsmmc.h"
+#include "common-board-devices.h"
 
 #define SDP2430_CS0_BASE       0x04000000
 #define SECONDARY_LCD_GPIO             147
@@ -180,15 +181,6 @@ static struct twl4030_platform_data sdp2430_twldata = {
        .vmmc1          = &sdp2430_vmmc1,
 };
 
-static struct i2c_board_info __initdata sdp2430_i2c_boardinfo[] = {
-       {
-               I2C_BOARD_INFO("twl4030", 0x48),
-               .flags = I2C_CLIENT_WAKE,
-               .irq = INT_24XX_SYS_NIRQ,
-               .platform_data = &sdp2430_twldata,
-       },
-};
-
 static struct i2c_board_info __initdata sdp2430_i2c1_boardinfo[] = {
        {
                I2C_BOARD_INFO("isp1301_omap", 0x2D),
@@ -201,8 +193,7 @@ static int __init omap2430_i2c_init(void)
 {
        omap_register_i2c_bus(1, 100, sdp2430_i2c1_boardinfo,
                        ARRAY_SIZE(sdp2430_i2c1_boardinfo));
-       omap_register_i2c_bus(2, 2600, sdp2430_i2c_boardinfo,
-                       ARRAY_SIZE(sdp2430_i2c_boardinfo));
+       omap2_pmic_init("twl4030", &sdp2430_twldata);
        return 0;
 }
 
@@ -217,11 +208,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
        {}      /* Terminator */
 };
 
-static struct omap_musb_board_data musb_board_data = {
-       .interface_type         = MUSB_INTERFACE_ULPI,
-       .mode                   = MUSB_OTG,
-       .power                  = 100,
-};
 static struct omap_usb_config sdp2430_usb_config __initdata = {
        .otg            = 1,
 #ifdef  CONFIG_USB_GADGET_OMAP
@@ -240,8 +226,6 @@ static struct omap_board_mux board_mux[] __initdata = {
 
 static void __init omap_2430sdp_init(void)
 {
-       int ret;
-
        omap2430_mux_init(board_mux, OMAP_PACKAGE_ZAC);
 
        omap_board_config = sdp2430_config;
@@ -255,14 +239,13 @@ static void __init omap_2430sdp_init(void)
        omap2_usbfs_init(&sdp2430_usb_config);
 
        omap_mux_init_signal("usb0hs_stp", OMAP_PULL_ENA | OMAP_PULL_UP);
-       usb_musb_init(&musb_board_data);
+       usb_musb_init(NULL);
 
        board_smc91x_init();
 
        /* Turn off secondary LCD backlight */
-       ret = gpio_request(SECONDARY_LCD_GPIO, "Secondary LCD backlight");
-       if (ret == 0)
-               gpio_direction_output(SECONDARY_LCD_GPIO, 0);
+       gpio_request_one(SECONDARY_LCD_GPIO, GPIOF_OUT_INIT_LOW,
+                        "Secondary LCD backlight");
 }
 
 static void __init omap_2430sdp_map_io(void)
index 23244cd0a5b6fed83adb041056558e8e58f6b13a..ae2963a98041711a091a473cc1a66283a851397c 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/input.h>
 #include <linux/input/matrix_keypad.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/ads7846.h>
 #include <linux/i2c/twl.h>
 #include <linux/regulator/machine.h>
 #include <linux/io.h>
@@ -48,6 +47,7 @@
 #include "hsmmc.h"
 #include "pm.h"
 #include "control.h"
+#include "common-board-devices.h"
 
 #define CONFIG_DISABLE_HFCLK 1
 
 
 #define TWL4030_MSECURE_GPIO 22
 
-/* FIXME: These values need to be updated based on more profiling on 3430sdp*/
-static struct cpuidle_params omap3_cpuidle_params_table[] = {
-       /* C1 */
-       {1, 2, 2, 5},
-       /* C2 */
-       {1, 10, 10, 30},
-       /* C3 */
-       {1, 50, 50, 300},
-       /* C4 */
-       {1, 1500, 1800, 4000},
-       /* C5 */
-       {1, 2500, 7500, 12000},
-       /* C6 */
-       {1, 3000, 8500, 15000},
-       /* C7 */
-       {1, 10000, 30000, 300000},
-};
-
 static uint32_t board_keymap[] = {
        KEY(0, 0, KEY_LEFT),
        KEY(0, 1, KEY_RIGHT),
@@ -123,63 +105,14 @@ static struct twl4030_keypad_data sdp3430_kp_data = {
        .rep            = 1,
 };
 
-static int ts_gpio;    /* Needed for ads7846_get_pendown_state */
-
-/**
- * @brief ads7846_dev_init : Requests & sets GPIO line for pen-irq
- *
- * @return - void. If request gpio fails then Flag KERN_ERR.
- */
-static void ads7846_dev_init(void)
-{
-       if (gpio_request(ts_gpio, "ADS7846 pendown") < 0) {
-               printk(KERN_ERR "can't get ads746 pen down GPIO\n");
-               return;
-       }
-
-       gpio_direction_input(ts_gpio);
-       gpio_set_debounce(ts_gpio, 310);
-}
-
-static int ads7846_get_pendown_state(void)
-{
-       return !gpio_get_value(ts_gpio);
-}
-
-static struct ads7846_platform_data tsc2046_config __initdata = {
-       .get_pendown_state      = ads7846_get_pendown_state,
-       .keep_vref_on           = 1,
-       .wakeup                         = true,
-};
-
-
-static struct omap2_mcspi_device_config tsc2046_mcspi_config = {
-       .turbo_mode     = 0,
-       .single_channel = 1,    /* 0: slave, 1: master */
-};
-
-static struct spi_board_info sdp3430_spi_board_info[] __initdata = {
-       [0] = {
-               /*
-                * TSC2046 operates at a max freqency of 2MHz, so
-                * operate slightly below at 1.5MHz
-                */
-               .modalias               = "ads7846",
-               .bus_num                = 1,
-               .chip_select            = 0,
-               .max_speed_hz           = 1500000,
-               .controller_data        = &tsc2046_mcspi_config,
-               .irq                    = 0,
-               .platform_data          = &tsc2046_config,
-       },
-};
-
-
 #define SDP3430_LCD_PANEL_BACKLIGHT_GPIO       8
 #define SDP3430_LCD_PANEL_ENABLE_GPIO          5
 
-static unsigned backlight_gpio;
-static unsigned enable_gpio;
+static struct gpio sdp3430_dss_gpios[] __initdata = {
+       {SDP3430_LCD_PANEL_ENABLE_GPIO,    GPIOF_OUT_INIT_LOW, "LCD reset"    },
+       {SDP3430_LCD_PANEL_BACKLIGHT_GPIO, GPIOF_OUT_INIT_LOW, "LCD Backlight"},
+};
+
 static int lcd_enabled;
 static int dvi_enabled;
 
@@ -187,29 +120,11 @@ static void __init sdp3430_display_init(void)
 {
        int r;
 
-       enable_gpio    = SDP3430_LCD_PANEL_ENABLE_GPIO;
-       backlight_gpio = SDP3430_LCD_PANEL_BACKLIGHT_GPIO;
-
-       r = gpio_request(enable_gpio, "LCD reset");
-       if (r) {
-               printk(KERN_ERR "failed to get LCD reset GPIO\n");
-               goto err0;
-       }
-
-       r = gpio_request(backlight_gpio, "LCD Backlight");
-       if (r) {
-               printk(KERN_ERR "failed to get LCD backlight GPIO\n");
-               goto err1;
-       }
-
-       gpio_direction_output(enable_gpio, 0);
-       gpio_direction_output(backlight_gpio, 0);
+       r = gpio_request_array(sdp3430_dss_gpios,
+                              ARRAY_SIZE(sdp3430_dss_gpios));
+       if (r)
+               printk(KERN_ERR "failed to get LCD control GPIOs\n");
 
-       return;
-err1:
-       gpio_free(enable_gpio);
-err0:
-       return;
 }
 
 static int sdp3430_panel_enable_lcd(struct omap_dss_device *dssdev)
@@ -219,8 +134,8 @@ static int sdp3430_panel_enable_lcd(struct omap_dss_device *dssdev)
                return -EINVAL;
        }
 
-       gpio_direction_output(enable_gpio, 1);
-       gpio_direction_output(backlight_gpio, 1);
+       gpio_direction_output(SDP3430_LCD_PANEL_ENABLE_GPIO, 1);
+       gpio_direction_output(SDP3430_LCD_PANEL_BACKLIGHT_GPIO, 1);
 
        lcd_enabled = 1;
 
@@ -231,8 +146,8 @@ static void sdp3430_panel_disable_lcd(struct omap_dss_device *dssdev)
 {
        lcd_enabled = 0;
 
-       gpio_direction_output(enable_gpio, 0);
-       gpio_direction_output(backlight_gpio, 0);
+       gpio_direction_output(SDP3430_LCD_PANEL_ENABLE_GPIO, 0);
+       gpio_direction_output(SDP3430_LCD_PANEL_BACKLIGHT_GPIO, 0);
 }
 
 static int sdp3430_panel_enable_dvi(struct omap_dss_device *dssdev)
@@ -360,12 +275,10 @@ static int sdp3430_twl_gpio_setup(struct device *dev,
        omap2_hsmmc_init(mmc);
 
        /* gpio + 7 is "sub_lcd_en_bkl" (output/PWM1) */
-       gpio_request(gpio + 7, "sub_lcd_en_bkl");
-       gpio_direction_output(gpio + 7, 0);
+       gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "sub_lcd_en_bkl");
 
        /* gpio + 15 is "sub_lcd_nRST" (output) */
-       gpio_request(gpio + 15, "sub_lcd_nRST");
-       gpio_direction_output(gpio + 15, 0);
+       gpio_request_one(gpio + 15, GPIOF_OUT_INIT_LOW, "sub_lcd_nRST");
 
        return 0;
 }
@@ -580,20 +493,10 @@ static struct twl4030_platform_data sdp3430_twldata = {
        .vpll2          = &sdp3430_vpll2,
 };
 
-static struct i2c_board_info __initdata sdp3430_i2c_boardinfo[] = {
-       {
-               I2C_BOARD_INFO("twl4030", 0x48),
-               .flags = I2C_CLIENT_WAKE,
-               .irq = INT_34XX_SYS_NIRQ,
-               .platform_data = &sdp3430_twldata,
-       },
-};
-
 static int __init omap3430_i2c_init(void)
 {
        /* i2c1 for PMIC only */
-       omap_register_i2c_bus(1, 2600, sdp3430_i2c_boardinfo,
-                       ARRAY_SIZE(sdp3430_i2c_boardinfo));
+       omap3_pmic_init("twl4030", &sdp3430_twldata);
        /* i2c2 on camera connector (for sensor control) and optional isp1301 */
        omap_register_i2c_bus(2, 400, NULL, 0);
        /* i2c3 on display connector (for DVI, tfp410) */
@@ -872,30 +775,22 @@ static struct flash_partitions sdp_flash_partitions[] = {
        },
 };
 
-static struct omap_musb_board_data musb_board_data = {
-       .interface_type         = MUSB_INTERFACE_ULPI,
-       .mode                   = MUSB_OTG,
-       .power                  = 100,
-};
-
 static void __init omap_3430sdp_init(void)
 {
+       int gpio_pendown;
+
        omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
        omap_board_config = sdp3430_config;
        omap_board_config_size = ARRAY_SIZE(sdp3430_config);
-       omap3_pm_init_cpuidle(omap3_cpuidle_params_table);
        omap3430_i2c_init();
        omap_display_init(&sdp3430_dss_data);
        if (omap_rev() > OMAP3430_REV_ES1_0)
-               ts_gpio = SDP3430_TS_GPIO_IRQ_SDPV2;
+               gpio_pendown = SDP3430_TS_GPIO_IRQ_SDPV2;
        else
-               ts_gpio = SDP3430_TS_GPIO_IRQ_SDPV1;
-       sdp3430_spi_board_info[0].irq = gpio_to_irq(ts_gpio);
-       spi_register_board_info(sdp3430_spi_board_info,
-                               ARRAY_SIZE(sdp3430_spi_board_info));
-       ads7846_dev_init();
+               gpio_pendown = SDP3430_TS_GPIO_IRQ_SDPV1;
+       omap_ads7846_init(1, gpio_pendown, 310, NULL);
        board_serial_init();
-       usb_musb_init(&musb_board_data);
+       usb_musb_init(NULL);
        board_smc91x_init();
        board_flash_init(sdp_flash_partitions, chip_sel_3430, 0);
        sdp3430_display_init();
index 93edd7fcf4512bbeebd40a9801b6982d317b5d77..73fa90bb6953dcffb47119328a0aa0f059ae6164 100644 (file)
@@ -42,6 +42,7 @@
 #include "hsmmc.h"
 #include "timer-gp.h"
 #include "control.h"
+#include "common-board-devices.h"
 
 #define ETH_KS8851_IRQ                 34
 #define ETH_KS8851_POWER_ON            48
@@ -251,58 +252,22 @@ static struct spi_board_info sdp4430_spi_board_info[] __initdata = {
        },
 };
 
+static struct gpio sdp4430_eth_gpios[] __initdata = {
+       { ETH_KS8851_POWER_ON,  GPIOF_OUT_INIT_HIGH,    "eth_power"     },
+       { ETH_KS8851_QUART,     GPIOF_OUT_INIT_HIGH,    "quart"         },
+       { ETH_KS8851_IRQ,       GPIOF_IN,               "eth_irq"       },
+};
+
 static int omap_ethernet_init(void)
 {
        int status;
 
        /* Request of GPIO lines */
+       status = gpio_request_array(sdp4430_eth_gpios,
+                                   ARRAY_SIZE(sdp4430_eth_gpios));
+       if (status)
+               pr_err("Cannot request ETH GPIOs\n");
 
-       status = gpio_request(ETH_KS8851_POWER_ON, "eth_power");
-       if (status) {
-               pr_err("Cannot request GPIO %d\n", ETH_KS8851_POWER_ON);
-               return status;
-       }
-
-       status = gpio_request(ETH_KS8851_QUART, "quart");
-       if (status) {
-               pr_err("Cannot request GPIO %d\n", ETH_KS8851_QUART);
-               goto error1;
-       }
-
-       status = gpio_request(ETH_KS8851_IRQ, "eth_irq");
-       if (status) {
-               pr_err("Cannot request GPIO %d\n", ETH_KS8851_IRQ);
-               goto error2;
-       }
-
-       /* Configuration of requested GPIO lines */
-
-       status = gpio_direction_output(ETH_KS8851_POWER_ON, 1);
-       if (status) {
-               pr_err("Cannot set output GPIO %d\n", ETH_KS8851_IRQ);
-               goto error3;
-       }
-
-       status = gpio_direction_output(ETH_KS8851_QUART, 1);
-       if (status) {
-               pr_err("Cannot set output GPIO %d\n", ETH_KS8851_QUART);
-               goto error3;
-       }
-
-       status = gpio_direction_input(ETH_KS8851_IRQ);
-       if (status) {
-               pr_err("Cannot set input GPIO %d\n", ETH_KS8851_IRQ);
-               goto error3;
-       }
-
-       return 0;
-
-error3:
-       gpio_free(ETH_KS8851_IRQ);
-error2:
-       gpio_free(ETH_KS8851_QUART);
-error1:
-       gpio_free(ETH_KS8851_POWER_ON);
        return status;
 }
 
@@ -575,14 +540,6 @@ static struct twl4030_platform_data sdp4430_twldata = {
        .usb            = &omap4_usbphy_data
 };
 
-static struct i2c_board_info __initdata sdp4430_i2c_boardinfo[] = {
-       {
-               I2C_BOARD_INFO("twl6030", 0x48),
-               .flags = I2C_CLIENT_WAKE,
-               .irq = OMAP44XX_IRQ_SYS_1N,
-               .platform_data = &sdp4430_twldata,
-       },
-};
 static struct i2c_board_info __initdata sdp4430_i2c_3_boardinfo[] = {
        {
                I2C_BOARD_INFO("tmp105", 0x48),
@@ -598,12 +555,7 @@ static struct i2c_board_info __initdata sdp4430_i2c_4_boardinfo[] = {
 };
 static int __init omap4_i2c_init(void)
 {
-       /*
-        * Phoenix Audio IC needs I2C1 to
-        * start with 400 KHz or less
-        */
-       omap_register_i2c_bus(1, 400, sdp4430_i2c_boardinfo,
-                       ARRAY_SIZE(sdp4430_i2c_boardinfo));
+       omap4_pmic_init("twl6030", &sdp4430_twldata);
        omap_register_i2c_bus(2, 400, NULL, 0);
        omap_register_i2c_bus(3, 400, sdp4430_i2c_3_boardinfo,
                                ARRAY_SIZE(sdp4430_i2c_3_boardinfo));
@@ -614,21 +566,13 @@ static int __init omap4_i2c_init(void)
 
 static void __init omap_sfh7741prox_init(void)
 {
-       int  error;
+       int error;
 
-       error = gpio_request(OMAP4_SFH7741_ENABLE_GPIO, "sfh7741");
-       if (error < 0) {
+       error = gpio_request_one(OMAP4_SFH7741_ENABLE_GPIO,
+                                GPIOF_OUT_INIT_LOW, "sfh7741");
+       if (error < 0)
                pr_err("%s:failed to request GPIO %d, error %d\n",
                        __func__, OMAP4_SFH7741_ENABLE_GPIO, error);
-               return;
-       }
-
-       error = gpio_direction_output(OMAP4_SFH7741_ENABLE_GPIO , 0);
-       if (error < 0) {
-               pr_err("%s: GPIO configuration failed: GPIO %d,error %d\n",
-                        __func__, OMAP4_SFH7741_ENABLE_GPIO, error);
-               gpio_free(OMAP4_SFH7741_ENABLE_GPIO);
-       }
 }
 
 static void sdp4430_hdmi_mux_init(void)
@@ -645,27 +589,19 @@ static void sdp4430_hdmi_mux_init(void)
                        OMAP_PIN_INPUT_PULLUP);
 }
 
+static struct gpio sdp4430_hdmi_gpios[] = {
+       { HDMI_GPIO_HPD,        GPIOF_OUT_INIT_HIGH,    "hdmi_gpio_hpd"   },
+       { HDMI_GPIO_LS_OE,      GPIOF_OUT_INIT_HIGH,    "hdmi_gpio_ls_oe" },
+};
+
 static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev)
 {
        int status;
 
-       status = gpio_request_one(HDMI_GPIO_HPD, GPIOF_OUT_INIT_HIGH,
-                                                       "hdmi_gpio_hpd");
-       if (status) {
-               pr_err("Cannot request GPIO %d\n", HDMI_GPIO_HPD);
-               return status;
-       }
-       status = gpio_request_one(HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH,
-                                                       "hdmi_gpio_ls_oe");
-       if (status) {
-               pr_err("Cannot request GPIO %d\n", HDMI_GPIO_LS_OE);
-               goto error1;
-       }
-
-       return 0;
-
-error1:
-       gpio_free(HDMI_GPIO_HPD);
+       status = gpio_request_array(sdp4430_hdmi_gpios,
+                                   ARRAY_SIZE(sdp4430_hdmi_gpios));
+       if (status)
+               pr_err("%s: Cannot request HDMI GPIOs\n", __func__);
 
        return status;
 }
index a890d244fec688fdb93b1e44a7d8e51b35b2a373..5e438a77cd726f35e5df2ea43df0788f8e34d16b 100644 (file)
@@ -89,19 +89,13 @@ static void __init am3517_crane_init(void)
                return;
        }
 
-       ret = gpio_request(GPIO_USB_POWER, "usb_ehci_enable");
+       ret = gpio_request_one(GPIO_USB_POWER, GPIOF_OUT_INIT_HIGH,
+                              "usb_ehci_enable");
        if (ret < 0) {
                pr_err("Can not request GPIO %d\n", GPIO_USB_POWER);
                return;
        }
 
-       ret = gpio_direction_output(GPIO_USB_POWER, 1);
-       if (ret < 0) {
-               gpio_free(GPIO_USB_POWER);
-               pr_err("Unable to initialize EHCI power\n");
-               return;
-       }
-
        usbhs_init(&usbhs_bdata);
 }
 
index ff8c59be36e5ff121fd6dcb0cda4efebfdf68da5..63af4171c0436d85c3d7ea35d6a21679380414fa 100644 (file)
@@ -174,19 +174,14 @@ static void __init am3517_evm_rtc_init(void)
        int r;
 
        omap_mux_init_gpio(GPIO_RTCS35390A_IRQ, OMAP_PIN_INPUT_PULLUP);
-       r = gpio_request(GPIO_RTCS35390A_IRQ, "rtcs35390a-irq");
+
+       r = gpio_request_one(GPIO_RTCS35390A_IRQ, GPIOF_IN, "rtcs35390a-irq");
        if (r < 0) {
                printk(KERN_WARNING "failed to request GPIO#%d\n",
                                GPIO_RTCS35390A_IRQ);
                return;
        }
-       r = gpio_direction_input(GPIO_RTCS35390A_IRQ);
-       if (r < 0) {
-               printk(KERN_WARNING "GPIO#%d cannot be configured as input\n",
-                               GPIO_RTCS35390A_IRQ);
-               gpio_free(GPIO_RTCS35390A_IRQ);
-               return;
-       }
+
        am3517evm_i2c1_boardinfo[0].irq = gpio_to_irq(GPIO_RTCS35390A_IRQ);
 }
 
@@ -242,6 +237,15 @@ static int dvi_enabled;
 
 #if defined(CONFIG_PANEL_SHARP_LQ043T1DG01) || \
                defined(CONFIG_PANEL_SHARP_LQ043T1DG01_MODULE)
+static struct gpio am3517_evm_dss_gpios[] __initdata = {
+       /* GPIO 182 = LCD Backlight Power */
+       { LCD_PANEL_BKLIGHT_PWR, GPIOF_OUT_INIT_HIGH, "lcd_backlight_pwr" },
+       /* GPIO 181 = LCD Panel PWM */
+       { LCD_PANEL_PWM,         GPIOF_OUT_INIT_HIGH, "lcd bl enable"     },
+       /* GPIO 176 = LCD Panel Power enable pin */
+       { LCD_PANEL_PWR,         GPIOF_OUT_INIT_HIGH, "dvi enable"        },
+};
+
 static void __init am3517_evm_display_init(void)
 {
        int r;
@@ -249,41 +253,15 @@ static void __init am3517_evm_display_init(void)
        omap_mux_init_gpio(LCD_PANEL_PWR, OMAP_PIN_INPUT_PULLUP);
        omap_mux_init_gpio(LCD_PANEL_BKLIGHT_PWR, OMAP_PIN_INPUT_PULLDOWN);
        omap_mux_init_gpio(LCD_PANEL_PWM, OMAP_PIN_INPUT_PULLDOWN);
-       /*
-        * Enable GPIO 182 = LCD Backlight Power
-        */
-       r = gpio_request(LCD_PANEL_BKLIGHT_PWR, "lcd_backlight_pwr");
+
+       r = gpio_request_array(am3517_evm_dss_gpios,
+                              ARRAY_SIZE(am3517_evm_dss_gpios));
        if (r) {
-               printk(KERN_ERR "failed to get lcd_backlight_pwr\n");
+               printk(KERN_ERR "failed to get DSS panel control GPIOs\n");
                return;
        }
-       gpio_direction_output(LCD_PANEL_BKLIGHT_PWR, 1);
-       /*
-        * Enable GPIO 181 = LCD Panel PWM
-        */
-       r = gpio_request(LCD_PANEL_PWM, "lcd_pwm");
-       if (r) {
-               printk(KERN_ERR "failed to get lcd_pwm\n");
-               goto err_1;
-       }
-       gpio_direction_output(LCD_PANEL_PWM, 1);
-       /*
-        * Enable GPIO 176 = LCD Panel Power enable pin
-        */
-       r = gpio_request(LCD_PANEL_PWR, "lcd_panel_pwr");
-       if (r) {
-               printk(KERN_ERR "failed to get lcd_panel_pwr\n");
-               goto err_2;
-       }
-       gpio_direction_output(LCD_PANEL_PWR, 1);
 
        printk(KERN_INFO "Display initialized successfully\n");
-       return;
-
-err_2:
-       gpio_free(LCD_PANEL_PWM);
-err_1:
-       gpio_free(LCD_PANEL_BKLIGHT_PWR);
 }
 #else
 static void __init am3517_evm_display_init(void) {}
@@ -396,7 +374,7 @@ static struct omap_musb_board_data musb_board_data = {
        .power                  = 500,
        .set_phy_power          = am35x_musb_phy_power,
        .clear_irq              = am35x_musb_clear_irq,
-       .set_mode               = am35x_musb_set_mode,
+       .set_mode               = am35x_set_mode,
        .reset                  = am35x_musb_reset,
 };
 
index f4f8374a02982d59b3bf4f1527834e15eabbc23b..f3beb8eeef77a2a2cc1c2d229f0d17420041460a 100644 (file)
@@ -202,6 +202,7 @@ static inline void __init apollon_init_smc91x(void)
        unsigned int rate;
        struct clk *gpmc_fck;
        int eth_cs;
+       int err;
 
        gpmc_fck = clk_get(NULL, "gpmc_fck");   /* Always on ENABLE_ON_INIT */
        if (IS_ERR(gpmc_fck)) {
@@ -245,15 +246,13 @@ static inline void __init apollon_init_smc91x(void)
        apollon_smc91x_resources[0].end   = base + 0x30f;
        udelay(100);
 
-       omap_mux_init_gpio(74, 0);
-       if (gpio_request(APOLLON_ETHR_GPIO_IRQ, "SMC91x irq") < 0) {
+       omap_mux_init_gpio(APOLLON_ETHR_GPIO_IRQ, 0);
+       err = gpio_request_one(APOLLON_ETHR_GPIO_IRQ, GPIOF_IN, "SMC91x irq");
+       if (err) {
                printk(KERN_ERR "Failed to request GPIO%d for smc91x IRQ\n",
                        APOLLON_ETHR_GPIO_IRQ);
                gpmc_cs_free(APOLLON_ETH_CS);
-               goto out;
        }
-       gpio_direction_input(APOLLON_ETHR_GPIO_IRQ);
-
 out:
        clk_disable(gpmc_fck);
        clk_put(gpmc_fck);
@@ -280,20 +279,19 @@ static void __init omap_apollon_init_early(void)
        omap2_init_common_devices(NULL, NULL);
 }
 
+static struct gpio apollon_gpio_leds[] __initdata = {
+       { LED0_GPIO13, GPIOF_OUT_INIT_LOW, "LED0" }, /* LED0 - AA10 */
+       { LED1_GPIO14, GPIOF_OUT_INIT_LOW, "LED1" }, /* LED1 - AA6  */
+       { LED2_GPIO15, GPIOF_OUT_INIT_LOW, "LED2" }, /* LED2 - AA4  */
+};
+
 static void __init apollon_led_init(void)
 {
-       /* LED0 - AA10 */
        omap_mux_init_signal("vlynq_clk.gpio_13", 0);
-       gpio_request(LED0_GPIO13, "LED0");
-       gpio_direction_output(LED0_GPIO13, 0);
-       /* LED1  - AA6 */
        omap_mux_init_signal("vlynq_rx1.gpio_14", 0);
-       gpio_request(LED1_GPIO14, "LED1");
-       gpio_direction_output(LED1_GPIO14, 0);
-       /* LED2  - AA4 */
        omap_mux_init_signal("vlynq_rx0.gpio_15", 0);
-       gpio_request(LED2_GPIO15, "LED2");
-       gpio_direction_output(LED2_GPIO15, 0);
+
+       gpio_request_array(apollon_gpio_leds, ARRAY_SIZE(apollon_gpio_leds));
 }
 
 static void __init apollon_usb_init(void)
@@ -301,8 +299,7 @@ static void __init apollon_usb_init(void)
        /* USB device */
        /* DEVICE_SUSPEND */
        omap_mux_init_signal("mcbsp2_clkx.gpio_12", 0);
-       gpio_request(12, "USB suspend");
-       gpio_direction_output(12, 0);
+       gpio_request_one(12, GPIOF_OUT_INIT_LOW, "USB suspend");
        omap2_usbfs_init(&apollon_usb_config);
 }
 
index 9340f6a06f4a0a218bc56beb91508e9d22cdd0b6..c63115bc15368d7e953b3b784562f4a22c612ad2 100644 (file)
@@ -54,6 +54,7 @@
 #include "mux.h"
 #include "sdram-micron-mt46h32m32lf-6.h"
 #include "hsmmc.h"
+#include "common-board-devices.h"
 
 #define CM_T35_GPIO_PENDOWN    57
 
 
 #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
 #include <linux/smsc911x.h>
+#include <plat/gpmc-smsc911x.h>
 
-static struct smsc911x_platform_config cm_t35_smsc911x_config = {
-       .irq_polarity   = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
-       .irq_type       = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
-       .flags          = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
-       .phy_interface  = PHY_INTERFACE_MODE_MII,
-};
-
-static struct resource cm_t35_smsc911x_resources[] = {
-       {
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .start  = OMAP_GPIO_IRQ(CM_T35_SMSC911X_GPIO),
-               .end    = OMAP_GPIO_IRQ(CM_T35_SMSC911X_GPIO),
-               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
-       },
-};
-
-static struct platform_device cm_t35_smsc911x_device = {
-       .name           = "smsc911x",
+static struct omap_smsc911x_platform_data cm_t35_smsc911x_cfg = {
        .id             = 0,
-       .num_resources  = ARRAY_SIZE(cm_t35_smsc911x_resources),
-       .resource       = cm_t35_smsc911x_resources,
-       .dev            = {
-               .platform_data = &cm_t35_smsc911x_config,
-       },
-};
-
-static struct resource sb_t35_smsc911x_resources[] = {
-       {
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .start  = OMAP_GPIO_IRQ(SB_T35_SMSC911X_GPIO),
-               .end    = OMAP_GPIO_IRQ(SB_T35_SMSC911X_GPIO),
-               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
-       },
+       .cs             = CM_T35_SMSC911X_CS,
+       .gpio_irq       = CM_T35_SMSC911X_GPIO,
+       .gpio_reset     = -EINVAL,
+       .flags          = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
 };
 
-static struct platform_device sb_t35_smsc911x_device = {
-       .name           = "smsc911x",
+static struct omap_smsc911x_platform_data sb_t35_smsc911x_cfg = {
        .id             = 1,
-       .num_resources  = ARRAY_SIZE(sb_t35_smsc911x_resources),
-       .resource       = sb_t35_smsc911x_resources,
-       .dev            = {
-               .platform_data = &cm_t35_smsc911x_config,
-       },
+       .cs             = SB_T35_SMSC911X_CS,
+       .gpio_irq       = SB_T35_SMSC911X_GPIO,
+       .gpio_reset     = -EINVAL,
+       .flags          = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
 };
 
-static void __init cm_t35_init_smsc911x(struct platform_device *dev,
-                                       int cs, int irq_gpio)
-{
-       unsigned long cs_mem_base;
-
-       if (gpmc_cs_request(cs, SZ_16M, &cs_mem_base) < 0) {
-               pr_err("CM-T35: Failed request for GPMC mem for smsc911x\n");
-               return;
-       }
-
-       dev->resource[0].start = cs_mem_base + 0x0;
-       dev->resource[0].end   = cs_mem_base + 0xff;
-
-       if ((gpio_request(irq_gpio, "ETH IRQ") == 0) &&
-           (gpio_direction_input(irq_gpio) == 0)) {
-               gpio_export(irq_gpio, 0);
-       } else {
-               pr_err("CM-T35: could not obtain gpio for SMSC911X IRQ\n");
-               return;
-       }
-
-       platform_device_register(dev);
-}
-
 static void __init cm_t35_init_ethernet(void)
 {
-       cm_t35_init_smsc911x(&cm_t35_smsc911x_device,
-                            CM_T35_SMSC911X_CS, CM_T35_SMSC911X_GPIO);
-       cm_t35_init_smsc911x(&sb_t35_smsc911x_device,
-                            SB_T35_SMSC911X_CS, SB_T35_SMSC911X_GPIO);
+       gpmc_smsc911x_init(&cm_t35_smsc911x_cfg);
+       gpmc_smsc911x_init(&sb_t35_smsc911x_cfg);
 }
 #else
 static inline void __init cm_t35_init_ethernet(void) { return; }
@@ -235,69 +178,10 @@ static void __init cm_t35_init_nand(void)
 static inline void cm_t35_init_nand(void) {}
 #endif
 
-#if defined(CONFIG_TOUCHSCREEN_ADS7846) || \
-       defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
-#include <linux/spi/ads7846.h>
-
-static struct omap2_mcspi_device_config ads7846_mcspi_config = {
-       .turbo_mode     = 0,
-       .single_channel = 1,    /* 0: slave, 1: master */
-};
-
-static int ads7846_get_pendown_state(void)
-{
-       return !gpio_get_value(CM_T35_GPIO_PENDOWN);
-}
-
-static struct ads7846_platform_data ads7846_config = {
-       .x_max                  = 0x0fff,
-       .y_max                  = 0x0fff,
-       .x_plate_ohms           = 180,
-       .pressure_max           = 255,
-       .debounce_max           = 10,
-       .debounce_tol           = 3,
-       .debounce_rep           = 1,
-       .get_pendown_state      = ads7846_get_pendown_state,
-       .keep_vref_on           = 1,
-};
-
-static struct spi_board_info cm_t35_spi_board_info[] __initdata = {
-       {
-               .modalias               = "ads7846",
-               .bus_num                = 1,
-               .chip_select            = 0,
-               .max_speed_hz           = 1500000,
-               .controller_data        = &ads7846_mcspi_config,
-               .irq                    = OMAP_GPIO_IRQ(CM_T35_GPIO_PENDOWN),
-               .platform_data          = &ads7846_config,
-       },
-};
-
-static void __init cm_t35_init_ads7846(void)
-{
-       if ((gpio_request(CM_T35_GPIO_PENDOWN, "ADS7846_PENDOWN") == 0) &&
-           (gpio_direction_input(CM_T35_GPIO_PENDOWN) == 0)) {
-               gpio_export(CM_T35_GPIO_PENDOWN, 0);
-       } else {
-               pr_err("CM-T35: could not obtain gpio for ADS7846_PENDOWN\n");
-               return;
-       }
-
-       spi_register_board_info(cm_t35_spi_board_info,
-                               ARRAY_SIZE(cm_t35_spi_board_info));
-}
-#else
-static inline void cm_t35_init_ads7846(void) {}
-#endif
-
 #define CM_T35_LCD_EN_GPIO 157
 #define CM_T35_LCD_BL_GPIO 58
 #define CM_T35_DVI_EN_GPIO 54
 
-static int lcd_bl_gpio;
-static int lcd_en_gpio;
-static int dvi_en_gpio;
-
 static int lcd_enabled;
 static int dvi_enabled;
 
@@ -308,8 +192,8 @@ static int cm_t35_panel_enable_lcd(struct omap_dss_device *dssdev)
                return -EINVAL;
        }
 
-       gpio_set_value(lcd_en_gpio, 1);
-       gpio_set_value(lcd_bl_gpio, 1);
+       gpio_set_value(CM_T35_LCD_EN_GPIO, 1);
+       gpio_set_value(CM_T35_LCD_BL_GPIO, 1);
 
        lcd_enabled = 1;
 
@@ -320,8 +204,8 @@ static void cm_t35_panel_disable_lcd(struct omap_dss_device *dssdev)
 {
        lcd_enabled = 0;
 
-       gpio_set_value(lcd_bl_gpio, 0);
-       gpio_set_value(lcd_en_gpio, 0);
+       gpio_set_value(CM_T35_LCD_BL_GPIO, 0);
+       gpio_set_value(CM_T35_LCD_EN_GPIO, 0);
 }
 
 static int cm_t35_panel_enable_dvi(struct omap_dss_device *dssdev)
@@ -331,7 +215,7 @@ static int cm_t35_panel_enable_dvi(struct omap_dss_device *dssdev)
                return -EINVAL;
        }
 
-       gpio_set_value(dvi_en_gpio, 0);
+       gpio_set_value(CM_T35_DVI_EN_GPIO, 0);
        dvi_enabled = 1;
 
        return 0;
@@ -339,7 +223,7 @@ static int cm_t35_panel_enable_dvi(struct omap_dss_device *dssdev)
 
 static void cm_t35_panel_disable_dvi(struct omap_dss_device *dssdev)
 {
-       gpio_set_value(dvi_en_gpio, 1);
+       gpio_set_value(CM_T35_DVI_EN_GPIO, 1);
        dvi_enabled = 0;
 }
 
@@ -421,62 +305,38 @@ static struct spi_board_info cm_t35_lcd_spi_board_info[] __initdata = {
        },
 };
 
+static struct gpio cm_t35_dss_gpios[] __initdata = {
+       { CM_T35_LCD_EN_GPIO, GPIOF_OUT_INIT_LOW,  "lcd enable"    },
+       { CM_T35_LCD_BL_GPIO, GPIOF_OUT_INIT_LOW,  "lcd bl enable" },
+       { CM_T35_DVI_EN_GPIO, GPIOF_OUT_INIT_HIGH, "dvi enable"    },
+};
+
 static void __init cm_t35_init_display(void)
 {
        int err;
 
-       lcd_en_gpio = CM_T35_LCD_EN_GPIO;
-       lcd_bl_gpio = CM_T35_LCD_BL_GPIO;
-       dvi_en_gpio = CM_T35_DVI_EN_GPIO;
-
        spi_register_board_info(cm_t35_lcd_spi_board_info,
                                ARRAY_SIZE(cm_t35_lcd_spi_board_info));
 
-       err = gpio_request(lcd_en_gpio, "LCD RST");
-       if (err) {
-               pr_err("CM-T35: failed to get LCD reset GPIO\n");
-               goto out;
-       }
-
-       err = gpio_request(lcd_bl_gpio, "LCD BL");
+       err = gpio_request_array(cm_t35_dss_gpios,
+                                ARRAY_SIZE(cm_t35_dss_gpios));
        if (err) {
-               pr_err("CM-T35: failed to get LCD backlight control GPIO\n");
-               goto err_lcd_bl;
-       }
-
-       err = gpio_request(dvi_en_gpio, "DVI EN");
-       if (err) {
-               pr_err("CM-T35: failed to get DVI reset GPIO\n");
-               goto err_dvi_en;
+               pr_err("CM-T35: failed to request DSS control GPIOs\n");
+               return;
        }
 
-       gpio_export(lcd_en_gpio, 0);
-       gpio_export(lcd_bl_gpio, 0);
-       gpio_export(dvi_en_gpio, 0);
-       gpio_direction_output(lcd_en_gpio, 0);
-       gpio_direction_output(lcd_bl_gpio, 0);
-       gpio_direction_output(dvi_en_gpio, 1);
+       gpio_export(CM_T35_LCD_EN_GPIO, 0);
+       gpio_export(CM_T35_LCD_BL_GPIO, 0);
+       gpio_export(CM_T35_DVI_EN_GPIO, 0);
 
        msleep(50);
-       gpio_set_value(lcd_en_gpio, 1);
+       gpio_set_value(CM_T35_LCD_EN_GPIO, 1);
 
        err = omap_display_init(&cm_t35_dss_data);
        if (err) {
                pr_err("CM-T35: failed to register DSS device\n");
-               goto err_dev_reg;
+               gpio_free_array(cm_t35_dss_gpios, ARRAY_SIZE(cm_t35_dss_gpios));
        }
-
-       return;
-
-err_dev_reg:
-       gpio_free(dvi_en_gpio);
-err_dvi_en:
-       gpio_free(lcd_bl_gpio);
-err_lcd_bl:
-       gpio_free(lcd_en_gpio);
-out:
-
-       return;
 }
 
 static struct regulator_consumer_supply cm_t35_vmmc1_supply = {
@@ -609,10 +469,8 @@ static int cm_t35_twl_gpio_setup(struct device *dev, unsigned gpio,
 {
        int wlan_rst = gpio + 2;
 
-       if ((gpio_request(wlan_rst, "WLAN RST") == 0) &&
-           (gpio_direction_output(wlan_rst, 1) == 0)) {
+       if (gpio_request_one(wlan_rst, GPIOF_OUT_INIT_HIGH, "WLAN RST") == 0) {
                gpio_export(wlan_rst, 0);
-
                udelay(10);
                gpio_set_value(wlan_rst, 0);
                udelay(10);
@@ -653,19 +511,9 @@ static struct twl4030_platform_data cm_t35_twldata = {
        .vpll2          = &cm_t35_vpll2,
 };
 
-static struct i2c_board_info __initdata cm_t35_i2c_boardinfo[] = {
-       {
-               I2C_BOARD_INFO("tps65930", 0x48),
-               .flags          = I2C_CLIENT_WAKE,
-               .irq            = INT_34XX_SYS_NIRQ,
-               .platform_data  = &cm_t35_twldata,
-       },
-};
-
 static void __init cm_t35_init_i2c(void)
 {
-       omap_register_i2c_bus(1, 2600, cm_t35_i2c_boardinfo,
-                             ARRAY_SIZE(cm_t35_i2c_boardinfo));
+       omap3_pmic_init("tps65930", &cm_t35_twldata);
 }
 
 static void __init cm_t35_init_early(void)
@@ -775,12 +623,6 @@ static struct omap_board_mux board_mux[] __initdata = {
 };
 #endif
 
-static struct omap_musb_board_data musb_board_data = {
-       .interface_type         = MUSB_INTERFACE_ULPI,
-       .mode                   = MUSB_OTG,
-       .power                  = 100,
-};
-
 static struct omap_board_config_kernel cm_t35_config[] __initdata = {
 };
 
@@ -792,12 +634,12 @@ static void __init cm_t35_init(void)
        omap_serial_init();
        cm_t35_init_i2c();
        cm_t35_init_nand();
-       cm_t35_init_ads7846();
+       omap_ads7846_init(1, CM_T35_GPIO_PENDOWN, 0, NULL);
        cm_t35_init_ethernet();
        cm_t35_init_led();
        cm_t35_init_display();
 
-       usb_musb_init(&musb_board_data);
+       usb_musb_init(NULL);
        usbhs_init(&usbhs_bdata);
 }
 
index a27e3eee829259e7c7f608200f593a0d1400f385..08f08e812492920e24cb79d8f04eec5d3cc88659 100644 (file)
@@ -148,14 +148,13 @@ static void __init cm_t3517_init_rtc(void)
 {
        int err;
 
-       err = gpio_request(RTC_CS_EN_GPIO, "rtc cs en");
+       err = gpio_request_one(RTC_CS_EN_GPIO, GPIOF_OUT_INIT_HIGH,
+                              "rtc cs en");
        if (err) {
                pr_err("CM-T3517: rtc cs en gpio request failed: %d\n", err);
                return;
        }
 
-       gpio_direction_output(RTC_CS_EN_GPIO, 1);
-
        platform_device_register(&cm_t3517_rtc_device);
 }
 #else
@@ -182,11 +181,11 @@ static int cm_t3517_init_usbh(void)
 {
        int err;
 
-       err = gpio_request(USB_HUB_RESET_GPIO, "usb hub rst");
+       err = gpio_request_one(USB_HUB_RESET_GPIO, GPIOF_OUT_INIT_LOW,
+                              "usb hub rst");
        if (err) {
                pr_err("CM-T3517: usb hub rst gpio request failed: %d\n", err);
        } else {
-               gpio_direction_output(USB_HUB_RESET_GPIO, 0);
                udelay(10);
                gpio_set_value(USB_HUB_RESET_GPIO, 1);
                msleep(1);
index 1d1b56a29fb1cf89b2f506ede3304e953cdf64dc..cf520d7dd614a999879ebdb64ff1bda2c6d2ba44 100644 (file)
@@ -51,7 +51,6 @@
 #include <plat/mcspi.h>
 #include <linux/input/matrix_keypad.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/ads7846.h>
 #include <linux/dm9000.h>
 #include <linux/interrupt.h>
 
@@ -60,6 +59,7 @@
 #include "mux.h"
 #include "hsmmc.h"
 #include "timer-gp.h"
+#include "common-board-devices.h"
 
 #define NAND_BLOCK_SIZE                SZ_128K
 
@@ -97,13 +97,6 @@ static struct mtd_partition devkit8000_nand_partitions[] = {
        },
 };
 
-static struct omap_nand_platform_data devkit8000_nand_data = {
-       .options        = NAND_BUSWIDTH_16,
-       .parts          = devkit8000_nand_partitions,
-       .nr_parts       = ARRAY_SIZE(devkit8000_nand_partitions),
-       .dma_channel    = -1,           /* disable DMA in OMAP NAND driver */
-};
-
 static struct omap2_hsmmc_info mmc[] = {
        {
                .mmc            = 1,
@@ -249,7 +242,7 @@ static int devkit8000_twl_gpio_setup(struct device *dev,
        /* TWL4030_GPIO_MAX + 0 is "LCD_PWREN" (out, active high) */
        devkit8000_lcd_device.reset_gpio = gpio + TWL4030_GPIO_MAX + 0;
        ret = gpio_request_one(devkit8000_lcd_device.reset_gpio,
-                       GPIOF_DIR_OUT | GPIOF_INIT_LOW, "LCD_PWREN");
+                              GPIOF_OUT_INIT_LOW, "LCD_PWREN");
        if (ret < 0) {
                devkit8000_lcd_device.reset_gpio = -EINVAL;
                printk(KERN_ERR "Failed to request GPIO for LCD_PWRN\n");
@@ -258,7 +251,7 @@ static int devkit8000_twl_gpio_setup(struct device *dev,
        /* gpio + 7 is "DVI_PD" (out, active low) */
        devkit8000_dvi_device.reset_gpio = gpio + 7;
        ret = gpio_request_one(devkit8000_dvi_device.reset_gpio,
-                       GPIOF_DIR_OUT | GPIOF_INIT_LOW, "DVI PowerDown");
+                              GPIOF_OUT_INIT_LOW, "DVI PowerDown");
        if (ret < 0) {
                devkit8000_dvi_device.reset_gpio = -EINVAL;
                printk(KERN_ERR "Failed to request GPIO for DVI PowerDown\n");
@@ -366,19 +359,9 @@ static struct twl4030_platform_data devkit8000_twldata = {
        .keypad         = &devkit8000_kp_data,
 };
 
-static struct i2c_board_info __initdata devkit8000_i2c_boardinfo[] = {
-       {
-               I2C_BOARD_INFO("tps65930", 0x48),
-               .flags = I2C_CLIENT_WAKE,
-               .irq = INT_34XX_SYS_NIRQ,
-               .platform_data = &devkit8000_twldata,
-       },
-};
-
 static int __init devkit8000_i2c_init(void)
 {
-       omap_register_i2c_bus(1, 2600, devkit8000_i2c_boardinfo,
-                       ARRAY_SIZE(devkit8000_i2c_boardinfo));
+       omap3_pmic_init("tps65930", &devkit8000_twldata);
        /* Bus 3 is attached to the DVI port where devices like the pico DLP
         * projector don't work reliably with 400kHz */
        omap_register_i2c_bus(3, 400, NULL, 0);
@@ -463,56 +446,6 @@ static void __init devkit8000_init_irq(void)
 #endif
 }
 
-static void __init devkit8000_ads7846_init(void)
-{
-       int gpio = OMAP3_DEVKIT_TS_GPIO;
-       int ret;
-
-       ret = gpio_request(gpio, "ads7846_pen_down");
-       if (ret < 0) {
-               printk(KERN_ERR "Failed to request GPIO %d for "
-                               "ads7846 pen down IRQ\n", gpio);
-               return;
-       }
-
-       gpio_direction_input(gpio);
-}
-
-static int ads7846_get_pendown_state(void)
-{
-       return !gpio_get_value(OMAP3_DEVKIT_TS_GPIO);
-}
-
-static struct ads7846_platform_data ads7846_config = {
-       .x_max                  = 0x0fff,
-       .y_max                  = 0x0fff,
-       .x_plate_ohms           = 180,
-       .pressure_max           = 255,
-       .debounce_max           = 10,
-       .debounce_tol           = 5,
-       .debounce_rep           = 1,
-       .get_pendown_state      = ads7846_get_pendown_state,
-       .keep_vref_on           = 1,
-       .settle_delay_usecs     = 150,
-};
-
-static struct omap2_mcspi_device_config ads7846_mcspi_config = {
-       .turbo_mode     = 0,
-       .single_channel = 1,    /* 0: slave, 1: master */
-};
-
-static struct spi_board_info devkit8000_spi_board_info[] __initdata = {
-       {
-               .modalias               = "ads7846",
-               .bus_num                = 2,
-               .chip_select            = 0,
-               .max_speed_hz           = 1500000,
-               .controller_data        = &ads7846_mcspi_config,
-               .irq                    = OMAP_GPIO_IRQ(OMAP3_DEVKIT_TS_GPIO),
-               .platform_data          = &ads7846_config,
-       }
-};
-
 #define OMAP_DM9000_BASE       0x2c000000
 
 static struct resource omap_dm9000_resources[] = {
@@ -550,14 +483,14 @@ static void __init omap_dm9000_init(void)
 {
        unsigned char *eth_addr = omap_dm9000_platdata.dev_addr;
        struct omap_die_id odi;
+       int ret;
 
-       if (gpio_request(OMAP_DM9000_GPIO_IRQ, "dm9000 irq") < 0) {
+       ret = gpio_request_one(OMAP_DM9000_GPIO_IRQ, GPIOF_IN, "dm9000 irq");
+       if (ret < 0) {
                printk(KERN_ERR "Failed to request GPIO%d for dm9000 IRQ\n",
                        OMAP_DM9000_GPIO_IRQ);
                return;
-               }
-
-       gpio_direction_input(OMAP_DM9000_GPIO_IRQ);
+       }
 
        /* init the mac address using DIE id */
        omap_get_die_id(&odi);
@@ -576,45 +509,6 @@ static struct platform_device *devkit8000_devices[] __initdata = {
        &omap_dm9000_dev,
 };
 
-static void __init devkit8000_flash_init(void)
-{
-       u8 cs = 0;
-       u8 nandcs = GPMC_CS_NUM + 1;
-
-       /* find out the chip-select on which NAND exists */
-       while (cs < GPMC_CS_NUM) {
-               u32 ret = 0;
-               ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
-
-               if ((ret & 0xC00) == 0x800) {
-                       printk(KERN_INFO "Found NAND on CS%d\n", cs);
-                       if (nandcs > GPMC_CS_NUM)
-                               nandcs = cs;
-               }
-               cs++;
-       }
-
-       if (nandcs > GPMC_CS_NUM) {
-               printk(KERN_INFO "NAND: Unable to find configuration "
-                                "in GPMC\n ");
-               return;
-       }
-
-       if (nandcs < GPMC_CS_NUM) {
-               devkit8000_nand_data.cs = nandcs;
-
-               printk(KERN_INFO "Registering NAND on CS%d\n", nandcs);
-               if (gpmc_nand_init(&devkit8000_nand_data) < 0)
-                       printk(KERN_ERR "Unable to register NAND device\n");
-       }
-}
-
-static struct omap_musb_board_data musb_board_data = {
-       .interface_type         = MUSB_INTERFACE_ULPI,
-       .mode                   = MUSB_OTG,
-       .power                  = 100,
-};
-
 static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
 
        .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
@@ -795,14 +689,13 @@ static void __init devkit8000_init(void)
                        ARRAY_SIZE(devkit8000_devices));
 
        omap_display_init(&devkit8000_dss_data);
-       spi_register_board_info(devkit8000_spi_board_info,
-       ARRAY_SIZE(devkit8000_spi_board_info));
 
-       devkit8000_ads7846_init();
+       omap_ads7846_init(2, OMAP3_DEVKIT_TS_GPIO, 0, NULL);
 
-       usb_musb_init(&musb_board_data);
+       usb_musb_init(NULL);
        usbhs_init(&usbhs_bdata);
-       devkit8000_flash_init();
+       omap_nand_flash_init(NAND_BUSWIDTH_16, devkit8000_nand_partitions,
+                            ARRAY_SIZE(devkit8000_nand_partitions));
 
        /* Ensure SDRC pins are mux'd for self-refresh */
        omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
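
The devkit8000 changes trade per-board boilerplate for the new common-board-devices helpers: omap3_pmic_init() stands in for the tps65930 i2c_board_info block and its bus-1/2600 kHz registration, omap_ads7846_init() for roughly fifty lines of pen-down GPIO, ads7846_platform_data, McSPI config and spi_board_info, and omap_nand_flash_init() for the hand-rolled GPMC chip-select probe. Judging only from the call sites in this diff, omap_ads7846_init() takes the SPI bus number, the pen-down GPIO, a debounce time and an optional platform-data override. A sketch of a trimmed board init built on those helpers; every example_* symbol is a placeholder, not a real devkit8000 object:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>
#include <linux/i2c/twl.h>

#include <plat/usb.h>

#include "common-board-devices.h"

#define EXAMPLE_TS_GPIO         27      /* hypothetical pen-down GPIO */

static struct twl4030_platform_data example_twldata;    /* filled in elsewhere */
static struct mtd_partition example_nand_parts[] = {
        { .name = "example", .offset = 0, .size = MTDPART_SIZ_FULL },
};

static void __init example_board_init(void)
{
        /* Replaces the bus-1, 2600 kHz registration plus i2c_board_info. */
        omap3_pmic_init("tps65930", &example_twldata);

        /* Touchscreen on McSPI bus 2, no extra debounce, default pdata. */
        omap_ads7846_init(2, EXAMPLE_TS_GPIO, 0, NULL);

        /* 16-bit NAND; chip-select probing now lives in the helper. */
        omap_nand_flash_init(NAND_BUSWIDTH_16, example_nand_parts,
                             ARRAY_SIZE(example_nand_parts));

        /* MUSB with the defaults the removed per-board data carried. */
        usb_musb_init(NULL);
}
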
index 3da64d361651a9077e84a990bcccbb4e1a297b3d..0c1bfca3f731cfdd4b8a9fa7aaa0793cca01e18b 100644 (file)
@@ -38,6 +38,7 @@
 #include "mux.h"
 #include "hsmmc.h"
 #include "sdram-numonyx-m65kxxxxam.h"
+#include "common-board-devices.h"
 
 #define IGEP2_SMSC911X_CS       5
 #define IGEP2_SMSC911X_GPIO     176
 #define IGEP2_RC_GPIO_WIFI_NRESET  139
 #define IGEP2_RC_GPIO_BT_NRESET    137
 
+#define IGEP3_GPIO_LED0_GREEN  54
+#define IGEP3_GPIO_LED0_RED    53
+#define IGEP3_GPIO_LED1_RED    16
+#define IGEP3_GPIO_USBH_NRESET  183
+
 /*
  * IGEP2 Hardware Revision Table
  *
@@ -68,6 +74,7 @@
 
 #define IGEP2_BOARD_HWREV_B    0
 #define IGEP2_BOARD_HWREV_C    1
+#define IGEP3_BOARD_HWREV      2
 
 static u8 hwrev;
 
@@ -75,24 +82,29 @@ static void __init igep2_get_revision(void)
 {
        u8 ret;
 
+       if (machine_is_igep0030()) {
+               hwrev = IGEP3_BOARD_HWREV;
+               return;
+       }
+
        omap_mux_init_gpio(IGEP2_GPIO_LED1_RED, OMAP_PIN_INPUT);
 
-       if ((gpio_request(IGEP2_GPIO_LED1_RED, "GPIO_HW0_REV") == 0) &&
-           (gpio_direction_input(IGEP2_GPIO_LED1_RED) == 0)) {
-               ret = gpio_get_value(IGEP2_GPIO_LED1_RED);
-               if (ret == 0) {
-                       pr_info("IGEP2: Hardware Revision C (B-NON compatible)\n");
-                       hwrev = IGEP2_BOARD_HWREV_C;
-               } else if (ret ==  1) {
-                       pr_info("IGEP2: Hardware Revision B/C (B compatible)\n");
-                       hwrev = IGEP2_BOARD_HWREV_B;
-               } else {
-                       pr_err("IGEP2: Unknown Hardware Revision\n");
-                       hwrev = -1;
-               }
-       } else {
+       if (gpio_request_one(IGEP2_GPIO_LED1_RED, GPIOF_IN, "GPIO_HW0_REV")) {
                pr_warning("IGEP2: Could not obtain gpio GPIO_HW0_REV\n");
                pr_err("IGEP2: Unknown Hardware Revision\n");
+               return;
+       }
+
+       ret = gpio_get_value(IGEP2_GPIO_LED1_RED);
+       if (ret == 0) {
+               pr_info("IGEP2: Hardware Revision C (B-NON compatible)\n");
+               hwrev = IGEP2_BOARD_HWREV_C;
+       } else if (ret ==  1) {
+               pr_info("IGEP2: Hardware Revision B/C (B compatible)\n");
+               hwrev = IGEP2_BOARD_HWREV_B;
+       } else {
+               pr_err("IGEP2: Unknown Hardware Revision\n");
+               hwrev = -1;
        }
 
        gpio_free(IGEP2_GPIO_LED1_RED);
@@ -111,7 +123,7 @@ static void __init igep2_get_revision(void)
  * So MTD regards it as 4KiB page size and 256KiB block size 64*(2*2048)
  */
 
-static struct mtd_partition igep2_onenand_partitions[] = {
+static struct mtd_partition igep_onenand_partitions[] = {
        {
                .name           = "X-Loader",
                .offset         = 0,
@@ -139,21 +151,21 @@ static struct mtd_partition igep2_onenand_partitions[] = {
        },
 };
 
-static struct omap_onenand_platform_data igep2_onenand_data = {
-       .parts = igep2_onenand_partitions,
-       .nr_parts = ARRAY_SIZE(igep2_onenand_partitions),
+static struct omap_onenand_platform_data igep_onenand_data = {
+       .parts = igep_onenand_partitions,
+       .nr_parts = ARRAY_SIZE(igep_onenand_partitions),
        .dma_channel    = -1,   /* disable DMA in OMAP OneNAND driver */
 };
 
-static struct platform_device igep2_onenand_device = {
+static struct platform_device igep_onenand_device = {
        .name           = "omap2-onenand",
        .id             = -1,
        .dev = {
-               .platform_data = &igep2_onenand_data,
+               .platform_data = &igep_onenand_data,
        },
 };
 
-static void __init igep2_flash_init(void)
+static void __init igep_flash_init(void)
 {
        u8 cs = 0;
        u8 onenandcs = GPMC_CS_NUM + 1;
@@ -165,7 +177,7 @@ static void __init igep2_flash_init(void)
                /* Check if NAND/oneNAND is configured */
                if ((ret & 0xC00) == 0x800)
                        /* NAND found */
-                       pr_err("IGEP2: Unsupported NAND found\n");
+                       pr_err("IGEP: Unsupported NAND found\n");
                else {
                        ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
                        if ((ret & 0x3F) == (ONENAND_MAP >> 24))
@@ -175,85 +187,46 @@ static void __init igep2_flash_init(void)
        }
 
        if (onenandcs > GPMC_CS_NUM) {
-               pr_err("IGEP2: Unable to find configuration in GPMC\n");
+               pr_err("IGEP: Unable to find configuration in GPMC\n");
                return;
        }
 
-       igep2_onenand_data.cs = onenandcs;
+       igep_onenand_data.cs = onenandcs;
 
-       if (platform_device_register(&igep2_onenand_device) < 0)
-               pr_err("IGEP2: Unable to register OneNAND device\n");
+       if (platform_device_register(&igep_onenand_device) < 0)
+               pr_err("IGEP: Unable to register OneNAND device\n");
 }
 
 #else
-static void __init igep2_flash_init(void) {}
+static void __init igep_flash_init(void) {}
 #endif
 
 #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
 
 #include <linux/smsc911x.h>
+#include <plat/gpmc-smsc911x.h>
 
-static struct smsc911x_platform_config igep2_smsc911x_config = {
-       .irq_polarity   = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
-       .irq_type       = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
-       .flags          = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS  ,
-       .phy_interface  = PHY_INTERFACE_MODE_MII,
-};
-
-static struct resource igep2_smsc911x_resources[] = {
-       {
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .start  = OMAP_GPIO_IRQ(IGEP2_SMSC911X_GPIO),
-               .end    = OMAP_GPIO_IRQ(IGEP2_SMSC911X_GPIO),
-               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
-       },
-};
-
-static struct platform_device igep2_smsc911x_device = {
-       .name           = "smsc911x",
-       .id             = 0,
-       .num_resources  = ARRAY_SIZE(igep2_smsc911x_resources),
-       .resource       = igep2_smsc911x_resources,
-       .dev            = {
-               .platform_data = &igep2_smsc911x_config,
-       },
+static struct omap_smsc911x_platform_data smsc911x_cfg = {
+       .cs             = IGEP2_SMSC911X_CS,
+       .gpio_irq       = IGEP2_SMSC911X_GPIO,
+       .gpio_reset     = -EINVAL,
+       .flags          = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
 };
 
 static inline void __init igep2_init_smsc911x(void)
 {
-       unsigned long cs_mem_base;
-
-       if (gpmc_cs_request(IGEP2_SMSC911X_CS, SZ_16M, &cs_mem_base) < 0) {
-               pr_err("IGEP v2: Failed request for GPMC mem for smsc911x\n");
-               gpmc_cs_free(IGEP2_SMSC911X_CS);
-               return;
-       }
-
-       igep2_smsc911x_resources[0].start = cs_mem_base + 0x0;
-       igep2_smsc911x_resources[0].end   = cs_mem_base + 0xff;
-
-       if ((gpio_request(IGEP2_SMSC911X_GPIO, "SMSC911X IRQ") == 0) &&
-           (gpio_direction_input(IGEP2_SMSC911X_GPIO) == 0)) {
-               gpio_export(IGEP2_SMSC911X_GPIO, 0);
-       } else {
-               pr_err("IGEP v2: Could not obtain gpio for for SMSC911X IRQ\n");
-               return;
-       }
-
-       platform_device_register(&igep2_smsc911x_device);
+       gpmc_smsc911x_init(&smsc911x_cfg);
 }
 
 #else
 static inline void __init igep2_init_smsc911x(void) { }
 #endif
 
-static struct regulator_consumer_supply igep2_vmmc1_supply =
+static struct regulator_consumer_supply igep_vmmc1_supply =
        REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0");
 
 /* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
-static struct regulator_init_data igep2_vmmc1 = {
+static struct regulator_init_data igep_vmmc1 = {
        .constraints = {
                .min_uV                 = 1850000,
                .max_uV                 = 3150000,
@@ -264,13 +237,13 @@ static struct regulator_init_data igep2_vmmc1 = {
                                        | REGULATOR_CHANGE_STATUS,
        },
        .num_consumer_supplies  = 1,
-       .consumer_supplies      = &igep2_vmmc1_supply,
+       .consumer_supplies      = &igep_vmmc1_supply,
 };
 
-static struct regulator_consumer_supply igep2_vio_supply =
+static struct regulator_consumer_supply igep_vio_supply =
        REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1");
 
-static struct regulator_init_data igep2_vio = {
+static struct regulator_init_data igep_vio = {
        .constraints = {
                .min_uV                 = 1800000,
                .max_uV                 = 1800000,
@@ -282,34 +255,34 @@ static struct regulator_init_data igep2_vio = {
                                        | REGULATOR_CHANGE_STATUS,
        },
        .num_consumer_supplies  = 1,
-       .consumer_supplies      = &igep2_vio_supply,
+       .consumer_supplies      = &igep_vio_supply,
 };
 
-static struct regulator_consumer_supply igep2_vmmc2_supply =
+static struct regulator_consumer_supply igep_vmmc2_supply =
        REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1");
 
-static struct regulator_init_data igep2_vmmc2 = {
+static struct regulator_init_data igep_vmmc2 = {
        .constraints            = {
                .valid_modes_mask       = REGULATOR_MODE_NORMAL,
                .always_on              = 1,
        },
        .num_consumer_supplies  = 1,
-       .consumer_supplies      = &igep2_vmmc2_supply,
+       .consumer_supplies      = &igep_vmmc2_supply,
 };
 
-static struct fixed_voltage_config igep2_vwlan = {
+static struct fixed_voltage_config igep_vwlan = {
        .supply_name            = "vwlan",
        .microvolts             = 3300000,
        .gpio                   = -EINVAL,
        .enabled_at_boot        = 1,
-       .init_data              = &igep2_vmmc2,
+       .init_data              = &igep_vmmc2,
 };
 
-static struct platform_device igep2_vwlan_device = {
+static struct platform_device igep_vwlan_device = {
        .name           = "reg-fixed-voltage",
        .id             = 0,
        .dev = {
-               .platform_data  = &igep2_vwlan,
+               .platform_data  = &igep_vwlan,
        },
 };
 
@@ -334,20 +307,17 @@ static struct omap2_hsmmc_info mmc[] = {
 #if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
 #include <linux/leds.h>
 
-static struct gpio_led igep2_gpio_leds[] = {
+static struct gpio_led igep_gpio_leds[] = {
        [0] = {
                .name                   = "gpio-led:red:d0",
-               .gpio                   = IGEP2_GPIO_LED0_RED,
                .default_trigger        = "default-off"
        },
        [1] = {
                .name                   = "gpio-led:green:d0",
-               .gpio                   = IGEP2_GPIO_LED0_GREEN,
                .default_trigger        = "default-off",
        },
        [2] = {
                .name                   = "gpio-led:red:d1",
-               .gpio                   = IGEP2_GPIO_LED1_RED,
                .default_trigger        = "default-off",
        },
        [3] = {
@@ -358,94 +328,119 @@ static struct gpio_led igep2_gpio_leds[] = {
        },
 };
 
-static struct gpio_led_platform_data igep2_led_pdata = {
-       .leds           = igep2_gpio_leds,
-       .num_leds       = ARRAY_SIZE(igep2_gpio_leds),
+static struct gpio_led_platform_data igep_led_pdata = {
+       .leds           = igep_gpio_leds,
+       .num_leds       = ARRAY_SIZE(igep_gpio_leds),
 };
 
-static struct platform_device igep2_led_device = {
+static struct platform_device igep_led_device = {
         .name   = "leds-gpio",
         .id     = -1,
         .dev    = {
-                .platform_data  =  &igep2_led_pdata,
+                .platform_data  =  &igep_led_pdata,
        },
 };
 
-static void __init igep2_leds_init(void)
+static void __init igep_leds_init(void)
 {
-       platform_device_register(&igep2_led_device);
+       if (machine_is_igep0020()) {
+               igep_gpio_leds[0].gpio = IGEP2_GPIO_LED0_RED;
+               igep_gpio_leds[1].gpio = IGEP2_GPIO_LED0_GREEN;
+               igep_gpio_leds[2].gpio = IGEP2_GPIO_LED1_RED;
+       } else {
+               igep_gpio_leds[0].gpio = IGEP3_GPIO_LED0_RED;
+               igep_gpio_leds[1].gpio = IGEP3_GPIO_LED0_GREEN;
+               igep_gpio_leds[2].gpio = IGEP3_GPIO_LED1_RED;
+       }
+
+       platform_device_register(&igep_led_device);
 }
 
 #else
-static inline void igep2_leds_init(void)
+static struct gpio igep_gpio_leds[] __initdata = {
+       { -EINVAL,      GPIOF_OUT_INIT_LOW, "gpio-led:red:d0"   },
+       { -EINVAL,      GPIOF_OUT_INIT_LOW, "gpio-led:green:d0" },
+       { -EINVAL,      GPIOF_OUT_INIT_LOW, "gpio-led:red:d1"   },
+};
+
+static inline void igep_leds_init(void)
 {
-       if ((gpio_request(IGEP2_GPIO_LED0_RED, "gpio-led:red:d0") == 0) &&
-           (gpio_direction_output(IGEP2_GPIO_LED0_RED, 0) == 0))
-               gpio_export(IGEP2_GPIO_LED0_RED, 0);
-       else
-               pr_warning("IGEP v2: Could not obtain gpio GPIO_LED0_RED\n");
+       int i;
 
-       if ((gpio_request(IGEP2_GPIO_LED0_GREEN, "gpio-led:green:d0") == 0) &&
-           (gpio_direction_output(IGEP2_GPIO_LED0_GREEN, 0) == 0))
-               gpio_export(IGEP2_GPIO_LED0_GREEN, 0);
-       else
-               pr_warning("IGEP v2: Could not obtain gpio GPIO_LED0_GREEN\n");
+       if (machine_is_igep0020()) {
+               igep_gpio_leds[0].gpio = IGEP2_GPIO_LED0_RED;
+               igep_gpio_leds[1].gpio = IGEP2_GPIO_LED0_GREEN;
+               igep_gpio_leds[2].gpio = IGEP2_GPIO_LED1_RED;
+       } else {
+               igep_gpio_leds[0].gpio = IGEP3_GPIO_LED0_RED;
+               igep_gpio_leds[1].gpio = IGEP3_GPIO_LED0_GREEN;
+               igep_gpio_leds[2].gpio = IGEP3_GPIO_LED1_RED;
+       }
 
-       if ((gpio_request(IGEP2_GPIO_LED1_RED, "gpio-led:red:d1") == 0) &&
-           (gpio_direction_output(IGEP2_GPIO_LED1_RED, 0) == 0))
-               gpio_export(IGEP2_GPIO_LED1_RED, 0);
-       else
-               pr_warning("IGEP v2: Could not obtain gpio GPIO_LED1_RED\n");
+       if (gpio_request_array(igep_gpio_leds, ARRAY_SIZE(igep_gpio_leds))) {
+               pr_warning("IGEP v2: Could not obtain leds gpios\n");
+               return;
+       }
 
+       for (i = 0; i < ARRAY_SIZE(igep_gpio_leds); i++)
+               gpio_export(igep_gpio_leds[i].gpio, 0);
 }
 #endif
 
-static int igep2_twl_gpio_setup(struct device *dev,
+static struct gpio igep2_twl_gpios[] = {
+       { -EINVAL, GPIOF_IN,            "GPIO_EHCI_NOC"  },
+       { -EINVAL, GPIOF_OUT_INIT_LOW,  "GPIO_USBH_CPEN" },
+};
+
+static int igep_twl_gpio_setup(struct device *dev,
                unsigned gpio, unsigned ngpio)
 {
+       int ret;
+
        /* gpio + 0 is "mmc0_cd" (input/IRQ) */
        mmc[0].gpio_cd = gpio + 0;
        omap2_hsmmc_init(mmc);
 
-       /*
-        * REVISIT: need ehci-omap hooks for external VBUS
-        * power switch and overcurrent detect
-        */
-       if ((gpio_request(gpio + 1, "GPIO_EHCI_NOC") < 0) ||
-           (gpio_direction_input(gpio + 1) < 0))
-               pr_err("IGEP2: Could not obtain gpio for EHCI NOC");
-
-       /*
-        * TWL4030_GPIO_MAX + 0 == ledA, GPIO_USBH_CPEN
-        * (out, active low)
-        */
-       if ((gpio_request(gpio + TWL4030_GPIO_MAX, "GPIO_USBH_CPEN") < 0) ||
-           (gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0) < 0))
-               pr_err("IGEP2: Could not obtain gpio for USBH_CPEN");
-
        /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */
 #if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE)
-       if ((gpio_request(gpio+TWL4030_GPIO_MAX+1, "gpio-led:green:d1") == 0)
-           && (gpio_direction_output(gpio + TWL4030_GPIO_MAX + 1, 1) == 0))
+       ret = gpio_request_one(gpio + TWL4030_GPIO_MAX + 1, GPIOF_OUT_INIT_HIGH,
+                              "gpio-led:green:d1");
+       if (ret == 0)
                gpio_export(gpio + TWL4030_GPIO_MAX + 1, 0);
        else
-               pr_warning("IGEP v2: Could not obtain gpio GPIO_LED1_GREEN\n");
+               pr_warning("IGEP: Could not obtain gpio GPIO_LED1_GREEN\n");
 #else
-       igep2_gpio_leds[3].gpio = gpio + TWL4030_GPIO_MAX + 1;
+       igep_gpio_leds[3].gpio = gpio + TWL4030_GPIO_MAX + 1;
 #endif
 
+       if (machine_is_igep0030())
+               return 0;
+
+       /*
+        * REVISIT: need ehci-omap hooks for external VBUS
+        * power switch and overcurrent detect
+        */
+       igep2_twl_gpios[0].gpio = gpio + 1;
+
+       /* TWL4030_GPIO_MAX + 0 == ledA, GPIO_USBH_CPEN (out, active low) */
+       igep2_twl_gpios[1].gpio = gpio + TWL4030_GPIO_MAX;
+
+       ret = gpio_request_array(igep2_twl_gpios, ARRAY_SIZE(igep2_twl_gpios));
+       if (ret < 0)
+               pr_err("IGEP2: Could not obtain gpio for USBH_CPEN");
+
        return 0;
 };
 
-static struct twl4030_gpio_platform_data igep2_twl4030_gpio_pdata = {
+static struct twl4030_gpio_platform_data igep_twl4030_gpio_pdata = {
        .gpio_base      = OMAP_MAX_GPIO_LINES,
        .irq_base       = TWL4030_GPIO_IRQ_BASE,
        .irq_end        = TWL4030_GPIO_IRQ_END,
        .use_leds       = true,
-       .setup          = igep2_twl_gpio_setup,
+       .setup          = igep_twl_gpio_setup,
 };
 
-static struct twl4030_usb_data igep2_usb_data = {
+static struct twl4030_usb_data igep_usb_data = {
        .usb_mode       = T2_USB_MODE_ULPI,
 };
 
@@ -507,16 +502,17 @@ static struct regulator_init_data igep2_vpll2 = {
 
 static void __init igep2_display_init(void)
 {
-       if (gpio_request(IGEP2_GPIO_DVI_PUP, "GPIO_DVI_PUP") &&
-           gpio_direction_output(IGEP2_GPIO_DVI_PUP, 1))
+       int err = gpio_request_one(IGEP2_GPIO_DVI_PUP, GPIOF_OUT_INIT_HIGH,
+                                  "GPIO_DVI_PUP");
+       if (err)
                pr_err("IGEP v2: Could not obtain gpio GPIO_DVI_PUP\n");
 }
 
-static struct platform_device *igep2_devices[] __initdata = {
-       &igep2_vwlan_device,
+static struct platform_device *igep_devices[] __initdata = {
+       &igep_vwlan_device,
 };
 
-static void __init igep2_init_early(void)
+static void __init igep_init_early(void)
 {
        omap2_init_common_infrastructure();
        omap2_init_common_devices(m65kxxxxam_sdrc_params,
@@ -561,27 +557,15 @@ static struct twl4030_keypad_data igep2_keypad_pdata = {
        .rep            = 1,
 };
 
-static struct twl4030_platform_data igep2_twldata = {
+static struct twl4030_platform_data igep_twldata = {
        .irq_base       = TWL4030_IRQ_BASE,
        .irq_end        = TWL4030_IRQ_END,
 
        /* platform_data for children goes here */
-       .usb            = &igep2_usb_data,
-       .codec          = &igep2_codec_data,
-       .gpio           = &igep2_twl4030_gpio_pdata,
-       .keypad         = &igep2_keypad_pdata,
-       .vmmc1          = &igep2_vmmc1,
-       .vpll2          = &igep2_vpll2,
-       .vio            = &igep2_vio,
-};
-
-static struct i2c_board_info __initdata igep2_i2c1_boardinfo[] = {
-       {
-               I2C_BOARD_INFO("twl4030", 0x48),
-               .flags          = I2C_CLIENT_WAKE,
-               .irq            = INT_34XX_SYS_NIRQ,
-               .platform_data  = &igep2_twldata,
-       },
+       .usb            = &igep_usb_data,
+       .gpio           = &igep_twl4030_gpio_pdata,
+       .vmmc1          = &igep_vmmc1,
+       .vio            = &igep_vio,
 };
 
 static struct i2c_board_info __initdata igep2_i2c3_boardinfo[] = {
@@ -590,32 +574,29 @@ static struct i2c_board_info __initdata igep2_i2c3_boardinfo[] = {
        },
 };
 
-static void __init igep2_i2c_init(void)
+static void __init igep_i2c_init(void)
 {
        int ret;
 
-       ret = omap_register_i2c_bus(1, 2600, igep2_i2c1_boardinfo,
-               ARRAY_SIZE(igep2_i2c1_boardinfo));
-       if (ret)
-               pr_warning("IGEP2: Could not register I2C1 bus (%d)\n", ret);
+       if (machine_is_igep0020()) {
+               /*
+                * Bus 3 is attached to the DVI port where devices like the
+                * pico DLP projector don't work reliably with 400kHz
+                */
+               ret = omap_register_i2c_bus(3, 100, igep2_i2c3_boardinfo,
+                                           ARRAY_SIZE(igep2_i2c3_boardinfo));
+               if (ret)
+                       pr_warning("IGEP2: Could not register I2C3 bus (%d)\n", ret);
+
+               igep_twldata.codec      = &igep2_codec_data;
+               igep_twldata.keypad     = &igep2_keypad_pdata;
+               igep_twldata.vpll2      = &igep2_vpll2;
+       }
 
-       /*
-        * Bus 3 is attached to the DVI port where devices like the pico DLP
-        * projector don't work reliably with 400kHz
-        */
-       ret = omap_register_i2c_bus(3, 100, igep2_i2c3_boardinfo,
-               ARRAY_SIZE(igep2_i2c3_boardinfo));
-       if (ret)
-               pr_warning("IGEP2: Could not register I2C3 bus (%d)\n", ret);
+       omap3_pmic_init("twl4030", &igep_twldata);
 }
 
-static struct omap_musb_board_data musb_board_data = {
-       .interface_type         = MUSB_INTERFACE_ULPI,
-       .mode                   = MUSB_OTG,
-       .power                  = 100,
-};
-
-static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
+static const struct usbhs_omap_board_data igep2_usbhs_bdata __initconst = {
        .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
        .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED,
        .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,
@@ -626,6 +607,17 @@ static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
        .reset_gpio_port[2] = -EINVAL,
 };
 
+static const struct usbhs_omap_board_data igep3_usbhs_bdata __initconst = {
+       .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED,
+       .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
+       .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,
+
+       .phy_reset = true,
+       .reset_gpio_port[0] = -EINVAL,
+       .reset_gpio_port[1] = IGEP3_GPIO_USBH_NRESET,
+       .reset_gpio_port[2] = -EINVAL,
+};
+
 #ifdef CONFIG_OMAP_MUX
 static struct omap_board_mux board_mux[] __initdata = {
        { .reg_offset = OMAP_MUX_TERMINATOR },
@@ -633,82 +625,95 @@ static struct omap_board_mux board_mux[] __initdata = {
 #endif
 
 #if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE)
+static struct gpio igep_wlan_bt_gpios[] __initdata = {
+       { -EINVAL, GPIOF_OUT_INIT_HIGH, "GPIO_WIFI_NPD"    },
+       { -EINVAL, GPIOF_OUT_INIT_HIGH, "GPIO_WIFI_NRESET" },
+       { -EINVAL, GPIOF_OUT_INIT_HIGH, "GPIO_BT_NRESET"   },
+};
 
-static void __init igep2_wlan_bt_init(void)
+static void __init igep_wlan_bt_init(void)
 {
-       unsigned npd, wreset, btreset;
+       int err;
 
        /* GPIO's for WLAN-BT combo depends on hardware revision */
        if (hwrev == IGEP2_BOARD_HWREV_B) {
-               npd = IGEP2_RB_GPIO_WIFI_NPD;
-               wreset = IGEP2_RB_GPIO_WIFI_NRESET;
-               btreset = IGEP2_RB_GPIO_BT_NRESET;
-       } else if (hwrev == IGEP2_BOARD_HWREV_C) {
-               npd = IGEP2_RC_GPIO_WIFI_NPD;
-               wreset = IGEP2_RC_GPIO_WIFI_NRESET;
-               btreset = IGEP2_RC_GPIO_BT_NRESET;
+               igep_wlan_bt_gpios[0].gpio = IGEP2_RB_GPIO_WIFI_NPD;
+               igep_wlan_bt_gpios[1].gpio = IGEP2_RB_GPIO_WIFI_NRESET;
+               igep_wlan_bt_gpios[2].gpio = IGEP2_RB_GPIO_BT_NRESET;
+       } else if (hwrev == IGEP2_BOARD_HWREV_C || machine_is_igep0030()) {
+               igep_wlan_bt_gpios[0].gpio = IGEP2_RC_GPIO_WIFI_NPD;
+               igep_wlan_bt_gpios[1].gpio = IGEP2_RC_GPIO_WIFI_NRESET;
+               igep_wlan_bt_gpios[2].gpio = IGEP2_RC_GPIO_BT_NRESET;
        } else
                return;
 
-       /* Set GPIO's for  WLAN-BT combo module */
-       if ((gpio_request(npd, "GPIO_WIFI_NPD") == 0) &&
-           (gpio_direction_output(npd, 1) == 0)) {
-               gpio_export(npd, 0);
-       } else
-               pr_warning("IGEP2: Could not obtain gpio GPIO_WIFI_NPD\n");
-
-       if ((gpio_request(wreset, "GPIO_WIFI_NRESET") == 0) &&
-           (gpio_direction_output(wreset, 1) == 0)) {
-               gpio_export(wreset, 0);
-               gpio_set_value(wreset, 0);
-               udelay(10);
-               gpio_set_value(wreset, 1);
-       } else
-               pr_warning("IGEP2: Could not obtain gpio GPIO_WIFI_NRESET\n");
+       err = gpio_request_array(igep_wlan_bt_gpios,
+                                ARRAY_SIZE(igep_wlan_bt_gpios));
+       if (err) {
+               pr_warning("IGEP2: Could not obtain WIFI/BT gpios\n");
+               return;
+       }
+
+       gpio_export(igep_wlan_bt_gpios[0].gpio, 0);
+       gpio_export(igep_wlan_bt_gpios[1].gpio, 0);
+       gpio_export(igep_wlan_bt_gpios[2].gpio, 0);
+
+       gpio_set_value(igep_wlan_bt_gpios[1].gpio, 0);
+       udelay(10);
+       gpio_set_value(igep_wlan_bt_gpios[1].gpio, 1);
 
-       if ((gpio_request(btreset, "GPIO_BT_NRESET") == 0) &&
-           (gpio_direction_output(btreset, 1) == 0)) {
-               gpio_export(btreset, 0);
-       } else
-               pr_warning("IGEP2: Could not obtain gpio GPIO_BT_NRESET\n");
 }
 #else
-static inline void __init igep2_wlan_bt_init(void) { }
+static inline void __init igep_wlan_bt_init(void) { }
 #endif
 
-static void __init igep2_init(void)
+static void __init igep_init(void)
 {
        omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
 
        /* Get IGEP2 hardware revision */
        igep2_get_revision();
        /* Register I2C busses and drivers */
-       igep2_i2c_init();
-       platform_add_devices(igep2_devices, ARRAY_SIZE(igep2_devices));
-       omap_display_init(&igep2_dss_data);
+       igep_i2c_init();
+       platform_add_devices(igep_devices, ARRAY_SIZE(igep_devices));
        omap_serial_init();
-       usb_musb_init(&musb_board_data);
-       usbhs_init(&usbhs_bdata);
+       usb_musb_init(NULL);
 
-       igep2_flash_init();
-       igep2_leds_init();
-       igep2_display_init();
-       igep2_init_smsc911x();
+       igep_flash_init();
+       igep_leds_init();
 
        /*
         * WLAN-BT combo module from MuRata which has a Marvell WLAN
         * (88W8686) + CSR Bluetooth chipset. Uses SDIO interface.
         */
-       igep2_wlan_bt_init();
+       igep_wlan_bt_init();
 
+       if (machine_is_igep0020()) {
+               omap_display_init(&igep2_dss_data);
+               igep2_display_init();
+               igep2_init_smsc911x();
+               usbhs_init(&igep2_usbhs_bdata);
+       } else {
+               usbhs_init(&igep3_usbhs_bdata);
+       }
 }
 
 MACHINE_START(IGEP0020, "IGEP v2 board")
        .boot_params    = 0x80000100,
        .reserve        = omap_reserve,
        .map_io         = omap3_map_io,
-       .init_early     = igep2_init_early,
+       .init_early     = igep_init_early,
+       .init_irq       = omap_init_irq,
+       .init_machine   = igep_init,
+       .timer          = &omap_timer,
+MACHINE_END
+
+MACHINE_START(IGEP0030, "IGEP OMAP3 module")
+       .boot_params    = 0x80000100,
+       .reserve        = omap_reserve,
+       .map_io         = omap3_map_io,
+       .init_early     = igep_init_early,
        .init_irq       = omap_init_irq,
-       .init_machine   = igep2_init,
+       .init_machine   = igep_init,
        .timer          = &omap_timer,
 MACHINE_END
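
Several igep hunks replace chains of gpio_request() / gpio_direction_output() calls with a struct gpio table whose .gpio fields are filled in at runtime (the -EINVAL entries mean not yet known) and a single gpio_request_array() call, which releases whatever it already claimed when any one request fails. A minimal sketch of that table-driven pattern; the GPIO numbers and the variant flag are placeholders:

#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>

static struct gpio example_led_gpios[] __initdata = {
        { -EINVAL, GPIOF_OUT_INIT_LOW, "gpio-led:red:d0"   },
        { -EINVAL, GPIOF_OUT_INIT_LOW, "gpio-led:green:d0" },
};

static void __init example_leds_init(bool variant_a)
{
        int i;

        /* Pick per-variant GPIO numbers first (placeholder values). */
        example_led_gpios[0].gpio = variant_a ? 27 : 53;
        example_led_gpios[1].gpio = variant_a ? 28 : 54;

        /* One call requests every line with its flags and label. */
        if (gpio_request_array(example_led_gpios,
                               ARRAY_SIZE(example_led_gpios))) {
                pr_warning("example: could not obtain LED gpios\n");
                return;
        }

        for (i = 0; i < ARRAY_SIZE(example_led_gpios); i++)
                gpio_export(example_led_gpios[i].gpio, 0);
}
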
diff --git a/arch/arm/mach-omap2/board-igep0030.c b/arch/arm/mach-omap2/board-igep0030.c
deleted file mode 100644 (file)
index 2cf86c3..0000000
+++ /dev/null
@@ -1,458 +0,0 @@
-/*
- * Copyright (C) 2010 - ISEE 2007 SL
- *
- * Modified from mach-omap2/board-generic.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-
-#include <linux/regulator/machine.h>
-#include <linux/regulator/fixed.h>
-#include <linux/i2c/twl.h>
-#include <linux/mmc/host.h>
-
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-
-#include <plat/board.h>
-#include <plat/common.h>
-#include <plat/gpmc.h>
-#include <plat/usb.h>
-#include <plat/onenand.h>
-
-#include "mux.h"
-#include "hsmmc.h"
-#include "sdram-numonyx-m65kxxxxam.h"
-
-#define IGEP3_GPIO_LED0_GREEN  54
-#define IGEP3_GPIO_LED0_RED    53
-#define IGEP3_GPIO_LED1_RED    16
-
-#define IGEP3_GPIO_WIFI_NPD    138
-#define IGEP3_GPIO_WIFI_NRESET 139
-#define IGEP3_GPIO_BT_NRESET   137
-
-#define IGEP3_GPIO_USBH_NRESET  183
-
-
-#if defined(CONFIG_MTD_ONENAND_OMAP2) || \
-       defined(CONFIG_MTD_ONENAND_OMAP2_MODULE)
-
-#define ONENAND_MAP             0x20000000
-
-/*
- * x2 Flash built-in COMBO POP MEMORY
- * Since the device is equipped with two DataRAMs, and two-plane NAND
- * Flash memory array, these two component enables simultaneous program
- * of 4KiB. Plane1 has only even blocks such as block0, block2, block4
- * while Plane2 has only odd blocks such as block1, block3, block5.
- * So MTD regards it as 4KiB page size and 256KiB block size 64*(2*2048)
- */
-
-static struct mtd_partition igep3_onenand_partitions[] = {
-       {
-               .name           = "X-Loader",
-               .offset         = 0,
-               .size           = 2 * (64*(2*2048))
-       },
-       {
-               .name           = "U-Boot",
-               .offset         = MTDPART_OFS_APPEND,
-               .size           = 6 * (64*(2*2048)),
-       },
-       {
-               .name           = "Environment",
-               .offset         = MTDPART_OFS_APPEND,
-               .size           = 2 * (64*(2*2048)),
-       },
-       {
-               .name           = "Kernel",
-               .offset         = MTDPART_OFS_APPEND,
-               .size           = 12 * (64*(2*2048)),
-       },
-       {
-               .name           = "File System",
-               .offset         = MTDPART_OFS_APPEND,
-               .size           = MTDPART_SIZ_FULL,
-       },
-};
-
-static struct omap_onenand_platform_data igep3_onenand_pdata = {
-       .parts = igep3_onenand_partitions,
-       .nr_parts = ARRAY_SIZE(igep3_onenand_partitions),
-       .onenand_setup = NULL,
-       .dma_channel    = -1,   /* disable DMA in OMAP OneNAND driver */
-};
-
-static struct platform_device igep3_onenand_device = {
-       .name           = "omap2-onenand",
-       .id             = -1,
-       .dev = {
-               .platform_data = &igep3_onenand_pdata,
-       },
-};
-
-static void __init igep3_flash_init(void)
-{
-       u8 cs = 0;
-       u8 onenandcs = GPMC_CS_NUM + 1;
-
-       for (cs = 0; cs < GPMC_CS_NUM; cs++) {
-               u32 ret;
-               ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
-
-               /* Check if NAND/oneNAND is configured */
-               if ((ret & 0xC00) == 0x800)
-                       /* NAND found */
-                       pr_err("IGEP3: Unsupported NAND found\n");
-               else {
-                       ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
-
-                       if ((ret & 0x3F) == (ONENAND_MAP >> 24))
-                               /* OneNAND found */
-                               onenandcs = cs;
-               }
-       }
-
-       if (onenandcs > GPMC_CS_NUM) {
-               pr_err("IGEP3: Unable to find configuration in GPMC\n");
-               return;
-       }
-
-       igep3_onenand_pdata.cs = onenandcs;
-
-       if (platform_device_register(&igep3_onenand_device) < 0)
-               pr_err("IGEP3: Unable to register OneNAND device\n");
-}
-
-#else
-static void __init igep3_flash_init(void) {}
-#endif
-
-static struct regulator_consumer_supply igep3_vmmc1_supply =
-       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0");
-
-/* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */
-static struct regulator_init_data igep3_vmmc1 = {
-       .constraints = {
-               .min_uV                 = 1850000,
-               .max_uV                 = 3150000,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_VOLTAGE
-                                       | REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &igep3_vmmc1_supply,
-};
-
-static struct regulator_consumer_supply igep3_vio_supply =
-       REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1");
-
-static struct regulator_init_data igep3_vio = {
-       .constraints = {
-               .min_uV                 = 1800000,
-               .max_uV                 = 1800000,
-               .apply_uV               = 1,
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL
-                                       | REGULATOR_MODE_STANDBY,
-               .valid_ops_mask         = REGULATOR_CHANGE_VOLTAGE
-                                       | REGULATOR_CHANGE_MODE
-                                       | REGULATOR_CHANGE_STATUS,
-       },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &igep3_vio_supply,
-};
-
-static struct regulator_consumer_supply igep3_vmmc2_supply =
-       REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1");
-
-static struct regulator_init_data igep3_vmmc2 = {
-       .constraints    = {
-               .valid_modes_mask       = REGULATOR_MODE_NORMAL,
-               .always_on              = 1,
-       },
-       .num_consumer_supplies  = 1,
-       .consumer_supplies      = &igep3_vmmc2_supply,
-};
-
-static struct fixed_voltage_config igep3_vwlan = {
-       .supply_name            = "vwlan",
-       .microvolts             = 3300000,
-       .gpio                   = -EINVAL,
-       .enabled_at_boot        = 1,
-       .init_data              = &igep3_vmmc2,
-};
-
-static struct platform_device igep3_vwlan_device = {
-       .name   = "reg-fixed-voltage",
-       .id     = 0,
-       .dev    = {
-               .platform_data = &igep3_vwlan,
-       },
-};
-
-static struct omap2_hsmmc_info mmc[] = {
-       [0] = {
-               .mmc            = 1,
-               .caps           = MMC_CAP_4_BIT_DATA,
-               .gpio_cd        = -EINVAL,
-               .gpio_wp        = -EINVAL,
-       },
-#if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE)
-       [1] = {
-               .mmc            = 2,
-               .caps           = MMC_CAP_4_BIT_DATA,
-               .gpio_cd        = -EINVAL,
-               .gpio_wp        = -EINVAL,
-       },
-#endif
-       {}      /* Terminator */
-};
-
-#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
-#include <linux/leds.h>
-
-static struct gpio_led igep3_gpio_leds[] = {
-       [0] = {
-               .name                   = "gpio-led:red:d0",
-               .gpio                   = IGEP3_GPIO_LED0_RED,
-               .default_trigger        = "default-off"
-       },
-       [1] = {
-               .name                   = "gpio-led:green:d0",
-               .gpio                   = IGEP3_GPIO_LED0_GREEN,
-               .default_trigger        = "default-off",
-       },
-       [2] = {
-               .name                   = "gpio-led:red:d1",
-               .gpio                   = IGEP3_GPIO_LED1_RED,
-               .default_trigger        = "default-off",
-       },
-       [3] = {
-               .name                   = "gpio-led:green:d1",
-               .default_trigger        = "heartbeat",
-               .gpio                   = -EINVAL, /* gets replaced */
-       },
-};
-
-static struct gpio_led_platform_data igep3_led_pdata = {
-       .leds           = igep3_gpio_leds,
-       .num_leds       = ARRAY_SIZE(igep3_gpio_leds),
-};
-
-static struct platform_device igep3_led_device = {
-        .name   = "leds-gpio",
-        .id     = -1,
-        .dev    = {
-                .platform_data = &igep3_led_pdata,
-       },
-};
-
-static void __init igep3_leds_init(void)
-{
-       platform_device_register(&igep3_led_device);
-}
-
-#else
-static inline void igep3_leds_init(void)
-{
-       if ((gpio_request(IGEP3_GPIO_LED0_RED, "gpio-led:red:d0") == 0) &&
-           (gpio_direction_output(IGEP3_GPIO_LED0_RED, 1) == 0)) {
-               gpio_export(IGEP3_GPIO_LED0_RED, 0);
-               gpio_set_value(IGEP3_GPIO_LED0_RED, 1);
-       } else
-               pr_warning("IGEP3: Could not obtain gpio GPIO_LED0_RED\n");
-
-       if ((gpio_request(IGEP3_GPIO_LED0_GREEN, "gpio-led:green:d0") == 0) &&
-           (gpio_direction_output(IGEP3_GPIO_LED0_GREEN, 1) == 0)) {
-               gpio_export(IGEP3_GPIO_LED0_GREEN, 0);
-               gpio_set_value(IGEP3_GPIO_LED0_GREEN, 1);
-       } else
-               pr_warning("IGEP3: Could not obtain gpio GPIO_LED0_GREEN\n");
-
-       if ((gpio_request(IGEP3_GPIO_LED1_RED, "gpio-led:red:d1") == 0) &&
-               (gpio_direction_output(IGEP3_GPIO_LED1_RED, 1) == 0)) {
-               gpio_export(IGEP3_GPIO_LED1_RED, 0);
-               gpio_set_value(IGEP3_GPIO_LED1_RED, 1);
-       } else
-               pr_warning("IGEP3: Could not obtain gpio GPIO_LED1_RED\n");
-}
-#endif
-
-static int igep3_twl4030_gpio_setup(struct device *dev,
-               unsigned gpio, unsigned ngpio)
-{
-       /* gpio + 0 is "mmc0_cd" (input/IRQ) */
-       mmc[0].gpio_cd = gpio + 0;
-       omap2_hsmmc_init(mmc);
-
-       /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */
-#if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE)
-       if ((gpio_request(gpio+TWL4030_GPIO_MAX+1, "gpio-led:green:d1") == 0)
-           && (gpio_direction_output(gpio + TWL4030_GPIO_MAX + 1, 1) == 0)) {
-               gpio_export(gpio + TWL4030_GPIO_MAX + 1, 0);
-               gpio_set_value(gpio + TWL4030_GPIO_MAX + 1, 0);
-       } else
-               pr_warning("IGEP3: Could not obtain gpio GPIO_LED1_GREEN\n");
-#else
-       igep3_gpio_leds[3].gpio = gpio + TWL4030_GPIO_MAX + 1;
-#endif
-
-       return 0;
-};
-
-static struct twl4030_gpio_platform_data igep3_twl4030_gpio_pdata = {
-       .gpio_base      = OMAP_MAX_GPIO_LINES,
-       .irq_base       = TWL4030_GPIO_IRQ_BASE,
-       .irq_end        = TWL4030_GPIO_IRQ_END,
-       .use_leds       = true,
-       .setup          = igep3_twl4030_gpio_setup,
-};
-
-static struct twl4030_usb_data igep3_twl4030_usb_data = {
-       .usb_mode       = T2_USB_MODE_ULPI,
-};
-
-static struct platform_device *igep3_devices[] __initdata = {
-       &igep3_vwlan_device,
-};
-
-static void __init igep3_init_early(void)
-{
-       omap2_init_common_infrastructure();
-       omap2_init_common_devices(m65kxxxxam_sdrc_params,
-                                 m65kxxxxam_sdrc_params);
-}
-
-static struct twl4030_platform_data igep3_twl4030_pdata = {
-       .irq_base       = TWL4030_IRQ_BASE,
-       .irq_end        = TWL4030_IRQ_END,
-
-       /* platform_data for children goes here */
-       .usb            = &igep3_twl4030_usb_data,
-       .gpio           = &igep3_twl4030_gpio_pdata,
-       .vmmc1          = &igep3_vmmc1,
-       .vio            = &igep3_vio,
-};
-
-static struct i2c_board_info __initdata igep3_i2c_boardinfo[] = {
-       {
-               I2C_BOARD_INFO("twl4030", 0x48),
-               .flags          = I2C_CLIENT_WAKE,
-               .irq            = INT_34XX_SYS_NIRQ,
-               .platform_data  = &igep3_twl4030_pdata,
-       },
-};
-
-static int __init igep3_i2c_init(void)
-{
-       omap_register_i2c_bus(1, 2600, igep3_i2c_boardinfo,
-                       ARRAY_SIZE(igep3_i2c_boardinfo));
-
-       return 0;
-}
-
-static struct omap_musb_board_data musb_board_data = {
-       .interface_type = MUSB_INTERFACE_ULPI,
-       .mode           = MUSB_OTG,
-       .power          = 100,
-};
-
-#if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE)
-
-static void __init igep3_wifi_bt_init(void)
-{
-       /* Configure MUX values for W-LAN + Bluetooth GPIO's */
-       omap_mux_init_gpio(IGEP3_GPIO_WIFI_NPD, OMAP_PIN_OUTPUT);
-       omap_mux_init_gpio(IGEP3_GPIO_WIFI_NRESET, OMAP_PIN_OUTPUT);
-       omap_mux_init_gpio(IGEP3_GPIO_BT_NRESET, OMAP_PIN_OUTPUT);
-
-       /* Set GPIO's for  W-LAN + Bluetooth combo module */
-       if ((gpio_request(IGEP3_GPIO_WIFI_NPD, "GPIO_WIFI_NPD") == 0) &&
-           (gpio_direction_output(IGEP3_GPIO_WIFI_NPD, 1) == 0)) {
-               gpio_export(IGEP3_GPIO_WIFI_NPD, 0);
-       } else
-               pr_warning("IGEP3: Could not obtain gpio GPIO_WIFI_NPD\n");
-
-       if ((gpio_request(IGEP3_GPIO_WIFI_NRESET, "GPIO_WIFI_NRESET") == 0) &&
-           (gpio_direction_output(IGEP3_GPIO_WIFI_NRESET, 1) == 0)) {
-               gpio_export(IGEP3_GPIO_WIFI_NRESET, 0);
-               gpio_set_value(IGEP3_GPIO_WIFI_NRESET, 0);
-               udelay(10);
-               gpio_set_value(IGEP3_GPIO_WIFI_NRESET, 1);
-       } else
-               pr_warning("IGEP3: Could not obtain gpio GPIO_WIFI_NRESET\n");
-
-       if ((gpio_request(IGEP3_GPIO_BT_NRESET, "GPIO_BT_NRESET") == 0) &&
-           (gpio_direction_output(IGEP3_GPIO_BT_NRESET, 1) == 0)) {
-               gpio_export(IGEP3_GPIO_BT_NRESET, 0);
-       } else
-               pr_warning("IGEP3: Could not obtain gpio GPIO_BT_NRESET\n");
-}
-#else
-void __init igep3_wifi_bt_init(void) {}
-#endif
-
-static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
-       .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED,
-       .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY,
-       .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED,
-
-       .phy_reset = true,
-       .reset_gpio_port[0] = -EINVAL,
-       .reset_gpio_port[1] = IGEP3_GPIO_USBH_NRESET,
-       .reset_gpio_port[2] = -EINVAL,
-};
-
-#ifdef CONFIG_OMAP_MUX
-static struct omap_board_mux board_mux[] __initdata = {
-       OMAP3_MUX(I2C2_SDA, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT),
-       { .reg_offset = OMAP_MUX_TERMINATOR },
-};
-#endif
-
-static void __init igep3_init(void)
-{
-       omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
-
-       /* Register I2C busses and drivers */
-       igep3_i2c_init();
-       platform_add_devices(igep3_devices, ARRAY_SIZE(igep3_devices));
-       omap_serial_init();
-       usb_musb_init(&musb_board_data);
-       usbhs_init(&usbhs_bdata);
-
-       igep3_flash_init();
-       igep3_leds_init();
-
-       /*
-        * WLAN-BT combo module from MuRata which has a Marvell WLAN
-        * (88W8686) + CSR Bluetooth chipset. Uses SDIO interface.
-        */
-       igep3_wifi_bt_init();
-
-}
-
-MACHINE_START(IGEP0030, "IGEP OMAP3 module")
-       .boot_params    = 0x80000100,
-       .reserve        = omap_reserve,
-       .map_io         = omap3_map_io,
-       .init_early     = igep3_init_early,
-       .init_irq       = omap_init_irq,
-       .init_machine   = igep3_init,
-       .timer          = &omap_timer,
-MACHINE_END
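
Deleting board-igep0030.c is possible because the reworked board-igep0020.c above registers both machine descriptors against the same igep_* functions and branches at runtime on machine_is_igep0020() / machine_is_igep0030(). A compressed sketch of that shape, trimmed to the fields relevant here (a real descriptor also carries map_io, init_irq and timer as in the hunks above); the example_* helpers are placeholders:

#include <linux/init.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>

static void __init example_common_init(void)
{
        /* shared setup: mux, I2C/PMIC, serial, MUSB, flash, LEDs, WLAN/BT */
}

static void __init example_board_init(void)
{
        example_common_init();

        if (machine_is_igep0020()) {
                /* IGEP v2 only: DSS, DVI power-up GPIO, smsc911x, EHCI port 1 */
        } else {
                /* IGEP OMAP3 module only: EHCI on port 2 */
        }
}

MACHINE_START(IGEP0020, "IGEP v2 board")
        .boot_params    = 0x80000100,
        .init_machine   = example_board_init,
MACHINE_END

MACHINE_START(IGEP0030, "IGEP OMAP3 module")
        .boot_params    = 0x80000100,
        .init_machine   = example_board_init,
MACHINE_END
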
index e2ba77957a8c305196a25956633724932cff2d11..f7d6038075f0721be241eff8a6b08859c1a86daa 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/err.h>
 #include <linux/clk.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/ads7846.h>
 #include <linux/regulator/machine.h>
 #include <linux/i2c/twl.h>
 #include <linux/io.h>
 
 #include <asm/delay.h>
 #include <plat/usb.h>
+#include <plat/gpmc-smsc911x.h>
 
 #include "board-flash.h"
 #include "mux.h"
 #include "hsmmc.h"
 #include "control.h"
+#include "common-board-devices.h"
 
 #define LDP_SMSC911X_CS                1
 #define LDP_SMSC911X_GPIO      152
 #define DEBUG_BASE             0x08000000
 #define LDP_ETHR_START         DEBUG_BASE
 
-static struct resource ldp_smsc911x_resources[] = {
-       [0] = {
-               .start  = LDP_ETHR_START,
-               .end    = LDP_ETHR_START + SZ_4K,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = 0,
-               .end    = 0,
-               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
-       },
-};
-
-static struct smsc911x_platform_config ldp_smsc911x_config = {
-       .irq_polarity   = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
-       .irq_type       = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
-       .flags          = SMSC911X_USE_32BIT,
-       .phy_interface  = PHY_INTERFACE_MODE_MII,
-};
-
-static struct platform_device ldp_smsc911x_device = {
-       .name           = "smsc911x",
-       .id             = -1,
-       .num_resources  = ARRAY_SIZE(ldp_smsc911x_resources),
-       .resource       = ldp_smsc911x_resources,
-       .dev            = {
-               .platform_data = &ldp_smsc911x_config,
-       },
-};
-
 static uint32_t board_keymap[] = {
        KEY(0, 0, KEY_1),
        KEY(1, 0, KEY_2),
@@ -197,82 +168,16 @@ static struct platform_device ldp_gpio_keys_device = {
        },
 };
 
-static int ts_gpio;
-
-/**
- * @brief ads7846_dev_init : Requests & sets GPIO line for pen-irq
- *
- * @return - void. If request gpio fails then Flag KERN_ERR.
- */
-static void ads7846_dev_init(void)
-{
-       if (gpio_request(ts_gpio, "ads7846 irq") < 0) {
-               printk(KERN_ERR "can't get ads746 pen down GPIO\n");
-               return;
-       }
-
-       gpio_direction_input(ts_gpio);
-       gpio_set_debounce(ts_gpio, 310);
-}
-
-static int ads7846_get_pendown_state(void)
-{
-       return !gpio_get_value(ts_gpio);
-}
-
-static struct ads7846_platform_data tsc2046_config __initdata = {
-       .get_pendown_state      = ads7846_get_pendown_state,
-       .keep_vref_on           = 1,
-};
-
-static struct omap2_mcspi_device_config tsc2046_mcspi_config = {
-       .turbo_mode     = 0,
-       .single_channel = 1,    /* 0: slave, 1: master */
-};
-
-static struct spi_board_info ldp_spi_board_info[] __initdata = {
-       [0] = {
-               /*
-                * TSC2046 operates at a max freqency of 2MHz, so
-                * operate slightly below at 1.5MHz
-                */
-               .modalias               = "ads7846",
-               .bus_num                = 1,
-               .chip_select            = 0,
-               .max_speed_hz           = 1500000,
-               .controller_data        = &tsc2046_mcspi_config,
-               .irq                    = 0,
-               .platform_data          = &tsc2046_config,
-       },
+static struct omap_smsc911x_platform_data smsc911x_cfg = {
+       .cs             = LDP_SMSC911X_CS,
+       .gpio_irq       = LDP_SMSC911X_GPIO,
+       .gpio_reset     = -EINVAL,
+       .flags          = SMSC911X_USE_32BIT,
 };
 
 static inline void __init ldp_init_smsc911x(void)
 {
-       int eth_cs;
-       unsigned long cs_mem_base;
-       int eth_gpio = 0;
-
-       eth_cs = LDP_SMSC911X_CS;
-
-       if (gpmc_cs_request(eth_cs, SZ_16M, &cs_mem_base) < 0) {
-               printk(KERN_ERR "Failed to request GPMC mem for smsc911x\n");
-               return;
-       }
-
-       ldp_smsc911x_resources[0].start = cs_mem_base + 0x0;
-       ldp_smsc911x_resources[0].end   = cs_mem_base + 0xff;
-       udelay(100);
-
-       eth_gpio = LDP_SMSC911X_GPIO;
-
-       ldp_smsc911x_resources[1].start = OMAP_GPIO_IRQ(eth_gpio);
-
-       if (gpio_request(eth_gpio, "smsc911x irq") < 0) {
-               printk(KERN_ERR "Failed to request GPIO%d for smsc911x IRQ\n",
-                               eth_gpio);
-               return;
-       }
-       gpio_direction_input(eth_gpio);
+       gpmc_smsc911x_init(&smsc911x_cfg);
 }
 
 static struct platform_device ldp_lcd_device = {
@@ -360,19 +265,9 @@ static struct twl4030_platform_data ldp_twldata = {
        .keypad         = &ldp_kp_twl4030_data,
 };
 
-static struct i2c_board_info __initdata ldp_i2c_boardinfo[] = {
-       {
-               I2C_BOARD_INFO("twl4030", 0x48),
-               .flags = I2C_CLIENT_WAKE,
-               .irq = INT_34XX_SYS_NIRQ,
-               .platform_data = &ldp_twldata,
-       },
-};
-
 static int __init omap_i2c_init(void)
 {
-       omap_register_i2c_bus(1, 2600, ldp_i2c_boardinfo,
-                       ARRAY_SIZE(ldp_i2c_boardinfo));
+       omap3_pmic_init("twl4030", &ldp_twldata);
        omap_register_i2c_bus(2, 400, NULL, 0);
        omap_register_i2c_bus(3, 400, NULL, 0);
        return 0;
@@ -389,7 +284,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
 };
 
 static struct platform_device *ldp_devices[] __initdata = {
-       &ldp_smsc911x_device,
        &ldp_lcd_device,
        &ldp_gpio_keys_device,
 };
@@ -400,12 +294,6 @@ static struct omap_board_mux board_mux[] __initdata = {
 };
 #endif
 
-static struct omap_musb_board_data musb_board_data = {
-       .interface_type         = MUSB_INTERFACE_ULPI,
-       .mode                   = MUSB_OTG,
-       .power                  = 100,
-};
-
 static struct mtd_partition ldp_nand_partitions[] = {
        /* All the partition sizes are listed in terms of NAND block size */
        {
@@ -446,13 +334,9 @@ static void __init omap_ldp_init(void)
        ldp_init_smsc911x();
        omap_i2c_init();
        platform_add_devices(ldp_devices, ARRAY_SIZE(ldp_devices));
-       ts_gpio = 54;
-       ldp_spi_board_info[0].irq = gpio_to_irq(ts_gpio);
-       spi_register_board_info(ldp_spi_board_info,
-                               ARRAY_SIZE(ldp_spi_board_info));
-       ads7846_dev_init();
+       omap_ads7846_init(1, 54, 310, NULL);
        omap_serial_init();
-       usb_musb_init(&musb_board_data);
+       usb_musb_init(NULL);
        board_nand_init(ldp_nand_partitions,
                ARRAY_SIZE(ldp_nand_partitions), ZOOM_NAND_CS, 0);
 
index e710cd9e079ba59d528a8ecf7a80d2d481c593be..8d74318ed495efcf2522c30e00035463cb0abb67 100644 (file)
@@ -106,14 +106,13 @@ static void __init n8x0_usb_init(void)
        static char     announce[] __initdata = KERN_INFO "TUSB 6010\n";
 
        /* PM companion chip power control pin */
-       ret = gpio_request(TUSB6010_GPIO_ENABLE, "TUSB6010 enable");
+       ret = gpio_request_one(TUSB6010_GPIO_ENABLE, GPIOF_OUT_INIT_LOW,
+                              "TUSB6010 enable");
        if (ret != 0) {
                printk(KERN_ERR "Could not get TUSB power GPIO%i\n",
                       TUSB6010_GPIO_ENABLE);
                return;
        }
-       gpio_direction_output(TUSB6010_GPIO_ENABLE, 0);
-
        tusb_set_power(0);
 
        ret = tusb6010_setup_interface(&tusb_data, TUSB6010_REFCLK_19, 2,
@@ -494,8 +493,12 @@ static struct omap_mmc_platform_data mmc1_data = {
 
 static struct omap_mmc_platform_data *mmc_data[OMAP24XX_NR_MMC];
 
-static void __init n8x0_mmc_init(void)
+static struct gpio n810_emmc_gpios[] __initdata = {
+       { N810_EMMC_VSD_GPIO, GPIOF_OUT_INIT_LOW,  "MMC slot 2 Vddf" },
+       { N810_EMMC_VIO_GPIO, GPIOF_OUT_INIT_LOW,  "MMC slot 2 Vdd"  },
+};
 
+static void __init n8x0_mmc_init(void)
 {
        int err;
 
@@ -512,27 +515,18 @@ static void __init n8x0_mmc_init(void)
                mmc1_data.slots[1].ban_openended = 1;
        }
 
-       err = gpio_request(N8X0_SLOT_SWITCH_GPIO, "MMC slot switch");
+       err = gpio_request_one(N8X0_SLOT_SWITCH_GPIO, GPIOF_OUT_INIT_LOW,
+                              "MMC slot switch");
        if (err)
                return;
 
-       gpio_direction_output(N8X0_SLOT_SWITCH_GPIO, 0);
-
        if (machine_is_nokia_n810()) {
-               err = gpio_request(N810_EMMC_VSD_GPIO, "MMC slot 2 Vddf");
-               if (err) {
-                       gpio_free(N8X0_SLOT_SWITCH_GPIO);
-                       return;
-               }
-               gpio_direction_output(N810_EMMC_VSD_GPIO, 0);
-
-               err = gpio_request(N810_EMMC_VIO_GPIO, "MMC slot 2 Vdd");
+               err = gpio_request_array(n810_emmc_gpios,
+                                        ARRAY_SIZE(n810_emmc_gpios));
                if (err) {
                        gpio_free(N8X0_SLOT_SWITCH_GPIO);
-                       gpio_free(N810_EMMC_VSD_GPIO);
                        return;
                }
-               gpio_direction_output(N810_EMMC_VIO_GPIO, 0);
        }
 
        mmc_data[0] = &mmc1_data;
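The n8x0 conversion shows the GPIO API change applied throughout this merge: gpio_request() plus a separate gpio_direction_input()/gpio_direction_output() call becomes a single gpio_request_one() carrying a GPIOF_* flag, and related pins move into a struct gpio table handed to gpio_request_array(), leaving one error path instead of one per pin (and, as the simplified n810 cleanup implies, the array helper releases anything it already claimed when a later entry fails). A short sketch with made-up GPIO numbers and labels:

#include <linux/gpio.h>

static struct gpio board_mmc_gpios[] __initdata = {
	{ 16, GPIOF_OUT_INIT_LOW, "slot 2 Vddf" },
	{ 17, GPIOF_OUT_INIT_LOW, "slot 2 Vdd"  },
};

static int __init board_mmc_gpio_init(void)
{
	int err;

	/* request and drive low in one call */
	err = gpio_request_one(23, GPIOF_OUT_INIT_LOW, "MMC slot switch");
	if (err)
		return err;

	/* request the whole table; only the separately requested pin
	 * needs explicit cleanup if this fails */
	err = gpio_request_array(board_mmc_gpios, ARRAY_SIZE(board_mmc_gpios));
	if (err)
		gpio_free(23);

	return err;
}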
index 97750d483a701e7a9cbdf0a78fe69f16aa9510c1..be71426359f2ecda9644f41a7e6cd6e64ab6e58b 100644 (file)
@@ -52,6 +52,7 @@
 #include "hsmmc.h"
 #include "timer-gp.h"
 #include "pm.h"
+#include "common-board-devices.h"
 
 #define NAND_BLOCK_SIZE                SZ_128K
 
@@ -79,6 +80,12 @@ static u8 omap3_beagle_get_rev(void)
        return omap3_beagle_version;
 }
 
+static struct gpio omap3_beagle_rev_gpios[] __initdata = {
+       { 171, GPIOF_IN, "rev_id_0"    },
+       { 172, GPIOF_IN, "rev_id_1" },
+       { 173, GPIOF_IN, "rev_id_2"    },
+};
+
 static void __init omap3_beagle_init_rev(void)
 {
        int ret;
@@ -88,21 +95,13 @@ static void __init omap3_beagle_init_rev(void)
        omap_mux_init_gpio(172, OMAP_PIN_INPUT_PULLUP);
        omap_mux_init_gpio(173, OMAP_PIN_INPUT_PULLUP);
 
-       ret = gpio_request(171, "rev_id_0");
-       if (ret < 0)
-               goto fail0;
-
-       ret = gpio_request(172, "rev_id_1");
-       if (ret < 0)
-               goto fail1;
-
-       ret = gpio_request(173, "rev_id_2");
-       if (ret < 0)
-               goto fail2;
-
-       gpio_direction_input(171);
-       gpio_direction_input(172);
-       gpio_direction_input(173);
+       ret = gpio_request_array(omap3_beagle_rev_gpios,
+                                ARRAY_SIZE(omap3_beagle_rev_gpios));
+       if (ret < 0) {
+               printk(KERN_ERR "Unable to get revision detection GPIO pins\n");
+               omap3_beagle_version = OMAP3BEAGLE_BOARD_UNKN;
+               return;
+       }
 
        beagle_rev = gpio_get_value(171) | (gpio_get_value(172) << 1)
                        | (gpio_get_value(173) << 2);
@@ -128,18 +127,6 @@ static void __init omap3_beagle_init_rev(void)
                printk(KERN_INFO "OMAP3 Beagle Rev: unknown %hd\n", beagle_rev);
                omap3_beagle_version = OMAP3BEAGLE_BOARD_UNKN;
        }
-
-       return;
-
-fail2:
-       gpio_free(172);
-fail1:
-       gpio_free(171);
-fail0:
-       printk(KERN_ERR "Unable to get revision detection GPIO pins\n");
-       omap3_beagle_version = OMAP3BEAGLE_BOARD_UNKN;
-
-       return;
 }
 
 static struct mtd_partition omap3beagle_nand_partitions[] = {
@@ -173,15 +160,6 @@ static struct mtd_partition omap3beagle_nand_partitions[] = {
        },
 };
 
-static struct omap_nand_platform_data omap3beagle_nand_data = {
-       .options        = NAND_BUSWIDTH_16,
-       .parts          = omap3beagle_nand_partitions,
-       .nr_parts       = ARRAY_SIZE(omap3beagle_nand_partitions),
-       .dma_channel    = -1,           /* disable DMA in OMAP NAND driver */
-       .nand_setup     = NULL,
-       .dev_ready      = NULL,
-};
-
 /* DSS */
 
 static int beagle_enable_dvi(struct omap_dss_device *dssdev)
@@ -243,13 +221,10 @@ static void __init beagle_display_init(void)
 {
        int r;
 
-       r = gpio_request(beagle_dvi_device.reset_gpio, "DVI reset");
-       if (r < 0) {
+       r = gpio_request_one(beagle_dvi_device.reset_gpio, GPIOF_OUT_INIT_LOW,
+                            "DVI reset");
+       if (r < 0)
                printk(KERN_ERR "Unable to get DVI reset GPIO\n");
-               return;
-       }
-
-       gpio_direction_output(beagle_dvi_device.reset_gpio, 0);
 }
 
 #include "sdram-micron-mt46h32m32lf-6.h"
@@ -276,7 +251,7 @@ static struct gpio_led gpio_leds[];
 static int beagle_twl_gpio_setup(struct device *dev,
                unsigned gpio, unsigned ngpio)
 {
-       int r;
+       int r, usb_pwr_level;
 
        if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) {
                mmc[0].gpio_wp = -EINVAL;
@@ -295,66 +270,46 @@ static int beagle_twl_gpio_setup(struct device *dev,
        beagle_vmmc1_supply.dev = mmc[0].dev;
        beagle_vsim_supply.dev = mmc[0].dev;
 
-       /* REVISIT: need ehci-omap hooks for external VBUS
-        * power switch and overcurrent detect
-        */
-       if (omap3_beagle_get_rev() != OMAP3BEAGLE_BOARD_XM) {
-               r = gpio_request(gpio + 1, "EHCI_nOC");
-               if (!r) {
-                       r = gpio_direction_input(gpio + 1);
-                       if (r)
-                               gpio_free(gpio + 1);
-               }
-               if (r)
-                       pr_err("%s: unable to configure EHCI_nOC\n", __func__);
-       }
-
        /*
         * TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, XM active
         * high / others active low)
-        */
-       gpio_request(gpio + TWL4030_GPIO_MAX, "nEN_USB_PWR");
-       if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM)
-               gpio_direction_output(gpio + TWL4030_GPIO_MAX, 1);
-       else
-               gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0);
-
-       /* DVI reset GPIO is different between beagle revisions */
-       if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM)
-               beagle_dvi_device.reset_gpio = 129;
-       else
-               beagle_dvi_device.reset_gpio = 170;
-
-       /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
-       gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
-
-       /*
-        * gpio + 1 on Xm controls the TFP410's enable line (active low)
-        * gpio + 2 control varies depending on the board rev as follows:
-        * P7/P8 revisions(prototype): Camera EN
-        * A2+ revisions (production): LDO (supplies DVI, serial, led blocks)
+        * DVI reset GPIO is different between beagle revisions
         */
        if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) {
-               r = gpio_request(gpio + 1, "nDVI_PWR_EN");
-               if (!r) {
-                       r = gpio_direction_output(gpio + 1, 0);
-                       if (r)
-                               gpio_free(gpio + 1);
-               }
+               usb_pwr_level = GPIOF_OUT_INIT_HIGH;
+               beagle_dvi_device.reset_gpio = 129;
+               /*
+                * gpio + 1 on Xm controls the TFP410's enable line (active low)
+                * gpio + 2 control varies depending on the board rev as below:
+                * P7/P8 revisions(prototype): Camera EN
+                * A2+ revisions (production): LDO (DVI, serial, led blocks)
+                */
+               r = gpio_request_one(gpio + 1, GPIOF_OUT_INIT_LOW,
+                                    "nDVI_PWR_EN");
                if (r)
                        pr_err("%s: unable to configure nDVI_PWR_EN\n",
                                __func__);
-               r = gpio_request(gpio + 2, "DVI_LDO_EN");
-               if (!r) {
-                       r = gpio_direction_output(gpio + 2, 1);
-                       if (r)
-                               gpio_free(gpio + 2);
-               }
+               r = gpio_request_one(gpio + 2, GPIOF_OUT_INIT_HIGH,
+                                    "DVI_LDO_EN");
                if (r)
                        pr_err("%s: unable to configure DVI_LDO_EN\n",
                                __func__);
+       } else {
+               usb_pwr_level = GPIOF_OUT_INIT_LOW;
+               beagle_dvi_device.reset_gpio = 170;
+               /*
+                * REVISIT: need ehci-omap hooks for external VBUS
+                * power switch and overcurrent detect
+                */
+               if (gpio_request_one(gpio + 1, GPIOF_IN, "EHCI_nOC"))
+                       pr_err("%s: unable to configure EHCI_nOC\n", __func__);
        }
 
+       gpio_request_one(gpio + TWL4030_GPIO_MAX, usb_pwr_level, "nEN_USB_PWR");
+
+       /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
+       gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
+
        return 0;
 }
 
@@ -453,15 +408,6 @@ static struct twl4030_platform_data beagle_twldata = {
        .vpll2          = &beagle_vpll2,
 };
 
-static struct i2c_board_info __initdata beagle_i2c_boardinfo[] = {
-       {
-               I2C_BOARD_INFO("twl4030", 0x48),
-               .flags = I2C_CLIENT_WAKE,
-               .irq = INT_34XX_SYS_NIRQ,
-               .platform_data = &beagle_twldata,
-       },
-};
-
 static struct i2c_board_info __initdata beagle_i2c_eeprom[] = {
        {
                I2C_BOARD_INFO("eeprom", 0x50),
@@ -470,8 +416,7 @@ static struct i2c_board_info __initdata beagle_i2c_eeprom[] = {
 
 static int __init omap3_beagle_i2c_init(void)
 {
-       omap_register_i2c_bus(1, 2600, beagle_i2c_boardinfo,
-                       ARRAY_SIZE(beagle_i2c_boardinfo));
+       omap3_pmic_init("twl4030", &beagle_twldata);
        /* Bus 3 is attached to the DVI port where devices like the pico DLP
         * projector don't work reliably with 400kHz */
        omap_register_i2c_bus(3, 100, beagle_i2c_eeprom, ARRAY_SIZE(beagle_i2c_eeprom));
@@ -551,39 +496,6 @@ static struct platform_device *omap3_beagle_devices[] __initdata = {
        &keys_gpio,
 };
 
-static void __init omap3beagle_flash_init(void)
-{
-       u8 cs = 0;
-       u8 nandcs = GPMC_CS_NUM + 1;
-
-       /* find out the chip-select on which NAND exists */
-       while (cs < GPMC_CS_NUM) {
-               u32 ret = 0;
-               ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
-
-               if ((ret & 0xC00) == 0x800) {
-                       printk(KERN_INFO "Found NAND on CS%d\n", cs);
-                       if (nandcs > GPMC_CS_NUM)
-                               nandcs = cs;
-               }
-               cs++;
-       }
-
-       if (nandcs > GPMC_CS_NUM) {
-               printk(KERN_INFO "NAND: Unable to find configuration "
-                                "in GPMC\n ");
-               return;
-       }
-
-       if (nandcs < GPMC_CS_NUM) {
-               omap3beagle_nand_data.cs = nandcs;
-
-               printk(KERN_INFO "Registering NAND on CS%d\n", nandcs);
-               if (gpmc_nand_init(&omap3beagle_nand_data) < 0)
-                       printk(KERN_ERR "Unable to register NAND device\n");
-       }
-}
-
 static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
 
        .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
@@ -602,12 +514,6 @@ static struct omap_board_mux board_mux[] __initdata = {
 };
 #endif
 
-static struct omap_musb_board_data musb_board_data = {
-       .interface_type         = MUSB_INTERFACE_ULPI,
-       .mode                   = MUSB_OTG,
-       .power                  = 100,
-};
-
 static void __init beagle_opp_init(void)
 {
        int r = 0;
@@ -665,13 +571,13 @@ static void __init omap3_beagle_init(void)
        omap_serial_init();
 
        omap_mux_init_gpio(170, OMAP_PIN_INPUT);
-       gpio_request(170, "DVI_nPD");
        /* REVISIT leave DVI powered down until it's needed ... */
-       gpio_direction_output(170, true);
+       gpio_request_one(170, GPIOF_OUT_INIT_HIGH, "DVI_nPD");
 
-       usb_musb_init(&musb_board_data);
+       usb_musb_init(NULL);
        usbhs_init(&usbhs_bdata);
-       omap3beagle_flash_init();
+       omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions,
+                            ARRAY_SIZE(omap3beagle_nand_partitions));
 
        /* Ensure SDRC pins are mux'd for self-refresh */
        omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
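Beagle is the first board above to use the PMIC helper from common-board-devices.h: the i2c_board_info block naming the twl4030 at address 0x48 with I2C_CLIENT_WAKE and the SYS_NIRQ interrupt, plus the bus-1 registration, are replaced by one omap3_pmic_init() call, and usb_musb_init(NULL) now selects default MUSB board data. Later hunks show the chip name as the only per-board variation: the Pandora passes "tps65950" and the OMAP4 Panda uses omap4_pmic_init("twl6030", ...), while the other I2C buses keep their explicit omap_register_i2c_bus() calls. A sketch of what a converted board's I2C init reduces to; board_twldata and board_i2c_init() are placeholder names.

#include <linux/i2c/twl.h>
#include "common-board-devices.h"

/* the board's existing twl4030_platform_data (regulators, GPIO, keypad, ...) */
static struct twl4030_platform_data board_twldata;

static int __init board_i2c_init(void)
{
	/* registers I2C bus 1 and attaches the named PMIC with the wake
	 * flag and system IRQ that the removed i2c_board_info carried */
	omap3_pmic_init("twl4030", &board_twldata);
	return 0;
}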
index 7f94cccdb07685a9826fc9467eaedba0c438998d..b4d43464a303f43e5a6283c61dd09b55f657d624 100644 (file)
@@ -50,6 +50,7 @@
 #include "mux.h"
 #include "sdram-micron-mt46h32m32lf-6.h"
 #include "hsmmc.h"
+#include "common-board-devices.h"
 
 #define OMAP3_EVM_TS_GPIO      175
 #define OMAP3_EVM_EHCI_VBUS    22
@@ -101,49 +102,20 @@ static void __init omap3_evm_get_revision(void)
 }
 
 #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
-static struct resource omap3evm_smsc911x_resources[] = {
-       [0] =   {
-               .start  = OMAP3EVM_ETHR_START,
-               .end    = (OMAP3EVM_ETHR_START + OMAP3EVM_ETHR_SIZE - 1),
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] =   {
-               .start  = OMAP_GPIO_IRQ(OMAP3EVM_ETHR_GPIO_IRQ),
-               .end    = OMAP_GPIO_IRQ(OMAP3EVM_ETHR_GPIO_IRQ),
-               .flags  = (IORESOURCE_IRQ | IRQF_TRIGGER_LOW),
-       },
-};
+#include <plat/gpmc-smsc911x.h>
 
-static struct smsc911x_platform_config smsc911x_config = {
-       .phy_interface  = PHY_INTERFACE_MODE_MII,
-       .irq_polarity   = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
-       .irq_type       = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
-       .flags          = (SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS),
-};
-
-static struct platform_device omap3evm_smsc911x_device = {
-       .name           = "smsc911x",
-       .id             = -1,
-       .num_resources  = ARRAY_SIZE(omap3evm_smsc911x_resources),
-       .resource       = &omap3evm_smsc911x_resources[0],
-       .dev            = {
-               .platform_data = &smsc911x_config,
-       },
+static struct omap_smsc911x_platform_data smsc911x_cfg = {
+       .cs             = OMAP3EVM_SMSC911X_CS,
+       .gpio_irq       = OMAP3EVM_ETHR_GPIO_IRQ,
+       .gpio_reset     = -EINVAL,
+       .flags          = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
 };
 
 static inline void __init omap3evm_init_smsc911x(void)
 {
-       int eth_cs, eth_rst;
        struct clk *l3ck;
        unsigned int rate;
 
-       if (get_omap3_evm_rev() == OMAP3EVM_BOARD_GEN_1)
-               eth_rst = OMAP3EVM_GEN1_ETHR_GPIO_RST;
-       else
-               eth_rst = OMAP3EVM_GEN2_ETHR_GPIO_RST;
-
-       eth_cs = OMAP3EVM_SMSC911X_CS;
-
        l3ck = clk_get(NULL, "l3_ck");
        if (IS_ERR(l3ck))
                rate = 100000000;
@@ -152,33 +124,13 @@ static inline void __init omap3evm_init_smsc911x(void)
 
        /* Configure ethernet controller reset gpio */
        if (cpu_is_omap3430()) {
-               if (gpio_request(eth_rst, "SMSC911x gpio") < 0) {
-                       pr_err(KERN_ERR "Failed to request %d for smsc911x\n",
-                                       eth_rst);
-                       return;
-               }
-
-               if (gpio_direction_output(eth_rst, 1) < 0) {
-                       pr_err(KERN_ERR "Failed to set direction of %d for" \
-                                       " smsc911x\n", eth_rst);
-                       return;
-               }
-               /* reset pulse to ethernet controller*/
-               usleep_range(150, 220);
-               gpio_set_value(eth_rst, 0);
-               usleep_range(150, 220);
-               gpio_set_value(eth_rst, 1);
-               usleep_range(1, 2);
-       }
-
-       if (gpio_request(OMAP3EVM_ETHR_GPIO_IRQ, "SMSC911x irq") < 0) {
-               printk(KERN_ERR "Failed to request GPIO%d for smsc911x IRQ\n",
-                       OMAP3EVM_ETHR_GPIO_IRQ);
-               return;
+               if (get_omap3_evm_rev() == OMAP3EVM_BOARD_GEN_1)
+                       smsc911x_cfg.gpio_reset = OMAP3EVM_GEN1_ETHR_GPIO_RST;
+               else
+                       smsc911x_cfg.gpio_reset = OMAP3EVM_GEN2_ETHR_GPIO_RST;
        }
 
-       gpio_direction_input(OMAP3EVM_ETHR_GPIO_IRQ);
-       platform_device_register(&omap3evm_smsc911x_device);
+       gpmc_smsc911x_init(&smsc911x_cfg);
 }
 
 #else
@@ -197,6 +149,15 @@ static inline void __init omap3evm_init_smsc911x(void) { return; }
 #define OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO        210
 #define OMAP3EVM_DVI_PANEL_EN_GPIO     199
 
+static struct gpio omap3_evm_dss_gpios[] __initdata = {
+       { OMAP3EVM_LCD_PANEL_RESB,  GPIOF_OUT_INIT_HIGH, "lcd_panel_resb"  },
+       { OMAP3EVM_LCD_PANEL_INI,   GPIOF_OUT_INIT_HIGH, "lcd_panel_ini"   },
+       { OMAP3EVM_LCD_PANEL_QVGA,  GPIOF_OUT_INIT_LOW,  "lcd_panel_qvga"  },
+       { OMAP3EVM_LCD_PANEL_LR,    GPIOF_OUT_INIT_HIGH, "lcd_panel_lr"    },
+       { OMAP3EVM_LCD_PANEL_UD,    GPIOF_OUT_INIT_HIGH, "lcd_panel_ud"    },
+       { OMAP3EVM_LCD_PANEL_ENVDD, GPIOF_OUT_INIT_LOW,  "lcd_panel_envdd" },
+};
+
 static int lcd_enabled;
 static int dvi_enabled;
 
@@ -204,61 +165,10 @@ static void __init omap3_evm_display_init(void)
 {
        int r;
 
-       r = gpio_request(OMAP3EVM_LCD_PANEL_RESB, "lcd_panel_resb");
-       if (r) {
-               printk(KERN_ERR "failed to get lcd_panel_resb\n");
-               return;
-       }
-       gpio_direction_output(OMAP3EVM_LCD_PANEL_RESB, 1);
-
-       r = gpio_request(OMAP3EVM_LCD_PANEL_INI, "lcd_panel_ini");
-       if (r) {
-               printk(KERN_ERR "failed to get lcd_panel_ini\n");
-               goto err_1;
-       }
-       gpio_direction_output(OMAP3EVM_LCD_PANEL_INI, 1);
-
-       r = gpio_request(OMAP3EVM_LCD_PANEL_QVGA, "lcd_panel_qvga");
-       if (r) {
-               printk(KERN_ERR "failed to get lcd_panel_qvga\n");
-               goto err_2;
-       }
-       gpio_direction_output(OMAP3EVM_LCD_PANEL_QVGA, 0);
-
-       r = gpio_request(OMAP3EVM_LCD_PANEL_LR, "lcd_panel_lr");
-       if (r) {
-               printk(KERN_ERR "failed to get lcd_panel_lr\n");
-               goto err_3;
-       }
-       gpio_direction_output(OMAP3EVM_LCD_PANEL_LR, 1);
-
-       r = gpio_request(OMAP3EVM_LCD_PANEL_UD, "lcd_panel_ud");
-       if (r) {
-               printk(KERN_ERR "failed to get lcd_panel_ud\n");
-               goto err_4;
-       }
-       gpio_direction_output(OMAP3EVM_LCD_PANEL_UD, 1);
-
-       r = gpio_request(OMAP3EVM_LCD_PANEL_ENVDD, "lcd_panel_envdd");
-       if (r) {
-               printk(KERN_ERR "failed to get lcd_panel_envdd\n");
-               goto err_5;
-       }
-       gpio_direction_output(OMAP3EVM_LCD_PANEL_ENVDD, 0);
-
-       return;
-
-err_5:
-       gpio_free(OMAP3EVM_LCD_PANEL_UD);
-err_4:
-       gpio_free(OMAP3EVM_LCD_PANEL_LR);
-err_3:
-       gpio_free(OMAP3EVM_LCD_PANEL_QVGA);
-err_2:
-       gpio_free(OMAP3EVM_LCD_PANEL_INI);
-err_1:
-       gpio_free(OMAP3EVM_LCD_PANEL_RESB);
-
+       r = gpio_request_array(omap3_evm_dss_gpios,
+                              ARRAY_SIZE(omap3_evm_dss_gpios));
+       if (r)
+               printk(KERN_ERR "failed to get lcd_panel_* gpios\n");
 }
 
 static int omap3_evm_enable_lcd(struct omap_dss_device *dssdev)
@@ -448,7 +358,7 @@ static struct platform_device leds_gpio = {
 static int omap3evm_twl_gpio_setup(struct device *dev,
                unsigned gpio, unsigned ngpio)
 {
-       int r;
+       int r, lcd_bl_en;
 
        /* gpio + 0 is "mmc0_cd" (input/IRQ) */
        omap_mux_init_gpio(63, OMAP_PIN_INPUT);
@@ -465,16 +375,14 @@ static int omap3evm_twl_gpio_setup(struct device *dev,
         */
 
        /* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */
-       r = gpio_request(gpio + TWL4030_GPIO_MAX, "EN_LCD_BKL");
-       if (!r)
-               r = gpio_direction_output(gpio + TWL4030_GPIO_MAX,
-                       (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) ? 1 : 0);
+       lcd_bl_en = get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2 ?
+               GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
+       r = gpio_request_one(gpio + TWL4030_GPIO_MAX, lcd_bl_en, "EN_LCD_BKL");
        if (r)
                printk(KERN_ERR "failed to get/set lcd_bkl gpio\n");
 
        /* gpio + 7 == DVI Enable */
-       gpio_request(gpio + 7, "EN_DVI");
-       gpio_direction_output(gpio + 7, 0);
+       gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "EN_DVI");
 
        /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */
        gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
@@ -652,78 +560,18 @@ static struct twl4030_platform_data omap3evm_twldata = {
        .vdac           = &omap3_evm_vdac,
        .vpll2          = &omap3_evm_vpll2,
        .vio            = &omap3evm_vio,
-};
-
-static struct i2c_board_info __initdata omap3evm_i2c_boardinfo[] = {
-       {
-               I2C_BOARD_INFO("twl4030", 0x48),
-               .flags = I2C_CLIENT_WAKE,
-               .irq = INT_34XX_SYS_NIRQ,
-               .platform_data = &omap3evm_twldata,
-       },
+       .vmmc1          = &omap3evm_vmmc1,
+       .vsim           = &omap3evm_vsim,
 };
 
 static int __init omap3_evm_i2c_init(void)
 {
-       /*
-        * REVISIT: These entries can be set in omap3evm_twl_data
-        * after a merge with MFD tree
-        */
-       omap3evm_twldata.vmmc1 = &omap3evm_vmmc1;
-       omap3evm_twldata.vsim = &omap3evm_vsim;
-
-       omap_register_i2c_bus(1, 2600, omap3evm_i2c_boardinfo,
-                       ARRAY_SIZE(omap3evm_i2c_boardinfo));
+       omap3_pmic_init("twl4030", &omap3evm_twldata);
        omap_register_i2c_bus(2, 400, NULL, 0);
        omap_register_i2c_bus(3, 400, NULL, 0);
        return 0;
 }
 
-static void ads7846_dev_init(void)
-{
-       if (gpio_request(OMAP3_EVM_TS_GPIO, "ADS7846 pendown") < 0)
-               printk(KERN_ERR "can't get ads7846 pen down GPIO\n");
-
-       gpio_direction_input(OMAP3_EVM_TS_GPIO);
-       gpio_set_debounce(OMAP3_EVM_TS_GPIO, 310);
-}
-
-static int ads7846_get_pendown_state(void)
-{
-       return !gpio_get_value(OMAP3_EVM_TS_GPIO);
-}
-
-static struct ads7846_platform_data ads7846_config = {
-       .x_max                  = 0x0fff,
-       .y_max                  = 0x0fff,
-       .x_plate_ohms           = 180,
-       .pressure_max           = 255,
-       .debounce_max           = 10,
-       .debounce_tol           = 3,
-       .debounce_rep           = 1,
-       .get_pendown_state      = ads7846_get_pendown_state,
-       .keep_vref_on           = 1,
-       .settle_delay_usecs     = 150,
-       .wakeup                         = true,
-};
-
-static struct omap2_mcspi_device_config ads7846_mcspi_config = {
-       .turbo_mode     = 0,
-       .single_channel = 1,    /* 0: slave, 1: master */
-};
-
-static struct spi_board_info omap3evm_spi_board_info[] = {
-       [0] = {
-               .modalias               = "ads7846",
-               .bus_num                = 1,
-               .chip_select            = 0,
-               .max_speed_hz           = 1500000,
-               .controller_data        = &ads7846_mcspi_config,
-               .irq                    = OMAP_GPIO_IRQ(OMAP3_EVM_TS_GPIO),
-               .platform_data          = &ads7846_config,
-       },
-};
-
 static struct omap_board_config_kernel omap3_evm_config[] __initdata = {
 };
 
@@ -825,6 +673,11 @@ static struct omap_musb_board_data musb_board_data = {
        .power                  = 100,
 };
 
+static struct gpio omap3_evm_ehci_gpios[] __initdata = {
+       { OMAP3_EVM_EHCI_VBUS,   GPIOF_OUT_INIT_HIGH,  "enable EHCI VBUS" },
+       { OMAP3_EVM_EHCI_SELECT, GPIOF_OUT_INIT_LOW,   "select EHCI port" },
+};
+
 static void __init omap3_evm_init(void)
 {
        omap3_evm_get_revision();
@@ -841,9 +694,6 @@ static void __init omap3_evm_init(void)
 
        omap_display_init(&omap3_evm_dss_data);
 
-       spi_register_board_info(omap3evm_spi_board_info,
-                               ARRAY_SIZE(omap3evm_spi_board_info));
-
        omap_serial_init();
 
        /* OMAP3EVM uses ISP1504 phy and so register nop transceiver */
@@ -851,16 +701,12 @@ static void __init omap3_evm_init(void)
 
        if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) {
                /* enable EHCI VBUS using GPIO22 */
-               omap_mux_init_gpio(22, OMAP_PIN_INPUT_PULLUP);
-               gpio_request(OMAP3_EVM_EHCI_VBUS, "enable EHCI VBUS");
-               gpio_direction_output(OMAP3_EVM_EHCI_VBUS, 0);
-               gpio_set_value(OMAP3_EVM_EHCI_VBUS, 1);
-
+               omap_mux_init_gpio(OMAP3_EVM_EHCI_VBUS, OMAP_PIN_INPUT_PULLUP);
                /* Select EHCI port on main board */
-               omap_mux_init_gpio(61, OMAP_PIN_INPUT_PULLUP);
-               gpio_request(OMAP3_EVM_EHCI_SELECT, "select EHCI port");
-               gpio_direction_output(OMAP3_EVM_EHCI_SELECT, 0);
-               gpio_set_value(OMAP3_EVM_EHCI_SELECT, 0);
+               omap_mux_init_gpio(OMAP3_EVM_EHCI_SELECT,
+                                  OMAP_PIN_INPUT_PULLUP);
+               gpio_request_array(omap3_evm_ehci_gpios,
+                                  ARRAY_SIZE(omap3_evm_ehci_gpios));
 
                /* setup EHCI phy reset config */
                omap_mux_init_gpio(21, OMAP_PIN_INPUT_PULLUP);
@@ -876,7 +722,7 @@ static void __init omap3_evm_init(void)
        }
        usb_musb_init(&musb_board_data);
        usbhs_init(&usbhs_bdata);
-       ads7846_dev_init();
+       omap_ads7846_init(1, OMAP3_EVM_TS_GPIO, 310, NULL);
        omap3evm_init_smsc911x();
        omap3_evm_display_init();
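The EVM change above retires another copy of the ads7846 touchscreen boilerplate (pen-down GPIO setup, pendown-state callback, ads7846_platform_data, omap2_mcspi_device_config and spi_board_info) in favour of omap_ads7846_init(). Going by the call sites in this series, its arguments are the SPI bus number, the pen-down GPIO, the debounce value that previously went to gpio_set_debounce(), and an optional ads7846_platform_data override, with NULL selecting common defaults; boards needing custom calibration, such as the Touch Book further down, pass their own platform data. A hedged sketch, where BOARD_TS_GPIO and board_ts_init() are illustrative:

#include "common-board-devices.h"

#define BOARD_TS_GPIO	175	/* pen-down GPIO, board specific */

static void __init board_ts_init(void)
{
	/* SPI bus 1, same 310 debounce value the old gpio_set_debounce()
	 * call used, NULL keeps the shared default platform data */
	omap_ads7846_init(1, BOARD_TS_GPIO, 310, NULL);
}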
 
index b726943d7c93da3b92b79c71d10e790f51cb1539..60d9be49dbab1a3595b12291a65abe36d6724bc4 100644 (file)
@@ -37,6 +37,7 @@
 #include "hsmmc.h"
 #include "timer-gp.h"
 #include "control.h"
+#include "common-board-devices.h"
 
 #include <plat/mux.h>
 #include <plat/board.h>
@@ -93,19 +94,9 @@ static struct twl4030_platform_data omap3logic_twldata = {
        .vmmc1          = &omap3logic_vmmc1,
 };
 
-static struct i2c_board_info __initdata omap3logic_i2c_boardinfo[] = {
-       {
-               I2C_BOARD_INFO("twl4030", 0x48),
-               .flags = I2C_CLIENT_WAKE,
-               .irq = INT_34XX_SYS_NIRQ,
-               .platform_data = &omap3logic_twldata,
-       },
-};
-
 static int __init omap3logic_i2c_init(void)
 {
-       omap_register_i2c_bus(1, 2600, omap3logic_i2c_boardinfo,
-                               ARRAY_SIZE(omap3logic_i2c_boardinfo));
+       omap3_pmic_init("twl4030", &omap3logic_twldata);
        return 0;
 }
 
@@ -147,7 +138,6 @@ static struct omap_smsc911x_platform_data __initdata board_smsc911x_data = {
        .cs             = OMAP3LOGIC_SMSC911X_CS,
        .gpio_irq       = -EINVAL,
        .gpio_reset     = -EINVAL,
-       .flags          = IORESOURCE_IRQ_LOWLEVEL,
 };
 
 /* TODO/FIXME (comment by Peter Barada, LogicPD):
index 1db15492d82bca96dce8279da960736032309925..1d10736c6d3c1d358cbef88468294cb7d4f1834b 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/platform_device.h>
 
 #include <linux/spi/spi.h>
-#include <linux/spi/ads7846.h>
 #include <linux/regulator/machine.h>
 #include <linux/i2c/twl.h>
 #include <linux/wl12xx.h>
@@ -52,6 +51,7 @@
 #include "mux.h"
 #include "sdram-micron-mt46h32m32lf-6.h"
 #include "hsmmc.h"
+#include "common-board-devices.h"
 
 #define PANDORA_WIFI_IRQ_GPIO          21
 #define PANDORA_WIFI_NRESET_GPIO       23
@@ -305,24 +305,13 @@ static int omap3pandora_twl_gpio_setup(struct device *dev,
 
        /* gpio + 13 drives 32kHz buffer for wifi module */
        gpio_32khz = gpio + 13;
-       ret = gpio_request(gpio_32khz, "wifi 32kHz");
+       ret = gpio_request_one(gpio_32khz, GPIOF_OUT_INIT_HIGH, "wifi 32kHz");
        if (ret < 0) {
                pr_err("Cannot get GPIO line %d, ret=%d\n", gpio_32khz, ret);
-               goto fail;
-       }
-
-       ret = gpio_direction_output(gpio_32khz, 1);
-       if (ret < 0) {
-               pr_err("Cannot set GPIO line %d, ret=%d\n", gpio_32khz, ret);
-               goto fail_direction;
+               return -ENODEV;
        }
 
        return 0;
-
-fail_direction:
-       gpio_free(gpio_32khz);
-fail:
-       return -ENODEV;
 }
 
 static struct twl4030_gpio_platform_data omap3pandora_gpio_data = {
@@ -544,15 +533,6 @@ static struct twl4030_platform_data omap3pandora_twldata = {
        .bci            = &pandora_bci_data,
 };
 
-static struct i2c_board_info __initdata omap3pandora_i2c_boardinfo[] = {
-       {
-               I2C_BOARD_INFO("tps65950", 0x48),
-               .flags = I2C_CLIENT_WAKE,
-               .irq = INT_34XX_SYS_NIRQ,
-               .platform_data = &omap3pandora_twldata,
-       },
-};
-
 static struct i2c_board_info __initdata omap3pandora_i2c3_boardinfo[] = {
        {
                I2C_BOARD_INFO("bq27500", 0x55),
@@ -562,61 +542,15 @@ static struct i2c_board_info __initdata omap3pandora_i2c3_boardinfo[] = {
 
 static int __init omap3pandora_i2c_init(void)
 {
-       omap_register_i2c_bus(1, 2600, omap3pandora_i2c_boardinfo,
-                       ARRAY_SIZE(omap3pandora_i2c_boardinfo));
+       omap3_pmic_init("tps65950", &omap3pandora_twldata);
        /* i2c2 pins are not connected */
        omap_register_i2c_bus(3, 100, omap3pandora_i2c3_boardinfo,
                        ARRAY_SIZE(omap3pandora_i2c3_boardinfo));
        return 0;
 }
 
-static void __init omap3pandora_ads7846_init(void)
-{
-       int gpio = OMAP3_PANDORA_TS_GPIO;
-       int ret;
-
-       ret = gpio_request(gpio, "ads7846_pen_down");
-       if (ret < 0) {
-               printk(KERN_ERR "Failed to request GPIO %d for "
-                               "ads7846 pen down IRQ\n", gpio);
-               return;
-       }
-
-       gpio_direction_input(gpio);
-}
-
-static int ads7846_get_pendown_state(void)
-{
-       return !gpio_get_value(OMAP3_PANDORA_TS_GPIO);
-}
-
-static struct ads7846_platform_data ads7846_config = {
-       .x_max                  = 0x0fff,
-       .y_max                  = 0x0fff,
-       .x_plate_ohms           = 180,
-       .pressure_max           = 255,
-       .debounce_max           = 10,
-       .debounce_tol           = 3,
-       .debounce_rep           = 1,
-       .get_pendown_state      = ads7846_get_pendown_state,
-       .keep_vref_on           = 1,
-};
-
-static struct omap2_mcspi_device_config ads7846_mcspi_config = {
-       .turbo_mode     = 0,
-       .single_channel = 1,    /* 0: slave, 1: master */
-};
-
 static struct spi_board_info omap3pandora_spi_board_info[] __initdata = {
        {
-               .modalias               = "ads7846",
-               .bus_num                = 1,
-               .chip_select            = 0,
-               .max_speed_hz           = 1500000,
-               .controller_data        = &ads7846_mcspi_config,
-               .irq                    = OMAP_GPIO_IRQ(OMAP3_PANDORA_TS_GPIO),
-               .platform_data          = &ads7846_config,
-       }, {
                .modalias               = "tpo_td043mtea1_panel_spi",
                .bus_num                = 1,
                .chip_select            = 1,
@@ -639,14 +573,10 @@ static void __init pandora_wl1251_init(void)
 
        memset(&pandora_wl1251_pdata, 0, sizeof(pandora_wl1251_pdata));
 
-       ret = gpio_request(PANDORA_WIFI_IRQ_GPIO, "wl1251 irq");
+       ret = gpio_request_one(PANDORA_WIFI_IRQ_GPIO, GPIOF_IN, "wl1251 irq");
        if (ret < 0)
                goto fail;
 
-       ret = gpio_direction_input(PANDORA_WIFI_IRQ_GPIO);
-       if (ret < 0)
-               goto fail_irq;
-
        pandora_wl1251_pdata.irq = gpio_to_irq(PANDORA_WIFI_IRQ_GPIO);
        if (pandora_wl1251_pdata.irq < 0)
                goto fail_irq;
@@ -688,12 +618,6 @@ static struct omap_board_mux board_mux[] __initdata = {
 };
 #endif
 
-static struct omap_musb_board_data musb_board_data = {
-       .interface_type         = MUSB_INTERFACE_ULPI,
-       .mode                   = MUSB_OTG,
-       .power                  = 100,
-};
-
 static void __init omap3pandora_init(void)
 {
        omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
@@ -705,9 +629,9 @@ static void __init omap3pandora_init(void)
        omap_serial_init();
        spi_register_board_info(omap3pandora_spi_board_info,
                        ARRAY_SIZE(omap3pandora_spi_board_info));
-       omap3pandora_ads7846_init();
+       omap_ads7846_init(1, OMAP3_PANDORA_TS_GPIO, 0, NULL);
        usbhs_init(&usbhs_bdata);
-       usb_musb_init(&musb_board_data);
+       usb_musb_init(NULL);
        gpmc_nand_init(&pandora_nand_data);
 
        /* Ensure SDRC pins are mux'd for self-refresh */
index a72c90a08c8a940c8f0ffa6d4b628f6507b54e01..0c108a212ea2190904bb6e66ee11aad696f40c15 100644 (file)
@@ -45,7 +45,6 @@
 #include <plat/mcspi.h>
 #include <linux/input/matrix_keypad.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/ads7846.h>
 #include <linux/interrupt.h>
 #include <linux/smsc911x.h>
 #include <linux/i2c/at24.h>
 #include "mux.h"
 #include "hsmmc.h"
 #include "timer-gp.h"
+#include "common-board-devices.h"
 
 #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
+#include <plat/gpmc-smsc911x.h>
+
 #define OMAP3STALKER_ETHR_START        0x2c000000
 #define OMAP3STALKER_ETHR_SIZE 1024
 #define OMAP3STALKER_ETHR_GPIO_IRQ     19
 #define OMAP3STALKER_SMC911X_CS        5
 
-static struct resource omap3stalker_smsc911x_resources[] = {
-       [0] = {
-              .start   = OMAP3STALKER_ETHR_START,
-              .end     =
-              (OMAP3STALKER_ETHR_START + OMAP3STALKER_ETHR_SIZE - 1),
-              .flags   = IORESOURCE_MEM,
-       },
-       [1] = {
-              .start   = OMAP_GPIO_IRQ(OMAP3STALKER_ETHR_GPIO_IRQ),
-              .end     = OMAP_GPIO_IRQ(OMAP3STALKER_ETHR_GPIO_IRQ),
-              .flags   = (IORESOURCE_IRQ | IRQF_TRIGGER_LOW),
-       },
-};
-
-static struct smsc911x_platform_config smsc911x_config = {
-       .phy_interface  = PHY_INTERFACE_MODE_MII,
-       .irq_polarity   = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
-       .irq_type       = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+static struct omap_smsc911x_platform_data smsc911x_cfg = {
+       .cs             = OMAP3STALKER_SMC911X_CS,
+       .gpio_irq       = OMAP3STALKER_ETHR_GPIO_IRQ,
+       .gpio_reset     = -EINVAL,
        .flags          = (SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS),
 };
 
-static struct platform_device omap3stalker_smsc911x_device = {
-       .name           = "smsc911x",
-       .id             = -1,
-       .num_resources  = ARRAY_SIZE(omap3stalker_smsc911x_resources),
-       .resource       = &omap3stalker_smsc911x_resources[0],
-       .dev            = {
-               .platform_data  = &smsc911x_config,
-       },
-};
-
 static inline void __init omap3stalker_init_eth(void)
 {
-       int eth_cs;
        struct clk *l3ck;
        unsigned int rate;
 
-       eth_cs = OMAP3STALKER_SMC911X_CS;
-
        l3ck = clk_get(NULL, "l3_ck");
        if (IS_ERR(l3ck))
                rate = 100000000;
@@ -107,16 +82,7 @@ static inline void __init omap3stalker_init_eth(void)
                rate = clk_get_rate(l3ck);
 
        omap_mux_init_gpio(19, OMAP_PIN_INPUT_PULLUP);
-       if (gpio_request(OMAP3STALKER_ETHR_GPIO_IRQ, "SMC911x irq") < 0) {
-               printk(KERN_ERR
-                      "Failed to request GPIO%d for smc911x IRQ\n",
-                      OMAP3STALKER_ETHR_GPIO_IRQ);
-               return;
-       }
-
-       gpio_direction_input(OMAP3STALKER_ETHR_GPIO_IRQ);
-
-       platform_device_register(&omap3stalker_smsc911x_device);
+       gpmc_smsc911x_init(&smsc911x_cfg);
 }
 
 #else
@@ -365,12 +331,11 @@ omap3stalker_twl_gpio_setup(struct device *dev,
         */
 
        /* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */
-       gpio_request(gpio + TWL4030_GPIO_MAX, "EN_LCD_BKL");
-       gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0);
+       gpio_request_one(gpio + TWL4030_GPIO_MAX, GPIOF_OUT_INIT_LOW,
+                        "EN_LCD_BKL");
 
        /* gpio + 7 == DVI Enable */
-       gpio_request(gpio + 7, "EN_DVI");
-       gpio_direction_output(gpio + 7, 0);
+       gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "EN_DVI");
 
        /* TWL4030_GPIO_MAX + 1 == ledB (out, mmc0) */
        gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
@@ -489,15 +454,8 @@ static struct twl4030_platform_data omap3stalker_twldata = {
        .codec          = &omap3stalker_codec_data,
        .vdac           = &omap3_stalker_vdac,
        .vpll2          = &omap3_stalker_vpll2,
-};
-
-static struct i2c_board_info __initdata omap3stalker_i2c_boardinfo[] = {
-       {
-        I2C_BOARD_INFO("twl4030", 0x48),
-        .flags         = I2C_CLIENT_WAKE,
-        .irq           = INT_34XX_SYS_NIRQ,
-        .platform_data = &omap3stalker_twldata,
-        },
+       .vmmc1          = &omap3stalker_vmmc1,
+       .vsim           = &omap3stalker_vsim,
 };
 
 static struct at24_platform_data fram_info = {
@@ -516,15 +474,7 @@ static struct i2c_board_info __initdata omap3stalker_i2c_boardinfo3[] = {
 
 static int __init omap3_stalker_i2c_init(void)
 {
-       /*
-        * REVISIT: These entries can be set in omap3evm_twl_data
-        * after a merge with MFD tree
-        */
-       omap3stalker_twldata.vmmc1 = &omap3stalker_vmmc1;
-       omap3stalker_twldata.vsim = &omap3stalker_vsim;
-
-       omap_register_i2c_bus(1, 2600, omap3stalker_i2c_boardinfo,
-                             ARRAY_SIZE(omap3stalker_i2c_boardinfo));
+       omap3_pmic_init("twl4030", &omap3stalker_twldata);
        omap_register_i2c_bus(2, 400, NULL, 0);
        omap_register_i2c_bus(3, 400, omap3stalker_i2c_boardinfo3,
                              ARRAY_SIZE(omap3stalker_i2c_boardinfo3));
@@ -532,49 +482,6 @@ static int __init omap3_stalker_i2c_init(void)
 }
 
 #define OMAP3_STALKER_TS_GPIO  175
-static void ads7846_dev_init(void)
-{
-       if (gpio_request(OMAP3_STALKER_TS_GPIO, "ADS7846 pendown") < 0)
-               printk(KERN_ERR "can't get ads7846 pen down GPIO\n");
-
-       gpio_direction_input(OMAP3_STALKER_TS_GPIO);
-       gpio_set_debounce(OMAP3_STALKER_TS_GPIO, 310);
-}
-
-static int ads7846_get_pendown_state(void)
-{
-       return !gpio_get_value(OMAP3_STALKER_TS_GPIO);
-}
-
-static struct ads7846_platform_data ads7846_config = {
-       .x_max                  = 0x0fff,
-       .y_max                  = 0x0fff,
-       .x_plate_ohms           = 180,
-       .pressure_max           = 255,
-       .debounce_max           = 10,
-       .debounce_tol           = 3,
-       .debounce_rep           = 1,
-       .get_pendown_state      = ads7846_get_pendown_state,
-       .keep_vref_on           = 1,
-       .settle_delay_usecs     = 150,
-};
-
-static struct omap2_mcspi_device_config ads7846_mcspi_config = {
-       .turbo_mode             = 0,
-       .single_channel         = 1,    /* 0: slave, 1: master */
-};
-
-static struct spi_board_info omap3stalker_spi_board_info[] = {
-       [0] = {
-              .modalias        = "ads7846",
-              .bus_num         = 1,
-              .chip_select     = 0,
-              .max_speed_hz    = 1500000,
-              .controller_data = &ads7846_mcspi_config,
-              .irq             = OMAP_GPIO_IRQ(OMAP3_STALKER_TS_GPIO),
-              .platform_data   = &ads7846_config,
-       },
-};
 
 static struct omap_board_config_kernel omap3_stalker_config[] __initdata = {
 };
@@ -618,12 +525,6 @@ static struct omap_board_mux board_mux[] __initdata = {
 };
 #endif
 
-static struct omap_musb_board_data musb_board_data = {
-       .interface_type = MUSB_INTERFACE_ULPI,
-       .mode           = MUSB_OTG,
-       .power          = 100,
-};
-
 static void __init omap3_stalker_init(void)
 {
        omap3_mux_init(board_mux, OMAP_PACKAGE_CUS);
@@ -636,13 +537,11 @@ static void __init omap3_stalker_init(void)
                             ARRAY_SIZE(omap3_stalker_devices));
 
        omap_display_init(&omap3_stalker_dss_data);
-       spi_register_board_info(omap3stalker_spi_board_info,
-                               ARRAY_SIZE(omap3stalker_spi_board_info));
 
        omap_serial_init();
-       usb_musb_init(&musb_board_data);
+       usb_musb_init(NULL);
        usbhs_init(&usbhs_bdata);
-       ads7846_dev_init();
+       omap_ads7846_init(1, OMAP3_STALKER_TS_GPIO, 310, NULL);
 
        omap_mux_init_gpio(21, OMAP_PIN_OUTPUT);
        omap_mux_init_gpio(18, OMAP_PIN_INPUT_PULLUP);
index 127cb1752bddea97edff181a4a3dc3022c306480..82872d7d313b7dd624bf5f39c0a819e61bf4bf9e 100644 (file)
@@ -52,6 +52,7 @@
 #include "mux.h"
 #include "hsmmc.h"
 #include "timer-gp.h"
+#include "common-board-devices.h"
 
 #include <asm/setup.h>
 
@@ -95,15 +96,6 @@ static struct mtd_partition omap3touchbook_nand_partitions[] = {
        },
 };
 
-static struct omap_nand_platform_data omap3touchbook_nand_data = {
-       .options        = NAND_BUSWIDTH_16,
-       .parts          = omap3touchbook_nand_partitions,
-       .nr_parts       = ARRAY_SIZE(omap3touchbook_nand_partitions),
-       .dma_channel    = -1,           /* disable DMA in OMAP NAND driver */
-       .nand_setup     = NULL,
-       .dev_ready      = NULL,
-};
-
 #include "sdram-micron-mt46h32m32lf-6.h"
 
 static struct omap2_hsmmc_info mmc[] = {
@@ -154,13 +146,11 @@ static int touchbook_twl_gpio_setup(struct device *dev,
        /* REVISIT: need ehci-omap hooks for external VBUS
         * power switch and overcurrent detect
         */
-
-       gpio_request(gpio + 1, "EHCI_nOC");
-       gpio_direction_input(gpio + 1);
+       gpio_request_one(gpio + 1, GPIOF_IN, "EHCI_nOC");
 
        /* TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, active low) */
-       gpio_request(gpio + TWL4030_GPIO_MAX, "nEN_USB_PWR");
-       gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0);
+       gpio_request_one(gpio + TWL4030_GPIO_MAX, GPIOF_OUT_INIT_LOW,
+                        "nEN_USB_PWR");
 
        /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
        gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
@@ -273,15 +263,6 @@ static struct twl4030_platform_data touchbook_twldata = {
        .vpll2          = &touchbook_vpll2,
 };
 
-static struct i2c_board_info __initdata touchbook_i2c_boardinfo[] = {
-       {
-               I2C_BOARD_INFO("twl4030", 0x48),
-               .flags = I2C_CLIENT_WAKE,
-               .irq = INT_34XX_SYS_NIRQ,
-               .platform_data = &touchbook_twldata,
-       },
-};
-
 static struct i2c_board_info __initdata touchBook_i2c_boardinfo[] = {
        {
                I2C_BOARD_INFO("bq27200", 0x55),
@@ -291,8 +272,7 @@ static struct i2c_board_info __initdata touchBook_i2c_boardinfo[] = {
 static int __init omap3_touchbook_i2c_init(void)
 {
        /* Standard TouchBook bus */
-       omap_register_i2c_bus(1, 2600, touchbook_i2c_boardinfo,
-                       ARRAY_SIZE(touchbook_i2c_boardinfo));
+       omap3_pmic_init("twl4030", &touchbook_twldata);
 
        /* Additional TouchBook bus */
        omap_register_i2c_bus(3, 100, touchBook_i2c_boardinfo,
@@ -301,19 +281,7 @@ static int __init omap3_touchbook_i2c_init(void)
        return 0;
 }
 
-static void __init omap3_ads7846_init(void)
-{
-       if (gpio_request(OMAP3_TS_GPIO, "ads7846_pen_down")) {
-               printk(KERN_ERR "Failed to request GPIO %d for "
-                               "ads7846 pen down IRQ\n", OMAP3_TS_GPIO);
-               return;
-       }
-
-       gpio_direction_input(OMAP3_TS_GPIO);
-       gpio_set_debounce(OMAP3_TS_GPIO, 310);
-}
-
-static struct ads7846_platform_data ads7846_config = {
+static struct ads7846_platform_data ads7846_pdata = {
        .x_min                  = 100,
        .y_min                  = 265,
        .x_max                  = 3950,
@@ -327,23 +295,6 @@ static struct ads7846_platform_data ads7846_config = {
        .keep_vref_on           = 1,
 };
 
-static struct omap2_mcspi_device_config ads7846_mcspi_config = {
-       .turbo_mode     = 0,
-       .single_channel = 1,    /* 0: slave, 1: master */
-};
-
-static struct spi_board_info omap3_ads7846_spi_board_info[] __initdata = {
-       {
-               .modalias               = "ads7846",
-               .bus_num                = 4,
-               .chip_select            = 0,
-               .max_speed_hz           = 1500000,
-               .controller_data        = &ads7846_mcspi_config,
-               .irq                    = OMAP_GPIO_IRQ(OMAP3_TS_GPIO),
-               .platform_data          = &ads7846_config,
-       }
-};
-
 static struct gpio_led gpio_leds[] = {
        {
                .name                   = "touchbook::usr0",
@@ -434,39 +385,6 @@ static struct platform_device *omap3_touchbook_devices[] __initdata = {
        &keys_gpio,
 };
 
-static void __init omap3touchbook_flash_init(void)
-{
-       u8 cs = 0;
-       u8 nandcs = GPMC_CS_NUM + 1;
-
-       /* find out the chip-select on which NAND exists */
-       while (cs < GPMC_CS_NUM) {
-               u32 ret = 0;
-               ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
-
-               if ((ret & 0xC00) == 0x800) {
-                       printk(KERN_INFO "Found NAND on CS%d\n", cs);
-                       if (nandcs > GPMC_CS_NUM)
-                               nandcs = cs;
-               }
-               cs++;
-       }
-
-       if (nandcs > GPMC_CS_NUM) {
-               printk(KERN_INFO "NAND: Unable to find configuration "
-                                "in GPMC\n ");
-               return;
-       }
-
-       if (nandcs < GPMC_CS_NUM) {
-               omap3touchbook_nand_data.cs = nandcs;
-
-               printk(KERN_INFO "Registering NAND on CS%d\n", nandcs);
-               if (gpmc_nand_init(&omap3touchbook_nand_data) < 0)
-                       printk(KERN_ERR "Unable to register NAND device\n");
-       }
-}
-
 static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
 
        .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY,
@@ -481,15 +399,10 @@ static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
 
 static void omap3_touchbook_poweroff(void)
 {
-       int r;
+       int pwr_off = TB_KILL_POWER_GPIO;
 
-       r = gpio_request(TB_KILL_POWER_GPIO, "DVI reset");
-       if (r < 0) {
+       if (gpio_request_one(pwr_off, GPIOF_OUT_INIT_LOW, "DVI reset") < 0)
                printk(KERN_ERR "Unable to get kill power GPIO\n");
-               return;
-       }
-
-       gpio_direction_output(TB_KILL_POWER_GPIO, 0);
 }
 
 static int __init early_touchbook_revision(char *p)
@@ -501,12 +414,6 @@ static int __init early_touchbook_revision(char *p)
 }
 early_param("tbr", early_touchbook_revision);
 
-static struct omap_musb_board_data musb_board_data = {
-       .interface_type         = MUSB_INTERFACE_ULPI,
-       .mode                   = MUSB_OTG,
-       .power                  = 100,
-};
-
 static void __init omap3_touchbook_init(void)
 {
        omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
@@ -521,17 +428,15 @@ static void __init omap3_touchbook_init(void)
        omap_serial_init();
 
        omap_mux_init_gpio(170, OMAP_PIN_INPUT);
-       gpio_request(176, "DVI_nPD");
        /* REVISIT leave DVI powered down until it's needed ... */
-       gpio_direction_output(176, true);
+       gpio_request_one(176, GPIOF_OUT_INIT_HIGH, "DVI_nPD");
 
        /* Touchscreen and accelerometer */
-       spi_register_board_info(omap3_ads7846_spi_board_info,
-                               ARRAY_SIZE(omap3_ads7846_spi_board_info));
-       omap3_ads7846_init();
-       usb_musb_init(&musb_board_data);
+       omap_ads7846_init(4, OMAP3_TS_GPIO, 310, &ads7846_pdata);
+       usb_musb_init(NULL);
        usbhs_init(&usbhs_bdata);
-       omap3touchbook_flash_init();
+       omap_nand_flash_init(NAND_BUSWIDTH_16, omap3touchbook_nand_partitions,
+                            ARRAY_SIZE(omap3touchbook_nand_partitions));
 
        /* Ensure SDRC pins are mux'd for self-refresh */
        omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
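Beagle and the Touch Book carried near-identical flash_init() routines that walked the GPMC chip-selects looking for the NAND and then filled an omap_nand_platform_data; both now call omap_nand_flash_init(), which takes only the bus-width option and the partition table and, judging by the code it removes, handles the chip-select probe and gpmc_nand_init() registration itself. A minimal sketch with a placeholder partition list:

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include "common-board-devices.h"

static struct mtd_partition board_nand_partitions[] = {
	{
		.name	= "X-Loader",		/* illustrative first entry */
		.offset	= 0,
		.size	= 4 * (128 * 1024),	/* four 128K NAND blocks */
	},
	/* ... U-Boot, environment, kernel and file system entries as before ... */
};

static void __init board_flash_init(void)
{
	omap_nand_flash_init(NAND_BUSWIDTH_16, board_nand_partitions,
			     ARRAY_SIZE(board_nand_partitions));
}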
index e4973ac77cbc8d3d18a27cb94bffab0204194588..90485fced973db9ee265465eac4c474a1adf3848 100644 (file)
@@ -46,6 +46,7 @@
 #include "hsmmc.h"
 #include "control.h"
 #include "mux.h"
+#include "common-board-devices.h"
 
 #define GPIO_HUB_POWER         1
 #define GPIO_HUB_NRESET                62
@@ -111,6 +112,11 @@ static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
        .reset_gpio_port[2]  = -EINVAL
 };
 
+static struct gpio panda_ehci_gpios[] __initdata = {
+       { GPIO_HUB_POWER,       GPIOF_OUT_INIT_LOW,  "hub_power"  },
+       { GPIO_HUB_NRESET,      GPIOF_OUT_INIT_LOW,  "hub_nreset" },
+};
+
 static void __init omap4_ehci_init(void)
 {
        int ret;
@@ -120,44 +126,27 @@ static void __init omap4_ehci_init(void)
        phy_ref_clk = clk_get(NULL, "auxclk3_ck");
        if (IS_ERR(phy_ref_clk)) {
                pr_err("Cannot request auxclk3\n");
-               goto error1;
+               return;
        }
        clk_set_rate(phy_ref_clk, 19200000);
        clk_enable(phy_ref_clk);
 
-       /* disable the power to the usb hub prior to init */
-       ret = gpio_request(GPIO_HUB_POWER, "hub_power");
+       /* disable the power to the usb hub prior to init and reset phy+hub */
+       ret = gpio_request_array(panda_ehci_gpios,
+                                ARRAY_SIZE(panda_ehci_gpios));
        if (ret) {
-               pr_err("Cannot request GPIO %d\n", GPIO_HUB_POWER);
-               goto error1;
+               pr_err("Unable to initialize EHCI power/reset\n");
+               return;
        }
-       gpio_export(GPIO_HUB_POWER, 0);
-       gpio_direction_output(GPIO_HUB_POWER, 0);
-       gpio_set_value(GPIO_HUB_POWER, 0);
 
-       /* reset phy+hub */
-       ret = gpio_request(GPIO_HUB_NRESET, "hub_nreset");
-       if (ret) {
-               pr_err("Cannot request GPIO %d\n", GPIO_HUB_NRESET);
-               goto error2;
-       }
+       gpio_export(GPIO_HUB_POWER, 0);
        gpio_export(GPIO_HUB_NRESET, 0);
-       gpio_direction_output(GPIO_HUB_NRESET, 0);
-       gpio_set_value(GPIO_HUB_NRESET, 0);
        gpio_set_value(GPIO_HUB_NRESET, 1);
 
        usbhs_init(&usbhs_bdata);
 
        /* enable power to hub */
        gpio_set_value(GPIO_HUB_POWER, 1);
-       return;
-
-error2:
-       gpio_free(GPIO_HUB_POWER);
-error1:
-       pr_err("Unable to initialize EHCI power/reset\n");
-       return;
-
 }
 
 static struct omap_musb_board_data musb_board_data = {
@@ -408,15 +397,6 @@ static struct twl4030_platform_data omap4_panda_twldata = {
        .usb            = &omap4_usbphy_data,
 };
 
-static struct i2c_board_info __initdata omap4_panda_i2c_boardinfo[] = {
-       {
-               I2C_BOARD_INFO("twl6030", 0x48),
-               .flags = I2C_CLIENT_WAKE,
-               .irq = OMAP44XX_IRQ_SYS_1N,
-               .platform_data = &omap4_panda_twldata,
-       },
-};
-
 /*
  * Display monitor features are burnt in their EEPROM as EDID data. The EEPROM
  * is connected as I2C slave device, and can be accessed at address 0x50
@@ -429,12 +409,7 @@ static struct i2c_board_info __initdata panda_i2c_eeprom[] = {
 
 static int __init omap4_panda_i2c_init(void)
 {
-       /*
-        * Phoenix Audio IC needs I2C1 to
-        * start with 400 KHz or less
-        */
-       omap_register_i2c_bus(1, 400, omap4_panda_i2c_boardinfo,
-                       ARRAY_SIZE(omap4_panda_i2c_boardinfo));
+       omap4_pmic_init("twl6030", &omap4_panda_twldata);
        omap_register_i2c_bus(2, 400, NULL, 0);
        /*
         * Bus 3 is attached to the DVI port where devices like the pico DLP
@@ -651,27 +626,19 @@ static void omap4_panda_hdmi_mux_init(void)
                        OMAP_PIN_INPUT_PULLUP);
 }
 
+static struct gpio panda_hdmi_gpios[] = {
+       { HDMI_GPIO_HPD,        GPIOF_OUT_INIT_HIGH, "hdmi_gpio_hpd"   },
+       { HDMI_GPIO_LS_OE,      GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" },
+};
+
 static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev)
 {
        int status;
 
-       status = gpio_request_one(HDMI_GPIO_HPD, GPIOF_OUT_INIT_HIGH,
-                                                       "hdmi_gpio_hpd");
-       if (status) {
-               pr_err("Cannot request GPIO %d\n", HDMI_GPIO_HPD);
-               return status;
-       }
-       status = gpio_request_one(HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH,
-                                                       "hdmi_gpio_ls_oe");
-       if (status) {
-               pr_err("Cannot request GPIO %d\n", HDMI_GPIO_LS_OE);
-               goto error1;
-       }
-
-       return 0;
-
-error1:
-       gpio_free(HDMI_GPIO_HPD);
+       status = gpio_request_array(panda_hdmi_gpios,
+                                   ARRAY_SIZE(panda_hdmi_gpios));
+       if (status)
+               pr_err("Cannot request HDMI GPIOs\n");
 
        return status;
 }
index 9d192ff3b9ac113307c4de5e42f98abd2b60cefa..1555918e3ffa05c0a79e501e24cf6041b4cca30c 100644 (file)
@@ -56,6 +56,7 @@
 #include "mux.h"
 #include "sdram-micron-mt46h32m32lf-6.h"
 #include "hsmmc.h"
+#include "common-board-devices.h"
 
 #define OVERO_GPIO_BT_XGATE    15
 #define OVERO_GPIO_W2W_NRESET  16
 #if defined(CONFIG_TOUCHSCREEN_ADS7846) || \
        defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
 
-#include <linux/spi/ads7846.h>
-
-static struct omap2_mcspi_device_config ads7846_mcspi_config = {
-       .turbo_mode     = 0,
-       .single_channel = 1,    /* 0: slave, 1: master */
-};
-
-static int ads7846_get_pendown_state(void)
-{
-       return !gpio_get_value(OVERO_GPIO_PENDOWN);
-}
-
-static struct ads7846_platform_data ads7846_config = {
-       .x_max                  = 0x0fff,
-       .y_max                  = 0x0fff,
-       .x_plate_ohms           = 180,
-       .pressure_max           = 255,
-       .debounce_max           = 10,
-       .debounce_tol           = 3,
-       .debounce_rep           = 1,
-       .get_pendown_state      = ads7846_get_pendown_state,
-       .keep_vref_on           = 1,
-};
-
 /* fixed regulator for ads7846 */
 static struct regulator_consumer_supply ads7846_supply =
        REGULATOR_SUPPLY("vcc", "spi1.0");
@@ -128,14 +105,7 @@ static struct platform_device vads7846_device = {
 
 static void __init overo_ads7846_init(void)
 {
-       if ((gpio_request(OVERO_GPIO_PENDOWN, "ADS7846_PENDOWN") == 0) &&
-           (gpio_direction_input(OVERO_GPIO_PENDOWN) == 0)) {
-               gpio_export(OVERO_GPIO_PENDOWN, 0);
-       } else {
-               printk(KERN_ERR "could not obtain gpio for ADS7846_PENDOWN\n");
-               return;
-       }
-
+       omap_ads7846_init(1, OVERO_GPIO_PENDOWN, 0, NULL);
        platform_device_register(&vads7846_device);
 }
 
@@ -146,106 +116,28 @@ static inline void __init overo_ads7846_init(void) { return; }
 #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
 
 #include <linux/smsc911x.h>
+#include <plat/gpmc-smsc911x.h>
 
-static struct resource overo_smsc911x_resources[] = {
-       {
-               .name   = "smsc911x-memory",
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
-       },
-};
-
-static struct resource overo_smsc911x2_resources[] = {
-       {
-               .name   = "smsc911x2-memory",
-               .flags  = IORESOURCE_MEM,
-       },
-       {
-               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
-       },
-};
-
-static struct smsc911x_platform_config overo_smsc911x_config = {
-       .irq_polarity   = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
-       .irq_type       = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
-       .flags          = SMSC911X_USE_32BIT ,
-       .phy_interface  = PHY_INTERFACE_MODE_MII,
-};
-
-static struct platform_device overo_smsc911x_device = {
-       .name           = "smsc911x",
+static struct omap_smsc911x_platform_data smsc911x_cfg = {
        .id             = 0,
-       .num_resources  = ARRAY_SIZE(overo_smsc911x_resources),
-       .resource       = overo_smsc911x_resources,
-       .dev            = {
-               .platform_data = &overo_smsc911x_config,
-       },
+       .cs             = OVERO_SMSC911X_CS,
+       .gpio_irq       = OVERO_SMSC911X_GPIO,
+       .gpio_reset     = -EINVAL,
+       .flags          = SMSC911X_USE_32BIT,
 };
 
-static struct platform_device overo_smsc911x2_device = {
-       .name           = "smsc911x",
+static struct omap_smsc911x_platform_data smsc911x2_cfg = {
        .id             = 1,
-       .num_resources  = ARRAY_SIZE(overo_smsc911x2_resources),
-       .resource       = overo_smsc911x2_resources,
-       .dev            = {
-               .platform_data = &overo_smsc911x_config,
-       },
+       .cs             = OVERO_SMSC911X2_CS,
+       .gpio_irq       = OVERO_SMSC911X2_GPIO,
+       .gpio_reset     = -EINVAL,
+       .flags          = SMSC911X_USE_32BIT,
 };
 
-static struct platform_device *smsc911x_devices[] = {
-       &overo_smsc911x_device,
-       &overo_smsc911x2_device,
-};
-
-static inline void __init overo_init_smsc911x(void)
+static void __init overo_init_smsc911x(void)
 {
-       unsigned long cs_mem_base, cs_mem_base2;
-
-       /* set up first smsc911x chip */
-
-       if (gpmc_cs_request(OVERO_SMSC911X_CS, SZ_16M, &cs_mem_base) < 0) {
-               printk(KERN_ERR "Failed request for GPMC mem for smsc911x\n");
-               return;
-       }
-
-       overo_smsc911x_resources[0].start = cs_mem_base + 0x0;
-       overo_smsc911x_resources[0].end   = cs_mem_base + 0xff;
-
-       if ((gpio_request(OVERO_SMSC911X_GPIO, "SMSC911X IRQ") == 0) &&
-           (gpio_direction_input(OVERO_SMSC911X_GPIO) == 0)) {
-               gpio_export(OVERO_SMSC911X_GPIO, 0);
-       } else {
-               printk(KERN_ERR "could not obtain gpio for SMSC911X IRQ\n");
-               return;
-       }
-
-       overo_smsc911x_resources[1].start = OMAP_GPIO_IRQ(OVERO_SMSC911X_GPIO);
-       overo_smsc911x_resources[1].end   = 0;
-
-       /* set up second smsc911x chip */
-
-       if (gpmc_cs_request(OVERO_SMSC911X2_CS, SZ_16M, &cs_mem_base2) < 0) {
-               printk(KERN_ERR "Failed request for GPMC mem for smsc911x2\n");
-               return;
-       }
-
-       overo_smsc911x2_resources[0].start = cs_mem_base2 + 0x0;
-       overo_smsc911x2_resources[0].end   = cs_mem_base2 + 0xff;
-
-       if ((gpio_request(OVERO_SMSC911X2_GPIO, "SMSC911X2 IRQ") == 0) &&
-           (gpio_direction_input(OVERO_SMSC911X2_GPIO) == 0)) {
-               gpio_export(OVERO_SMSC911X2_GPIO, 0);
-       } else {
-               printk(KERN_ERR "could not obtain gpio for SMSC911X2 IRQ\n");
-               return;
-       }
-
-       overo_smsc911x2_resources[1].start = OMAP_GPIO_IRQ(OVERO_SMSC911X2_GPIO);
-       overo_smsc911x2_resources[1].end   = 0;
-
-       platform_add_devices(smsc911x_devices, ARRAY_SIZE(smsc911x_devices));
+       gpmc_smsc911x_init(&smsc911x_cfg);
+       gpmc_smsc911x_init(&smsc911x2_cfg);
 }
 
 #else
@@ -259,21 +151,20 @@ static int dvi_enabled;
 #define OVERO_GPIO_LCD_EN 144
 #define OVERO_GPIO_LCD_BL 145
 
+static struct gpio overo_dss_gpios[] __initdata = {
+       { OVERO_GPIO_LCD_EN, GPIOF_OUT_INIT_HIGH, "OVERO_GPIO_LCD_EN" },
+       { OVERO_GPIO_LCD_BL, GPIOF_OUT_INIT_HIGH, "OVERO_GPIO_LCD_BL" },
+};
+
 static void __init overo_display_init(void)
 {
-       if ((gpio_request(OVERO_GPIO_LCD_EN, "OVERO_GPIO_LCD_EN") == 0) &&
-           (gpio_direction_output(OVERO_GPIO_LCD_EN, 1) == 0))
-               gpio_export(OVERO_GPIO_LCD_EN, 0);
-       else
-               printk(KERN_ERR "could not obtain gpio for "
-                                       "OVERO_GPIO_LCD_EN\n");
+       if (gpio_request_array(overo_dss_gpios, ARRAY_SIZE(overo_dss_gpios))) {
+               printk(KERN_ERR "could not obtain DSS control GPIOs\n");
+               return;
+       }
 
-       if ((gpio_request(OVERO_GPIO_LCD_BL, "OVERO_GPIO_LCD_BL") == 0) &&
-           (gpio_direction_output(OVERO_GPIO_LCD_BL, 1) == 0))
-               gpio_export(OVERO_GPIO_LCD_BL, 0);
-       else
-               printk(KERN_ERR "could not obtain gpio for "
-                                       "OVERO_GPIO_LCD_BL\n");
+       gpio_export(OVERO_GPIO_LCD_EN, 0);
+       gpio_export(OVERO_GPIO_LCD_BL, 0);
 }
 
 static int overo_panel_enable_dvi(struct omap_dss_device *dssdev)
@@ -412,45 +303,6 @@ static struct mtd_partition overo_nand_partitions[] = {
        },
 };
 
-static struct omap_nand_platform_data overo_nand_data = {
-       .parts = overo_nand_partitions,
-       .nr_parts = ARRAY_SIZE(overo_nand_partitions),
-       .dma_channel = -1,      /* disable DMA in OMAP NAND driver */
-};
-
-static void __init overo_flash_init(void)
-{
-       u8 cs = 0;
-       u8 nandcs = GPMC_CS_NUM + 1;
-
-       /* find out the chip-select on which NAND exists */
-       while (cs < GPMC_CS_NUM) {
-               u32 ret = 0;
-               ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
-
-               if ((ret & 0xC00) == 0x800) {
-                       printk(KERN_INFO "Found NAND on CS%d\n", cs);
-                       if (nandcs > GPMC_CS_NUM)
-                               nandcs = cs;
-               }
-               cs++;
-       }
-
-       if (nandcs > GPMC_CS_NUM) {
-               printk(KERN_INFO "NAND: Unable to find configuration "
-                                "in GPMC\n ");
-               return;
-       }
-
-       if (nandcs < GPMC_CS_NUM) {
-               overo_nand_data.cs = nandcs;
-
-               printk(KERN_INFO "Registering NAND on CS%d\n", nandcs);
-               if (gpmc_nand_init(&overo_nand_data) < 0)
-                       printk(KERN_ERR "Unable to register NAND device\n");
-       }
-}
-
 static struct omap2_hsmmc_info mmc[] = {
        {
                .mmc            = 1,
@@ -648,37 +500,15 @@ static struct twl4030_platform_data overo_twldata = {
        .vpll2          = &overo_vpll2,
 };
 
-static struct i2c_board_info __initdata overo_i2c_boardinfo[] = {
-       {
-               I2C_BOARD_INFO("tps65950", 0x48),
-               .flags = I2C_CLIENT_WAKE,
-               .irq = INT_34XX_SYS_NIRQ,
-               .platform_data = &overo_twldata,
-       },
-};
-
 static int __init overo_i2c_init(void)
 {
-       omap_register_i2c_bus(1, 2600, overo_i2c_boardinfo,
-                       ARRAY_SIZE(overo_i2c_boardinfo));
+       omap3_pmic_init("tps65950", &overo_twldata);
        /* i2c2 pins are used for gpio */
        omap_register_i2c_bus(3, 400, NULL, 0);
        return 0;
 }
 
 static struct spi_board_info overo_spi_board_info[] __initdata = {
-#if defined(CONFIG_TOUCHSCREEN_ADS7846) || \
-       defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
-       {
-               .modalias               = "ads7846",
-               .bus_num                = 1,
-               .chip_select            = 0,
-               .max_speed_hz           = 1500000,
-               .controller_data        = &ads7846_mcspi_config,
-               .irq                    = OMAP_GPIO_IRQ(OVERO_GPIO_PENDOWN),
-               .platform_data          = &ads7846_config,
-       },
-#endif
 #if defined(CONFIG_PANEL_LGPHILIPS_LB035Q02) || \
        defined(CONFIG_PANEL_LGPHILIPS_LB035Q02_MODULE)
        {
@@ -722,20 +552,22 @@ static struct omap_board_mux board_mux[] __initdata = {
 };
 #endif
 
-static struct omap_musb_board_data musb_board_data = {
-       .interface_type         = MUSB_INTERFACE_ULPI,
-       .mode                   = MUSB_OTG,
-       .power                  = 100,
+static struct gpio overo_bt_gpios[] __initdata = {
+       { OVERO_GPIO_BT_XGATE,  GPIOF_OUT_INIT_LOW,     "bt_xgate"      },
+       { OVERO_GPIO_BT_NRESET, GPIOF_OUT_INIT_HIGH,    "bt_nreset"     },
 };
 
 static void __init overo_init(void)
 {
+       int ret;
+
        omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
        overo_i2c_init();
        omap_display_init(&overo_dss_data);
        omap_serial_init();
-       overo_flash_init();
-       usb_musb_init(&musb_board_data);
+       omap_nand_flash_init(0, overo_nand_partitions,
+                            ARRAY_SIZE(overo_nand_partitions));
+       usb_musb_init(NULL);
        usbhs_init(&usbhs_bdata);
        overo_spi_init();
        overo_ads7846_init();
@@ -748,9 +580,9 @@ static void __init overo_init(void)
        omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
        omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
 
-       if ((gpio_request(OVERO_GPIO_W2W_NRESET,
-                         "OVERO_GPIO_W2W_NRESET") == 0) &&
-           (gpio_direction_output(OVERO_GPIO_W2W_NRESET, 1) == 0)) {
+       ret = gpio_request_one(OVERO_GPIO_W2W_NRESET, GPIOF_OUT_INIT_HIGH,
+                              "OVERO_GPIO_W2W_NRESET");
+       if (ret == 0) {
                gpio_export(OVERO_GPIO_W2W_NRESET, 0);
                gpio_set_value(OVERO_GPIO_W2W_NRESET, 0);
                udelay(10);
@@ -760,25 +592,20 @@ static void __init overo_init(void)
                                        "OVERO_GPIO_W2W_NRESET\n");
        }
 
-       if ((gpio_request(OVERO_GPIO_BT_XGATE, "OVERO_GPIO_BT_XGATE") == 0) &&
-           (gpio_direction_output(OVERO_GPIO_BT_XGATE, 0) == 0))
+       ret = gpio_request_array(overo_bt_gpios, ARRAY_SIZE(overo_bt_gpios));
+       if (ret) {
+               pr_err("%s: could not obtain BT gpios\n", __func__);
+       } else {
                gpio_export(OVERO_GPIO_BT_XGATE, 0);
-       else
-               printk(KERN_ERR "could not obtain gpio for OVERO_GPIO_BT_XGATE\n");
-
-       if ((gpio_request(OVERO_GPIO_BT_NRESET, "OVERO_GPIO_BT_NRESET") == 0) &&
-           (gpio_direction_output(OVERO_GPIO_BT_NRESET, 1) == 0)) {
                gpio_export(OVERO_GPIO_BT_NRESET, 0);
                gpio_set_value(OVERO_GPIO_BT_NRESET, 0);
                mdelay(6);
                gpio_set_value(OVERO_GPIO_BT_NRESET, 1);
-       } else {
-               printk(KERN_ERR "could not obtain gpio for "
-                                       "OVERO_GPIO_BT_NRESET\n");
        }
 
-       if ((gpio_request(OVERO_GPIO_USBH_CPEN, "OVERO_GPIO_USBH_CPEN") == 0) &&
-           (gpio_direction_output(OVERO_GPIO_USBH_CPEN, 1) == 0))
+       ret = gpio_request_one(OVERO_GPIO_USBH_CPEN, GPIOF_OUT_INIT_HIGH,
+                              "OVERO_GPIO_USBH_CPEN");
+       if (ret == 0)
                gpio_export(OVERO_GPIO_USBH_CPEN, 0);
        else
                printk(KERN_ERR "could not obtain gpio for "
index 2af8b05e786d58e5a57fd8f13897804cf7a4d69c..42d10b12da3ccedadacf35f05be4273b37214964 100644 (file)
@@ -31,6 +31,7 @@
 #include "mux.h"
 #include "hsmmc.h"
 #include "sdram-nokia.h"
+#include "common-board-devices.h"
 
 static struct regulator_consumer_supply rm680_vemmc_consumers[] = {
        REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"),
@@ -90,19 +91,9 @@ static struct twl4030_platform_data rm680_twl_data = {
        /* add rest of the children here */
 };
 
-static struct i2c_board_info __initdata rm680_twl_i2c_board_info[] = {
-       {
-               I2C_BOARD_INFO("twl5031", 0x48),
-               .flags          = I2C_CLIENT_WAKE,
-               .irq            = INT_34XX_SYS_NIRQ,
-               .platform_data  = &rm680_twl_data,
-       },
-};
-
 static void __init rm680_i2c_init(void)
 {
-       omap_register_i2c_bus(1, 2900, rm680_twl_i2c_board_info,
-                               ARRAY_SIZE(rm680_twl_i2c_board_info));
+       omap_pmic_init(1, 2900, "twl5031", INT_34XX_SYS_NIRQ, &rm680_twl_data);
        omap_register_i2c_bus(2, 400, NULL, 0);
        omap_register_i2c_bus(3, 400, NULL, 0);
 }
@@ -153,17 +144,11 @@ static struct omap_board_mux board_mux[] __initdata = {
 };
 #endif
 
-static struct omap_musb_board_data rm680_musb_data = {
-       .interface_type = MUSB_INTERFACE_ULPI,
-       .mode           = MUSB_PERIPHERAL,
-       .power          = 100,
-};
-
 static void __init rm680_init(void)
 {
        omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
        omap_serial_init();
-       usb_musb_init(&rm680_musb_data);
+       usb_musb_init(NULL);
        rm680_peripherals_init();
 }
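
Several boards in this series (overo, rm680, zoom) drop their private omap_musb_board_data and call usb_musb_init(NULL). The sketch below shows the fallback this presumably relies on in the usb-musb.c change, which is not part of this section; the default field values are an assumption inferred from the structures removed above:

#include <linux/init.h>
#include <linux/usb/musb.h>
#include <plat/usb.h>

/* assumed defaults; the removed overo/zoom structs used exactly these */
static struct omap_musb_board_data musb_default_board_data = {
        .interface_type = MUSB_INTERFACE_ULPI,
        .mode           = MUSB_OTG,
        .power          = 100,
};

void __init usb_musb_init_sketch(struct omap_musb_board_data *musb_board_data)
{
        /* a NULL argument selects the shared defaults */
        if (!musb_board_data)
                musb_board_data = &musb_default_board_data;

        /* ... controller/platform-device setup continues with musb_board_data ... */
}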
 
index bbcb6775a6a3006486b5d64890aca629e66e3ab3..f6247e71a194419063064c906f690655bf83fceb 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/gpio.h>
 #include <linux/gpio_keys.h>
 #include <linux/mmc/host.h>
+#include <linux/power/isp1704_charger.h>
 
 #include <plat/mcspi.h>
 #include <plat/board.h>
@@ -43,6 +44,7 @@
 
 #include "mux.h"
 #include "hsmmc.h"
+#include "common-board-devices.h"
 
 #define SYSTEM_REV_B_USES_VAUX3        0x1699
 #define SYSTEM_REV_S_USES_VAUX3 0x8
@@ -52,6 +54,8 @@
 #define RX51_FMTX_RESET_GPIO           163
 #define RX51_FMTX_IRQ                  53
 
+#define RX51_USB_TRANSCEIVER_RST_GPIO  67
+
 /* list all spi devices here */
 enum {
        RX51_SPI_WL1251,
@@ -110,10 +114,30 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
        },
 };
 
+static void rx51_charger_set_power(bool on)
+{
+       gpio_set_value(RX51_USB_TRANSCEIVER_RST_GPIO, on);
+}
+
+static struct isp1704_charger_data rx51_charger_data = {
+       .set_power      = rx51_charger_set_power,
+};
+
 static struct platform_device rx51_charger_device = {
-       .name = "isp1704_charger",
+       .name   = "isp1704_charger",
+       .dev    = {
+               .platform_data = &rx51_charger_data,
+       },
 };
 
+static void __init rx51_charger_init(void)
+{
+       WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
+               GPIOF_OUT_INIT_LOW, "isp1704_reset"));
+
+       platform_device_register(&rx51_charger_device);
+}
+
 #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE)
 
 #define RX51_GPIO_CAMERA_LENS_COVER    110
@@ -557,10 +581,8 @@ static __init void rx51_init_si4713(void)
 static int rx51_twlgpio_setup(struct device *dev, unsigned gpio, unsigned n)
 {
        /* FIXME this gpio setup is just a placeholder for now */
-       gpio_request(gpio + 6, "backlight_pwm");
-       gpio_direction_output(gpio + 6, 0);
-       gpio_request(gpio + 7, "speaker_en");
-       gpio_direction_output(gpio + 7, 1);
+       gpio_request_one(gpio + 6, GPIOF_OUT_INIT_LOW, "backlight_pwm");
+       gpio_request_one(gpio + 7, GPIOF_OUT_INIT_HIGH, "speaker_en");
 
        return 0;
 }
@@ -730,7 +752,7 @@ static struct twl4030_resconfig twl4030_rconfig[] __initdata = {
        { .resource = RES_RESET, .devgroup = -1,
          .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
        },
-       { .resource = RES_Main_Ref, .devgroup = -1,
+       { .resource = RES_MAIN_REF, .devgroup = -1,
          .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1
        },
        { 0, 0},
@@ -777,15 +799,6 @@ static struct tpa6130a2_platform_data rx51_tpa6130a2_data __initdata_or_module =
        .power_gpio             = 98,
 };
 
-static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_1[] = {
-       {
-               I2C_BOARD_INFO("twl5030", 0x48),
-               .flags = I2C_CLIENT_WAKE,
-               .irq = INT_34XX_SYS_NIRQ,
-               .platform_data = &rx51_twldata,
-       },
-};
-
 /* Audio setup data */
 static struct aic3x_setup_data rx51_aic34_setup = {
        .gpio_func[0] = AIC3X_GPIO1_FUNC_DISABLED,
@@ -833,8 +846,7 @@ static int __init rx51_i2c_init(void)
                rx51_twldata.vaux3 = &rx51_vaux3_cam;
        }
        rx51_twldata.vmmc2 = &rx51_vmmc2;
-       omap_register_i2c_bus(1, 2200, rx51_peripherals_i2c_board_info_1,
-                             ARRAY_SIZE(rx51_peripherals_i2c_board_info_1));
+       omap_pmic_init(1, 2200, "twl5030", INT_34XX_SYS_NIRQ, &rx51_twldata);
        omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2,
                              ARRAY_SIZE(rx51_peripherals_i2c_board_info_2));
        omap_register_i2c_bus(3, 400, NULL, 0);
@@ -921,26 +933,20 @@ static void rx51_wl1251_set_power(bool enable)
        gpio_set_value(RX51_WL1251_POWER_GPIO, enable);
 }
 
+static struct gpio rx51_wl1251_gpios[] __initdata = {
+       { RX51_WL1251_POWER_GPIO, GPIOF_OUT_INIT_LOW,   "wl1251 power"  },
+       { RX51_WL1251_IRQ_GPIO,   GPIOF_IN,             "wl1251 irq"    },
+};
+
 static void __init rx51_init_wl1251(void)
 {
        int irq, ret;
 
-       ret = gpio_request(RX51_WL1251_POWER_GPIO, "wl1251 power");
+       ret = gpio_request_array(rx51_wl1251_gpios,
+                                ARRAY_SIZE(rx51_wl1251_gpios));
        if (ret < 0)
                goto error;
 
-       ret = gpio_direction_output(RX51_WL1251_POWER_GPIO, 0);
-       if (ret < 0)
-               goto err_power;
-
-       ret = gpio_request(RX51_WL1251_IRQ_GPIO, "wl1251 irq");
-       if (ret < 0)
-               goto err_power;
-
-       ret = gpio_direction_input(RX51_WL1251_IRQ_GPIO);
-       if (ret < 0)
-               goto err_irq;
-
        irq = gpio_to_irq(RX51_WL1251_IRQ_GPIO);
        if (irq < 0)
                goto err_irq;
@@ -952,10 +958,7 @@ static void __init rx51_init_wl1251(void)
 
 err_irq:
        gpio_free(RX51_WL1251_IRQ_GPIO);
-
-err_power:
        gpio_free(RX51_WL1251_POWER_GPIO);
-
 error:
        printk(KERN_ERR "wl1251 board initialisation failed\n");
        wl1251_pdata.set_power = NULL;
@@ -981,6 +984,6 @@ void __init rx51_peripherals_init(void)
        if (partition)
                omap2_hsmmc_init(mmc);
 
-       platform_device_register(&rx51_charger_device);
+       rx51_charger_init();
 }
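
The rx51 change above wires a set_power() callback into the isp1704_charger platform data and claims the transceiver reset GPIO at init time. A hedged sketch of how a consumer driver could use such a hook; the probe function is illustrative, not the actual isp1704_charger code:

#include <linux/platform_device.h>
#include <linux/power/isp1704_charger.h>

static int example_charger_probe(struct platform_device *pdev)
{
        struct isp1704_charger_data *pdata = pdev->dev.platform_data;

        /* power up the transceiver before talking to it; on rx51 the
         * board callback toggles RX51_USB_TRANSCEIVER_RST_GPIO, as shown above */
        if (pdata && pdata->set_power)
                pdata->set_power(true);

        return 0;
}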
 
index 2df10b6a5940ae74786112d61a2bd7e2edb3a7ff..2c1289bd5e6ad7a2ee30d736d5bb3a14bf6591f0 100644 (file)
@@ -76,13 +76,12 @@ static int __init rx51_video_init(void)
                return 0;
        }
 
-       if (gpio_request(RX51_LCD_RESET_GPIO, "LCD ACX565AKM reset")) {
+       if (gpio_request_one(RX51_LCD_RESET_GPIO, GPIOF_OUT_INIT_HIGH,
+                            "LCD ACX565AKM reset")) {
                pr_err("%s failed to get LCD Reset GPIO\n", __func__);
                return 0;
        }
 
-       gpio_direction_output(RX51_LCD_RESET_GPIO, 1);
-
        omap_display_init(&rx51_dss_board_info);
        return 0;
 }
index f8ba20a14e625554f95fded51abc2177e7326b96..fec4cac8fa0ab858fd6c09f55af0b08ce8e0ad1a 100644 (file)
@@ -58,21 +58,25 @@ static struct platform_device leds_gpio = {
        },
 };
 
+/*
+ * Board-specific cpuidle C-state parameters, overriding the default values.
+ * The 'exit_latency' field is the sum of sleep and wake-up latencies.
+ */
 static struct cpuidle_params rx51_cpuidle_params[] = {
        /* C1 */
-       {1, 110, 162, 5},
+       {110 + 162, 5, 1},
        /* C2 */
-       {1, 106, 180, 309},
+       {106 + 180, 309, 1},
        /* C3 */
-       {0, 107, 410, 46057},
+       {107 + 410, 46057, 0},
        /* C4 */
-       {0, 121, 3374, 46057},
+       {121 + 3374, 46057, 0},
        /* C5 */
-       {1, 855, 1146, 46057},
+       {855 + 1146, 46057, 1},
        /* C6 */
-       {0, 7580, 4134, 484329},
+       {7580 + 4134, 484329, 0},
        /* C7 */
-       {1, 7505, 15274, 484329},
+       {7505 + 15274, 484329, 1},
 };
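
The table above switches from the old four-field {valid, sleep_latency, wake_latency, threshold} layout to a three-field one. The struct below is a reconstruction inferred from the initializers here and the field names used later in cpuidle34xx.c (exit_latency, target_residency, valid), not a quote of the pm.h declaration:

/* reconstructed layout of struct cpuidle_params after this series */
struct cpuidle_params {
        u32 exit_latency;       /* sleep + wake-up latency, in microseconds */
        u32 target_residency;   /* minimum residency for a net power win, us */
        u8  valid;              /* whether the C-state may be used at all */
};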
 
 static struct omap_lcd_config rx51_lcd_config = {
index 007ebdc6c993eec5a8d73243a18a6be382557501..6402e781c458c7ebbdd48017acc9527e038f6ef4 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/interrupt.h>
 
 #include <plat/gpmc.h>
+#include <plat/gpmc-smsc911x.h>
 
 #include <mach/board-zoom.h>
 
 #define DEBUG_BASE             0x08000000
 #define ZOOM_ETHR_START        DEBUG_BASE
 
-static struct resource zoom_smsc911x_resources[] = {
-       [0] = {
-               .start  = ZOOM_ETHR_START,
-               .end    = ZOOM_ETHR_START + SZ_4K,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
-       },
-};
-
-static struct smsc911x_platform_config zoom_smsc911x_config = {
-       .irq_polarity   = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
-       .irq_type       = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+static struct omap_smsc911x_platform_data zoom_smsc911x_cfg = {
+       .cs             = ZOOM_SMSC911X_CS,
+       .gpio_irq       = ZOOM_SMSC911X_GPIO,
+       .gpio_reset     = -EINVAL,
        .flags          = SMSC911X_USE_32BIT,
-       .phy_interface  = PHY_INTERFACE_MODE_MII,
-};
-
-static struct platform_device zoom_smsc911x_device = {
-       .name           = "smsc911x",
-       .id             = -1,
-       .num_resources  = ARRAY_SIZE(zoom_smsc911x_resources),
-       .resource       = zoom_smsc911x_resources,
-       .dev            = {
-               .platform_data = &zoom_smsc911x_config,
-       },
 };
 
 static inline void __init zoom_init_smsc911x(void)
 {
-       int eth_cs;
-       unsigned long cs_mem_base;
-       int eth_gpio = 0;
-
-       eth_cs = ZOOM_SMSC911X_CS;
-
-       if (gpmc_cs_request(eth_cs, SZ_16M, &cs_mem_base) < 0) {
-               printk(KERN_ERR "Failed to request GPMC mem for smsc911x\n");
-               return;
-       }
-
-       zoom_smsc911x_resources[0].start = cs_mem_base + 0x0;
-       zoom_smsc911x_resources[0].end   = cs_mem_base + 0xff;
-
-       eth_gpio = ZOOM_SMSC911X_GPIO;
-
-       zoom_smsc911x_resources[1].start = OMAP_GPIO_IRQ(eth_gpio);
-
-       if (gpio_request(eth_gpio, "smsc911x irq") < 0) {
-               printk(KERN_ERR "Failed to request GPIO%d for smsc911x IRQ\n",
-                               eth_gpio);
-               return;
-       }
-       gpio_direction_input(eth_gpio);
+       gpmc_smsc911x_init(&zoom_smsc911x_cfg);
 }
 
 static struct plat_serial8250_port serial_platform_data[] = {
@@ -120,12 +77,9 @@ static inline void __init zoom_init_quaduart(void)
 
        quart_gpio = ZOOM_QUADUART_GPIO;
 
-       if (gpio_request(quart_gpio, "TL16CP754C GPIO") < 0) {
+       if (gpio_request_one(quart_gpio, GPIOF_IN, "TL16CP754C GPIO") < 0)
                printk(KERN_ERR "Failed to request GPIO%d for TL16CP754C\n",
                                                                quart_gpio);
-               return;
-       }
-       gpio_direction_input(quart_gpio);
 }
 
 static inline int omap_zoom_debugboard_detect(void)
@@ -135,12 +89,12 @@ static inline int omap_zoom_debugboard_detect(void)
 
        debug_board_detect = ZOOM_SMSC911X_GPIO;
 
-       if (gpio_request(debug_board_detect, "Zoom debug board detect") < 0) {
+       if (gpio_request_one(debug_board_detect, GPIOF_IN,
+                            "Zoom debug board detect") < 0) {
                printk(KERN_ERR "Failed to request GPIO%d for Zoom debug"
                "board detect\n", debug_board_detect);
                return 0;
        }
-       gpio_direction_input(debug_board_detect);
 
        if (!gpio_get_value(debug_board_detect)) {
                ret = 0;
@@ -150,7 +104,6 @@ static inline int omap_zoom_debugboard_detect(void)
 }
 
 static struct platform_device *zoom_devices[] __initdata = {
-       &zoom_smsc911x_device,
        &zoom_debugboard_serial_device,
 };
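
With the open-coded GPMC and IRQ-GPIO setup gone, attaching an SMSC911x now takes a small descriptor plus one call, as the zoom and overo hunks above show. A sketch with placeholder chip-select and GPIO numbers; gpio_reset = -EINVAL marks the reset line as unused:

#include <linux/init.h>
#include <linux/smsc911x.h>
#include <plat/gpmc-smsc911x.h>

static struct omap_smsc911x_platform_data example_smsc911x_cfg = {
        .id             = 0,
        .cs             = 5,            /* placeholder GPMC chip-select */
        .gpio_irq       = 176,          /* placeholder IRQ GPIO */
        .gpio_reset     = -EINVAL,      /* no reset GPIO wired up */
        .flags          = SMSC911X_USE_32BIT,
};

static void __init example_eth_init(void)
{
        gpmc_smsc911x_init(&example_smsc911x_cfg);
}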
 
index 60e8645db59da15c6342d5537e50000140095514..c7c6beb1ec24743737c5b9abe99a85909d5e310b 100644 (file)
 #define LCD_PANEL_RESET_GPIO_PILOT     55
 #define LCD_PANEL_QVGA_GPIO            56
 
+static struct gpio zoom_lcd_gpios[] __initdata = {
+       { -EINVAL,              GPIOF_OUT_INIT_HIGH, "lcd reset" },
+       { LCD_PANEL_QVGA_GPIO,  GPIOF_OUT_INIT_HIGH, "lcd qvga"  },
+};
+
 static void zoom_lcd_panel_init(void)
 {
-       int ret;
-       unsigned char lcd_panel_reset_gpio;
-
-       lcd_panel_reset_gpio = (omap_rev() > OMAP3430_REV_ES3_0) ?
+       zoom_lcd_gpios[0].gpio = (omap_rev() > OMAP3430_REV_ES3_0) ?
                        LCD_PANEL_RESET_GPIO_PROD :
                        LCD_PANEL_RESET_GPIO_PILOT;
 
-       ret = gpio_request(lcd_panel_reset_gpio, "lcd reset");
-       if (ret) {
-               pr_err("Failed to get LCD reset GPIO (gpio%d).\n",
-                       lcd_panel_reset_gpio);
-               return;
-       }
-       gpio_direction_output(lcd_panel_reset_gpio, 1);
-
-       ret = gpio_request(LCD_PANEL_QVGA_GPIO, "lcd qvga");
-       if (ret) {
-               pr_err("Failed to get LCD_PANEL_QVGA_GPIO (gpio%d).\n",
-                       LCD_PANEL_QVGA_GPIO);
-               goto err0;
-       }
-       gpio_direction_output(LCD_PANEL_QVGA_GPIO, 1);
-
-       return;
-err0:
-       gpio_free(lcd_panel_reset_gpio);
+       if (gpio_request_array(zoom_lcd_gpios, ARRAY_SIZE(zoom_lcd_gpios)))
+               pr_err("%s: Failed to get LCD GPIOs.\n", __func__);
 }
 
 static int zoom_panel_enable_lcd(struct omap_dss_device *dssdev)
index 8dee7549fbdf5330553859c0df43bfed335fcf66..118c6f53c5eb00f3815ba3f21d72623aacdcc828 100644 (file)
@@ -31,6 +31,7 @@
 
 #include "mux.h"
 #include "hsmmc.h"
+#include "common-board-devices.h"
 
 #define OMAP_ZOOM_WLAN_PMENA_GPIO      (101)
 #define OMAP_ZOOM_WLAN_IRQ_GPIO                (162)
@@ -276,13 +277,11 @@ static int zoom_twl_gpio_setup(struct device *dev,
        zoom_vsim_supply.dev = mmc[0].dev;
        zoom_vmmc2_supply.dev = mmc[1].dev;
 
-       ret = gpio_request(LCD_PANEL_ENABLE_GPIO, "lcd enable");
-       if (ret) {
+       ret = gpio_request_one(LCD_PANEL_ENABLE_GPIO, GPIOF_OUT_INIT_LOW,
+                              "lcd enable");
+       if (ret)
                pr_err("Failed to get LCD_PANEL_ENABLE_GPIO (gpio%d).\n",
                                LCD_PANEL_ENABLE_GPIO);
-               return ret;
-       }
-       gpio_direction_output(LCD_PANEL_ENABLE_GPIO, 0);
 
        return ret;
 }
@@ -349,15 +348,6 @@ static struct twl4030_platform_data zoom_twldata = {
        .vdac           = &zoom_vdac,
 };
 
-static struct i2c_board_info __initdata zoom_i2c_boardinfo[] = {
-       {
-               I2C_BOARD_INFO("twl5030", 0x48),
-               .flags          = I2C_CLIENT_WAKE,
-               .irq            = INT_34XX_SYS_NIRQ,
-               .platform_data  = &zoom_twldata,
-       },
-};
-
 static int __init omap_i2c_init(void)
 {
        if (machine_is_omap_zoom2()) {
@@ -365,19 +355,12 @@ static int __init omap_i2c_init(void)
                zoom_audio_data.hs_extmute = 1;
                zoom_audio_data.set_hs_extmute = zoom2_set_hs_extmute;
        }
-       omap_register_i2c_bus(1, 2400, zoom_i2c_boardinfo,
-                       ARRAY_SIZE(zoom_i2c_boardinfo));
+       omap_pmic_init(1, 2400, "twl5030", INT_34XX_SYS_NIRQ, &zoom_twldata);
        omap_register_i2c_bus(2, 400, NULL, 0);
        omap_register_i2c_bus(3, 400, NULL, 0);
        return 0;
 }
 
-static struct omap_musb_board_data musb_board_data = {
-       .interface_type         = MUSB_INTERFACE_ULPI,
-       .mode                   = MUSB_OTG,
-       .power                  = 100,
-};
-
 static void enable_board_wakeup_source(void)
 {
        /* T2 interrupt line (keypad) */
@@ -392,7 +375,7 @@ void __init zoom_peripherals_init(void)
 
        omap_i2c_init();
        platform_device_register(&omap_vwlan_device);
-       usb_musb_init(&musb_board_data);
+       usb_musb_init(NULL);
        enable_board_wakeup_source();
        omap_serial_init();
 }
diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c
new file mode 100644 (file)
index 0000000..e94903b
--- /dev/null
@@ -0,0 +1,163 @@
+/*
+ * common-board-devices.c
+ *
+ * Copyright (C) 2011 CompuLab, Ltd.
+ * Author: Mike Rapoport <mike@compulab.co.il>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/i2c.h>
+#include <linux/i2c/twl.h>
+
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/ads7846.h>
+
+#include <plat/i2c.h>
+#include <plat/mcspi.h>
+#include <plat/nand.h>
+
+#include "common-board-devices.h"
+
+static struct i2c_board_info __initdata pmic_i2c_board_info = {
+       .addr           = 0x48,
+       .flags          = I2C_CLIENT_WAKE,
+};
+
+void __init omap_pmic_init(int bus, u32 clkrate,
+                          const char *pmic_type, int pmic_irq,
+                          struct twl4030_platform_data *pmic_data)
+{
+       strncpy(pmic_i2c_board_info.type, pmic_type,
+               sizeof(pmic_i2c_board_info.type));
+       pmic_i2c_board_info.irq = pmic_irq;
+       pmic_i2c_board_info.platform_data = pmic_data;
+
+       omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
+}
+
+#if defined(CONFIG_TOUCHSCREEN_ADS7846) || \
+       defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
+static struct omap2_mcspi_device_config ads7846_mcspi_config = {
+       .turbo_mode     = 0,
+       .single_channel = 1,    /* 0: slave, 1: master */
+};
+
+static struct ads7846_platform_data ads7846_config = {
+       .x_max                  = 0x0fff,
+       .y_max                  = 0x0fff,
+       .x_plate_ohms           = 180,
+       .pressure_max           = 255,
+       .debounce_max           = 10,
+       .debounce_tol           = 3,
+       .debounce_rep           = 1,
+       .gpio_pendown           = -EINVAL,
+       .keep_vref_on           = 1,
+};
+
+static struct spi_board_info ads7846_spi_board_info __initdata = {
+       .modalias               = "ads7846",
+       .bus_num                = -EINVAL,
+       .chip_select            = 0,
+       .max_speed_hz           = 1500000,
+       .controller_data        = &ads7846_mcspi_config,
+       .irq                    = -EINVAL,
+       .platform_data          = &ads7846_config,
+};
+
+void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
+                             struct ads7846_platform_data *board_pdata)
+{
+       struct spi_board_info *spi_bi = &ads7846_spi_board_info;
+       int err;
+
+       err = gpio_request(gpio_pendown, "TS PenDown");
+       if (err) {
+               pr_err("Could not obtain gpio for TS PenDown: %d\n", err);
+               return;
+       }
+
+       gpio_direction_input(gpio_pendown);
+       gpio_export(gpio_pendown, 0);
+
+       if (gpio_debounce)
+               gpio_set_debounce(gpio_pendown, gpio_debounce);
+
+       ads7846_config.gpio_pendown = gpio_pendown;
+
+       spi_bi->bus_num = bus_num;
+       spi_bi->irq     = OMAP_GPIO_IRQ(gpio_pendown);
+
+       if (board_pdata)
+               spi_bi->platform_data = board_pdata;
+
+       spi_register_board_info(&ads7846_spi_board_info, 1);
+}
+#else
+void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
+                             struct ads7846_platform_data *board_pdata)
+{
+}
+#endif
+
+#if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE)
+static struct omap_nand_platform_data nand_data = {
+       .dma_channel    = -1,           /* disable DMA in OMAP NAND driver */
+};
+
+void __init omap_nand_flash_init(int options, struct mtd_partition *parts,
+                                int nr_parts)
+{
+       u8 cs = 0;
+       u8 nandcs = GPMC_CS_NUM + 1;
+
+       /* find out the chip-select on which NAND exists */
+       while (cs < GPMC_CS_NUM) {
+               u32 ret = 0;
+               ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
+
+               if ((ret & 0xC00) == 0x800) {
+                       printk(KERN_INFO "Found NAND on CS%d\n", cs);
+                       if (nandcs > GPMC_CS_NUM)
+                               nandcs = cs;
+               }
+               cs++;
+       }
+
+       if (nandcs > GPMC_CS_NUM) {
+               printk(KERN_INFO "NAND: Unable to find configuration "
+                                "in GPMC\n ");
+               return;
+       }
+
+       if (nandcs < GPMC_CS_NUM) {
+               nand_data.cs = nandcs;
+               nand_data.parts = parts;
+               nand_data.nr_parts = nr_parts;
+               nand_data.options = options;
+
+               printk(KERN_INFO "Registering NAND on CS%d\n", nandcs);
+               if (gpmc_nand_init(&nand_data) < 0)
+                       printk(KERN_ERR "Unable to register NAND device\n");
+       }
+}
+#else
+void __init omap_nand_flash_init(int options, struct mtd_partition *parts,
+                                int nr_parts)
+{
+}
+#endif
diff --git a/arch/arm/mach-omap2/common-board-devices.h b/arch/arm/mach-omap2/common-board-devices.h
new file mode 100644 (file)
index 0000000..eb80b3b
--- /dev/null
@@ -0,0 +1,35 @@
+#ifndef __OMAP_COMMON_BOARD_DEVICES__
+#define __OMAP_COMMON_BOARD_DEVICES__
+
+struct twl4030_platform_data;
+struct mtd_partition;
+
+void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq,
+                   struct twl4030_platform_data *pmic_data);
+
+static inline void omap2_pmic_init(const char *pmic_type,
+                                  struct twl4030_platform_data *pmic_data)
+{
+       omap_pmic_init(2, 2600, pmic_type, INT_24XX_SYS_NIRQ, pmic_data);
+}
+
+static inline void omap3_pmic_init(const char *pmic_type,
+                                  struct twl4030_platform_data *pmic_data)
+{
+       omap_pmic_init(1, 2600, pmic_type, INT_34XX_SYS_NIRQ, pmic_data);
+}
+
+static inline void omap4_pmic_init(const char *pmic_type,
+                                  struct twl4030_platform_data *pmic_data)
+{
+       /* Phoenix Audio IC needs I2C1 to start at 400 kHz or less */
+       omap_pmic_init(1, 400, pmic_type, OMAP44XX_IRQ_SYS_1N, pmic_data);
+}
+
+struct ads7846_platform_data;
+
+void omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
+                      struct ads7846_platform_data *board_pdata);
+void omap_nand_flash_init(int opts, struct mtd_partition *parts, int n_parts);
+
+#endif /* __OMAP_COMMON_BOARD_DEVICES__ */
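
For reference, a hypothetical board file using the helpers declared above; the partition layout, GPIO number, and PMIC name are placeholders, not taken from any of the boards in this diff:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mtd/partitions.h>
#include <linux/i2c/twl.h>

#include "common-board-devices.h"

static struct mtd_partition example_nand_parts[] = {
        { .name = "bootloader", .offset = 0,                  .size = 0x80000 },
        { .name = "rootfs",     .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
};

/* normally populated with regulator and child-device data */
static struct twl4030_platform_data example_twldata;

static void __init example_board_init(void)
{
        /* PMIC on I2C1 at 2600 kHz, wired to the OMAP3 SYS_NIRQ line */
        omap3_pmic_init("twl4030", &example_twldata);

        /* ADS7846 touchscreen on SPI bus 1, pen-down on GPIO 114 (placeholder),
         * no extra debounce, default platform data */
        omap_ads7846_init(1, 114, 0, NULL);

        /* NAND partitions on whichever GPMC chip-select probes as NAND */
        omap_nand_flash_init(0, example_nand_parts,
                             ARRAY_SIZE(example_nand_parts));
}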
index 1c240eff3918c08161243279e62206b4eed5caa4..4bf6e6e8b1001ae5b14ab0e86c44dec7c8b1043d 100644 (file)
 
 #ifdef CONFIG_CPU_IDLE
 
-#define OMAP3_MAX_STATES 7
-#define OMAP3_STATE_C1 0 /* C1 - MPU WFI + Core active */
-#define OMAP3_STATE_C2 1 /* C2 - MPU WFI + Core inactive */
-#define OMAP3_STATE_C3 2 /* C3 - MPU CSWR + Core inactive */
-#define OMAP3_STATE_C4 3 /* C4 - MPU OFF + Core iactive */
-#define OMAP3_STATE_C5 4 /* C5 - MPU RET + Core RET */
-#define OMAP3_STATE_C6 5 /* C6 - MPU OFF + Core RET */
-#define OMAP3_STATE_C7 6 /* C7 - MPU OFF + Core OFF */
-
-#define OMAP3_STATE_MAX OMAP3_STATE_C7
-
-#define CPUIDLE_FLAG_CHECK_BM  0x10000 /* use omap3_enter_idle_bm() */
-
-struct omap3_processor_cx {
-       u8 valid;
-       u8 type;
-       u32 sleep_latency;
-       u32 wakeup_latency;
-       u32 mpu_state;
-       u32 core_state;
-       u32 threshold;
-       u32 flags;
-       const char *desc;
-};
-
-struct omap3_processor_cx omap3_power_states[OMAP3_MAX_STATES];
-struct omap3_processor_cx current_cx_state;
-struct powerdomain *mpu_pd, *core_pd, *per_pd;
-struct powerdomain *cam_pd;
-
 /*
  * The latencies/thresholds for various C states have
  * to be configured from the respective board files.
@@ -75,27 +45,31 @@ struct powerdomain *cam_pd;
  */
 static struct cpuidle_params cpuidle_params_table[] = {
        /* C1 */
-       {1, 2, 2, 5},
+       {2 + 2, 5, 1},
        /* C2 */
-       {1, 10, 10, 30},
+       {10 + 10, 30, 1},
        /* C3 */
-       {1, 50, 50, 300},
+       {50 + 50, 300, 1},
        /* C4 */
-       {1, 1500, 1800, 4000},
+       {1500 + 1800, 4000, 1},
        /* C5 */
-       {1, 2500, 7500, 12000},
+       {2500 + 7500, 12000, 1},
        /* C6 */
-       {1, 3000, 8500, 15000},
+       {3000 + 8500, 15000, 1},
        /* C7 */
-       {1, 10000, 30000, 300000},
+       {10000 + 30000, 300000, 1},
 };
+#define OMAP3_NUM_STATES ARRAY_SIZE(cpuidle_params_table)
 
-static int omap3_idle_bm_check(void)
-{
-       if (!omap3_can_sleep())
-               return 1;
-       return 0;
-}
+/* Mach specific information to be recorded in the C-state driver_data */
+struct omap3_idle_statedata {
+       u32 mpu_state;
+       u32 core_state;
+       u8 valid;
+};
+struct omap3_idle_statedata omap3_idle_data[OMAP3_NUM_STATES];
+
+struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;
 
 static int _cpuidle_allow_idle(struct powerdomain *pwrdm,
                                struct clockdomain *clkdm)
@@ -122,12 +96,10 @@ static int _cpuidle_deny_idle(struct powerdomain *pwrdm,
 static int omap3_enter_idle(struct cpuidle_device *dev,
                        struct cpuidle_state *state)
 {
-       struct omap3_processor_cx *cx = cpuidle_get_statedata(state);
+       struct omap3_idle_statedata *cx = cpuidle_get_statedata(state);
        struct timespec ts_preidle, ts_postidle, ts_idle;
        u32 mpu_state = cx->mpu_state, core_state = cx->core_state;
 
-       current_cx_state = *cx;
-
        /* Used to keep track of the total time in idle */
        getnstimeofday(&ts_preidle);
 
@@ -140,7 +112,8 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
        if (omap_irq_pending() || need_resched())
                goto return_sleep_time;
 
-       if (cx->type == OMAP3_STATE_C1) {
+       /* Deny idle for C1 */
+       if (state == &dev->states[0]) {
                pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
                pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
        }
@@ -148,7 +121,8 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
        /* Execute ARM wfi */
        omap_sram_idle();
 
-       if (cx->type == OMAP3_STATE_C1) {
+       /* Re-allow idle for C1 */
+       if (state == &dev->states[0]) {
                pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
                pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
        }
@@ -164,41 +138,53 @@ return_sleep_time:
 }
 
 /**
- * next_valid_state - Find next valid c-state
+ * next_valid_state - Find next valid C-state
  * @dev: cpuidle device
- * @state: Currently selected c-state
+ * @state: Currently selected C-state
  *
  * If the current state is valid, it is returned back to the caller.
  * Else, this function searches for a lower c-state which is still
- * valid (as defined in omap3_power_states[]).
+ * valid.
+ *
+ * A state is valid if the 'valid' field is enabled and
+ * if it satisfies the enable_off_mode condition.
  */
 static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
-                                               struct cpuidle_state *curr)
+                                             struct cpuidle_state *curr)
 {
        struct cpuidle_state *next = NULL;
-       struct omap3_processor_cx *cx;
+       struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr);
+       u32 mpu_deepest_state = PWRDM_POWER_RET;
+       u32 core_deepest_state = PWRDM_POWER_RET;
 
-       cx = (struct omap3_processor_cx *)cpuidle_get_statedata(curr);
+       if (enable_off_mode) {
+               mpu_deepest_state = PWRDM_POWER_OFF;
+               /*
+                * Erratum i583: applicable for ES rev < ES1.2 on 3630.
+                * CORE OFF mode is not supported in a stable form; restrict
+                * the CORE state to RET instead.
+                */
+               if (!IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
+                       core_deepest_state = PWRDM_POWER_OFF;
+       }
 
        /* Check if current state is valid */
-       if (cx->valid) {
+       if ((cx->valid) &&
+           (cx->mpu_state >= mpu_deepest_state) &&
+           (cx->core_state >= core_deepest_state)) {
                return curr;
        } else {
-               u8 idx = OMAP3_STATE_MAX;
+               int idx = OMAP3_NUM_STATES - 1;
 
-               /*
-                * Reach the current state starting at highest C-state
-                */
-               for (; idx >= OMAP3_STATE_C1; idx--) {
+               /* Reach the current state starting at highest C-state */
+               for (; idx >= 0; idx--) {
                        if (&dev->states[idx] == curr) {
                                next = &dev->states[idx];
                                break;
                        }
                }
 
-               /*
-                * Should never hit this condition.
-                */
+               /* Should never hit this condition */
                WARN_ON(next == NULL);
 
                /*
@@ -206,17 +192,17 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
                 * Start search from the next (lower) state.
                 */
                idx--;
-               for (; idx >= OMAP3_STATE_C1; idx--) {
-                       struct omap3_processor_cx *cx;
-
+               for (; idx >= 0; idx--) {
                        cx = cpuidle_get_statedata(&dev->states[idx]);
-                       if (cx->valid) {
+                       if ((cx->valid) &&
+                           (cx->mpu_state >= mpu_deepest_state) &&
+                           (cx->core_state >= core_deepest_state)) {
                                next = &dev->states[idx];
                                break;
                        }
                }
                /*
-                * C1 and C2 are always valid.
+                * C1 is always valid.
                 * So, no need to check for 'next==NULL' outside this loop.
                 */
        }
@@ -229,36 +215,22 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
  * @dev: cpuidle device
  * @state: The target state to be programmed
  *
- * Used for C states with CPUIDLE_FLAG_CHECK_BM flag set. This
- * function checks for any pending activity and then programs the
- * device to the specified or a safer state.
+ * This function checks for any pending activity and then programs
+ * the device to the specified or a safer state.
  */
 static int omap3_enter_idle_bm(struct cpuidle_device *dev,
                               struct cpuidle_state *state)
 {
-       struct cpuidle_state *new_state = next_valid_state(dev, state);
-       u32 core_next_state, per_next_state = 0, per_saved_state = 0;
-       u32 cam_state;
-       struct omap3_processor_cx *cx;
+       struct cpuidle_state *new_state;
+       u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state;
+       struct omap3_idle_statedata *cx;
        int ret;
 
-       if ((state->flags & CPUIDLE_FLAG_CHECK_BM) && omap3_idle_bm_check()) {
-               BUG_ON(!dev->safe_state);
+       if (!omap3_can_sleep()) {
                new_state = dev->safe_state;
                goto select_state;
        }
 
-       cx = cpuidle_get_statedata(state);
-       core_next_state = cx->core_state;
-
-       /*
-        * FIXME: we currently manage device-specific idle states
-        *        for PER and CORE in combination with CPU-specific
-        *        idle states.  This is wrong, and device-specific
-        *        idle management needs to be separated out into 
-        *        its own code.
-        */
-
        /*
         * Prevent idle completely if CAM is active.
         * CAM does not have wakeup capability in OMAP3.
@@ -269,10 +241,20 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
                goto select_state;
        }
 
+       /*
+        * FIXME: we currently manage device-specific idle states
+        *        for PER and CORE in combination with CPU-specific
+        *        idle states.  This is wrong, and device-specific
+        *        idle management needs to be separated out into
+        *        its own code.
+        */
+
        /*
         * Prevent PER off if CORE is not in retention or off as this
         * would disable PER wakeups completely.
         */
+       cx = cpuidle_get_statedata(state);
+       core_next_state = cx->core_state;
        per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
        if ((per_next_state == PWRDM_POWER_OFF) &&
            (core_next_state > PWRDM_POWER_RET))
@@ -282,6 +264,8 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
        if (per_next_state != per_saved_state)
                pwrdm_set_next_pwrst(per_pd, per_next_state);
 
+       new_state = next_valid_state(dev, state);
+
 select_state:
        dev->last_state = new_state;
        ret = omap3_enter_idle(dev, new_state);
@@ -295,31 +279,6 @@ select_state:
 
 DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);
 
-/**
- * omap3_cpuidle_update_states() - Update the cpuidle states
- * @mpu_deepest_state: Enable states up to and including this for mpu domain
- * @core_deepest_state:        Enable states up to and including this for core domain
- *
- * This goes through the list of states available and enables and disables the
- * validity of C states based on deepest state that can be achieved for the
- * variable domain
- */
-void omap3_cpuidle_update_states(u32 mpu_deepest_state, u32 core_deepest_state)
-{
-       int i;
-
-       for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
-               struct omap3_processor_cx *cx = &omap3_power_states[i];
-
-               if ((cx->mpu_state >= mpu_deepest_state) &&
-                   (cx->core_state >= core_deepest_state)) {
-                       cx->valid = 1;
-               } else {
-                       cx->valid = 0;
-               }
-       }
-}
-
 void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params)
 {
        int i;
@@ -327,212 +286,109 @@ void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params)
        if (!cpuidle_board_params)
                return;
 
-       for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
-               cpuidle_params_table[i].valid =
-                       cpuidle_board_params[i].valid;
-               cpuidle_params_table[i].sleep_latency =
-                       cpuidle_board_params[i].sleep_latency;
-               cpuidle_params_table[i].wake_latency =
-                       cpuidle_board_params[i].wake_latency;
-               cpuidle_params_table[i].threshold =
-                       cpuidle_board_params[i].threshold;
+       for (i = 0; i < OMAP3_NUM_STATES; i++) {
+               cpuidle_params_table[i].valid = cpuidle_board_params[i].valid;
+               cpuidle_params_table[i].exit_latency =
+                       cpuidle_board_params[i].exit_latency;
+               cpuidle_params_table[i].target_residency =
+                       cpuidle_board_params[i].target_residency;
        }
        return;
 }
 
-/* omap3_init_power_states - Initialises the OMAP3 specific C states.
- *
- * Below is the desciption of each C state.
- *     C1 . MPU WFI + Core active
- *     C2 . MPU WFI + Core inactive
- *     C3 . MPU CSWR + Core inactive
- *     C4 . MPU OFF + Core inactive
- *     C5 . MPU CSWR + Core CSWR
- *     C6 . MPU OFF + Core CSWR
- *     C7 . MPU OFF + Core OFF
- */
-void omap_init_power_states(void)
-{
-       /* C1 . MPU WFI + Core active */
-       omap3_power_states[OMAP3_STATE_C1].valid =
-                       cpuidle_params_table[OMAP3_STATE_C1].valid;
-       omap3_power_states[OMAP3_STATE_C1].type = OMAP3_STATE_C1;
-       omap3_power_states[OMAP3_STATE_C1].sleep_latency =
-                       cpuidle_params_table[OMAP3_STATE_C1].sleep_latency;
-       omap3_power_states[OMAP3_STATE_C1].wakeup_latency =
-                       cpuidle_params_table[OMAP3_STATE_C1].wake_latency;
-       omap3_power_states[OMAP3_STATE_C1].threshold =
-                       cpuidle_params_table[OMAP3_STATE_C1].threshold;
-       omap3_power_states[OMAP3_STATE_C1].mpu_state = PWRDM_POWER_ON;
-       omap3_power_states[OMAP3_STATE_C1].core_state = PWRDM_POWER_ON;
-       omap3_power_states[OMAP3_STATE_C1].flags = CPUIDLE_FLAG_TIME_VALID;
-       omap3_power_states[OMAP3_STATE_C1].desc = "MPU ON + CORE ON";
-
-       /* C2 . MPU WFI + Core inactive */
-       omap3_power_states[OMAP3_STATE_C2].valid =
-                       cpuidle_params_table[OMAP3_STATE_C2].valid;
-       omap3_power_states[OMAP3_STATE_C2].type = OMAP3_STATE_C2;
-       omap3_power_states[OMAP3_STATE_C2].sleep_latency =
-                       cpuidle_params_table[OMAP3_STATE_C2].sleep_latency;
-       omap3_power_states[OMAP3_STATE_C2].wakeup_latency =
-                       cpuidle_params_table[OMAP3_STATE_C2].wake_latency;
-       omap3_power_states[OMAP3_STATE_C2].threshold =
-                       cpuidle_params_table[OMAP3_STATE_C2].threshold;
-       omap3_power_states[OMAP3_STATE_C2].mpu_state = PWRDM_POWER_ON;
-       omap3_power_states[OMAP3_STATE_C2].core_state = PWRDM_POWER_ON;
-       omap3_power_states[OMAP3_STATE_C2].flags = CPUIDLE_FLAG_TIME_VALID |
-                               CPUIDLE_FLAG_CHECK_BM;
-       omap3_power_states[OMAP3_STATE_C2].desc = "MPU ON + CORE ON";
-
-       /* C3 . MPU CSWR + Core inactive */
-       omap3_power_states[OMAP3_STATE_C3].valid =
-                       cpuidle_params_table[OMAP3_STATE_C3].valid;
-       omap3_power_states[OMAP3_STATE_C3].type = OMAP3_STATE_C3;
-       omap3_power_states[OMAP3_STATE_C3].sleep_latency =
-                       cpuidle_params_table[OMAP3_STATE_C3].sleep_latency;
-       omap3_power_states[OMAP3_STATE_C3].wakeup_latency =
-                       cpuidle_params_table[OMAP3_STATE_C3].wake_latency;
-       omap3_power_states[OMAP3_STATE_C3].threshold =
-                       cpuidle_params_table[OMAP3_STATE_C3].threshold;
-       omap3_power_states[OMAP3_STATE_C3].mpu_state = PWRDM_POWER_RET;
-       omap3_power_states[OMAP3_STATE_C3].core_state = PWRDM_POWER_ON;
-       omap3_power_states[OMAP3_STATE_C3].flags = CPUIDLE_FLAG_TIME_VALID |
-                               CPUIDLE_FLAG_CHECK_BM;
-       omap3_power_states[OMAP3_STATE_C3].desc = "MPU RET + CORE ON";
-
-       /* C4 . MPU OFF + Core inactive */
-       omap3_power_states[OMAP3_STATE_C4].valid =
-                       cpuidle_params_table[OMAP3_STATE_C4].valid;
-       omap3_power_states[OMAP3_STATE_C4].type = OMAP3_STATE_C4;
-       omap3_power_states[OMAP3_STATE_C4].sleep_latency =
-                       cpuidle_params_table[OMAP3_STATE_C4].sleep_latency;
-       omap3_power_states[OMAP3_STATE_C4].wakeup_latency =
-                       cpuidle_params_table[OMAP3_STATE_C4].wake_latency;
-       omap3_power_states[OMAP3_STATE_C4].threshold =
-                       cpuidle_params_table[OMAP3_STATE_C4].threshold;
-       omap3_power_states[OMAP3_STATE_C4].mpu_state = PWRDM_POWER_OFF;
-       omap3_power_states[OMAP3_STATE_C4].core_state = PWRDM_POWER_ON;
-       omap3_power_states[OMAP3_STATE_C4].flags = CPUIDLE_FLAG_TIME_VALID |
-                               CPUIDLE_FLAG_CHECK_BM;
-       omap3_power_states[OMAP3_STATE_C4].desc = "MPU OFF + CORE ON";
-
-       /* C5 . MPU CSWR + Core CSWR*/
-       omap3_power_states[OMAP3_STATE_C5].valid =
-                       cpuidle_params_table[OMAP3_STATE_C5].valid;
-       omap3_power_states[OMAP3_STATE_C5].type = OMAP3_STATE_C5;
-       omap3_power_states[OMAP3_STATE_C5].sleep_latency =
-                       cpuidle_params_table[OMAP3_STATE_C5].sleep_latency;
-       omap3_power_states[OMAP3_STATE_C5].wakeup_latency =
-                       cpuidle_params_table[OMAP3_STATE_C5].wake_latency;
-       omap3_power_states[OMAP3_STATE_C5].threshold =
-                       cpuidle_params_table[OMAP3_STATE_C5].threshold;
-       omap3_power_states[OMAP3_STATE_C5].mpu_state = PWRDM_POWER_RET;
-       omap3_power_states[OMAP3_STATE_C5].core_state = PWRDM_POWER_RET;
-       omap3_power_states[OMAP3_STATE_C5].flags = CPUIDLE_FLAG_TIME_VALID |
-                               CPUIDLE_FLAG_CHECK_BM;
-       omap3_power_states[OMAP3_STATE_C5].desc = "MPU RET + CORE RET";
-
-       /* C6 . MPU OFF + Core CSWR */
-       omap3_power_states[OMAP3_STATE_C6].valid =
-                       cpuidle_params_table[OMAP3_STATE_C6].valid;
-       omap3_power_states[OMAP3_STATE_C6].type = OMAP3_STATE_C6;
-       omap3_power_states[OMAP3_STATE_C6].sleep_latency =
-                       cpuidle_params_table[OMAP3_STATE_C6].sleep_latency;
-       omap3_power_states[OMAP3_STATE_C6].wakeup_latency =
-                       cpuidle_params_table[OMAP3_STATE_C6].wake_latency;
-       omap3_power_states[OMAP3_STATE_C6].threshold =
-                       cpuidle_params_table[OMAP3_STATE_C6].threshold;
-       omap3_power_states[OMAP3_STATE_C6].mpu_state = PWRDM_POWER_OFF;
-       omap3_power_states[OMAP3_STATE_C6].core_state = PWRDM_POWER_RET;
-       omap3_power_states[OMAP3_STATE_C6].flags = CPUIDLE_FLAG_TIME_VALID |
-                               CPUIDLE_FLAG_CHECK_BM;
-       omap3_power_states[OMAP3_STATE_C6].desc = "MPU OFF + CORE RET";
-
-       /* C7 . MPU OFF + Core OFF */
-       omap3_power_states[OMAP3_STATE_C7].valid =
-                       cpuidle_params_table[OMAP3_STATE_C7].valid;
-       omap3_power_states[OMAP3_STATE_C7].type = OMAP3_STATE_C7;
-       omap3_power_states[OMAP3_STATE_C7].sleep_latency =
-                       cpuidle_params_table[OMAP3_STATE_C7].sleep_latency;
-       omap3_power_states[OMAP3_STATE_C7].wakeup_latency =
-                       cpuidle_params_table[OMAP3_STATE_C7].wake_latency;
-       omap3_power_states[OMAP3_STATE_C7].threshold =
-                       cpuidle_params_table[OMAP3_STATE_C7].threshold;
-       omap3_power_states[OMAP3_STATE_C7].mpu_state = PWRDM_POWER_OFF;
-       omap3_power_states[OMAP3_STATE_C7].core_state = PWRDM_POWER_OFF;
-       omap3_power_states[OMAP3_STATE_C7].flags = CPUIDLE_FLAG_TIME_VALID |
-                               CPUIDLE_FLAG_CHECK_BM;
-       omap3_power_states[OMAP3_STATE_C7].desc = "MPU OFF + CORE OFF";
-
-       /*
-        * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot
-        * enable OFF mode in a stable form for previous revisions.
-        * we disable C7 state as a result.
-        */
-       if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
-               omap3_power_states[OMAP3_STATE_C7].valid = 0;
-               cpuidle_params_table[OMAP3_STATE_C7].valid = 0;
-               pr_warn("%s: core off state C7 disabled due to i583\n",
-                               __func__);
-       }
-}
-
 struct cpuidle_driver omap3_idle_driver = {
        .name =         "omap3_idle",
        .owner =        THIS_MODULE,
 };
 
+/* Helper to fill the C-state common data and register the driver_data */
+static inline struct omap3_idle_statedata *_fill_cstate(
+                                       struct cpuidle_device *dev,
+                                       int idx, const char *descr)
+{
+       struct omap3_idle_statedata *cx = &omap3_idle_data[idx];
+       struct cpuidle_state *state = &dev->states[idx];
+
+       state->exit_latency     = cpuidle_params_table[idx].exit_latency;
+       state->target_residency = cpuidle_params_table[idx].target_residency;
+       state->flags            = CPUIDLE_FLAG_TIME_VALID;
+       state->enter            = omap3_enter_idle_bm;
+       cx->valid               = cpuidle_params_table[idx].valid;
+       sprintf(state->name, "C%d", idx + 1);
+       strncpy(state->desc, descr, CPUIDLE_DESC_LEN);
+       cpuidle_set_statedata(state, cx);
+
+       return cx;
+}
+
 /**
  * omap3_idle_init - Init routine for OMAP3 idle
  *
- * Registers the OMAP3 specific cpuidle driver with the cpuidle
+ * Registers the OMAP3 specific cpuidle driver to the cpuidle
  * framework with the valid set of states.
  */
 int __init omap3_idle_init(void)
 {
-       int i, count = 0;
-       struct omap3_processor_cx *cx;
-       struct cpuidle_state *state;
        struct cpuidle_device *dev;
+       struct omap3_idle_statedata *cx;
 
        mpu_pd = pwrdm_lookup("mpu_pwrdm");
        core_pd = pwrdm_lookup("core_pwrdm");
        per_pd = pwrdm_lookup("per_pwrdm");
        cam_pd = pwrdm_lookup("cam_pwrdm");
 
-       omap_init_power_states();
        cpuidle_register_driver(&omap3_idle_driver);
-
        dev = &per_cpu(omap3_idle_dev, smp_processor_id());
 
-       for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
-               cx = &omap3_power_states[i];
-               state = &dev->states[count];
-
-               if (!cx->valid)
-                       continue;
-               cpuidle_set_statedata(state, cx);
-               state->exit_latency = cx->sleep_latency + cx->wakeup_latency;
-               state->target_residency = cx->threshold;
-               state->flags = cx->flags;
-               state->enter = (state->flags & CPUIDLE_FLAG_CHECK_BM) ?
-                       omap3_enter_idle_bm : omap3_enter_idle;
-               if (cx->type == OMAP3_STATE_C1)
-                       dev->safe_state = state;
-               sprintf(state->name, "C%d", count+1);
-               strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
-               count++;
-       }
+       /* C1 . MPU WFI + Core active */
+       cx = _fill_cstate(dev, 0, "MPU ON + CORE ON");
+       (&dev->states[0])->enter = omap3_enter_idle;
+       dev->safe_state = &dev->states[0];
+       cx->valid = 1;  /* C1 is always valid */
+       cx->mpu_state = PWRDM_POWER_ON;
+       cx->core_state = PWRDM_POWER_ON;
 
-       if (!count)
-               return -EINVAL;
-       dev->state_count = count;
+       /* C2 . MPU WFI + Core inactive */
+       cx = _fill_cstate(dev, 1, "MPU ON + CORE ON");
+       cx->mpu_state = PWRDM_POWER_ON;
+       cx->core_state = PWRDM_POWER_ON;
+
+       /* C3 . MPU CSWR + Core inactive */
+       cx = _fill_cstate(dev, 2, "MPU RET + CORE ON");
+       cx->mpu_state = PWRDM_POWER_RET;
+       cx->core_state = PWRDM_POWER_ON;
 
-       if (enable_off_mode)
-               omap3_cpuidle_update_states(PWRDM_POWER_OFF, PWRDM_POWER_OFF);
-       else
-               omap3_cpuidle_update_states(PWRDM_POWER_RET, PWRDM_POWER_RET);
+       /* C4 . MPU OFF + Core inactive */
+       cx = _fill_cstate(dev, 3, "MPU OFF + CORE ON");
+       cx->mpu_state = PWRDM_POWER_OFF;
+       cx->core_state = PWRDM_POWER_ON;
+
+       /* C5 . MPU RET + Core RET */
+       cx = _fill_cstate(dev, 4, "MPU RET + CORE RET");
+       cx->mpu_state = PWRDM_POWER_RET;
+       cx->core_state = PWRDM_POWER_RET;
+
+       /* C6 . MPU OFF + Core RET */
+       cx = _fill_cstate(dev, 5, "MPU OFF + CORE RET");
+       cx->mpu_state = PWRDM_POWER_OFF;
+       cx->core_state = PWRDM_POWER_RET;
+
+       /* C7 . MPU OFF + Core OFF */
+       cx = _fill_cstate(dev, 6, "MPU OFF + CORE OFF");
+       /*
+        * Erratum i583: on 3630 silicon prior to ES1.2, OFF mode cannot be
+        * enabled in a stable form, so the C7 state is disabled on those
+        * revisions.
+        */
+       if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
+               cx->valid = 0;
+               pr_warn("%s: core off state C7 disabled due to i583\n",
+                       __func__);
+       }
+       cx->mpu_state = PWRDM_POWER_OFF;
+       cx->core_state = PWRDM_POWER_OFF;
 
+       dev->state_count = OMAP3_NUM_STATES;
        if (cpuidle_register_device(dev)) {
                printk(KERN_ERR "%s: CPUidle register device failed\n",
                       __func__);
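
Note on the hunk above: the rework drops omap_init_power_states() in favour of the _fill_cstate() helper, which also attaches a per-state omap3_idle_statedata through cpuidle_set_statedata(). As a rough sketch (kernel context only; the handler name and body below are illustrative, not the driver's actual omap3_enter_idle), an enter hook would retrieve that data like this:

#include <linux/cpuidle.h>

/* Illustrative enter hook: fetch the data attached by _fill_cstate() above. */
static int example_enter_idle(struct cpuidle_device *dev,
                              struct cpuidle_state *state)
{
        struct omap3_idle_statedata *cx = cpuidle_get_statedata(state);
        int residency_us = 0;

        if (cx->valid) {
                /* program cx->mpu_state / cx->core_state as the next power
                 * states, execute WFI, and measure the time spent here */
        }
        return residency_us;    /* cpuidle expects the residency in usecs */
}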
index 877c6f5807b7b1d5e68a623715bfa78711286dfd..ba10c24f3d8dcef3378490a9f615c74b471e22a2 100644 (file)
@@ -147,25 +147,24 @@ void __init gpmc_smc91x_init(struct omap_smc91x_platform_data *board_data)
                        goto free1;
        }
 
-       if (gpio_request(gpmc_cfg->gpio_irq, "SMC91X irq") < 0)
+       if (gpio_request_one(gpmc_cfg->gpio_irq, GPIOF_IN, "SMC91X irq") < 0)
                goto free1;
 
-       gpio_direction_input(gpmc_cfg->gpio_irq);
        gpmc_smc91x_resources[1].start = gpio_to_irq(gpmc_cfg->gpio_irq);
 
        if (gpmc_cfg->gpio_pwrdwn) {
-               ret = gpio_request(gpmc_cfg->gpio_pwrdwn, "SMC91X powerdown");
+               ret = gpio_request_one(gpmc_cfg->gpio_pwrdwn,
+                                      GPIOF_OUT_INIT_LOW, "SMC91X powerdown");
                if (ret)
                        goto free2;
-               gpio_direction_output(gpmc_cfg->gpio_pwrdwn, 0);
        }
 
        if (gpmc_cfg->gpio_reset) {
-               ret = gpio_request(gpmc_cfg->gpio_reset, "SMC91X reset");
+               ret = gpio_request_one(gpmc_cfg->gpio_reset,
+                                      GPIOF_OUT_INIT_LOW, "SMC91X reset");
                if (ret)
                        goto free3;
 
-               gpio_direction_output(gpmc_cfg->gpio_reset, 0);
                gpio_set_value(gpmc_cfg->gpio_reset, 1);
                msleep(100);
                gpio_set_value(gpmc_cfg->gpio_reset, 0);
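
Note on the hunk above: each gpio_request() plus gpio_direction_input()/gpio_direction_output() pair is folded into a single gpio_request_one() call. A minimal sketch of the same pattern, with made-up GPIO roles and labels (kernel context):

#include <linux/gpio.h>

/* Illustrative only: claim an input IRQ line and an output driven low. */
static int example_claim_pins(unsigned irq_gpio, unsigned reset_gpio)
{
        int ret;

        ret = gpio_request_one(irq_gpio, GPIOF_IN, "example irq");
        if (ret)
                return ret;

        ret = gpio_request_one(reset_gpio, GPIOF_OUT_INIT_LOW, "example reset");
        if (ret)
                gpio_free(irq_gpio);    /* undo the first request on failure */

        return ret;
}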
index 703f150dd01dbd7b86f54e279ddb116d48fe6316..997033129d2642fc022702c316c71e4376679fb1 100644 (file)
@@ -10,6 +10,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#define pr_fmt(fmt) "%s: " fmt, __func__
 
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
@@ -30,7 +31,7 @@ static struct resource gpmc_smsc911x_resources[] = {
                .flags          = IORESOURCE_MEM,
        },
        [1] = {
-               .flags          = IORESOURCE_IRQ,
+               .flags          = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL,
        },
 };
 
@@ -41,16 +42,6 @@ static struct smsc911x_platform_config gpmc_smsc911x_config = {
        .flags          = SMSC911X_USE_16BIT,
 };
 
-static struct platform_device gpmc_smsc911x_device = {
-       .name           = "smsc911x",
-       .id             = -1,
-       .num_resources  = ARRAY_SIZE(gpmc_smsc911x_resources),
-       .resource       = gpmc_smsc911x_resources,
-       .dev            = {
-               .platform_data = &gpmc_smsc911x_config,
-       },
-};
-
 /*
  * Initialize smsc911x device connected to the GPMC. Note that we
  * assume that pin multiplexing is done in the board-*.c file,
@@ -58,46 +49,49 @@ static struct platform_device gpmc_smsc911x_device = {
  */
 void __init gpmc_smsc911x_init(struct omap_smsc911x_platform_data *board_data)
 {
+       struct platform_device *pdev;
        unsigned long cs_mem_base;
        int ret;
 
        gpmc_cfg = board_data;
 
        if (gpmc_cs_request(gpmc_cfg->cs, SZ_16M, &cs_mem_base) < 0) {
-               printk(KERN_ERR "Failed to request GPMC mem for smsc911x\n");
+               pr_err("Failed to request GPMC mem region\n");
                return;
        }
 
        gpmc_smsc911x_resources[0].start = cs_mem_base + 0x0;
        gpmc_smsc911x_resources[0].end = cs_mem_base + 0xff;
 
-       if (gpio_request(gpmc_cfg->gpio_irq, "smsc911x irq") < 0) {
-               printk(KERN_ERR "Failed to request GPIO%d for smsc911x IRQ\n",
-                               gpmc_cfg->gpio_irq);
+       if (gpio_request_one(gpmc_cfg->gpio_irq, GPIOF_IN, "smsc911x irq")) {
+               pr_err("Failed to request IRQ GPIO%d\n", gpmc_cfg->gpio_irq);
                goto free1;
        }
 
-       gpio_direction_input(gpmc_cfg->gpio_irq);
        gpmc_smsc911x_resources[1].start = gpio_to_irq(gpmc_cfg->gpio_irq);
-       gpmc_smsc911x_resources[1].flags |=
-                                       (gpmc_cfg->flags & IRQF_TRIGGER_MASK);
 
        if (gpio_is_valid(gpmc_cfg->gpio_reset)) {
-               ret = gpio_request(gpmc_cfg->gpio_reset, "smsc911x reset");
+               ret = gpio_request_one(gpmc_cfg->gpio_reset,
+                                      GPIOF_OUT_INIT_HIGH, "smsc911x reset");
                if (ret) {
-                       printk(KERN_ERR "Failed to request GPIO%d for smsc911x reset\n",
-                                       gpmc_cfg->gpio_reset);
+                       pr_err("Failed to request reset GPIO%d\n",
+                              gpmc_cfg->gpio_reset);
                        goto free2;
                }
 
-               gpio_direction_output(gpmc_cfg->gpio_reset, 1);
                gpio_set_value(gpmc_cfg->gpio_reset, 0);
                msleep(100);
                gpio_set_value(gpmc_cfg->gpio_reset, 1);
        }
 
-       if (platform_device_register(&gpmc_smsc911x_device) < 0) {
-               printk(KERN_ERR "Unable to register smsc911x device\n");
+       if (gpmc_cfg->flags)
+               gpmc_smsc911x_config.flags = gpmc_cfg->flags;
+
+       pdev = platform_device_register_resndata(NULL, "smsc911x", gpmc_cfg->id,
+                gpmc_smsc911x_resources, ARRAY_SIZE(gpmc_smsc911x_resources),
+                &gpmc_smsc911x_config, sizeof(gpmc_smsc911x_config));
+       if (!pdev) {
+               pr_err("Unable to register platform device\n");
                gpio_free(gpmc_cfg->gpio_reset);
                goto free2;
        }
@@ -109,5 +103,5 @@ free2:
 free1:
        gpmc_cs_free(gpmc_cfg->cs);
 
-       printk(KERN_ERR "Could not initialize smsc911x\n");
+       pr_err("Could not initialize smsc911x device\n");
 }
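
Note on the hunk above: besides the pr_fmt()/pr_err() conversion, it drops the static platform_device and registers the device with platform_device_register_resndata(), which takes the resources and platform data in one call. A hedged sketch of that helper's use (device name, resources and error handling are illustrative; the error check assumes the ERR_PTR convention):

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static struct resource example_resources[] = {
        [0] = { .flags = IORESOURCE_MEM },
        [1] = { .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL },
};

static int example_register(const void *pdata, size_t pdata_size)
{
        struct platform_device *pdev;

        pdev = platform_device_register_resndata(NULL, "example-dev", -1,
                        example_resources, ARRAY_SIZE(example_resources),
                        pdata, pdata_size);
        if (IS_ERR(pdev)) {
                pr_err("could not register example-dev\n");
                return PTR_ERR(pdev);
        }
        return 0;
}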
index 82632c24076f443f80bbc88f78e12a6c15ad0c76..7b9f1909ddb2a3bc110d83b721a2466ec574f01a 100644 (file)
@@ -63,10 +63,7 @@ static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
        char *source_name;
 
        /* Get the Type of interrupt */
-       if (irq == l3->app_irq)
-               inttype = L3_APPLICATION_ERROR;
-       else
-               inttype = L3_DEBUG_ERROR;
+       inttype = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;
 
        for (i = 0; i < L3_MODULES; i++) {
                /*
@@ -84,10 +81,10 @@ static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
 
                        err_src = j;
                        /* Read the stderrlog_main_source from clk domain */
-                       std_err_main_addr = base + (*(l3_targ[i] + err_src));
-                       std_err_main =  readl(std_err_main_addr);
+                       std_err_main_addr = base + *(l3_targ[i] + err_src);
+                       std_err_main = readl(std_err_main_addr);
 
-                       switch ((std_err_main & CUSTOM_ERROR)) {
+                       switch (std_err_main & CUSTOM_ERROR) {
                        case STANDARD_ERROR:
                                source_name =
                                l3_targ_stderrlog_main_name[i][err_src];
@@ -132,49 +129,49 @@ static int __init omap4_l3_probe(struct platform_device *pdev)
 
        l3 = kzalloc(sizeof(*l3), GFP_KERNEL);
        if (!l3)
-               ret = -ENOMEM;
+               return -ENOMEM;
 
        platform_set_drvdata(pdev, l3);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "couldn't find resource 0\n");
                ret = -ENODEV;
-               goto err1;
+               goto err0;
        }
 
        l3->l3_base[0] = ioremap(res->start, resource_size(res));
-       if (!(l3->l3_base[0])) {
+       if (!l3->l3_base[0]) {
                dev_err(&pdev->dev, "ioremap failed\n");
                ret = -ENOMEM;
-               goto err2;
+               goto err0;
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (!res) {
                dev_err(&pdev->dev, "couldn't find resource 1\n");
                ret = -ENODEV;
-               goto err3;
+               goto err1;
        }
 
        l3->l3_base[1] = ioremap(res->start, resource_size(res));
-       if (!(l3->l3_base[1])) {
+       if (!l3->l3_base[1]) {
                dev_err(&pdev->dev, "ioremap failed\n");
                ret = -ENOMEM;
-               goto err4;
+               goto err1;
        }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
        if (!res) {
                dev_err(&pdev->dev, "couldn't find resource 2\n");
                ret = -ENODEV;
-               goto err5;
+               goto err2;
        }
 
        l3->l3_base[2] = ioremap(res->start, resource_size(res));
-       if (!(l3->l3_base[2])) {
+       if (!l3->l3_base[2]) {
                dev_err(&pdev->dev, "ioremap failed\n");
                ret = -ENOMEM;
-               goto err6;
+               goto err2;
        }
 
        /*
@@ -187,7 +184,7 @@ static int __init omap4_l3_probe(struct platform_device *pdev)
        if (ret) {
                pr_crit("L3: request_irq failed to register for 0x%x\n",
                                         OMAP44XX_IRQ_L3_DBG);
-               goto err7;
+               goto err3;
        }
        l3->debug_irq = irq;
 
@@ -198,24 +195,22 @@ static int __init omap4_l3_probe(struct platform_device *pdev)
        if (ret) {
                pr_crit("L3: request_irq failed to register for 0x%x\n",
                                         OMAP44XX_IRQ_L3_APP);
-               goto err8;
+               goto err4;
        }
        l3->app_irq = irq;
 
-       goto err0;
-err8:
-err7:
-       iounmap(l3->l3_base[2]);
-err6:
-err5:
-       iounmap(l3->l3_base[1]);
+       return 0;
+
 err4:
+       free_irq(l3->debug_irq, l3);
 err3:
-       iounmap(l3->l3_base[0]);
+       iounmap(l3->l3_base[2]);
 err2:
+       iounmap(l3->l3_base[1]);
 err1:
-       kfree(l3);
+       iounmap(l3->l3_base[0]);
 err0:
+       kfree(l3);
        return ret;
 }
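
Note on the probe rework above: the change is mostly about the error path. Instead of a chain of half-empty labels, each goto target now releases exactly the resources acquired before the failure, in reverse order. A standalone, compilable illustration of the idiom, with hypothetical step/undo helpers standing in for kzalloc()/ioremap()/request_irq():

/* Hypothetical acquire/release helpers; step_c() fails to force the unwind. */
static int acquired_a, acquired_b;

static int step_a(void)  { acquired_a = 1; return 0; }
static int step_b(void)  { acquired_b = 1; return 0; }
static int step_c(void)  { return -1; }
static void undo_b(void) { acquired_b = 0; }
static void undo_a(void) { acquired_a = 0; }

static int example_probe(void)
{
        int ret;

        ret = step_a();
        if (ret)
                return ret;             /* nothing acquired yet */

        ret = step_b();
        if (ret)
                goto err_a;             /* only step_a() to undo */

        ret = step_c();
        if (ret)
                goto err_b;             /* undo step_b(), then step_a() */

        return 0;

err_b:
        undo_b();
err_a:
        undo_a();
        return ret;
}

int main(void)
{
        (void)example_probe();           /* fails in step_c() and unwinds */
        return acquired_a || acquired_b; /* 0 when everything was released */
}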
 
index 4321e79389291c9695e4695d91132f64b48cc803..873c0e33b512e1da521e2277a8122b60aa84a779 100644 (file)
@@ -155,7 +155,7 @@ static irqreturn_t omap3_l3_block_irq(struct omap3_l3 *l3,
        u8                      multi = error & L3_ERROR_LOG_MULTI;
        u32                     address = omap3_l3_decode_addr(error_addr);
 
-       WARN(true, "%s Error seen by %s %s at address %x\n",
+       WARN(true, "%s seen by %s %s at address %x\n",
                                 omap3_l3_code_string(code),
                          omap3_l3_initiator_string(initid),
                             multi ? "Multiple Errors" : "",
@@ -167,21 +167,15 @@ static irqreturn_t omap3_l3_block_irq(struct omap3_l3 *l3,
 static irqreturn_t omap3_l3_app_irq(int irq, void *_l3)
 {
        struct omap3_l3         *l3 = _l3;
-
        u64                     status, clear;
        u64                     error;
        u64                     error_addr;
        u64                     err_source = 0;
        void                    __iomem *base;
        int                     int_type;
-
        irqreturn_t             ret = IRQ_NONE;
 
-       if (irq == l3->app_irq)
-               int_type = L3_APPLICATION_ERROR;
-       else
-               int_type = L3_DEBUG_ERROR;
-
+       int_type = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;
        if (!int_type) {
                status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_0);
                /*
@@ -202,7 +196,6 @@ static irqreturn_t omap3_l3_app_irq(int irq, void *_l3)
 
        base = l3->rt + *(omap3_l3_bases[int_type] + err_source);
        error = omap3_l3_readll(base, L3_ERROR_LOG);
-
        if (error) {
                error_addr = omap3_l3_readll(base, L3_ERROR_LOG_ADDR);
 
@@ -210,9 +203,8 @@ static irqreturn_t omap3_l3_app_irq(int irq, void *_l3)
        }
 
        /* Clear the status register */
-       clear = ((L3_AGENT_STATUS_CLEAR_IA << int_type) |
-                (L3_AGENT_STATUS_CLEAR_TA));
-
+       clear = (L3_AGENT_STATUS_CLEAR_IA << int_type) |
+               L3_AGENT_STATUS_CLEAR_TA;
        omap3_l3_writell(base, L3_AGENT_STATUS, clear);
 
        /* clear the error log register */
@@ -228,10 +220,8 @@ static int __init omap3_l3_probe(struct platform_device *pdev)
        int                     ret;
 
        l3 = kzalloc(sizeof(*l3), GFP_KERNEL);
-       if (!l3) {
-               ret = -ENOMEM;
-               goto err0;
-       }
+       if (!l3)
+               return -ENOMEM;
 
        platform_set_drvdata(pdev, l3);
 
@@ -239,13 +229,13 @@ static int __init omap3_l3_probe(struct platform_device *pdev)
        if (!res) {
                dev_err(&pdev->dev, "couldn't find resource\n");
                ret = -ENODEV;
-               goto err1;
+               goto err0;
        }
        l3->rt = ioremap(res->start, resource_size(res));
-       if (!(l3->rt)) {
+       if (!l3->rt) {
                dev_err(&pdev->dev, "ioremap failed\n");
                ret = -ENOMEM;
-               goto err2;
+               goto err0;
        }
 
        l3->debug_irq = platform_get_irq(pdev, 0);
@@ -254,28 +244,26 @@ static int __init omap3_l3_probe(struct platform_device *pdev)
                "l3-debug-irq", l3);
        if (ret) {
                dev_err(&pdev->dev, "couldn't request debug irq\n");
-               goto err3;
+               goto err1;
        }
 
        l3->app_irq = platform_get_irq(pdev, 1);
        ret = request_irq(l3->app_irq, omap3_l3_app_irq,
                IRQF_DISABLED | IRQF_TRIGGER_RISING,
                "l3-app-irq", l3);
-
        if (ret) {
                dev_err(&pdev->dev, "couldn't request app irq\n");
-               goto err4;
+               goto err2;
        }
 
-       goto err0;
+       return 0;
 
-err4:
-err3:
-       iounmap(l3->rt);
 err2:
+       free_irq(l3->debug_irq, l3);
 err1:
-       kfree(l3);
+       iounmap(l3->rt);
 err0:
+       kfree(l3);
        return ret;
 }
 
index 05f6abc96b0d6032edfb3e2b21f85bc90099ba5d..f47813edd95143d938c554fd3dc19b71f492237a 100644 (file)
@@ -50,13 +50,16 @@ int omap4430_phy_init(struct device *dev)
 {
        ctrl_base = ioremap(OMAP443X_SCM_BASE, SZ_1K);
        if (!ctrl_base) {
-               dev_err(dev, "control module ioremap failed\n");
+               pr_err("control module ioremap failed\n");
                return -ENOMEM;
        }
        /* Power down the phy */
        __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF);
-       phyclk = clk_get(dev, "ocp2scp_usb_phy_ick");
 
+       if (!dev)
+               return 0;
+
+       phyclk = clk_get(dev, "ocp2scp_usb_phy_ick");
        if (IS_ERR(phyclk)) {
                dev_err(dev, "cannot clk_get ocp2scp_usb_phy_ick\n");
                iounmap(ctrl_base);
@@ -228,7 +231,7 @@ void am35x_musb_clear_irq(void)
        regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR);
 }
 
-void am35x_musb_set_mode(u8 musb_mode)
+void am35x_set_mode(u8 musb_mode)
 {
        u32 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2);
 
index 797bfd12b643fe3cc697bf2b7a7b09be522accc7..45bcfce7735248732a609f647870cee35d19f8a9 100644 (file)
@@ -36,11 +36,16 @@ static inline int omap4_opp_init(void)
 }
 #endif
 
+/*
+ * cpuidle mach specific parameters
+ *
+ * The board code can override the default C-states definition using
+ * omap3_pm_init_cpuidle
+ */
 struct cpuidle_params {
-       u8  valid;
-       u32 sleep_latency;
-       u32 wake_latency;
-       u32 threshold;
+       u32 exit_latency;       /* exit_latency = sleep + wake-up latencies */
+       u32 target_residency;
+       u8 valid;               /* validates the C-state */
 };
 
 #if defined(CONFIG_PM) && defined(CONFIG_CPU_IDLE)
@@ -73,10 +78,6 @@ extern u32 sleep_while_idle;
 #define sleep_while_idle 0
 #endif
 
-#if defined(CONFIG_CPU_IDLE)
-extern void omap3_cpuidle_update_states(u32, u32);
-#endif
-
 #if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
 extern void pm_dbg_update_time(struct powerdomain *pwrdm, int prev);
 extern int pm_dbg_regset_save(int reg_set);
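
Note on the struct cpuidle_params change above: the separate sleep and wake-up latencies collapse into a single exit_latency and the threshold becomes target_residency, matching the cpuidle core's field names. Under that layout a board file can still override the defaults through omap3_pm_init_cpuidle(), as the new comment says. The sketch below is illustrative only, and the latency/residency numbers are placeholders rather than measured values:

/* board-example.c (hypothetical board file) */
#include <linux/init.h>

#include "pm.h"

static struct cpuidle_params example_cpuidle_params[] = {
        /* exit_latency = sleep + wake-up latency, target_residency, valid */
        {     2 +     2,      5, 1 },   /* C1 */
        {    10 +    10,     30, 1 },   /* C2 */
        {    50 +    50,    300, 1 },   /* C3 */
        {  1500 +  1800,   4000, 1 },   /* C4 */
        {  2500 +  7500,  12000, 1 },   /* C5 */
        {  3000 +  8500,  15000, 1 },   /* C6 */
        { 10000 + 30000, 300000, 0 },   /* C7 kept disabled on this board */
};

void __init example_board_pm_init(void)
{
        omap3_pm_init_cpuidle(example_cpuidle_params);
}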
index 0c5e3a46a3ada0a51403f1770542923e273fad8a..c155c9d1c82cbe3b7868e545b02033e530f0afca 100644 (file)
@@ -779,18 +779,6 @@ void omap3_pm_off_mode_enable(int enable)
        else
                state = PWRDM_POWER_RET;
 
-#ifdef CONFIG_CPU_IDLE
-       /*
-        * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot
-        * enable OFF mode in a stable form for previous revisions, restrict
-        * instead to RET
-        */
-       if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
-               omap3_cpuidle_update_states(state, PWRDM_POWER_RET);
-       else
-               omap3_cpuidle_update_states(state, state);
-#endif
-
        list_for_each_entry(pwrst, &pwrst_list, node) {
                if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
                                pwrst->pwrdm == core_pwrdm &&
@@ -895,8 +883,6 @@ static int __init omap3_pm_init(void)
 
        pm_errata_configure();
 
-       printk(KERN_ERR "Power Management for TI OMAP3.\n");
-
        /* XXX prcm_setup_regs needs to be before enabling hw
         * supervised mode for powerdomains */
        prcm_setup_regs();
index 76cfff2db5141e26b2b0b7a0396abb37f342ec92..59a870be8390289931bde435d0499952ec9ecb00 100644 (file)
@@ -105,13 +105,11 @@ static int __init omap4_pm_init(void)
 
        pr_err("Power Management for TI OMAP4.\n");
 
-#ifdef CONFIG_PM
        ret = pwrdm_for_each(pwrdms_setup, NULL);
        if (ret) {
                pr_err("Failed to setup powerdomains\n");
                goto err2;
        }
-#endif
 
 #ifdef CONFIG_SUSPEND
        suspend_set_ops(&omap_pm_ops);
index 13e24f913dd4044199faebbfcb9bd462ad6fc22d..fb7dc52394a8fd8a872fb0e1f22d0cc867a3df9b 100644 (file)
@@ -847,6 +847,14 @@ static int __init omap_sr_probe(struct platform_device *pdev)
                goto err_free_devinfo;
        }
 
+       mem = request_mem_region(mem->start, resource_size(mem),
+                                       dev_name(&pdev->dev));
+       if (!mem) {
+               dev_err(&pdev->dev, "%s: no mem region\n", __func__);
+               ret = -EBUSY;
+               goto err_free_devinfo;
+       }
+
        irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 
        pm_runtime_enable(&pdev->dev);
@@ -883,7 +891,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
                ret = sr_late_init(sr_info);
                if (ret) {
                        pr_warning("%s: Error in SR late init\n", __func__);
-                       goto err_release_region;
+                       return ret;
                }
        }
 
@@ -896,7 +904,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
        vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm);
        if (!vdd_dbg_dir) {
                ret = -EINVAL;
-               goto err_release_region;
+               goto err_iounmap;
        }
 
        sr_info->dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir);
@@ -904,7 +912,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n",
                        __func__);
                ret = PTR_ERR(sr_info->dbg_dir);
-               goto err_release_region;
+               goto err_iounmap;
        }
 
        (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR,
@@ -921,7 +929,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "%s: Unable to create debugfs directory"
                        "for n-values\n", __func__);
                ret = PTR_ERR(nvalue_dir);
-               goto err_release_region;
+               goto err_debugfs;
        }
 
        omap_voltage_get_volttable(sr_info->voltdm, &volt_data);
@@ -931,7 +939,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
                        "entries for n-values\n",
                        __func__, sr_info->voltdm->name);
                ret = -ENODATA;
-               goto err_release_region;
+               goto err_debugfs;
        }
 
        for (i = 0; i < sr_info->nvalue_count; i++) {
@@ -945,6 +953,11 @@ static int __init omap_sr_probe(struct platform_device *pdev)
 
        return ret;
 
+err_debugfs:
+       debugfs_remove_recursive(sr_info->dbg_dir);
+err_iounmap:
+       list_del(&sr_info->node);
+       iounmap(sr_info->base);
 err_release_region:
        release_mem_region(mem->start, resource_size(mem));
 err_free_devinfo:
index 35559f77e2deb8e47fa7e2d84caffebdf0740bf1..c7ed540d868d218a56a1b4c07db3b417a2f91845 100644 (file)
@@ -108,7 +108,13 @@ static void usb_musb_mux_init(struct omap_musb_board_data *board_data)
        }
 }
 
-void __init usb_musb_init(struct omap_musb_board_data *board_data)
+static struct omap_musb_board_data musb_default_board_data = {
+       .interface_type         = MUSB_INTERFACE_ULPI,
+       .mode                   = MUSB_OTG,
+       .power                  = 100,
+};
+
+void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
 {
        struct omap_hwmod               *oh;
        struct omap_device              *od;
@@ -116,11 +122,12 @@ void __init usb_musb_init(struct omap_musb_board_data *board_data)
        struct device                   *dev;
        int                             bus_id = -1;
        const char                      *oh_name, *name;
+       struct omap_musb_board_data     *board_data;
 
-       if (cpu_is_omap3517() || cpu_is_omap3505()) {
-       } else if (cpu_is_omap44xx()) {
-               usb_musb_mux_init(board_data);
-       }
+       if (musb_board_data)
+               board_data = musb_board_data;
+       else
+               board_data = &musb_default_board_data;
 
        /*
         * REVISIT: This line can be removed once all the platforms using
@@ -164,10 +171,15 @@ void __init usb_musb_init(struct omap_musb_board_data *board_data)
        dev->dma_mask = &musb_dmamask;
        dev->coherent_dma_mask = musb_dmamask;
        put_device(dev);
+
+       if (cpu_is_omap44xx())
+               omap4430_phy_init(dev);
 }
 
 #else
 void __init usb_musb_init(struct omap_musb_board_data *board_data)
 {
+       if (cpu_is_omap44xx())
+               omap4430_phy_init(NULL);
 }
 #endif /* CONFIG_USB_MUSB_SOC */
index 8a3c05f3c1d6eff4db8b10e9d2ee3ffe0fc5d10a..8dd26b765b7d3f3aa976d8907f6eb8d0b631b1d1 100644 (file)
@@ -293,12 +293,11 @@ tusb6010_setup_interface(struct musb_hdrc_platform_data *data,
                        );
 
        /* IRQ */
-       status = gpio_request(irq, "TUSB6010 irq");
+       status = gpio_request_one(irq, GPIOF_IN, "TUSB6010 irq");
        if (status < 0) {
                printk(error, 3, status);
                return status;
        }
-       gpio_direction_input(irq);
        tusb_resources[2].start = irq + IH_GPIO_BASE;
 
        /* set up memory timings ... can speed them up later */
index 0c1552d9d99508c3bc0d2a3aaf4ae5038becc089..9ef3789ded4b0f1c5e48d1782c7004067a03d8cd 100644 (file)
@@ -148,7 +148,6 @@ static int vp_volt_debug_get(void *data, u64 *val)
        }
 
        vsel = vdd->read_reg(prm_mod_offs, vdd->vp_data->voltage);
-       pr_notice("curr_vsel = %x\n", vsel);
 
        if (!vdd->pmic_info->vsel_to_uv) {
                pr_warning("PMIC function to convert vsel to voltage"
index 872de0bf1e6bb777368b1b1d61eb6d564dc3d473..ea6c9c88c725d1eee7e77390bdf2a6d4f68bf3b2 100644 (file)
 #ifndef __ASM_ARCH_OMAP_GPMC_SMSC911X_H__
 
 struct omap_smsc911x_platform_data {
+       int     id;
        int     cs;
        int     gpio_irq;
        int     gpio_reset;
        u32     flags;
 };
 
-#if defined(CONFIG_SMSC911X) || \
-       defined(CONFIG_SMSC911X_MODULE)
+#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
 
 extern void gpmc_smsc911x_init(struct omap_smsc911x_platform_data *d);
 
index 565d2664f5a739adc503bca1d86751b4e997d2c7..ac4b60d9aa299c86208e66cdbe00df1432b6a650 100644 (file)
@@ -129,7 +129,6 @@ static inline void __arch_decomp_setup(unsigned long arch_id)
                DEBUG_LL_OMAP1(3, sx1);
 
                /* omap2 based boards using UART1 */
-               DEBUG_LL_OMAP2(1, omap2evm);
                DEBUG_LL_OMAP2(1, omap_2430sdp);
                DEBUG_LL_OMAP2(1, omap_apollon);
                DEBUG_LL_OMAP2(1, omap_h4);
index 02b96c8f6a17cd687f63a5e439d44ba57f621339..17d3c939775c7164e2c246f8a150bb212bbb0647 100644 (file)
@@ -113,7 +113,7 @@ extern int omap4430_phy_suspend(struct device *dev, int suspend);
 extern void am35x_musb_reset(void);
 extern void am35x_musb_phy_power(u8 on);
 extern void am35x_musb_clear_irq(void);
-extern void am35x_musb_set_mode(u8 musb_mode);
+extern void am35x_set_mode(u8 musb_mode);
 
 /*
  * FIXME correct answer depends on hmc_mode,
index 72444d97f80cd759551256591bc1b9d8ff733b15..b70c19bab63af141d8815e1ec76fb96f3b06b63b 100644 (file)
@@ -270,14 +270,21 @@ static inline int __fls(unsigned long word)
 
 unsigned long find_first_zero_bit(const unsigned long *addr,
                                  unsigned long size);
+#define find_first_zero_bit find_first_zero_bit
+
 unsigned long find_next_zero_bit(const unsigned long *addr,
                                 unsigned long size,
                                 unsigned long offset);
+#define find_next_zero_bit find_next_zero_bit
+
 unsigned long find_first_bit(const unsigned long *addr,
                             unsigned long size);
+#define find_first_bit find_first_bit
+
 unsigned long find_next_bit(const unsigned long *addr,
                                 unsigned long size,
                                 unsigned long offset);
+#define find_next_bit find_next_bit
 
 /*
  * ffs: find first bit set. This is defined the same way as
@@ -299,6 +306,14 @@ static inline int ffs(unsigned long word)
 #include <asm-generic/bitops/hweight.h>
 #include <asm-generic/bitops/lock.h>
 
+extern unsigned long find_next_zero_bit_le(const void *addr,
+               unsigned long size, unsigned long offset);
+#define find_next_zero_bit_le find_next_zero_bit_le
+
+extern unsigned long find_next_bit_le(const void *addr,
+               unsigned long size, unsigned long offset);
+#define find_next_bit_le find_next_bit_le
+
 #include <asm-generic/bitops/le.h>
 #include <asm-generic/bitops/ext2-atomic.h>
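
Note on the self-referencing defines above (e.g. #define find_first_zero_bit find_first_zero_bit): they mark these names as provided by the architecture, which is what allows the GENERIC_FIND_* Kconfig options to be dropped elsewhere in this series. The generic implementations are then compiled only when a name is still undefined, roughly in this style (illustrative, modelled on asm-generic/bitops/find.h):

#ifndef find_next_bit
extern unsigned long find_next_bit(const unsigned long *addr,
                                   unsigned long size,
                                   unsigned long offset);
#endif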
 
index a18180f2d007aa3d5e70a5b9fa6da7a01ad7b60c..d619b17c4413009a5baaf89a3af21e2dc556a798 100644 (file)
@@ -47,9 +47,6 @@ config GENERIC_BUG
 config ZONE_DMA
        def_bool y
 
-config GENERIC_FIND_NEXT_BIT
-       def_bool y
-
 config GENERIC_GPIO
        def_bool y
 
index 3ac0c72e9fee864c370955bc6ff1d8cc64c132f1..aaf884591b070cfa6246804f3491efac713afcab 100644 (file)
@@ -108,6 +108,7 @@ static inline void arch_kgdb_breakpoint(void)
 #else
 # define CACHE_FLUSH_IS_SAFE   1
 #endif
+#define GDB_ADJUSTS_BREAK_OFFSET
 #define HW_INST_WATCHPOINT_NUM 6
 #define HW_WATCHPOINT_NUM      8
 #define TYPE_INST_WATCHPOINT   0
index 1066d63e62b5da4683a2a2c0bf393c72eda1b939..7854d4367c15c2247c70ef0a07af9ee933df0887 100644 (file)
@@ -102,9 +102,6 @@ struct pt_regs {
 /* user_mode returns true if only one bit is set in IPEND, other than the
    master interrupt enable.  */
 #define user_mode(regs) (!(((regs)->ipend & ~0x10) & (((regs)->ipend & ~0x10) - 1)))
-#define instruction_pointer(regs) ((regs)->pc)
-#define user_stack_pointer(regs)  ((regs)->usp)
-#define profile_pc(regs) instruction_pointer(regs)
 extern void show_regs(struct pt_regs *);
 
 #define arch_has_single_step() (1)
@@ -128,6 +125,8 @@ extern int is_user_addr_valid(struct task_struct *child,
            ((unsigned long)task_stack_page(task) + \
             (THREAD_SIZE - sizeof(struct pt_regs)))
 
+#include <asm-generic/ptrace.h>
+
 #endif  /*  __KERNEL__  */
 
 #endif                         /* __ASSEMBLY__ */
index a6d03069d0fff21a51bff6c94ec11e7a18be2f9f..b6b94a27d276abc037ae212a471c2cade7930da7 100644 (file)
@@ -31,10 +31,6 @@ config ARCH_HAS_ILOG2_U64
        bool
        default n
 
-config GENERIC_FIND_NEXT_BIT
-       bool
-       default y
-
 config GENERIC_HWEIGHT
        bool
        default y
index 064f62196745d0bbd5d0ddadec022754a2659c79..cb884e48942560f23eb9de40c24cb62d9b7eb1e9 100644 (file)
@@ -19,14 +19,6 @@ config RWSEM_GENERIC_SPINLOCK
 config RWSEM_XCHGADD_ALGORITHM
        bool
 
-config GENERIC_FIND_NEXT_BIT
-       bool
-       default y
-
-config GENERIC_FIND_BIT_LE
-       bool
-       default y
-
 config GENERIC_HWEIGHT
        bool
        default y
diff --git a/arch/frv/include/asm/suspend.h b/arch/frv/include/asm/suspend.h
deleted file mode 100644 (file)
index 5fa7b5a..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/* suspend.h: suspension stuff
- *
- * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#ifndef _ASM_SUSPEND_H
-#define _ASM_SUSPEND_H
-
-static inline int arch_prepare_suspend(void)
-{
-       return 0;
-}
-
-#endif /* _ASM_SUSPEND_H */
index e20322ffcaf839d2e9fabb4ecfafdbc997a8c221..091ed6192ae831f7f2a96ecb663c1528b6c29632 100644 (file)
@@ -41,14 +41,6 @@ config ARCH_HAS_ILOG2_U64
        bool
        default n
 
-config GENERIC_FIND_NEXT_BIT
-       bool
-       default y
-
-config GENERIC_FIND_BIT_LE
-       bool
-       default y
-
 config GENERIC_HWEIGHT
        bool
        default y
index e5cc56ae6ce3089129624b26ef24445592586348..38280ef4a2af219c8a2aba4fef0a43830acc8515 100644 (file)
@@ -78,10 +78,6 @@ config HUGETLB_PAGE_SIZE_VARIABLE
        depends on HUGETLB_PAGE
        default y
 
-config GENERIC_FIND_NEXT_BIT
-       bool
-       default y
-
 config GENERIC_CALIBRATE_DELAY
        bool
        default y
index 04440cc09b40cbd2a124b78474f6bba30a26d0b2..85118dfe9bb5ac3548296848efbd2ff0d1d7890b 100644 (file)
@@ -36,7 +36,7 @@
 static cycle_t itc_get_cycles(struct clocksource *cs);
 
 struct fsyscall_gtod_data_t fsyscall_gtod_data = {
-       .lock = SEQLOCK_UNLOCKED,
+       .lock = __SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock),
 };
 
 struct itc_jitter_data_t itc_jitter_data;
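
Note on the seqlock change above: SEQLOCK_UNLOCKED was an anonymous static initializer, while __SEQLOCK_UNLOCKED() takes the lock's name so the lock-debugging code can identify it. A minimal sketch of the same initialisation pattern (the structure is illustrative):

#include <linux/types.h>
#include <linux/seqlock.h>

struct example_gtod {
        seqlock_t lock;
        u64       cycles;
};

static struct example_gtod example_gtod = {
        .lock = __SEQLOCK_UNLOCKED(example_gtod.lock),
};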
index 736b808d2291bc149c7801cceb73876d50e0f46b..85b44e8582254a5f154ecc2c49455e5f7b0f3157 100644 (file)
@@ -256,14 +256,6 @@ config ARCH_HAS_ILOG2_U64
        bool
        default n
 
-config GENERIC_FIND_NEXT_BIT
-       bool
-       default y
-
-config GENERIC_FIND_BIT_LE
-       bool
-       default y
-
 config GENERIC_HWEIGHT
        bool
        default y
index 8accc1bb026323552b534b70c51e28fb82cd19e9..cf7829a615513e1597d903c6398348d8e0d5ac1e 100644 (file)
@@ -81,11 +81,11 @@ static __inline__ int cpu_number_map(int cpu)
 
 static __inline__ unsigned int num_booting_cpus(void)
 {
-       return cpus_weight(cpu_callout_map);
+       return cpumask_weight(&cpu_callout_map);
 }
 
 extern void smp_send_timer(void);
-extern unsigned long send_IPI_mask_phys(cpumask_t, int, int);
+extern unsigned long send_IPI_mask_phys(const cpumask_t*, int, int);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
index fc10b39893d42ee0cd4ed8a3e1102a79d7d51273..092d40a6708e4a35ffec5e6f4f966791551d50c6 100644 (file)
@@ -30,6 +30,7 @@
 #include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/m32r.h>
+#include <asm/tlbflush.h>
 
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
 /* Data structures and variables                                             */
@@ -61,33 +62,22 @@ extern spinlock_t ipi_lock[];
 /* Function Prototypes                                                       */
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
 
-void smp_send_reschedule(int);
 void smp_reschedule_interrupt(void);
-
-void smp_flush_cache_all(void);
 void smp_flush_cache_all_interrupt(void);
 
-void smp_flush_tlb_all(void);
 static void flush_tlb_all_ipi(void *);
-
-void smp_flush_tlb_mm(struct mm_struct *);
-void smp_flush_tlb_range(struct vm_area_struct *, unsigned long, \
-       unsigned long);
-void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
 static void flush_tlb_others(cpumask_t, struct mm_struct *,
        struct vm_area_struct *, unsigned long);
+
 void smp_invalidate_interrupt(void);
 
-void smp_send_stop(void);
 static void stop_this_cpu(void *);
 
-void smp_send_timer(void);
 void smp_ipi_timer_interrupt(struct pt_regs *);
 void smp_local_timer_interrupt(void);
 
 static void send_IPI_allbutself(int, int);
 static void send_IPI_mask(const struct cpumask *, int, int);
-unsigned long send_IPI_mask_phys(cpumask_t, int, int);
 
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
 /* Rescheduling request Routines                                             */
@@ -162,10 +152,10 @@ void smp_flush_cache_all(void)
        unsigned long *mask;
 
        preempt_disable();
-       cpumask = cpu_online_map;
-       cpu_clear(smp_processor_id(), cpumask);
+       cpumask_copy(&cpumask, cpu_online_mask);
+       cpumask_clear_cpu(smp_processor_id(), &cpumask);
        spin_lock(&flushcache_lock);
-       mask=cpus_addr(cpumask);
+       mask=cpumask_bits(&cpumask);
        atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
        send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
        _flush_cache_copyback_all();
@@ -263,8 +253,8 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
        preempt_disable();
        cpu_id = smp_processor_id();
        mmc = &mm->context[cpu_id];
-       cpu_mask = *mm_cpumask(mm);
-       cpu_clear(cpu_id, cpu_mask);
+       cpumask_copy(&cpu_mask, mm_cpumask(mm));
+       cpumask_clear_cpu(cpu_id, &cpu_mask);
 
        if (*mmc != NO_CONTEXT) {
                local_irq_save(flags);
@@ -275,7 +265,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
                        cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
                local_irq_restore(flags);
        }
-       if (!cpus_empty(cpu_mask))
+       if (!cpumask_empty(&cpu_mask))
                flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);
 
        preempt_enable();
@@ -333,8 +323,8 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
        preempt_disable();
        cpu_id = smp_processor_id();
        mmc = &mm->context[cpu_id];
-       cpu_mask = *mm_cpumask(mm);
-       cpu_clear(cpu_id, cpu_mask);
+       cpumask_copy(&cpu_mask, mm_cpumask(mm));
+       cpumask_clear_cpu(cpu_id, &cpu_mask);
 
 #ifdef DEBUG_SMP
        if (!mm)
@@ -348,7 +338,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
                __flush_tlb_page(va);
                local_irq_restore(flags);
        }
-       if (!cpus_empty(cpu_mask))
+       if (!cpumask_empty(&cpu_mask))
                flush_tlb_others(cpu_mask, mm, vma, va);
 
        preempt_enable();
@@ -395,14 +385,14 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
         * - current CPU must not be in mask
         * - mask must exist :)
         */
-       BUG_ON(cpus_empty(cpumask));
+       BUG_ON(cpumask_empty(&cpumask));
 
-       BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+       BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));
        BUG_ON(!mm);
 
        /* If a CPU which we ran on has gone down, OK. */
-       cpus_and(cpumask, cpumask, cpu_online_map);
-       if (cpus_empty(cpumask))
+       cpumask_and(&cpumask, &cpumask, cpu_online_mask);
+       if (cpumask_empty(&cpumask))
                return;
 
        /*
@@ -416,7 +406,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
        flush_mm = mm;
        flush_vma = vma;
        flush_va = va;
-       mask=cpus_addr(cpumask);
+       mask=cpumask_bits(&cpumask);
        atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
 
        /*
@@ -425,7 +415,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
         */
        send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
 
-       while (!cpus_empty(flush_cpumask)) {
+       while (!cpumask_empty((cpumask_t*)&flush_cpumask)) {
                /* nothing. lockup detection does not belong here */
                mb();
        }
@@ -460,7 +450,7 @@ void smp_invalidate_interrupt(void)
        int cpu_id = smp_processor_id();
        unsigned long *mmc = &flush_mm->context[cpu_id];
 
-       if (!cpu_isset(cpu_id, flush_cpumask))
+       if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
                return;
 
        if (flush_va == FLUSH_ALL) {
@@ -478,7 +468,7 @@ void smp_invalidate_interrupt(void)
                        __flush_tlb_page(va);
                }
        }
-       cpu_clear(cpu_id, flush_cpumask);
+       cpumask_clear_cpu(cpu_id, (cpumask_t*)&flush_cpumask);
 }
 
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
@@ -530,7 +520,7 @@ static void stop_this_cpu(void *dummy)
        /*
         * Remove this CPU:
         */
-       cpu_clear(cpu_id, cpu_online_map);
+       set_cpu_online(cpu_id, false);
 
        /*
         * PSW IE = 1;
@@ -725,8 +715,8 @@ static void send_IPI_allbutself(int ipi_num, int try)
 {
        cpumask_t cpumask;
 
-       cpumask = cpu_online_map;
-       cpu_clear(smp_processor_id(), cpumask);
+       cpumask_copy(&cpumask, cpu_online_mask);
+       cpumask_clear_cpu(smp_processor_id(), &cpumask);
 
        send_IPI_mask(&cpumask, ipi_num, try);
 }
@@ -763,13 +753,13 @@ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
        cpumask_and(&tmp, cpumask, cpu_online_mask);
        BUG_ON(!cpumask_equal(cpumask, &tmp));
 
-       physid_mask = CPU_MASK_NONE;
+       cpumask_clear(&physid_mask);
        for_each_cpu(cpu_id, cpumask) {
                if ((phys_id = cpu_to_physid(cpu_id)) != -1)
-                       cpu_set(phys_id, physid_mask);
+                       cpumask_set_cpu(phys_id, &physid_mask);
        }
 
-       send_IPI_mask_phys(physid_mask, ipi_num, try);
+       send_IPI_mask_phys(&physid_mask, ipi_num, try);
 }
 
 /*==========================================================================*
@@ -792,14 +782,14 @@ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
  * ---------- --- --------------------------------------------------------
  *
  *==========================================================================*/
-unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
+unsigned long send_IPI_mask_phys(const cpumask_t *physid_mask, int ipi_num,
        int try)
 {
        spinlock_t *ipilock;
        volatile unsigned long *ipicr_addr;
        unsigned long ipicr_val;
        unsigned long my_physid_mask;
-       unsigned long mask = cpus_addr(physid_mask)[0];
+       unsigned long mask = cpumask_bits(physid_mask)[0];
 
 
        if (mask & ~physids_coerce(phys_cpu_present_map))
index e034844cfc0d948095104ca874d8a9ca02135184..cfdbe5d15002070d1b9f84e3a26656184be5c16d 100644 (file)
@@ -135,9 +135,9 @@ void __devinit smp_prepare_boot_cpu(void)
 {
        bsp_phys_id = hard_smp_processor_id();
        physid_set(bsp_phys_id, phys_cpu_present_map);
-       cpu_set(0, cpu_online_map);     /* BSP's cpu_id == 0 */
-       cpu_set(0, cpu_callout_map);
-       cpu_set(0, cpu_callin_map);
+       set_cpu_online(0, true);        /* BSP's cpu_id == 0 */
+       cpumask_set_cpu(0, &cpu_callout_map);
+       cpumask_set_cpu(0, &cpu_callin_map);
 
        /*
         * Initialize the logical to physical CPU number mapping
@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
        for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
                physid_set(phys_id, phys_cpu_present_map);
 #ifndef CONFIG_HOTPLUG_CPU
-       init_cpu_present(&cpu_possible_map);
+       init_cpu_present(cpu_possible_mask);
 #endif
 
        show_mp_info(nr_cpu);
@@ -294,10 +294,10 @@ static void __init do_boot_cpu(int phys_id)
        send_status = 0;
        boot_status = 0;
 
-       cpu_set(phys_id, cpu_bootout_map);
+       cpumask_set_cpu(phys_id, &cpu_bootout_map);
 
        /* Send Startup IPI */
-       send_IPI_mask_phys(cpumask_of_cpu(phys_id), CPU_BOOT_IPI, 0);
+       send_IPI_mask_phys(cpumask_of(phys_id), CPU_BOOT_IPI, 0);
 
        Dprintk("Waiting for send to finish...\n");
        timeout = 0;
@@ -306,7 +306,7 @@ static void __init do_boot_cpu(int phys_id)
        do {
                Dprintk("+");
                udelay(1000);
-               send_status = !cpu_isset(phys_id, cpu_bootin_map);
+               send_status = !cpumask_test_cpu(phys_id, &cpu_bootin_map);
        } while (send_status && (timeout++ < 100));
 
        Dprintk("After Startup.\n");
@@ -316,19 +316,19 @@ static void __init do_boot_cpu(int phys_id)
                 * allow APs to start initializing.
                 */
                Dprintk("Before Callout %d.\n", cpu_id);
-               cpu_set(cpu_id, cpu_callout_map);
+               cpumask_set_cpu(cpu_id, &cpu_callout_map);
                Dprintk("After Callout %d.\n", cpu_id);
 
                /*
                 * Wait 5s total for a response
                 */
                for (timeout = 0; timeout < 5000; timeout++) {
-                       if (cpu_isset(cpu_id, cpu_callin_map))
+                       if (cpumask_test_cpu(cpu_id, &cpu_callin_map))
                                break;  /* It has booted */
                        udelay(1000);
                }
 
-               if (cpu_isset(cpu_id, cpu_callin_map)) {
+               if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
                        /* number CPUs logically, starting from 1 (BSP is 0) */
                        Dprintk("OK.\n");
                } else {
@@ -340,9 +340,9 @@ static void __init do_boot_cpu(int phys_id)
 
        if (send_status || boot_status) {
                unmap_cpu_to_physid(cpu_id, phys_id);
-               cpu_clear(cpu_id, cpu_callout_map);
-               cpu_clear(cpu_id, cpu_callin_map);
-               cpu_clear(cpu_id, cpu_initialized);
+               cpumask_clear_cpu(cpu_id, &cpu_callout_map);
+               cpumask_clear_cpu(cpu_id, &cpu_callin_map);
+               cpumask_clear_cpu(cpu_id, &cpu_initialized);
                cpucount--;
        }
 }
@@ -351,17 +351,17 @@ int __cpuinit __cpu_up(unsigned int cpu_id)
 {
        int timeout;
 
-       cpu_set(cpu_id, smp_commenced_mask);
+       cpumask_set_cpu(cpu_id, &smp_commenced_mask);
 
        /*
         * Wait 5s total for a response
         */
        for (timeout = 0; timeout < 5000; timeout++) {
-               if (cpu_isset(cpu_id, cpu_online_map))
+               if (cpu_online(cpu_id))
                        break;
                udelay(1000);
        }
-       if (!cpu_isset(cpu_id, cpu_online_map))
+       if (!cpu_online(cpu_id))
                BUG();
 
        return 0;
@@ -373,11 +373,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
        unsigned long bogosum = 0;
 
        for (timeout = 0; timeout < 5000; timeout++) {
-               if (cpus_equal(cpu_callin_map, cpu_online_map))
+               if (cpumask_equal(&cpu_callin_map, cpu_online_mask))
                        break;
                udelay(1000);
        }
-       if (!cpus_equal(cpu_callin_map, cpu_online_map))
+       if (!cpumask_equal(&cpu_callin_map, cpu_online_mask))
                BUG();
 
        for (cpu_id = 0 ; cpu_id < num_online_cpus() ; cpu_id++)
@@ -388,7 +388,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
         */
        Dprintk("Before bogomips.\n");
        if (cpucount) {
-               for_each_cpu_mask(cpu_id, cpu_online_map)
+               for_each_cpu(cpu_id, cpu_online_mask)
                        bogosum += cpu_data[cpu_id].loops_per_jiffy;
 
                printk(KERN_INFO "Total of %d processors activated " \
@@ -425,7 +425,7 @@ int __init start_secondary(void *unused)
        cpu_init();
        preempt_disable();
        smp_callin();
-       while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
+       while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask))
                cpu_relax();
 
        smp_online();
@@ -463,7 +463,7 @@ static void __init smp_callin(void)
        int cpu_id = smp_processor_id();
        unsigned long timeout;
 
-       if (cpu_isset(cpu_id, cpu_callin_map)) {
+       if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) {
                printk("huh, phys CPU#%d, CPU#%d already present??\n",
                        phys_id, cpu_id);
                BUG();
@@ -474,7 +474,7 @@ static void __init smp_callin(void)
        timeout = jiffies + (2 * HZ);
        while (time_before(jiffies, timeout)) {
                /* Has the boot CPU finished it's STARTUP sequence ? */
-               if (cpu_isset(cpu_id, cpu_callout_map))
+               if (cpumask_test_cpu(cpu_id, &cpu_callout_map))
                        break;
                cpu_relax();
        }
@@ -486,7 +486,7 @@ static void __init smp_callin(void)
        }
 
        /* Allow the master to continue. */
-       cpu_set(cpu_id, cpu_callin_map);
+       cpumask_set_cpu(cpu_id, &cpu_callin_map);
 }
 
 static void __init smp_online(void)
@@ -503,7 +503,7 @@ static void __init smp_online(void)
        /* Save our processor parameters */
        smp_store_cpu_info(cpu_id);
 
-       cpu_set(cpu_id, cpu_online_map);
+       set_cpu_online(cpu_id, true);
 }
 
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
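
Note on the m32r hunks above: they move from the old cpus_*/cpu_* operators, which take a cpumask_t by value, to the cpumask_* accessors that take pointers, and to set_cpu_online()/cpu_online_mask. A side-by-side sketch of the most common substitutions (the function is illustrative):

#include <linux/cpumask.h>

/* Build an "everyone but me" mask; old API shown in the trailing comments. */
static void example_build_ipi_mask(struct cpumask *dst, int self)
{
        cpumask_copy(dst, cpu_online_mask);   /* was: *dst = cpu_online_map; */
        cpumask_clear_cpu(self, dst);         /* was: cpu_clear(self, *dst); */

        if (cpumask_empty(dst) ||             /* was: cpus_empty(*dst)       */
            cpumask_test_cpu(self, dst))      /* was: cpu_isset(self, *dst)  */
                return;

        /* ... pass dst to send_IPI_mask() or similar here ... */
}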
index 273bccab9517c719fa893d58c5f6631ba9f1f15e..fc98f9b9d4d2ce417f29b547cad1884e9f7964df 100644 (file)
@@ -2,10 +2,6 @@ config FPU
        bool
        default n
 
-config GENERIC_FIND_NEXT_BIT
-       bool
-       default y
-
 config GENERIC_GPIO
        bool
        default n
index e9020f88a748f4ae5905015eaa300e793a447732..89cf5b814a4d77ced1629524cc7e1a6d5ea481d6 100644 (file)
@@ -200,6 +200,7 @@ out:
        res += ((long)p - (long)vaddr - 4) * 8;
        return res < size ? res : size;
 }
+#define find_first_zero_bit find_first_zero_bit
 
 static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
                                     int offset)
@@ -229,6 +230,7 @@ static inline int find_next_zero_bit(const unsigned long *vaddr, int size,
        /* No zero yet, search remaining full bytes for a zero */
        return offset + find_first_zero_bit(p, size - offset);
 }
+#define find_next_zero_bit find_next_zero_bit
 
 static inline int find_first_bit(const unsigned long *vaddr, unsigned size)
 {
@@ -253,6 +255,7 @@ out:
        res += ((long)p - (long)vaddr - 4) * 8;
        return res < size ? res : size;
 }
+#define find_first_bit find_first_bit
 
 static inline int find_next_bit(const unsigned long *vaddr, int size,
                                int offset)
@@ -282,6 +285,7 @@ static inline int find_next_bit(const unsigned long *vaddr, int size,
        /* No one yet, search remaining full bytes for a one */
        return offset + find_first_bit(p, size - offset);
 }
+#define find_next_bit find_next_bit
 
 /*
  * ffz = Find First Zero in word. Undefined if no zero exists,
@@ -398,6 +402,7 @@ out:
        res += (p - addr) * 32;
        return res < size ? res : size;
 }
+#define find_first_zero_bit_le find_first_zero_bit_le
 
 static inline unsigned long find_next_zero_bit_le(const void *addr,
                unsigned long size, unsigned long offset)
@@ -427,6 +432,7 @@ static inline unsigned long find_next_zero_bit_le(const void *addr,
        /* No zero yet, search remaining full bytes for a zero */
        return offset + find_first_zero_bit_le(p, size - offset);
 }
+#define find_next_zero_bit_le find_next_zero_bit_le
 
 static inline int find_first_bit_le(const void *vaddr, unsigned size)
 {
@@ -451,6 +457,7 @@ out:
        res += (p - addr) * 32;
        return res < size ? res : size;
 }
+#define find_first_bit_le find_first_bit_le
 
 static inline unsigned long find_next_bit_le(const void *addr,
                unsigned long size, unsigned long offset)
@@ -480,6 +487,7 @@ static inline unsigned long find_next_bit_le(const void *addr,
        /* No set bit yet, search remaining full bytes for a set bit */
        return offset + find_first_bit_le(p, size - offset);
 }
+#define find_next_bit_le find_next_bit_le
 
 /* Bitmap functions for the ext2 filesystem. */
 
index 6b0e2d349f0eb2439c0f6663fd643944e3707394..72e85acdd7bd5f6f61f9a071d909a347ea2a5a56 100644 (file)
@@ -319,6 +319,10 @@ found_first:
 found_middle:
        return result + ffz(__swab32(tmp));
 }
+#define find_next_zero_bit_le find_next_zero_bit_le
+
+extern unsigned long find_next_bit_le(const void *addr,
+               unsigned long size, unsigned long offset);
 
 #endif /* __KERNEL__ */
 
index eccdefe70d4e4ba1b4874df00228faff583e948b..e446bab2427bc7c69982190f3e89c0f442752780 100644 (file)
@@ -33,12 +33,6 @@ config ARCH_HAS_ILOG2_U32
 config ARCH_HAS_ILOG2_U64
        def_bool n
 
-config GENERIC_FIND_NEXT_BIT
-       def_bool y
-
-config GENERIC_FIND_BIT_LE
-       def_bool y
-
 config GENERIC_HWEIGHT
        def_bool y
 
index cef1a854487d1fff86c9154aabb86027e5eba2f5..653da62d0682b06bc0df0cf489dc1059a554faec 100644 (file)
@@ -821,14 +821,6 @@ config ARCH_SUPPORTS_OPROFILE
        bool
        default y if !MIPS_MT_SMTC
 
-config GENERIC_FIND_NEXT_BIT
-       bool
-       default y
-
-config GENERIC_FIND_BIT_LE
-       bool
-       default y
-
 config GENERIC_HWEIGHT
        bool
        default y
index 22fdf2f0cc236922976f26a2281b9940589576c8..ad15fb10322b28f8ccf0fbc8d29427ef0606ab93 100644 (file)
@@ -16,7 +16,6 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_AUDIT=y
 CONFIG_TINY_RCU=y
 CONFIG_CGROUPS=y
-CONFIG_CGROUP_NS=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_RELAY=y
 CONFIG_BLK_DEV_INITRD=y
index 294cdb66c5fcb4b6d49ea338fa7ac4b45c1b27cd..3adac3b53d1930172595b7e3b5dbdfc9dd4cf42d 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef __ASM_SUSPEND_H
 #define __ASM_SUSPEND_H
 
-static inline int arch_prepare_suspend(void) { return 0; }
-
 /* References to section boundaries */
 extern const void __nosave_begin, __nosave_end;
 
index feaf09cc86325b2f49ac423e9e9aafff5cf37744..1f870340ebdd9a1dcd0b559d8004dc2165a9eb32 100644 (file)
@@ -44,9 +44,6 @@ config GENERIC_CALIBRATE_DELAY
 config GENERIC_CMOS_UPDATE
         def_bool n
 
-config GENERIC_FIND_NEXT_BIT
-       def_bool y
-
 config GENERIC_HWEIGHT
        def_bool y
 
index 31d76261a3d5b4851066ac3059391c4760a92b9e..fbb96ae3122a6b91323897906f88f1843f850fbf 100644 (file)
@@ -8,7 +8,6 @@ CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_CGROUPS=y
-CONFIG_CGROUP_NS=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
index 69ff049c857127537a89cc1b5c0339bdedea8dd0..65adc86a230e705d64b41b1c69a886268f9a6eb6 100644 (file)
@@ -47,14 +47,6 @@ config ARCH_HAS_ILOG2_U64
        bool
        default n
 
-config GENERIC_FIND_NEXT_BIT
-       bool
-       default y
-
-config GENERIC_FIND_BIT_LE
-       bool
-       default y
-
 config GENERIC_BUG
        bool
        default y
index 423145a6f7ba09da24f8058fe32af6d869a636f7..2729c6663d8a79c42705b9f81828e6ee6d6f5dd0 100644 (file)
@@ -91,14 +91,6 @@ config GENERIC_HWEIGHT
        bool
        default y
 
-config GENERIC_FIND_NEXT_BIT
-       bool
-       default y
-
-config GENERIC_FIND_BIT_LE
-       bool
-       default y
-
 config GENERIC_GPIO
        bool
        help
@@ -141,6 +133,7 @@ config PPC
        select GENERIC_IRQ_SHOW
        select GENERIC_IRQ_SHOW_LEVEL
        select HAVE_RCU_TABLE_FREE if SMP
+       select HAVE_SYSCALL_TRACEPOINTS
 
 config EARLY_PRINTK
        bool
index 2779f08313a5b0c0400a2457e72dc39a6eba1cb0..22dd6ae84da0b08de2cefd98856d1ee4da308edb 100644 (file)
                                0x0 0x0 0x0 0x3 &UIC3 0x12 0x4 /* swizzled int C */
                                0x0 0x0 0x0 0x4 &UIC3 0x13 0x4 /* swizzled int D */>;
                };
+
+               MSI: ppc4xx-msi@C10000000 {
+                       compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
+                       reg = < 0xC 0x10000000 0x100>;
+                       sdr-base = <0x36C>;
+                       msi-data = <0x00000000>;
+                       msi-mask = <0x44440000>;
+                       interrupt-count = <3>;
+                       interrupts = <0 1 2 3>;
+                       interrupt-parent = <&UIC3>;
+                       #interrupt-cells = <1>;
+                       #address-cells = <0>;
+                       #size-cells = <0>;
+                       interrupt-map = <0 &UIC3 0x18 1
+                                       1 &UIC3 0x19 1
+                                       2 &UIC3 0x1A 1
+                                       3 &UIC3 0x1B 1>;
+               };
        };
 };
index 7c3be5e45748e4abdaa2605b70c68fe851c50b21..f913dbe25d35a29dab9391ccbd2e96f203bf8672 100644 (file)
                                0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>;
                };
 
+               MSI: ppc4xx-msi@400300000 {
+                               compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
+                               reg = < 0x4 0x00300000 0x100>;
+                               sdr-base = <0x3B0>;
+                               msi-data = <0x00000000>;
+                               msi-mask = <0x44440000>;
+                               interrupt-count = <3>;
+                               interrupts =<0 1 2 3>;
+                               interrupt-parent = <&UIC0>;
+                               #interrupt-cells = <1>;
+                               #address-cells = <0>;
+                               #size-cells = <0>;
+                               interrupt-map = <0 &UIC0 0xC 1
+                                       1 &UIC0 0x0D 1
+                                       2 &UIC0 0x0E 1
+                                       3 &UIC0 0x0F 1>;
+               };
+
                I2O: i2o@400100000 {
                        compatible = "ibm,i2o-440spe";
                        reg = <0x00000004 0x00100000 0x100>;
index 89edb16649c36609aa2481e97fdf97786ea9872e..1613d6e4049eb2ba22aeceef97e8e57110620713 100644 (file)
                                0x0 0x0 0x0 0x3 &UIC2 0xd 0x4 /* swizzled int C */
                                0x0 0x0 0x0 0x4 &UIC2 0xe 0x4 /* swizzled int D */>;
                };
+
+               MSI: ppc4xx-msi@C10000000 {
+                       compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
+                       reg = < 0x0 0xEF620000 0x100>;
+                       sdr-base = <0x4B0>;
+                       msi-data = <0x00000000>;
+                       msi-mask = <0x44440000>;
+                       interrupt-count = <12>;
+                       interrupts = <0 1 2 3 4 5 6 7 8 9 0xA 0xB 0xC 0xD>;
+                       interrupt-parent = <&UIC2>;
+                       #interrupt-cells = <1>;
+                       #address-cells = <0>;
+                       #size-cells = <0>;
+                       interrupt-map = <0 &UIC2 0x10 1
+                                       1 &UIC2 0x11 1
+                                       2 &UIC2 0x12 1
+                                       2 &UIC2 0x13 1
+                                       2 &UIC2 0x14 1
+                                       2 &UIC2 0x15 1
+                                       2 &UIC2 0x16 1
+                                       2 &UIC2 0x17 1
+                                       2 &UIC2 0x18 1
+                                       2 &UIC2 0x19 1
+                                       2 &UIC2 0x1A 1
+                                       2 &UIC2 0x1B 1
+                                       2 &UIC2 0x1C 1
+                                       3 &UIC2 0x1D 1>;
+               };
        };
 };
index 81636c01d90652d2bc9082a872c4ba1594a0ac5d..d86a3a4981182b5534f30e029c1b8824f3817792 100644 (file)
                                0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>;
                };
 
+               MSI: ppc4xx-msi@400300000 {
+                               compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
+                               reg = < 0x4 0x00300000 0x100
+                                       0x4 0x00300000 0x100>;
+                               sdr-base = <0x3B0>;
+                               msi-data = <0x00000000>;
+                               msi-mask = <0x44440000>;
+                               interrupt-count = <3>;
+                               interrupts =<0 1 2 3>;
+                               interrupt-parent = <&UIC0>;
+                               #interrupt-cells = <1>;
+                               #address-cells = <0>;
+                               #size-cells = <0>;
+                               interrupt-map = <0 &UIC0 0xC 1
+                                       1 &UIC0 0x0D 1
+                                       2 &UIC0 0x0E 1
+                                       3 &UIC0 0x0F 1>;
+               };
+
        };
 
+
        chosen {
                linux,stdout-path = "/plb/opb/serial@ef600200";
        };
index 214208924a9c55be2a010471174461ea4d9aa3ee..04360f9b010939686b142791b7a24607d79e91c4 100644 (file)
@@ -10,7 +10,6 @@ CONFIG_TASK_XACCT=y
 CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_AUDIT=y
 CONFIG_CGROUPS=y
-CONFIG_CGROUP_NS=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_RESOURCE_COUNTERS=y
index 7de13865508c689a76047ccedee63653330aa79d..c9f212b5f3ded98122423e13754ad40fe202e839 100644 (file)
@@ -15,7 +15,6 @@ CONFIG_AUDITSYSCALL=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_CGROUPS=y
-CONFIG_CGROUP_NS=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CPUSETS=y
index 5c1bf3466749563d2ee683b1a7c4dfef0903e0ea..8a0b5ece8f76f8e6152bfe0a7be812426c06ce95 100644 (file)
@@ -157,6 +157,8 @@ struct fsl_lbc_regs {
 #define LBCR_EPAR_SHIFT    16
 #define LBCR_BMT   0x0000FF00
 #define LBCR_BMT_SHIFT      8
+#define LBCR_BMTPS 0x0000000F
+#define LBCR_BMTPS_SHIFT    0
 #define LBCR_INIT  0x00040000
        __be32 lcrr;            /**< Clock Ratio Register */
 #define LCRR_DBYP    0x80000000
index dde1296b8b41a903dd2707f0d9465162efd36fbc..169d039ed402080720aef97c33e20952d28dad12 100644 (file)
@@ -60,4 +60,18 @@ struct dyn_arch_ftrace {
 
 #endif
 
+#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) && !defined(__ASSEMBLY__)
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
+{
+       /*
+        * Compare the symbol name with the system call name: skip the ".sys"
+        * or ".SyS" prefix on the symbol and the "sys" prefix on the syscall
+        * name, then match the rest. This is only needed on ppc64, since
+        * symbol names on 32-bit do not start with a period and the generic
+        * function works there.
+        */
+       return !strcmp(sym + 4, name + 3);
+}
+#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 && !__ASSEMBLY__ */
+
 #endif /* _ASM_POWERPC_FTRACE */
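
The prefix-skipping trick above can be tried in isolation. The following user-space sketch mirrors arch_syscall_match_sym_name(); the symbol and syscall names are illustrative, not taken from the kernel's tables.

#include <stdio.h>
#include <string.h>

/* Mirror of the ppc64 helper: skip ".sys"/".SyS" on the symbol and
 * "sys" on the syscall name, then compare the remainder. */
static int match_sym_name(const char *sym, const char *name)
{
	return !strcmp(sym + 4, name + 3);
}

int main(void)
{
	printf("%d\n", match_sym_name(".sys_read", "sys_read"));	/* 1 */
	printf("%d\n", match_sym_name(".SyS_read", "sys_read"));	/* 1 */
	printf("%d\n", match_sym_name(".sys_read", "sys_write"));	/* 0 */
	return 0;
}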
index 852b8c1c09db72672136347c19f181d194cb8e47..fd8201dddd4b738666b3f6efe7c785b5b50f4632 100644 (file)
 #define H_HOME_NODE_ASSOCIATIVITY 0x2EC
 #define H_BEST_ENERGY          0x2F4
 #define H_GET_MPP_X            0x314
-#define MAX_HCALL_OPCODE       H_BEST_ENERGY
+#define MAX_HCALL_OPCODE       H_GET_MPP_X
 
 #ifndef __ASSEMBLY__
 
index 0018bf80cb25d523562a6754cbec75a7eee63688..d902abd3399506cfcacc056645e68f6e5de3f263 100644 (file)
 #define ASM_PPC_RIO_H
 
 extern void platform_rio_init(void);
+#ifdef CONFIG_RAPIDIO
+extern int fsl_rio_mcheck_exception(struct pt_regs *);
+#else
+static inline int fsl_rio_mcheck_exception(struct pt_regs *regs) { return 0; }
+#endif
 
 #endif                         /* ASM_PPC_RIO_H */
index 880b8c1e6e537d0ee451d253894e96a123bd4a57..11eb404b5606c03c94b68189e3f32cb1859cd3dc 100644 (file)
@@ -191,8 +191,6 @@ extern unsigned long __secondary_hold_spinloop;
 extern unsigned long __secondary_hold_acknowledge;
 extern char __secondary_hold;
 
-extern irqreturn_t debug_ipi_action(int irq, void *data);
-
 #endif /* __ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/suspend.h b/arch/powerpc/include/asm/suspend.h
deleted file mode 100644 (file)
index c6efc34..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_POWERPC_SUSPEND_H
-#define __ASM_POWERPC_SUSPEND_H
-
-static inline int arch_prepare_suspend(void) { return 0; }
-
-#endif /* __ASM_POWERPC_SUSPEND_H */
index 23913e902fc3c50c4307514c159c53db9e7d004d..b54b2add07be99eb67a2a662231443063c8a996e 100644 (file)
 
 #include <linux/sched.h>
 
+/* ftrace syscalls requires exporting the sys_call_table */
+#ifdef CONFIG_FTRACE_SYSCALLS
+extern const unsigned long *sys_call_table;
+#endif /* CONFIG_FTRACE_SYSCALLS */
+
 static inline long syscall_get_nr(struct task_struct *task,
                                  struct pt_regs *regs)
 {
index 37c353e8af7c783fba4b8510e259b714bcc05f55..836f231ec1f0b2ed1301f383b75113ab352ba363 100644 (file)
@@ -110,7 +110,8 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_NOERROR            12      /* Force successful syscall return */
 #define TIF_NOTIFY_RESUME      13      /* callback before returning to user */
 #define TIF_FREEZE             14      /* Freezing for suspend */
-#define TIF_RUNLATCH           15      /* Is the runlatch enabled? */
+#define TIF_SYSCALL_TRACEPOINT 15      /* syscall tracepoint instrumentation */
+#define TIF_RUNLATCH           16      /* Is the runlatch enabled? */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
@@ -127,8 +128,10 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_NOERROR           (1<<TIF_NOERROR)
 #define _TIF_NOTIFY_RESUME     (1<<TIF_NOTIFY_RESUME)
 #define _TIF_FREEZE            (1<<TIF_FREEZE)
+#define _TIF_SYSCALL_TRACEPOINT        (1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_RUNLATCH          (1<<TIF_RUNLATCH)
-#define _TIF_SYSCALL_T_OR_A    (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP)
+#define _TIF_SYSCALL_T_OR_A    (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+                                _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
 
 #define _TIF_USER_WORK_MASK    (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
                                 _TIF_NOTIFY_RESUME)
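
The combined _TIF_SYSCALL_T_OR_A mask exists so the assembly syscall entry path can test every slow-path reason with a single AND. A self-contained sketch of that test follows; the bit positions and messages are placeholders, not the real powerpc values.

#include <stdio.h>

#define TIF_SYSCALL_TRACE	0	/* placeholder bit numbers */
#define TIF_SYSCALL_AUDIT	3
#define TIF_SECCOMP		10
#define TIF_SYSCALL_TRACEPOINT	15

#define _TIF_SYSCALL_T_OR_A	((1UL << TIF_SYSCALL_TRACE) | \
				 (1UL << TIF_SYSCALL_AUDIT) | \
				 (1UL << TIF_SECCOMP) | \
				 (1UL << TIF_SYSCALL_TRACEPOINT))

int main(void)
{
	unsigned long flags = 1UL << TIF_SYSCALL_TRACEPOINT;

	/* Any one bit of the mask is enough to take the slow path. */
	if (flags & _TIF_SYSCALL_T_OR_A)
		printf("slow syscall entry (trace/audit/seccomp/tracepoint)\n");
	else
		printf("fast syscall entry\n");
	return 0;
}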
index 9aab363125724721e94a339413d1c6a5135b70e1..e8b981897d44a268041cc07d7368b1952cd62cfe 100644 (file)
@@ -109,6 +109,7 @@ obj-$(CONFIG_PPC_IO_WORKAROUNDS)    += io-workarounds.o
 
 obj-$(CONFIG_DYNAMIC_FTRACE)   += ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)    += ftrace.o
+obj-$(CONFIG_FTRACE_SYSCALLS)  += ftrace.o
 obj-$(CONFIG_PERF_EVENTS)      += perf_callchain.o
 
 obj-$(CONFIG_PPC_PERF_CTRS)    += perf_event.o
index ce1f3e44c24fabf07408fd3e0ed70919a3828cb3..bf99cfa6bbfe3a29240094eb5fe2db695ed78e66 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/cacheflush.h>
 #include <asm/code-patching.h>
 #include <asm/ftrace.h>
+#include <asm/syscall.h>
 
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -600,3 +601,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
        }
 }
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
+unsigned long __init arch_syscall_addr(int nr)
+{
+       return sys_call_table[nr*2];
+}
+#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */
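
The nr*2 stride above reflects the interleaved ppc64 table layout, where each syscall number owns two consecutive entries (native first, compat second). A small model of that indexing, with invented table contents:

#include <stdio.h>

/* Two entries per syscall number: native entry, then compat entry.
 * The addresses below are placeholders. */
static const unsigned long table_model[] = {
	0x1000, 0x1100,		/* syscall 0 */
	0x2000, 0x2100,		/* syscall 1 */
	0x3000, 0x3100,		/* syscall 2 */
};

static unsigned long native_syscall_addr(int nr)
{
	return table_model[nr * 2];	/* same indexing as arch_syscall_addr() */
}

int main(void)
{
	printf("0x%lx\n", native_syscall_addr(2));	/* 0x3000 */
	return 0;
}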
index a24d37d4cf5158e17ad85421a1bee6d1af14e299..5b428e3086662bcc802f9ce5049ada0c92aefa63 100644 (file)
@@ -295,17 +295,20 @@ static inline void handle_one_irq(unsigned int irq)
        unsigned long saved_sp_limit;
        struct irq_desc *desc;
 
+       desc = irq_to_desc(irq);
+       if (!desc)
+               return;
+
        /* Switch to the irq stack to handle this */
        curtp = current_thread_info();
        irqtp = hardirq_ctx[smp_processor_id()];
 
        if (curtp == irqtp) {
                /* We're already on the irq stack, just handle it */
-               generic_handle_irq(irq);
+               desc->handle_irq(irq, desc);
                return;
        }
 
-       desc = irq_to_desc(irq);
        saved_sp_limit = current->thread.ksp_limit;
 
        irqtp->task = curtp->task;
@@ -557,15 +560,8 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
        if (revmap_type == IRQ_HOST_MAP_LEGACY) {
                if (irq_map[0].host != NULL) {
                        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
-                       /* If we are early boot, we can't free the structure,
-                        * too bad...
-                        * this will be fixed once slab is made available early
-                        * instead of the current cruft
-                        */
-                       if (mem_init_done) {
-                               of_node_put(host->of_node);
-                               kfree(host);
-                       }
+                       of_node_put(host->of_node);
+                       kfree(host);
                        return NULL;
                }
                irq_map[0].host = host;
@@ -727,9 +723,7 @@ unsigned int irq_create_mapping(struct irq_host *host,
        }
        pr_debug("irq: -> using host @%p\n", host);
 
-       /* Check if mapping already exist, if it does, call
-        * host->ops->map() to update the flags
-        */
+       /* Check if mapping already exists */
        virq = irq_find_mapping(host, hwirq);
        if (virq != NO_IRQ) {
                pr_debug("irq: -> existing mapping on virq %d\n", virq);
@@ -899,10 +893,13 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host,
                return irq_find_mapping(host, hwirq);
 
        /*
-        * No rcu_read_lock(ing) needed, the ptr returned can't go under us
-        * as it's referencing an entry in the static irq_map table.
+        * The ptr returned references the static global irq_map,
+        * but freeing an irq can delete radix tree nodes along the
+        * lookup path via call_rcu, so guard the lookup with the
+        * RCU read lock.
         */
+       rcu_read_lock();
        ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
+       rcu_read_unlock();
 
        /*
         * If found in radix tree, then fine.
@@ -1010,14 +1007,23 @@ void irq_free_virt(unsigned int virq, unsigned int count)
        WARN_ON (virq < NUM_ISA_INTERRUPTS);
        WARN_ON (count == 0 || (virq + count) > irq_virq_count);
 
+       if (virq < NUM_ISA_INTERRUPTS) {
+               if (virq + count < NUM_ISA_INTERRUPTS)
+                       return;
+               count -= NUM_ISA_INTERRUPTS - virq;
+               virq = NUM_ISA_INTERRUPTS;
+       }
+
+       if (count > irq_virq_count || virq > irq_virq_count - count) {
+               if (virq > irq_virq_count)
+                       return;
+               count = irq_virq_count - virq;
+       }
+
        raw_spin_lock_irqsave(&irq_big_lock, flags);
        for (i = virq; i < (virq + count); i++) {
                struct irq_host *host;
 
-               if (i < NUM_ISA_INTERRUPTS ||
-                   (virq + count) > irq_virq_count)
-                       continue;
-
                host = irq_map[i].host;
                irq_map[i].hwirq = host->inval_irq;
                smp_wmb();
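
With the -= fix above, the new code clamps the requested range to the freeable window before walking the map. The clamping on its own, as a user-space sketch with illustrative limits:

#include <stdio.h>

#define NUM_ISA_INTERRUPTS	16	/* illustrative */
#define IRQ_VIRQ_COUNT		512	/* illustrative */

/* Trim [virq, virq + count) to the legal range, as irq_free_virt() now
 * does; returns 0 when nothing remains to free. */
static int clamp_range(unsigned int *virq, unsigned int *count)
{
	if (*virq < NUM_ISA_INTERRUPTS) {
		if (*virq + *count < NUM_ISA_INTERRUPTS)
			return 0;
		*count -= NUM_ISA_INTERRUPTS - *virq;
		*virq = NUM_ISA_INTERRUPTS;
	}
	if (*count > IRQ_VIRQ_COUNT || *virq > IRQ_VIRQ_COUNT - *count) {
		if (*virq > IRQ_VIRQ_COUNT)
			return 0;
		*count = IRQ_VIRQ_COUNT - *virq;
	}
	return 1;
}

int main(void)
{
	unsigned int virq = 10, count = 20;

	if (clamp_range(&virq, &count))
		printf("free virqs %u..%u\n", virq, virq + count - 1);
	return 0;
}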
index a6ae1cfad86ce86040553fa94346ad2043c3a0bb..cb22024f2b42a189d9848978ac3dafd1c611f19f 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/signal.h>
 #include <linux/seccomp.h>
 #include <linux/audit.h>
+#include <trace/syscall.h>
 #ifdef CONFIG_PPC32
 #include <linux/module.h>
 #endif
@@ -40,6 +41,9 @@
 #include <asm/pgtable.h>
 #include <asm/system.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
 /*
  * The parameter save area on the stack is used to store arguments being passed
  * to the callee function and is located at a fixed offset from the stack pointer.
@@ -1710,6 +1714,9 @@ long do_syscall_trace_enter(struct pt_regs *regs)
                 */
                ret = -1L;
 
+       if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+               trace_sys_enter(regs, regs->gpr[0]);
+
        if (unlikely(current->audit_context)) {
 #ifdef CONFIG_PPC64
                if (!is_32bit_task())
@@ -1738,6 +1745,9 @@ void do_syscall_trace_leave(struct pt_regs *regs)
                audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
                                   regs->result);
 
+       if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+               trace_sys_exit(regs, regs->result);
+
        step = test_thread_flag(TIF_SINGLESTEP);
        if (step || test_thread_flag(TIF_SYSCALL_TRACE))
                tracehook_report_syscall_exit(regs, step);
index 4a6f2ec7e761d89e07bc73967eef323fa77788de..8ebc6700b98d18769f6e8afd1eb54f9f0363ec58 100644 (file)
@@ -129,7 +129,7 @@ static irqreturn_t call_function_single_action(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-irqreturn_t debug_ipi_action(int irq, void *data)
+static irqreturn_t debug_ipi_action(int irq, void *data)
 {
        if (crash_ipi_function_ptr) {
                crash_ipi_function_ptr(get_irq_regs());
index 560c961195015d39b0f62296f48413f27f394f0f..aa17b76dd42799375ce954490526b00e5262de06 100644 (file)
@@ -10,7 +10,6 @@
  */
 
 #include <linux/sched.h>
-#include <asm/suspend.h>
 #include <asm/system.h>
 #include <asm/current.h>
 #include <asm/mmu_context.h>
index b13306b0d9259ffa27b5d9d906e9ece0e9a42de9..0ff4ab98d50ca713f1c8a983349aa2369fcad22e 100644 (file)
@@ -55,6 +55,7 @@
 #endif
 #include <asm/kexec.h>
 #include <asm/ppc-opcode.h>
+#include <asm/rio.h>
 
 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
 int (*__debugger)(struct pt_regs *regs) __read_mostly;
@@ -424,6 +425,12 @@ int machine_check_e500mc(struct pt_regs *regs)
        unsigned long reason = mcsr;
        int recoverable = 1;
 
+       if (reason & MCSR_BUS_RBERR) {
+               recoverable = fsl_rio_mcheck_exception(regs);
+               if (recoverable == 1)
+                       goto silent_out;
+       }
+
        printk("Machine check in kernel mode.\n");
        printk("Caused by (from MCSR=%lx): ", reason);
 
@@ -499,6 +506,7 @@ int machine_check_e500mc(struct pt_regs *regs)
                       reason & MCSR_MEA ? "Effective" : "Physical", addr);
        }
 
+silent_out:
        mtspr(SPRN_MCSR, mcsr);
        return mfspr(SPRN_MCSR) == 0 && recoverable;
 }
@@ -507,6 +515,11 @@ int machine_check_e500(struct pt_regs *regs)
 {
        unsigned long reason = get_mc_reason(regs);
 
+       if (reason & MCSR_BUS_RBERR) {
+               if (fsl_rio_mcheck_exception(regs))
+                       return 1;
+       }
+
        printk("Machine check in kernel mode.\n");
        printk("Caused by (from MCSR=%lx): ", reason);
 
index 8ee51a252cf1b28bb11c1f023b395067e0b90db4..e6bec74be131246b5ee3d1bce119d9a01c97f51d 100644 (file)
@@ -261,6 +261,28 @@ static int get_kernel(unsigned long pc, unsigned long mmcra)
        return is_kernel;
 }
 
+static bool pmc_overflow(unsigned long val)
+{
+       if ((int)val < 0)
+               return true;
+
+       /*
+        * Events on POWER7 can roll back if a speculative event doesn't
+        * eventually complete. Unfortunately in some rare cases they will
+        * raise a performance monitor exception. We need to catch this to
+        * ensure we reset the PMC. In all cases the PMC will be 256 or less
+        * cycles from overflow.
+        *
+        * We only do this if the first pass fails to find any overflowing
+        * PMCs because a user might set a period of less than 256 and we
+        * don't want to mistakenly reset them.
+        */
+       if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
+               return true;
+
+       return false;
+}
+
 static void power4_handle_interrupt(struct pt_regs *regs,
                                    struct op_counter_config *ctr)
 {
@@ -281,7 +303,7 @@ static void power4_handle_interrupt(struct pt_regs *regs,
 
        for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
                val = classic_ctr_read(i);
-               if (val < 0) {
+               if (pmc_overflow(val)) {
                        if (oprofile_running && ctr[i].enabled) {
                                oprofile_add_ext_sample(pc, regs, i, is_kernel);
                                classic_ctr_write(i, reset_value[i]);
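
The two conditions in pmc_overflow() above can be exercised with plain numbers. This sketch drops the __is_processor(PV_POWER7) check and just shows the arithmetic; the sample counter values are made up.

#include <stdio.h>
#include <stdbool.h>

static bool pmc_overflow_model(unsigned long val)
{
	if ((int)val < 0)			/* counter already wrapped */
		return true;
	if ((0x80000000 - val) <= 256)		/* within 256 cycles of wrapping */
		return true;
	return false;
}

int main(void)
{
	printf("%d\n", pmc_overflow_model(0x80000001UL));	/* 1 */
	printf("%d\n", pmc_overflow_model(0x7fffff80UL));	/* 1 */
	printf("%d\n", pmc_overflow_model(0x10000000UL));	/* 0 */
	return 0;
}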
index b72176434ebe1aa62f7246bf1661ecb84e89f1fd..d733d7ca939c476e69be1994277460b216138175 100644 (file)
@@ -57,6 +57,8 @@ config KILAUEA
        select 405EX
        select PPC40x_SIMPLE
        select PPC4xx_PCI_EXPRESS
+       select PCI_MSI
+       select PPC4xx_MSI
        help
          This option enables support for the AMCC PPC405EX evaluation board.
 
index f485fc5f6d5e64a37913592701ca51d37381d6f7..e958b6f48ec283073e678bb8fdf815ae3bdcdea0 100644 (file)
@@ -74,6 +74,8 @@ config KATMAI
        select 440SPe
        select PCI
        select PPC4xx_PCI_EXPRESS
+       select PCI_MSI
+       select PPC4xx_MSI
        help
          This option enables support for the AMCC PPC440SPe evaluation board.
 
@@ -118,6 +120,8 @@ config CANYONLANDS
        select 460EX
        select PCI
        select PPC4xx_PCI_EXPRESS
+       select PCI_MSI
+       select PPC4xx_MSI
        select IBM_NEW_EMAC_RGMII
        select IBM_NEW_EMAC_ZMII
        help
@@ -144,6 +148,8 @@ config REDWOOD
        select 460SX
        select PCI
        select PPC4xx_PCI_EXPRESS
+       select PCI_MSI
+       select PPC4xx_MSI
        help
          This option enables support for the AMCC PPC460SX Redwood board.
 
index 449c08c1586231b958b968128e5707125440163c..3e4eba603e6b6162fbbdb71679f2f37656dbd40a 100644 (file)
@@ -176,14 +176,14 @@ EXPORT_SYMBOL_GPL(iic_get_target_id);
 #ifdef CONFIG_SMP
 
 /* Use the highest interrupt priorities for IPI */
-static inline int iic_ipi_to_irq(int ipi)
+static inline int iic_msg_to_irq(int msg)
 {
-       return IIC_IRQ_TYPE_IPI + 0xf - ipi;
+       return IIC_IRQ_TYPE_IPI + 0xf - msg;
 }
 
-void iic_cause_IPI(int cpu, int mesg)
+void iic_message_pass(int cpu, int msg)
 {
-       out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - mesg) << 4);
+       out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4);
 }
 
 struct irq_host *iic_get_irq_host(int node)
@@ -192,50 +192,31 @@ struct irq_host *iic_get_irq_host(int node)
 }
 EXPORT_SYMBOL_GPL(iic_get_irq_host);
 
-static irqreturn_t iic_ipi_action(int irq, void *dev_id)
-{
-       int ipi = (int)(long)dev_id;
-
-       switch(ipi) {
-       case PPC_MSG_CALL_FUNCTION:
-               generic_smp_call_function_interrupt();
-               break;
-       case PPC_MSG_RESCHEDULE:
-               scheduler_ipi();
-               break;
-       case PPC_MSG_CALL_FUNC_SINGLE:
-               generic_smp_call_function_single_interrupt();
-               break;
-       case PPC_MSG_DEBUGGER_BREAK:
-               debug_ipi_action(0, NULL);
-               break;
-       }
-       return IRQ_HANDLED;
-}
-static void iic_request_ipi(int ipi, const char *name)
+static void iic_request_ipi(int msg)
 {
        int virq;
 
-       virq = irq_create_mapping(iic_host, iic_ipi_to_irq(ipi));
+       virq = irq_create_mapping(iic_host, iic_msg_to_irq(msg));
        if (virq == NO_IRQ) {
                printk(KERN_ERR
-                      "iic: failed to map IPI %s\n", name);
+                      "iic: failed to map IPI %s\n", smp_ipi_name[msg]);
                return;
        }
-       if (request_irq(virq, iic_ipi_action, IRQF_DISABLED, name,
-                       (void *)(long)ipi))
-               printk(KERN_ERR
-                      "iic: failed to request IPI %s\n", name);
+
+       /*
+        * If smp_request_message_ipi encounters an error it will notify
+        * the error.  If a message is not needed it will return non-zero.
+        */
+       if (smp_request_message_ipi(virq, msg))
+               irq_dispose_mapping(virq);
 }
 
 void iic_request_IPIs(void)
 {
-       iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call");
-       iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched");
-       iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE, "IPI-call-single");
-#ifdef CONFIG_DEBUGGER
-       iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug");
-#endif /* CONFIG_DEBUGGER */
+       iic_request_ipi(PPC_MSG_CALL_FUNCTION);
+       iic_request_ipi(PPC_MSG_RESCHEDULE);
+       iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE);
+       iic_request_ipi(PPC_MSG_DEBUGGER_BREAK);
 }
 
 #endif /* CONFIG_SMP */
index 942dc39d604559f4d1d58da68759a280dad968e7..4f60ae6ca358aa769fdd1976960d126a51a4ba9c 100644 (file)
@@ -75,7 +75,7 @@ enum {
 };
 
 extern void iic_init_IRQ(void);
-extern void iic_cause_IPI(int cpu, int mesg);
+extern void iic_message_pass(int cpu, int msg);
 extern void iic_request_IPIs(void);
 extern void iic_setup_cpu(void);
 
index d176e6148e3f2597a872ee572cf9512226a10c07..dbb641ea90dd6105324ad53844cee8b800c594ce 100644 (file)
@@ -152,7 +152,7 @@ static int smp_cell_cpu_bootable(unsigned int nr)
        return 1;
 }
 static struct smp_ops_t bpa_iic_smp_ops = {
-       .message_pass   = iic_cause_IPI,
+       .message_pass   = iic_message_pass,
        .probe          = smp_iic_probe,
        .kick_cpu       = smp_cell_kick_cpu,
        .setup_cpu      = smp_cell_setup_cpu,
index d775fd148d13760b9b3a9f97a31049a0734c00af..7b4df37ac381edab99725c0ad9a4037d229d369f 100644 (file)
@@ -7,11 +7,18 @@ config PPC4xx_PCI_EXPRESS
        depends on PCI && 4xx
        default n
 
+config PPC4xx_MSI
+       bool
+       depends on PCI_MSI
+       depends on PCI && 4xx
+       default n
+
 config PPC_MSI_BITMAP
        bool
        depends on PCI_MSI
        default y if MPIC
        default y if FSL_PCI
+       default y if PPC4xx_MSI
 
 source "arch/powerpc/sysdev/xics/Kconfig"
 
index 6076e0074a870497ae138e7674958f605d3fc11b..0efa990e3344947d74df4c2806187fd1a0d2b7b7 100644 (file)
@@ -41,6 +41,7 @@ obj-$(CONFIG_OF_RTC)          += of_rtc.o
 ifeq ($(CONFIG_PCI),y)
 obj-$(CONFIG_4xx)              += ppc4xx_pci.o
 endif
+obj-$(CONFIG_PPC4xx_MSI)       += ppc4xx_msi.o
 obj-$(CONFIG_PPC4xx_CPM)       += ppc4xx_cpm.o
 obj-$(CONFIG_PPC4xx_GPIO)      += ppc4xx_gpio.o
 
index 4fcb5a4e60dddaeca9143a79c2ebb61a581d3be6..0608b1657da41f511ceca7a5bb4ef677ee14fbad 100644 (file)
@@ -184,7 +184,8 @@ int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, u32 mar)
 }
 EXPORT_SYMBOL(fsl_upm_run_pattern);
 
-static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl)
+static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl,
+                                      struct device_node *node)
 {
        struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
 
@@ -198,6 +199,10 @@ static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl)
        /* Enable interrupts for any detected events */
        out_be32(&lbc->lteir, LTEIR_ENABLE);
 
+       /* Set the monitor timeout value to the maximum for erratum A001 */
+       if (of_device_is_compatible(node, "fsl,elbc"))
+               clrsetbits_be32(&lbc->lbcr, LBCR_BMT, LBCR_BMTPS);
+
        return 0;
 }
 
@@ -304,7 +309,7 @@ static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev)
 
        fsl_lbc_ctrl_dev->dev = &dev->dev;
 
-       ret = fsl_lbc_ctrl_init(fsl_lbc_ctrl_dev);
+       ret = fsl_lbc_ctrl_init(fsl_lbc_ctrl_dev, dev->dev.of_node);
        if (ret < 0)
                goto err;
 
index 49798532b477b8630c77a203dfb76c62ad3caf46..5b206a2fe17c47653632e516051984746ad2d587 100644 (file)
@@ -10,7 +10,7 @@
  * - Added Port-Write message handling
  * - Added Machine Check exception handling
  *
- * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc.
+ * Copyright (C) 2007, 2008, 2010 Freescale Semiconductor, Inc.
  * Zhang Wei <wei.zhang@freescale.com>
  *
  * Copyright 2005 MontaVista Software, Inc.
 #define IRQ_RIO_RX(m)          (((struct rio_priv *)(m->priv))->rxirq)
 #define IRQ_RIO_PW(m)          (((struct rio_priv *)(m->priv))->pwirq)
 
+#define IPWSR_CLEAR            0x98
+#define OMSR_CLEAR             0x1cb3
+#define IMSR_CLEAR             0x491
+#define IDSR_CLEAR             0x91
+#define ODSR_CLEAR             0x1c00
+#define LTLEECSR_ENABLE_ALL    0xFFC000FC
+#define ESCSR_CLEAR            0x07120204
+
+#define RIO_PORT1_EDCSR                0x0640
+#define RIO_PORT2_EDCSR                0x0680
+#define RIO_PORT1_IECSR                0x10130
+#define RIO_PORT2_IECSR                0x101B0
+#define RIO_IM0SR              0x13064
+#define RIO_IM1SR              0x13164
+#define RIO_OM0SR              0x13004
+#define RIO_OM1SR              0x13104
+
 #define RIO_ATMU_REGS_OFFSET   0x10c00
 #define RIO_P_MSG_REGS_OFFSET  0x11000
 #define RIO_S_MSG_REGS_OFFSET  0x13000
 #define RIO_GCCSR              0x13c
 #define RIO_ESCSR              0x158
+#define RIO_PORT2_ESCSR                0x178
 #define RIO_CCSR               0x15c
 #define RIO_LTLEDCSR           0x0608
-#define  RIO_LTLEDCSR_IER      0x80000000
-#define  RIO_LTLEDCSR_PRT      0x01000000
+#define RIO_LTLEDCSR_IER       0x80000000
+#define RIO_LTLEDCSR_PRT       0x01000000
 #define RIO_LTLEECSR           0x060c
 #define RIO_EPWISR             0x10010
 #define RIO_ISR_AACR           0x10120
 #define RIO_IPWSR_PWD          0x00000008
 #define RIO_IPWSR_PWB          0x00000004
 
-#define RIO_EPWISR_PINT                0x80000000
+/* EPWISR Error match value */
+#define RIO_EPWISR_PINT1       0x80000000
+#define RIO_EPWISR_PINT2       0x40000000
+#define RIO_EPWISR_MU          0x00000002
 #define RIO_EPWISR_PW          0x00000001
 
 #define RIO_MSG_DESC_SIZE      32
@@ -260,9 +281,7 @@ struct rio_priv {
 static void __iomem *rio_regs_win;
 
 #ifdef CONFIG_E500
-static int (*saved_mcheck_exception)(struct pt_regs *regs);
-
-static int fsl_rio_mcheck_exception(struct pt_regs *regs)
+int fsl_rio_mcheck_exception(struct pt_regs *regs)
 {
        const struct exception_table_entry *entry = NULL;
        unsigned long reason = mfspr(SPRN_MCSR);
@@ -284,11 +303,9 @@ static int fsl_rio_mcheck_exception(struct pt_regs *regs)
                }
        }
 
-       if (saved_mcheck_exception)
-               return saved_mcheck_exception(regs);
-       else
-               return cur_cpu_spec->machine_check(regs);
+       return 0;
 }
+EXPORT_SYMBOL_GPL(fsl_rio_mcheck_exception);
 #endif
 
 /**
@@ -1064,6 +1081,40 @@ static int fsl_rio_doorbell_init(struct rio_mport *mport)
        return rc;
 }
 
+static void port_error_handler(struct rio_mport *port, int offset)
+{
+       /* XXX: Error recovery is not implemented; we just clear the errors */
+       out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
+
+       if (offset == 0) {
+               out_be32((u32 *)(rio_regs_win + RIO_PORT1_EDCSR), 0);
+               out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), 0);
+               out_be32((u32 *)(rio_regs_win + RIO_ESCSR), ESCSR_CLEAR);
+       } else {
+               out_be32((u32 *)(rio_regs_win + RIO_PORT2_EDCSR), 0);
+               out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), 0);
+               out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR);
+       }
+}
+
+static void msg_unit_error_handler(struct rio_mport *port)
+{
+       struct rio_priv *priv = port->priv;
+
+       /* XXX: Error recovery is not implemented; we just clear the errors */
+       out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0);
+
+       out_be32((u32 *)(rio_regs_win + RIO_IM0SR), IMSR_CLEAR);
+       out_be32((u32 *)(rio_regs_win + RIO_IM1SR), IMSR_CLEAR);
+       out_be32((u32 *)(rio_regs_win + RIO_OM0SR), OMSR_CLEAR);
+       out_be32((u32 *)(rio_regs_win + RIO_OM1SR), OMSR_CLEAR);
+
+       out_be32(&priv->msg_regs->odsr, ODSR_CLEAR);
+       out_be32(&priv->msg_regs->dsr, IDSR_CLEAR);
+
+       out_be32(&priv->msg_regs->pwsr, IPWSR_CLEAR);
+}
+
 /**
  * fsl_rio_port_write_handler - MPC85xx port write interrupt handler
  * @irq: Linux interrupt number
@@ -1144,10 +1195,22 @@ fsl_rio_port_write_handler(int irq, void *dev_instance)
        }
 
 pw_done:
-       if (epwisr & RIO_EPWISR_PINT) {
+       if (epwisr & RIO_EPWISR_PINT1) {
+               tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
+               pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
+               port_error_handler(port, 0);
+       }
+
+       if (epwisr & RIO_EPWISR_PINT2) {
                tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
                pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
-               out_be32(priv->regs_win + RIO_LTLEDCSR, 0);
+               port_error_handler(port, 1);
+       }
+
+       if (epwisr & RIO_EPWISR_MU) {
+               tmp = in_be32(priv->regs_win + RIO_LTLEDCSR);
+               pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp);
+               msg_unit_error_handler(port);
        }
 
        return IRQ_HANDLED;
@@ -1258,12 +1321,14 @@ static int fsl_rio_port_write_init(struct rio_mport *mport)
 
 
        /* Hook up port-write handler */
-       rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, 0,
-                        "port-write", (void *)mport);
+       rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler,
+                       IRQF_SHARED, "port-write", (void *)mport);
        if (rc < 0) {
                pr_err("MPC85xx RIO: unable to request inbound doorbell irq");
                goto err_out;
        }
+       /* Enable Error Interrupt */
+       out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL);
 
        INIT_WORK(&priv->pw_work, fsl_pw_dpc);
        spin_lock_init(&priv->pw_fifo_lock);
@@ -1538,11 +1603,6 @@ int fsl_rio_setup(struct platform_device *dev)
        fsl_rio_doorbell_init(port);
        fsl_rio_port_write_init(port);
 
-#ifdef CONFIG_E500
-       saved_mcheck_exception = ppc_md.machine_check_exception;
-       ppc_md.machine_check_exception = fsl_rio_mcheck_exception;
-#endif
-
        return 0;
 err:
        iounmap(priv->regs_win);
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
new file mode 100644 (file)
index 0000000..367af02
--- /dev/null
@@ -0,0 +1,276 @@
+/*
+ * Adding PCI-E MSI support for PPC4XX SoCs.
+ *
+ * Copyright (c) 2010, Applied Micro Circuits Corporation
+ * Authors:    Tirumala R Marri <tmarri@apm.com>
+ *             Feng Kan <fkan@apm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <linux/irq.h>
+#include <linux/bootmem.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <asm/prom.h>
+#include <asm/hw_irq.h>
+#include <asm/ppc-pci.h>
+#include <boot/dcr.h>
+#include <asm/dcr-regs.h>
+#include <asm/msi_bitmap.h>
+
+#define PEIH_TERMADH   0x00
+#define PEIH_TERMADL   0x08
+#define PEIH_MSIED     0x10
+#define PEIH_MSIMK     0x18
+#define PEIH_MSIASS    0x20
+#define PEIH_FLUSH0    0x30
+#define PEIH_FLUSH1    0x38
+#define PEIH_CNTRST    0x48
+#define NR_MSI_IRQS    4
+
+struct ppc4xx_msi {
+       u32 msi_addr_lo;
+       u32 msi_addr_hi;
+       void __iomem *msi_regs;
+       int msi_virqs[NR_MSI_IRQS];
+       struct msi_bitmap bitmap;
+       struct device_node *msi_dev;
+};
+
+static struct ppc4xx_msi ppc4xx_msi;
+
+static int ppc4xx_msi_init_allocator(struct platform_device *dev,
+               struct ppc4xx_msi *msi_data)
+{
+       int err;
+
+       err = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS,
+                             dev->dev.of_node);
+       if (err)
+               return err;
+
+       err = msi_bitmap_reserve_dt_hwirqs(&msi_data->bitmap);
+       if (err < 0) {
+               msi_bitmap_free(&msi_data->bitmap);
+               return err;
+       }
+
+       return 0;
+}
+
+static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+       int int_no = -ENOMEM;
+       unsigned int virq;
+       struct msi_msg msg;
+       struct msi_desc *entry;
+       struct ppc4xx_msi *msi_data = &ppc4xx_msi;
+
+       list_for_each_entry(entry, &dev->msi_list, list) {
+               int_no = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
+               if (int_no >= 0)
+                       break;
+               if (int_no < 0) {
+                       pr_debug("%s: fail allocating msi interrupt\n",
+                                       __func__);
+               }
+               virq = irq_of_parse_and_map(msi_data->msi_dev, int_no);
+               if (virq == NO_IRQ) {
+                       dev_err(&dev->dev, "%s: fail mapping irq\n", __func__);
+                       msi_bitmap_free_hwirqs(&msi_data->bitmap, int_no, 1);
+                       return -ENOSPC;
+               }
+               dev_dbg(&dev->dev, "%s: virq = %d\n", __func__, virq);
+
+               /* Setup msi address space */
+               msg.address_hi = msi_data->msi_addr_hi;
+               msg.address_lo = msi_data->msi_addr_lo;
+
+               irq_set_msi_desc(virq, entry);
+               msg.data = int_no;
+               write_msi_msg(virq, &msg);
+       }
+       return 0;
+}
+
+void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
+{
+       struct msi_desc *entry;
+       struct ppc4xx_msi *msi_data = &ppc4xx_msi;
+
+       dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");
+
+       list_for_each_entry(entry, &dev->msi_list, list) {
+               if (entry->irq == NO_IRQ)
+                       continue;
+               irq_set_msi_desc(entry->irq, NULL);
+               msi_bitmap_free_hwirqs(&msi_data->bitmap,
+                               virq_to_hw(entry->irq), 1);
+               irq_dispose_mapping(entry->irq);
+       }
+}
+
+static int ppc4xx_msi_check_device(struct pci_dev *pdev, int nvec, int type)
+{
+       dev_dbg(&pdev->dev, "PCIE-MSI:%s called. vec %x type %d\n",
+               __func__, nvec, type);
+       if (type == PCI_CAP_ID_MSIX)
+               pr_debug("ppc4xx msi: MSI-X untested, trying anyway.\n");
+
+       return 0;
+}
+
+static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
+                                struct resource res, struct ppc4xx_msi *msi)
+{
+       const u32 *msi_data;
+       const u32 *msi_mask;
+       const u32 *sdr_addr;
+       dma_addr_t msi_phys;
+       void *msi_virt;
+
+       sdr_addr = of_get_property(dev->dev.of_node, "sdr-base", NULL);
+       if (!sdr_addr)
+               return -1;
+
+       SDR0_WRITE(sdr_addr, (u64)res.start >> 32);      /*HIGH addr */
+       SDR0_WRITE(sdr_addr + 1, res.start & 0xFFFFFFFF); /* Low addr */
+
+
+       msi->msi_dev = of_find_node_by_name(NULL, "ppc4xx-msi");
+       if (!msi->msi_dev)
+               return -ENODEV;
+
+       msi->msi_regs = of_iomap(msi->msi_dev, 0);
+       if (!msi->msi_regs) {
+               dev_err(&dev->dev, "of_iomap problem failed\n");
+               return -ENOMEM;
+       }
+       dev_dbg(&dev->dev, "PCIE-MSI: msi register mapped 0x%x 0x%x\n",
+               (u32) (msi->msi_regs + PEIH_TERMADH), (u32) (msi->msi_regs));
+
+       msi_virt = dma_alloc_coherent(&dev->dev, 64, &msi_phys, GFP_KERNEL);
+       msi->msi_addr_hi = 0x0;
+       msi->msi_addr_lo = (u32) msi_phys;
+       dev_dbg(&dev->dev, "PCIE-MSI: msi address 0x%x\n", msi->msi_addr_lo);
+
+       /* Program the interrupt handler termination address registers */
+       out_be32(msi->msi_regs + PEIH_TERMADH, msi->msi_addr_hi);
+       out_be32(msi->msi_regs + PEIH_TERMADL, msi->msi_addr_lo);
+
+       msi_data = of_get_property(dev->dev.of_node, "msi-data", NULL);
+       if (!msi_data)
+               return -1;
+       msi_mask = of_get_property(dev->dev.of_node, "msi-mask", NULL);
+       if (!msi_mask)
+               return -1;
+       /* Program MSI Expected data and Mask bits */
+       out_be32(msi->msi_regs + PEIH_MSIED, *msi_data);
+       out_be32(msi->msi_regs + PEIH_MSIMK, *msi_mask);
+
+       return 0;
+}
+
+static int ppc4xx_of_msi_remove(struct platform_device *dev)
+{
+       struct ppc4xx_msi *msi = dev->dev.platform_data;
+       int i;
+       int virq;
+
+       for (i = 0; i < NR_MSI_IRQS; i++) {
+               virq = msi->msi_virqs[i];
+               if (virq != NO_IRQ)
+                       irq_dispose_mapping(virq);
+       }
+
+       if (msi->bitmap.bitmap)
+               msi_bitmap_free(&msi->bitmap);
+       iounmap(msi->msi_regs);
+       of_node_put(msi->msi_dev);
+       kfree(msi);
+
+       return 0;
+}
+
+static int __devinit ppc4xx_msi_probe(struct platform_device *dev)
+{
+       struct ppc4xx_msi *msi;
+       struct resource res;
+       int err = 0;
+
+       msi = &ppc4xx_msi;  /* keep the msi data for further use */
+
+       dev_dbg(&dev->dev, "PCIE-MSI: Setting up MSI support...\n");
+
+       msi = kzalloc(sizeof(struct ppc4xx_msi), GFP_KERNEL);
+       if (!msi) {
+               dev_err(&dev->dev, "No memory for MSI structure\n");
+               return -ENOMEM;
+       }
+       dev->dev.platform_data = msi;
+
+       /* Get MSI ranges */
+       err = of_address_to_resource(dev->dev.of_node, 0, &res);
+       if (err) {
+               dev_err(&dev->dev, "%s resource error!\n",
+                       dev->dev.of_node->full_name);
+               goto error_out;
+       }
+
+       if (ppc4xx_setup_pcieh_hw(dev, res, msi))
+               goto error_out;
+
+       err = ppc4xx_msi_init_allocator(dev, msi);
+       if (err) {
+               dev_err(&dev->dev, "Error allocating MSI bitmap\n");
+               goto error_out;
+       }
+
+       ppc_md.setup_msi_irqs = ppc4xx_setup_msi_irqs;
+       ppc_md.teardown_msi_irqs = ppc4xx_teardown_msi_irqs;
+       ppc_md.msi_check_device = ppc4xx_msi_check_device;
+       return err;
+
+error_out:
+       ppc4xx_of_msi_remove(dev);
+       return err;
+}
+static const struct of_device_id ppc4xx_msi_ids[] = {
+       {
+               .compatible = "amcc,ppc4xx-msi",
+       },
+       {}
+};
+static struct platform_driver ppc4xx_msi_driver = {
+       .probe = ppc4xx_msi_probe,
+       .remove = ppc4xx_of_msi_remove,
+       .driver = {
+                  .name = "ppc4xx-msi",
+                  .owner = THIS_MODULE,
+                  .of_match_table = ppc4xx_msi_ids,
+                  },
+
+};
+
+static __init int ppc4xx_msi_init(void)
+{
+       return platform_driver_register(&ppc4xx_msi_driver);
+}
+
+subsys_initcall(ppc4xx_msi_init);
index ff2d2371b2e92a7dcf931bc0bfcd43a76589dedb..9fab2aa9c2c80d0bbf1fbf0f943b377de3583118 100644 (file)
@@ -2,7 +2,7 @@ config MMU
        def_bool y
 
 config ZONE_DMA
-       def_bool y if 64BIT
+       def_bool y
 
 config LOCKDEP_SUPPORT
        def_bool y
index e43fe753703114890aaff48dec708aec64f32999..f7d3dc555bdbd5c44b45f7f2feeb75560d38887d 100644 (file)
@@ -92,9 +92,7 @@ static void appldata_get_mem_data(void *data)
        mem_data->pswpin     = ev[PSWPIN];
        mem_data->pswpout    = ev[PSWPOUT];
        mem_data->pgalloc    = ev[PGALLOC_NORMAL];
-#ifdef CONFIG_ZONE_DMA
        mem_data->pgalloc    += ev[PGALLOC_DMA];
-#endif
        mem_data->pgfault    = ev[PGFAULT];
        mem_data->pgmajfault = ev[PGMAJFAULT];
 
index e1c8f3a49884bcf73f5a04ad2e6fe613bc13cd45..667c6e9f6a34bf569d1d1441af55eec2d02d3e37 100644 (file)
@@ -621,6 +621,7 @@ static inline unsigned long find_first_zero_bit(const unsigned long *addr,
        bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
        return (bits < size) ? bits : size;
 }
+#define find_first_zero_bit find_first_zero_bit
 
 /**
  * find_first_bit - find the first set bit in a memory region
@@ -641,6 +642,7 @@ static inline unsigned long find_first_bit(const unsigned long * addr,
        bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
        return (bits < size) ? bits : size;
 }
+#define find_first_bit find_first_bit
 
 /**
  * find_next_zero_bit - find the first zero bit in a memory region
@@ -677,6 +679,7 @@ static inline int find_next_zero_bit (const unsigned long * addr,
        }
        return offset + find_first_zero_bit(p, size);
 }
+#define find_next_zero_bit find_next_zero_bit
 
 /**
  * find_next_bit - find the first set bit in a memory region
@@ -713,6 +716,7 @@ static inline int find_next_bit (const unsigned long * addr,
        }
        return offset + find_first_bit(p, size);
 }
+#define find_next_bit find_next_bit
 
 /*
  * Every architecture must define this function. It's the fastest
@@ -742,41 +746,6 @@ static inline int sched_find_first_bit(unsigned long *b)
  *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
  */
 
-static inline void __set_bit_le(unsigned long nr, void *addr)
-{
-       __set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
-}
-
-static inline void __clear_bit_le(unsigned long nr, void *addr)
-{
-       __clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
-}
-
-static inline int __test_and_set_bit_le(unsigned long nr, void *addr)
-{
-       return __test_and_set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
-}
-
-static inline int test_and_set_bit_le(unsigned long nr, void *addr)
-{
-       return test_and_set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
-}
-
-static inline int __test_and_clear_bit_le(unsigned long nr, void *addr)
-{
-       return __test_and_clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
-}
-
-static inline int test_and_clear_bit_le(unsigned long nr, void *addr)
-{
-       return test_and_clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
-}
-
-static inline int test_bit_le(unsigned long nr, const void *addr)
-{
-       return test_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr);
-}
-
 static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
 {
        unsigned long bytes, bits;
@@ -787,6 +756,7 @@ static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
        bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
        return (bits < size) ? bits : size;
 }
+#define find_first_zero_bit_le find_first_zero_bit_le
 
 static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
                                          unsigned long offset)
@@ -816,6 +786,7 @@ static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
         }
        return offset + find_first_zero_bit_le(p, size);
 }
+#define find_next_zero_bit_le find_next_zero_bit_le
 
 static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
 {
@@ -827,6 +798,7 @@ static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
        bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
        return (bits < size) ? bits : size;
 }
+#define find_first_bit_le find_first_bit_le
 
 static inline int find_next_bit_le(void *vaddr, unsigned long size,
                                     unsigned long offset)
@@ -856,6 +828,9 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size,
        }
        return offset + find_first_bit_le(p, size);
 }
+#define find_next_bit_le find_next_bit_le
+
+#include <asm-generic/bitops/le.h>
 
 #define ext2_set_bit_atomic(lock, nr, addr)    \
        test_and_set_bit_le(nr, addr)
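
Each optimized inline now gets a same-named #define so that generic code can detect the arch override with a plain #ifndef, which is what lets the GENERIC_FIND_* Kconfig switches removed at the top of this section go away. A minimal illustration of the pattern, with an invented function name:

#include <stdio.h>

/* "Arch" header: optimized helper plus a macro claiming the name. */
static inline unsigned long arch_find_op(unsigned long w)
{
	return w & 1;	/* stand-in for the optimized implementation */
}
#define arch_find_op arch_find_op

/* "Generic" header: provide a fallback only if the name is unclaimed. */
#ifndef arch_find_op
static inline unsigned long arch_find_op(unsigned long w)
{
	return w % 2;	/* stand-in for the generic implementation */
}
#endif

int main(void)
{
	printf("%lu\n", arch_find_op(3UL));	/* 1, via the "arch" version */
	return 0;
}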
index 8a096b83f51f634623217fc3b5da8f61520a81e7..0e3b35f96be174c12df5d93e33acee36532bcb61 100644 (file)
 #ifndef _S390_DELAY_H
 #define _S390_DELAY_H
 
-extern void __udelay(unsigned long long usecs);
-extern void udelay_simple(unsigned long long usecs);
-extern void __delay(unsigned long loops);
+void __ndelay(unsigned long long nsecs);
+void __udelay(unsigned long long usecs);
+void udelay_simple(unsigned long long usecs);
+void __delay(unsigned long loops);
 
+#define ndelay(n) __ndelay((unsigned long long) (n))
 #define udelay(n) __udelay((unsigned long long) (n))
 #define mdelay(n) __udelay((unsigned long long) (n) * 1000)
 
index 1544b90bd6d610a4cdb51cfe8ef6c1c38afb739f..ba7b01c726a37eb024900e69db673542cc86db02 100644 (file)
@@ -2,6 +2,7 @@
 #define _ASM_IRQ_H
 
 #include <linux/hardirq.h>
+#include <linux/types.h>
 
 enum interruption_class {
        EXTERNAL_INTERRUPT,
@@ -31,4 +32,11 @@ enum interruption_class {
        NR_IRQS,
 };
 
+typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long);
+
+int register_external_interrupt(u16 code, ext_int_handler_t handler);
+int unregister_external_interrupt(u16 code, ext_int_handler_t handler);
+void service_subclass_irq_register(void);
+void service_subclass_irq_unregister(void);
+
 #endif /* _ASM_IRQ_H */
diff --git a/arch/s390/include/asm/s390_ext.h b/arch/s390/include/asm/s390_ext.h
deleted file mode 100644 (file)
index 080876d..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- *    Copyright IBM Corp. 1999,2010
- *    Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
- *              Martin Schwidefsky <schwidefsky@de.ibm.com>,
- */
-
-#ifndef _S390_EXTINT_H
-#define _S390_EXTINT_H
-
-#include <linux/types.h>
-
-typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long);
-
-int register_external_interrupt(__u16 code, ext_int_handler_t handler);
-int unregister_external_interrupt(__u16 code, ext_int_handler_t handler);
-
-#endif /* _S390_EXTINT_H */
diff --git a/arch/s390/include/asm/suspend.h b/arch/s390/include/asm/suspend.h
deleted file mode 100644 (file)
index dc75c61..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef __ASM_S390_SUSPEND_H
-#define __ASM_S390_SUSPEND_H
-
-static inline int arch_prepare_suspend(void)
-{
-       return 0;
-}
-
-#endif
-
index c5338834ddbdfb6614be4d8079afcef4deda1ca2..005d77d8ae2ab04c56f10d6f931e8aac3dddcff7 100644 (file)
@@ -7,7 +7,7 @@
 extern unsigned char cpu_core_id[NR_CPUS];
 extern cpumask_t cpu_core_map[NR_CPUS];
 
-static inline const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+static inline const struct cpumask *cpu_coregroup_mask(int cpu)
 {
        return &cpu_core_map[cpu];
 }
@@ -21,7 +21,7 @@ static inline const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
 extern unsigned char cpu_book_id[NR_CPUS];
 extern cpumask_t cpu_book_map[NR_CPUS];
 
-static inline const struct cpumask *cpu_book_mask(unsigned int cpu)
+static inline const struct cpumask *cpu_book_mask(int cpu)
 {
        return &cpu_book_map[cpu];
 }
index 2d9ea11f919ad2a1565091d3b861da2d2da09de5..2b23885e81e9a40019cc0d0e03748f3d4bbf670e 100644 (file)
 
 #define segment_eq(a,b) ((a).ar4 == (b).ar4)
 
+#define __access_ok(addr, size)        \
+({                             \
+       __chk_user_ptr(addr);   \
+       1;                      \
+})
 
-static inline int __access_ok(const void __user *addr, unsigned long size)
-{
-       return 1;
-}
-#define access_ok(type,addr,size) __access_ok(addr,size)
+#define access_ok(type, addr, size) __access_ok(addr, size)
 
 /*
  * The exception table consists of pairs of addresses: the first is the
index 5ff15dacb5718c04973d4660c9a18478afd1c8f8..df3732249baafa3f0c30474d26da5243412890d1 100644 (file)
@@ -20,10 +20,10 @@ CFLAGS_ptrace.o             += -DUTS_MACHINE='"$(UTS_MACHINE)"'
 
 CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
 
-obj-y  :=  bitmap.o traps.o time.o process.o base.o early.o setup.o \
-           processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
-           s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \
-           vdso.o vtime.o sysinfo.o nmi.o sclp.o jump_label.o
+obj-y  :=  bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \
+           processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \
+           debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \
+           sysinfo.o jump_label.o
 
 obj-y  += $(if $(CONFIG_64BIT),entry64.o,entry.o)
 obj-y  += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
index 3d4a78fc1adc3f6403eaf4ebc93f03211c5e1f86..1ca3d1d6a86ce5e4f3e15309f41cf6fe03300660 100644 (file)
@@ -30,9 +30,9 @@
 #include <asm/atomic.h>
 #include <asm/mathemu.h>
 #include <asm/cpcmd.h>
-#include <asm/s390_ext.h>
 #include <asm/lowcore.h>
 #include <asm/debug.h>
+#include <asm/irq.h>
 
 #ifndef CONFIG_64BIT
 #define ONELONG "%08lx: "
index e204f9597aafa1df6fd51fe3a971ff41b3272f3d..e3264f6a9720515cc65df5784731d4452d32eca1 100644 (file)
@@ -1,19 +1,28 @@
 /*
- *    Copyright IBM Corp. 2004,2010
- *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- *              Thomas Spatzier (tspat@de.ibm.com)
+ *    Copyright IBM Corp. 2004,2011
+ *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
+ *              Holger Smolinski <Holger.Smolinski@de.ibm.com>,
+ *              Thomas Spatzier <tspat@de.ibm.com>,
  *
  * This file contains interrupt related functions.
  */
 
-#include <linux/module.h>
-#include <linux/kernel.h>
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
 #include <linux/seq_file.h>
-#include <linux/cpu.h>
 #include <linux/proc_fs.h>
 #include <linux/profile.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/ftrace.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <asm/irq_regs.h>
+#include <asm/cputime.h>
+#include <asm/lowcore.h>
+#include <asm/irq.h>
+#include "entry.h"
 
 struct irq_class {
        char *name;
@@ -82,8 +91,7 @@ int show_interrupts(struct seq_file *p, void *v)
  * For compatibility only. S/390 specific setup of interrupts et al. is done
  * much later in init_channel_subsystem().
  */
-void __init
-init_IRQ(void)
+void __init init_IRQ(void)
 {
        /* nothing... */
 }
@@ -134,3 +142,116 @@ void init_irq_proc(void)
        create_prof_cpu_mask(root_irq_dir);
 }
 #endif
+
+/*
+ * ext_int_hash[index] is the start of the list for all external interrupts
+ * that hash to this index. With the current set of external interrupts
+ * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
+ * iucv and 0x2603 pfault) this is always the first element.
+ */
+
+struct ext_int_info {
+       struct ext_int_info *next;
+       ext_int_handler_t handler;
+       u16 code;
+};
+
+static struct ext_int_info *ext_int_hash[256];
+
+static inline int ext_hash(u16 code)
+{
+       return (code + (code >> 9)) & 0xff;
+}
+
+int register_external_interrupt(u16 code, ext_int_handler_t handler)
+{
+       struct ext_int_info *p;
+       int index;
+
+       p = kmalloc(sizeof(*p), GFP_ATOMIC);
+       if (!p)
+               return -ENOMEM;
+       p->code = code;
+       p->handler = handler;
+       index = ext_hash(code);
+       p->next = ext_int_hash[index];
+       ext_int_hash[index] = p;
+       return 0;
+}
+EXPORT_SYMBOL(register_external_interrupt);
+
+int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
+{
+       struct ext_int_info *p, *q;
+       int index;
+
+       index = ext_hash(code);
+       q = NULL;
+       p = ext_int_hash[index];
+       while (p) {
+               if (p->code == code && p->handler == handler)
+                       break;
+               q = p;
+               p = p->next;
+       }
+       if (!p)
+               return -ENOENT;
+       if (q)
+               q->next = p->next;
+       else
+               ext_int_hash[index] = p->next;
+       kfree(p);
+       return 0;
+}
+EXPORT_SYMBOL(unregister_external_interrupt);
+
+void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
+                          unsigned int param32, unsigned long param64)
+{
+       struct pt_regs *old_regs;
+       unsigned short code;
+       struct ext_int_info *p;
+       int index;
+
+       code = (unsigned short) ext_int_code;
+       old_regs = set_irq_regs(regs);
+       s390_idle_check(regs, S390_lowcore.int_clock,
+                       S390_lowcore.async_enter_timer);
+       irq_enter();
+       if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
+               /* Serve timer interrupts first. */
+               clock_comparator_work();
+       kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
+       if (code != 0x1004)
+               __get_cpu_var(s390_idle).nohz_delay = 1;
+       index = ext_hash(code);
+       for (p = ext_int_hash[index]; p; p = p->next) {
+               if (likely(p->code == code))
+                       p->handler(ext_int_code, param32, param64);
+       }
+       irq_exit();
+       set_irq_regs(old_regs);
+}
+
+static DEFINE_SPINLOCK(sc_irq_lock);
+static int sc_irq_refcount;
+
+void service_subclass_irq_register(void)
+{
+       spin_lock(&sc_irq_lock);
+       if (!sc_irq_refcount)
+               ctl_set_bit(0, 9);
+       sc_irq_refcount++;
+       spin_unlock(&sc_irq_lock);
+}
+EXPORT_SYMBOL(service_subclass_irq_register);
+
+void service_subclass_irq_unregister(void)
+{
+       spin_lock(&sc_irq_lock);
+       sc_irq_refcount--;
+       if (!sc_irq_refcount)
+               ctl_clear_bit(0, 9);
+       spin_unlock(&sc_irq_lock);
+}
+EXPORT_SYMBOL(service_subclass_irq_unregister);
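
A caller uses the interfaces now declared in asm/irq.h by registering a handler for its external-interrupt code and, if it relies on the service-signal subclass, taking a reference on it. A hedged usage sketch; the 0x1234 code, the handler body, and the module boilerplate are invented for illustration.

#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/irq.h>

static void example_ext_handler(unsigned int ext_int_code,
				unsigned int param32, unsigned long param64)
{
	pr_debug("external interrupt 0x%x\n", ext_int_code & 0xffff);
}

static int __init example_init(void)
{
	int rc;

	rc = register_external_interrupt(0x1234, example_ext_handler);
	if (rc)
		return rc;
	/* Only needed if the handler depends on the service-signal subclass. */
	service_subclass_irq_register();
	return 0;
}

static void __exit example_exit(void)
{
	service_subclass_irq_unregister();
	unregister_external_interrupt(0x1234, example_ext_handler);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");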
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
deleted file mode 100644 (file)
index 1850299..0000000
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- *    Copyright IBM Corp. 1999,2010
- *    Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>,
- *              Martin Schwidefsky <schwidefsky@de.ibm.com>,
- */
-
-#include <linux/kernel_stat.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/ftrace.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <asm/s390_ext.h>
-#include <asm/irq_regs.h>
-#include <asm/cputime.h>
-#include <asm/lowcore.h>
-#include <asm/irq.h>
-#include "entry.h"
-
-struct ext_int_info {
-       struct ext_int_info *next;
-       ext_int_handler_t handler;
-       __u16 code;
-};
-
-/*
- * ext_int_hash[index] is the start of the list for all external interrupts
- * that hash to this index. With the current set of external interrupts 
- * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
- * iucv and 0x2603 pfault) this is always the first element. 
- */
-static struct ext_int_info *ext_int_hash[256];
-
-static inline int ext_hash(__u16 code)
-{
-       return (code + (code >> 9)) & 0xff;
-}
-
-int register_external_interrupt(__u16 code, ext_int_handler_t handler)
-{
-       struct ext_int_info *p;
-       int index;
-
-       p = kmalloc(sizeof(*p), GFP_ATOMIC);
-       if (!p)
-               return -ENOMEM;
-       p->code = code;
-       p->handler = handler;
-       index = ext_hash(code);
-       p->next = ext_int_hash[index];
-       ext_int_hash[index] = p;
-       return 0;
-}
-EXPORT_SYMBOL(register_external_interrupt);
-
-int unregister_external_interrupt(__u16 code, ext_int_handler_t handler)
-{
-       struct ext_int_info *p, *q;
-       int index;
-
-       index = ext_hash(code);
-       q = NULL;
-       p = ext_int_hash[index];
-       while (p) {
-               if (p->code == code && p->handler == handler)
-                       break;
-               q = p;
-               p = p->next;
-       }
-       if (!p)
-               return -ENOENT;
-       if (q)
-               q->next = p->next;
-       else
-               ext_int_hash[index] = p->next;
-       kfree(p);
-       return 0;
-}
-EXPORT_SYMBOL(unregister_external_interrupt);
-
-void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code,
-                          unsigned int param32, unsigned long param64)
-{
-       struct pt_regs *old_regs;
-       unsigned short code;
-       struct ext_int_info *p;
-       int index;
-
-       code = (unsigned short) ext_int_code;
-       old_regs = set_irq_regs(regs);
-       s390_idle_check(regs, S390_lowcore.int_clock,
-                       S390_lowcore.async_enter_timer);
-       irq_enter();
-       if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
-               /* Serve timer interrupts first. */
-               clock_comparator_work();
-       kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
-       if (code != 0x1004)
-               __get_cpu_var(s390_idle).nohz_delay = 1;
-       index = ext_hash(code);
-       for (p = ext_int_hash[index]; p; p = p->next) {
-               if (likely(p->code == code))
-                       p->handler(ext_int_code, param32, param64);
-       }
-       irq_exit();
-       set_irq_regs(old_regs);
-}
index f8e85ecbc4590e8f8d45a78f7ab776e9480227a4..52420d2785b3607e1c682568358ea8f31a398d17 100644 (file)
@@ -44,7 +44,6 @@
 #include <asm/sigp.h>
 #include <asm/pgalloc.h>
 #include <asm/irq.h>
-#include <asm/s390_ext.h>
 #include <asm/cpcmd.h>
 #include <asm/tlbflush.h>
 #include <asm/timer.h>
index a59557f1fb5fd5a9c5e6fec4fc9b41169612205b..dff933065ab6ab688d9a3cc652f54d55b60dc79a 100644 (file)
@@ -41,7 +41,6 @@
 #include <linux/kprobes.h>
 #include <asm/uaccess.h>
 #include <asm/delay.h>
-#include <asm/s390_ext.h>
 #include <asm/div64.h>
 #include <asm/vdso.h>
 #include <asm/irq.h>
index 2eafb8c7a746f2a018ca7dcb75475d7aefa0951d..0cd340b72632c4a42fc19d43615e66f76a0c3a7e 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/smp.h>
 #include <linux/cpuset.h>
 #include <asm/delay.h>
-#include <asm/s390_ext.h>
 
 #define PTF_HORIZONTAL (0UL)
 #define PTF_VERTICAL   (1UL)
index b5a4a739b477424d4c9217ebd3bcaf4ba56ddbf8..a65d2e82f61d56813851d293436c697309515cda 100644 (file)
@@ -39,7 +39,6 @@
 #include <asm/atomic.h>
 #include <asm/mathemu.h>
 #include <asm/cpcmd.h>
-#include <asm/s390_ext.h>
 #include <asm/lowcore.h>
 #include <asm/debug.h>
 #include "entry.h"
index 5e8ead4b4aba0d8604d2eaafcacd9b18d34468be..2d6228f60cd69ec9d5e6ee2f670cbf93cc608c42 100644 (file)
 #include <linux/cpu.h>
 #include <linux/kprobes.h>
 
-#include <asm/s390_ext.h>
 #include <asm/timer.h>
 #include <asm/irq_regs.h>
 #include <asm/cputime.h>
+#include <asm/irq.h>
 
 static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
 
index 0f53110e1d09a36eb5ab794285d30854abe82a27..a65229d91c92be055931de197528c67d3403837c 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/irqflags.h>
 #include <linux/interrupt.h>
+#include <asm/div64.h>
 
 void __delay(unsigned long loops)
 {
@@ -116,3 +117,17 @@ void udelay_simple(unsigned long long usecs)
        while (get_clock() < end)
                cpu_relax();
 }
+
+void __ndelay(unsigned long long nsecs)
+{
+       u64 end;
+
+       nsecs <<= 9;
+       do_div(nsecs, 125);
+       end = get_clock() + nsecs;
+       if (nsecs & ~0xfffUL)
+               __udelay(nsecs >> 12);
+       while (get_clock() < end)
+               barrier();
+}
+EXPORT_SYMBOL(__ndelay);
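
For reference, the scaling in __ndelay() above converts nanoseconds into s390 TOD-clock units: the TOD clock counts in units of 2^-12 microsecond, so nanoseconds are multiplied by 4096/1000 = 512/125, which is exactly (nsecs << 9) / 125. A worked example: 1000 ns becomes (1000 << 9) / 125 = 4096 TOD units, i.e. one microsecond. A small sketch of the same conversion as a standalone helper (ns_to_tod_units is a hypothetical name used only for illustration; do_div comes from the asm/div64.h include added in this hunk):

/* 1000 ns -> (1000 << 9) / 125 = 4096 TOD units = 1 us */
static inline u64 ns_to_tod_units(u64 nsecs)
{
        nsecs <<= 9;            /* multiply by 512 */
        do_div(nsecs, 125);     /* divide by 125: net factor of 4.096 */
        return nsecs;
}
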
index a0f9e730f26aec574eace4abac04c74eed59bf5c..fe103e891e7a0eb32dc7c67fc0cea3217bd4b6e3 100644 (file)
@@ -34,7 +34,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/system.h>
 #include <asm/pgtable.h>
-#include <asm/s390_ext.h>
+#include <asm/irq.h>
 #include <asm/mmu_context.h>
 #include <asm/compat.h>
 #include "../kernel/entry.h"
@@ -245,9 +245,12 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code,
                do_no_context(regs, int_code, trans_exc_code);
                break;
        default: /* fault & VM_FAULT_ERROR */
-               if (fault & VM_FAULT_OOM)
-                       pagefault_out_of_memory();
-               else if (fault & VM_FAULT_SIGBUS) {
+               if (fault & VM_FAULT_OOM) {
+                       if (!(regs->psw.mask & PSW_MASK_PSTATE))
+                               do_no_context(regs, int_code, trans_exc_code);
+                       else
+                               pagefault_out_of_memory();
+               } else if (fault & VM_FAULT_SIGBUS) {
                        /* Kernel mode? Handle exceptions or die */
                        if (!(regs->psw.mask & PSW_MASK_PSTATE))
                                do_no_context(regs, int_code, trans_exc_code);
@@ -277,7 +280,8 @@ static inline int do_exception(struct pt_regs *regs, int access,
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long address;
-       int fault, write;
+       unsigned int flags;
+       int fault;
 
        if (notify_page_fault(regs))
                return 0;
@@ -296,6 +300,10 @@ static inline int do_exception(struct pt_regs *regs, int access,
 
        address = trans_exc_code & __FAIL_ADDR_MASK;
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+       flags = FAULT_FLAG_ALLOW_RETRY;
+       if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
+               flags |= FAULT_FLAG_WRITE;
+retry:
        down_read(&mm->mmap_sem);
 
        fault = VM_FAULT_BADMAP;
@@ -325,21 +333,31 @@ static inline int do_exception(struct pt_regs *regs, int access,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       write = (access == VM_WRITE ||
-                (trans_exc_code & store_indication) == 0x400) ?
-               FAULT_FLAG_WRITE : 0;
-       fault = handle_mm_fault(mm, vma, address, write);
+       fault = handle_mm_fault(mm, vma, address, flags);
        if (unlikely(fault & VM_FAULT_ERROR))
                goto out_up;
 
-       if (fault & VM_FAULT_MAJOR) {
-               tsk->maj_flt++;
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
-                                    regs, address);
-       } else {
-               tsk->min_flt++;
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
-                                    regs, address);
+       /*
+        * Major/minor page fault accounting is only done on the
+        * initial attempt. If we go through a retry, it is extremely
+        * likely that the page will be found in page cache at that point.
+        */
+       if (flags & FAULT_FLAG_ALLOW_RETRY) {
+               if (fault & VM_FAULT_MAJOR) {
+                       tsk->maj_flt++;
+                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+                                     regs, address);
+               } else {
+                       tsk->min_flt++;
+                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+                                     regs, address);
+               }
+               if (fault & VM_FAULT_RETRY) {
+                       /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+                        * of starvation. */
+                       flags &= ~FAULT_FLAG_ALLOW_RETRY;
+                       goto retry;
+               }
        }
        /*
         * The instruction that caused the program check will
@@ -429,10 +447,9 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
        access = write ? VM_WRITE : VM_READ;
        fault = do_exception(&regs, access, uaddr | 2);
        if (unlikely(fault)) {
-               if (fault & VM_FAULT_OOM) {
-                       pagefault_out_of_memory();
-                       fault = 0;
-               } else if (fault & VM_FAULT_SIGBUS)
+               if (fault & VM_FAULT_OOM)
+                       return -EFAULT;
+               else if (fault & VM_FAULT_SIGBUS)
                        do_sigbus(&regs, pgm_int_code, uaddr);
        }
        return fault ? -EFAULT : 0;
@@ -485,7 +502,6 @@ int pfault_init(void)
                "2:\n"
                EX_TABLE(0b,1b)
                : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc");
-        __ctl_set_bit(0, 9);
         return rc;
 }
 
@@ -500,7 +516,6 @@ void pfault_fini(void)
 
        if (!MACHINE_IS_VM || pfault_disable)
                return;
-       __ctl_clear_bit(0,9);
        asm volatile(
                "       diag    %0,0,0x258\n"
                "0:\n"
@@ -615,6 +630,7 @@ static int __init pfault_irq_init(void)
        rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
        if (rc)
                goto out_pfault;
+       service_subclass_irq_register();
        hotcpu_notifier(pfault_cpu_notify, 0);
        return 0;
 
index dfefc2171691aa6cdc27c95878a1cd12a0d39053..59b663109d9024af9728547208b74a56d5d2d6a2 100644 (file)
@@ -119,9 +119,7 @@ void __init paging_init(void)
        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
-#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        free_area_init_nodes(max_zone_pfns);
        fault_init();
index 053caa0fd2768cd6eee4a5daa0ac54bf964e3252..4552ce40c81a5086ad550e21092c78b36ad3b7c3 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/oprofile.h>
 
 #include <asm/lowcore.h>
-#include <asm/s390_ext.h>
+#include <asm/irq.h>
 
 #include "hwsampler.h"
 
@@ -580,7 +580,7 @@ static int hws_cpu_callback(struct notifier_block *nfb,
 {
        /* We do not have sampler space available for all possible CPUs.
           All CPUs should be online when hw sampling is activated. */
-       return NOTIFY_BAD;
+       return (hws_state <= HWS_DEALLOCATED) ? NOTIFY_OK : NOTIFY_BAD;
 }
 
 static struct notifier_block hws_cpu_notifier = {
index e73bc781cc1465cb51f5c23a2c61f168b5279129..288add8d168f05f8181f3a7d0eee61ecda89ce82 100644 (file)
@@ -43,9 +43,6 @@ config NO_DMA
 config RWSEM_GENERIC_SPINLOCK
        def_bool y
 
-config GENERIC_FIND_NEXT_BIT
-       def_bool y
-
 config GENERIC_HWEIGHT
        def_bool y
 
index b44e37753b9a59f30d448ebca1da13814ef33ba5..74495a5ea02738f8cd90fa1536a7daefb0c88be9 100644 (file)
@@ -71,12 +71,6 @@ config GENERIC_CSUM
        def_bool y
        depends on SUPERH64
 
-config GENERIC_FIND_NEXT_BIT
-       def_bool y
-
-config GENERIC_FIND_BIT_LE
-       def_bool y
-
 config GENERIC_HWEIGHT
        def_bool y
 
index 77ec0e7b8ddf6c68b5fa2ad6226a6a03dc57845b..e7583484cc07dd3d9ab257d094f769b94446397a 100644 (file)
@@ -7,7 +7,6 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_CGROUPS=y
-CONFIG_CGROUP_NS=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
index c41650572d7957584804e4b5a3f9c03403176113..8a7dd7b59c5c01f9a0adf539e87ad569f6dd678e 100644 (file)
@@ -12,7 +12,6 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_CGROUPS=y
 CONFIG_CGROUP_DEBUG=y
-CONFIG_CGROUP_NS=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CPUSETS=y
index a468ff227fc6503c13fec6e3ab8e310aace0b876..72c3fad7383f5fc254d90fe3d499eb4a61ce1a47 100644 (file)
@@ -8,7 +8,6 @@ CONFIG_RCU_TRACE=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_CGROUPS=y
 CONFIG_CGROUP_DEBUG=y
-CONFIG_CGROUP_NS=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
 CONFIG_RESOURCE_COUNTERS=y
index 3f92d37c6374e7dfde027bc5933eb40c07115d5d..6bb413036892cb2f50af2db94649584557703b91 100644 (file)
@@ -9,7 +9,6 @@ CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_CGROUPS=y
-CONFIG_CGROUP_NS=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CGROUP_CPUACCT=y
index 7b3daec6fefe6cb7fe1a54a6c9bb4284cbe11bd5..8bfa4d056d7a6574e4ac7f4fa6d6b49a90001d7c 100644 (file)
@@ -9,7 +9,6 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_CGROUPS=y
 CONFIG_CGROUP_DEBUG=y
-CONFIG_CGROUP_NS=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CGROUP_DEVICE=y
 CONFIG_CPUSETS=y
index 4235e228d921057c4e801e2ed2535b5f084132ff..f3613952d1aeb8469a0581b91fcd35c3c80ca3d0 100644 (file)
@@ -34,5 +34,6 @@ static inline void arch_kgdb_breakpoint(void)
 
 #define CACHE_FLUSH_IS_SAFE    1
 #define BREAK_INSTR_SIZE       2
+#define GDB_ADJUSTS_BREAK_OFFSET
 
 #endif /* __ASM_SH_KGDB_H */
index de167d3a1a8023423cd9ed83f42e2038aa01bbee..40725b4a80186a773cc4f72371cc77e9f3b0cd73 100644 (file)
@@ -40,9 +40,8 @@
 #include <asm/system.h>
 
 #define user_mode(regs)                        (((regs)->sr & 0x40000000)==0)
-#define user_stack_pointer(_regs)      ((unsigned long)(_regs)->regs[15])
 #define kernel_stack_pointer(_regs)    ((unsigned long)(_regs)->regs[15])
-#define instruction_pointer(regs)      ((unsigned long)(regs)->pc)
+#define GET_USP(regs) ((regs)->regs[15])
 
 extern void show_regs(struct pt_regs *);
 
@@ -139,6 +138,9 @@ static inline unsigned long profile_pc(struct pt_regs *regs)
 
        return pc;
 }
+#define profile_pc profile_pc
+
+#include <asm-generic/ptrace.h>
 #endif /* __KERNEL__ */
 
 #endif /* __ASM_SH_PTRACE_H */
index 64eb41a063e8162c7bde64bdcf0cd8ce3e4c45b1..e14567a7e9a14814353fcf9c645d456c4bc5873f 100644 (file)
@@ -3,7 +3,6 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/notifier.h>
-static inline int arch_prepare_suspend(void) { return 0; }
 
 #include <asm/ptrace.h>
 
index 63a027c9ada5aabf99fdea93c2f355e431076cce..af32e17fa170c3d9fa1ef259e4fd7cee0ad48f12 100644 (file)
@@ -190,14 +190,6 @@ config RWSEM_XCHGADD_ALGORITHM
        bool
        default y if SPARC64
 
-config GENERIC_FIND_NEXT_BIT
-       bool
-       default y
-
-config GENERIC_FIND_BIT_LE
-       bool
-       default y
-
 config GENERIC_HWEIGHT
        bool
        default y if !ULTRA_HAS_POPULATION_COUNT
index 635e1bfb1c5d373d8d38c6c94801fd2c6077ebeb..e1e50101b3bbf9e7dd7e7ed93f9f563e8182ee2d 100644 (file)
@@ -5,7 +5,6 @@ config TILE
        def_bool y
        select HAVE_KVM if !TILEGX
        select GENERIC_FIND_FIRST_BIT
-       select GENERIC_FIND_NEXT_BIT
        select USE_GENERIC_SMP_HELPERS
        select CC_OPTIMIZE_FOR_SIZE
        select HAVE_GENERIC_HARDIRQS
index 795ea8e869f40ddf7afab4bd18ffafb5b15b85aa..8aae429a56e2d34690c24253fb1c3825fe1cf52a 100644 (file)
@@ -15,7 +15,6 @@ endmenu
 config UML_X86
        def_bool y
        select GENERIC_FIND_FIRST_BIT
-       select GENERIC_FIND_NEXT_BIT
 
 config 64BIT
        bool
index 88a9c0f32b219677e38eb1eb6b817861ec8edc37..65bad75c7e96d0706f51bb16fcdcb9615e86b435 100644 (file)
@@ -14,7 +14,6 @@
 #define __UNICORE_SUSPEND_H__
 
 #ifndef __ASSEMBLY__
-static inline int arch_prepare_suspend(void) { return 0; }
 
 #include <asm/ptrace.h>
 
index 483775f42d2aab393886ee97ed2987017e0196df..da349723d4115cef7d75aac4680ba2284deaf0d0 100644 (file)
@@ -64,7 +64,6 @@ config X86
        select HAVE_GENERIC_HARDIRQS
        select HAVE_SPARSE_IRQ
        select GENERIC_FIND_FIRST_BIT
-       select GENERIC_FIND_NEXT_BIT
        select GENERIC_IRQ_PROBE
        select GENERIC_PENDING_IRQ if SMP
        select GENERIC_IRQ_SHOW
index 6f9872658dd2d66e80d8025eb7f8c40ee86b958f..2bf18059fbea710667d4636477b41a21a1fb1571 100644 (file)
@@ -10,7 +10,6 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_AUDIT=y
 CONFIG_LOG_BUF_SHIFT=18
 CONFIG_CGROUPS=y
-CONFIG_CGROUP_NS=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
index ee01a9d5d4f0a7b8e3bd9f6e4f86fb28e7fe2b68..22a0dc8e51dd4196e43acdab23d9a3e32513e167 100644 (file)
@@ -11,7 +11,6 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_AUDIT=y
 CONFIG_LOG_BUF_SHIFT=18
 CONFIG_CGROUPS=y
-CONFIG_CGROUP_NS=y
 CONFIG_CGROUP_FREEZER=y
 CONFIG_CPUSETS=y
 CONFIG_CGROUP_CPUACCT=y
index 396f5b5fc4d714e4babdb916565731264a312ec8..77e95f54570a9728816055df2e6c562e1a608ccc 100644 (file)
@@ -77,6 +77,7 @@ static inline void arch_kgdb_breakpoint(void)
 }
 #define BREAK_INSTR_SIZE       1
 #define CACHE_FLUSH_IS_SAFE    1
+#define GDB_ADJUSTS_BREAK_OFFSET
 
 extern int kgdb_ll_trap(int cmd, const char *str,
                        struct pt_regs *regs, long err, int trap, int sig);
index 1babf8adecdf624840f60915f19169754e9967f0..94e7618fcac8d6fb8bd61c1b356b522187b1218a 100644 (file)
@@ -136,6 +136,7 @@ struct cpuinfo_x86;
 struct task_struct;
 
 extern unsigned long profile_pc(struct pt_regs *regs);
+#define profile_pc profile_pc
 
 extern unsigned long
 convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);
@@ -202,20 +203,11 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
 #endif
 }
 
-static inline unsigned long instruction_pointer(struct pt_regs *regs)
-{
-       return regs->ip;
-}
-
-static inline unsigned long frame_pointer(struct pt_regs *regs)
-{
-       return regs->bp;
-}
+#define GET_IP(regs) ((regs)->ip)
+#define GET_FP(regs) ((regs)->bp)
+#define GET_USP(regs) ((regs)->sp)
 
-static inline unsigned long user_stack_pointer(struct pt_regs *regs)
-{
-       return regs->sp;
-}
+#include <asm-generic/ptrace.h>
 
 /* Query offset/name of register from its name/offset */
 extern int regs_query_register_offset(const char *name);
index fd921c3a68414e341fe8fffa8b1df1326eba2ba7..487055c8c1aaf2d63bbf2b0d8bcfcdd5d616c721 100644 (file)
@@ -9,8 +9,6 @@
 #include <asm/desc.h>
 #include <asm/i387.h>
 
-static inline int arch_prepare_suspend(void) { return 0; }
-
 /* image of the saved processor state */
 struct saved_context {
        u16 es, fs, gs, ss;
index 8d942afae681bec8fe2c47a9bf57955dbc4317b0..09b0bf104156579ae49f72b17c603befcf2db5a6 100644 (file)
@@ -9,11 +9,6 @@
 #include <asm/desc.h>
 #include <asm/i387.h>
 
-static inline int arch_prepare_suspend(void)
-{
-       return 0;
-}
-
 /*
  * Image of the saved processor state, used by the low level ACPI suspend to
  * RAM code and by the low level hibernation code.
index 83e2efd181e27c8d5e9430fe8e52a30c022faf5c..9db5583b6d38ff781611c5b20b812115de28cba2 100644 (file)
@@ -51,6 +51,10 @@ extern int unsynchronized_tsc(void);
 extern int check_tsc_unstable(void);
 extern unsigned long native_calibrate_tsc(void);
 
+#ifdef CONFIG_X86_64
+extern cycles_t vread_tsc(void);
+#endif
+
 /*
  * Boot-time check whether the TSCs are synchronized across
  * all CPUs/cores:
index 9064052b73ded033f961c2756a981ad1789678f0..bb0522850b74c2ae780b6c81981efde9f76f2aa3 100644 (file)
@@ -1,20 +1,6 @@
 #ifndef _ASM_X86_VDSO_H
 #define _ASM_X86_VDSO_H
 
-#ifdef CONFIG_X86_64
-extern const char VDSO64_PRELINK[];
-
-/*
- * Given a pointer to the vDSO image, find the pointer to VDSO64_name
- * as that symbol is defined in the vDSO sources or linker script.
- */
-#define VDSO64_SYMBOL(base, name)                                      \
-({                                                                     \
-       extern const char VDSO64_##name[];                              \
-       (void *)(VDSO64_##name - VDSO64_PRELINK + (unsigned long)(base)); \
-})
-#endif
-
 #if defined CONFIG_X86_32 || defined CONFIG_COMPAT
 extern const char VDSO32_PRELINK[];
 
index 3d61e204826f1da5b6cb747957818a0e7b2d3913..646b4c1ca6958f351d9138cfea58840f4b1c3ae6 100644 (file)
@@ -23,8 +23,6 @@ struct vsyscall_gtod_data {
        struct timespec wall_to_monotonic;
        struct timespec wall_time_coarse;
 };
-extern struct vsyscall_gtod_data __vsyscall_gtod_data
-__section_vsyscall_gtod_data;
 extern struct vsyscall_gtod_data vsyscall_gtod_data;
 
 #endif /* _ASM_X86_VGTOD_H */
index d0983d255fbdd13d1830bd8e9aa39cbd8c8ceae5..d55597351f6a5ab4a558a02b3f09830152232840 100644 (file)
@@ -16,27 +16,19 @@ enum vsyscall_num {
 #ifdef __KERNEL__
 #include <linux/seqlock.h>
 
-#define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
-
 /* Definitions for CONFIG_GENERIC_TIME definitions */
-#define __section_vsyscall_gtod_data __attribute__ \
-       ((unused, __section__ (".vsyscall_gtod_data"),aligned(16)))
-#define __section_vsyscall_clock __attribute__ \
-       ((unused, __section__ (".vsyscall_clock"),aligned(16)))
 #define __vsyscall_fn \
        __attribute__ ((unused, __section__(".vsyscall_fn"))) notrace
 
 #define VGETCPU_RDTSCP 1
 #define VGETCPU_LSL    2
 
-extern int __vgetcpu_mode;
-extern volatile unsigned long __jiffies;
-
 /* kernel space (writeable) */
 extern int vgetcpu_mode;
 extern struct timezone sys_tz;
 
+#include <asm/vvar.h>
+
 extern void map_vsyscall(void);
 
 #endif /* __KERNEL__ */
diff --git a/arch/x86/include/asm/vvar.h b/arch/x86/include/asm/vvar.h
new file mode 100644 (file)
index 0000000..341b355
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * vvar.h: Shared vDSO/kernel variable declarations
+ * Copyright (c) 2011 Andy Lutomirski
+ * Subject to the GNU General Public License, version 2
+ *
+ * A handful of variables are accessible (read-only) from userspace
+ * code in the vsyscall page and the vdso.  They are declared here.
+ * Some other file must define them with DEFINE_VVAR.
+ *
+ * In normal kernel code, they are used like any other variable.
+ * In user code, they are accessed through the VVAR macro.
+ *
+ * Each of these variables lives in the vsyscall page, and each
+ * one needs a unique offset within the little piece of the page
+ * reserved for vvars.  Specify that offset in DECLARE_VVAR.
+ * (There are 896 bytes available.  If you mess up, the linker will
+ * catch it.)
+ */
+
+/* Offset of vars within vsyscall page */
+#define VSYSCALL_VARS_OFFSET (3072 + 128)
+
+#if defined(__VVAR_KERNEL_LDS)
+
+/* The kernel linker script defines its own magic to put vvars in the
+ * right place.
+ */
+#define DECLARE_VVAR(offset, type, name) \
+       EMIT_VVAR(name, VSYSCALL_VARS_OFFSET + offset)
+
+#else
+
+#define DECLARE_VVAR(offset, type, name)                               \
+       static type const * const vvaraddr_ ## name =                   \
+               (void *)(VSYSCALL_START + VSYSCALL_VARS_OFFSET + (offset));
+
+#define DEFINE_VVAR(type, name)                                                \
+       type __vvar_ ## name                                            \
+       __attribute__((section(".vsyscall_var_" #name), aligned(16)))
+
+#define VVAR(name) (*vvaraddr_ ## name)
+
+#endif
+
+/* DECLARE_VVAR(offset, type, name) */
+
+DECLARE_VVAR(0, volatile unsigned long, jiffies)
+DECLARE_VVAR(8, int, vgetcpu_mode)
+DECLARE_VVAR(128, struct vsyscall_gtod_data, vsyscall_gtod_data)
+
+#undef DECLARE_VVAR
+#undef VSYSCALL_VARS_OFFSET
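
To make the macro split above concrete, here is a short sketch of both sides of a vvar, mirroring the jiffies conversion that appears later in this series; vdso_read_jiffies() is a hypothetical illustration, not code from the commit:

/* Kernel side: places the real variable in its reserved vvar slot. */
DEFINE_VVAR(volatile unsigned long, jiffies) = INITIAL_JIFFIES;

/* vDSO/vsyscall side: DECLARE_VVAR(0, ...) in vvar.h has already set up
 * vvaraddr_jiffies, so VVAR(jiffies) is a plain load from the read-only
 * vsyscall mapping. */
notrace static unsigned long vdso_read_jiffies(void)
{
        return VVAR(jiffies);
}
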
index 8508bfe52296b18b5b3164acff6e2cf449a31792..d240ea950519343231514be75f4e6a6cea15dd8f 100644 (file)
@@ -447,6 +447,13 @@ HYPERVISOR_hvm_op(int op, void *arg)
        return _hypercall2(unsigned long, hvm_op, op, arg);
 }
 
+static inline int
+HYPERVISOR_tmem_op(
+       struct tmem_op *op)
+{
+       return _hypercall1(int, tmem_op, op);
+}
+
 static inline void
 MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
 {
index 250806472a7e07373c7bf5cd66acfc8bca81d032..f5abe3a245b84ce698472051d873b33f882a9d97 100644 (file)
@@ -8,7 +8,6 @@ CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not profile debug and lowlevel utilities
-CFLAGS_REMOVE_tsc.o = -pg
 CFLAGS_REMOVE_rtc.o = -pg
 CFLAGS_REMOVE_paravirt-spinlocks.o = -pg
 CFLAGS_REMOVE_pvclock.o = -pg
@@ -24,13 +23,16 @@ endif
 nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_vsyscall_64.o   := $(PROFILING) -g0 $(nostackp)
 CFLAGS_hpet.o          := $(nostackp)
-CFLAGS_tsc.o           := $(nostackp)
+CFLAGS_vread_tsc_64.o  := $(nostackp)
 CFLAGS_paravirt.o      := $(nostackp)
 GCOV_PROFILE_vsyscall_64.o     := n
 GCOV_PROFILE_hpet.o            := n
 GCOV_PROFILE_tsc.o             := n
 GCOV_PROFILE_paravirt.o                := n
 
+# vread_tsc_64 is hot and should be fully optimized:
+CFLAGS_REMOVE_vread_tsc_64.o = -pg -fno-optimize-sibling-calls
+
 obj-y                  := process_$(BITS).o signal.o entry_$(BITS).o
 obj-y                  += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y                  += time.o ioport.o ldt.o dumpstack.o
@@ -39,7 +41,7 @@ obj-$(CONFIG_IRQ_WORK)  += irq_work.o
 obj-y                  += probe_roms.o
 obj-$(CONFIG_X86_32)   += sys_i386_32.o i386_ksyms_32.o
 obj-$(CONFIG_X86_64)   += sys_x86_64.o x8664_ksyms_64.o
-obj-$(CONFIG_X86_64)   += syscall_64.o vsyscall_64.o
+obj-$(CONFIG_X86_64)   += syscall_64.o vsyscall_64.o vread_tsc_64.o
 obj-y                  += bootflag.o e820.o
 obj-y                  += pci-dma.o quirks.o topology.o kdebugfs.o
 obj-y                  += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
index 25a28a245937989d2de9abfb94d96835a1b01615..00cbb272627ff8d9ef2fb7f963c613969cc3710a 100644 (file)
@@ -23,7 +23,7 @@
 #include <asm/time.h>
 
 #ifdef CONFIG_X86_64
-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
+DEFINE_VVAR(volatile unsigned long, jiffies) = INITIAL_JIFFIES;
 #endif
 
 unsigned long profile_pc(struct pt_regs *regs)
index 9335bf7dd2e7635f8bca255532919e98ff015388..6cc6922262af7ca285c814eaf13fdf65fd30e033 100644 (file)
@@ -763,25 +763,6 @@ static cycle_t read_tsc(struct clocksource *cs)
                ret : clocksource_tsc.cycle_last;
 }
 
-#ifdef CONFIG_X86_64
-static cycle_t __vsyscall_fn vread_tsc(void)
-{
-       cycle_t ret;
-
-       /*
-        * Surround the RDTSC by barriers, to make sure it's not
-        * speculated to outside the seqlock critical section and
-        * does not cause time warps:
-        */
-       rdtsc_barrier();
-       ret = (cycle_t)vget_cycles();
-       rdtsc_barrier();
-
-       return ret >= __vsyscall_gtod_data.clock.cycle_last ?
-               ret : __vsyscall_gtod_data.clock.cycle_last;
-}
-#endif
-
 static void resume_tsc(struct clocksource *cs)
 {
        clocksource_tsc.cycle_last = 0;
index 61682f0ac264853cabc8e3b72a8a8ddf1d387bc4..89aed99aafceb3b8ce1df591a77f619418166273 100644 (file)
@@ -161,6 +161,12 @@ SECTIONS
 
 #define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0)
 #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
+#define EMIT_VVAR(x, offset) .vsyscall_var_ ## x       \
+       ADDR(.vsyscall_0) + offset                      \
+       : AT(VLOAD(.vsyscall_var_ ## x)) {              \
+               *(.vsyscall_var_ ## x)                  \
+       }                                               \
+       x = VVIRT(.vsyscall_var_ ## x);
 
        . = ALIGN(4096);
        __vsyscall_0 = .;
@@ -175,18 +181,6 @@ SECTIONS
                *(.vsyscall_fn)
        }
 
-       . = ALIGN(L1_CACHE_BYTES);
-       .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) {
-               *(.vsyscall_gtod_data)
-       }
-
-       vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data);
-       .vsyscall_clock : AT(VLOAD(.vsyscall_clock)) {
-               *(.vsyscall_clock)
-       }
-       vsyscall_clock = VVIRT(.vsyscall_clock);
-
-
        .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) {
                *(.vsyscall_1)
        }
@@ -194,21 +188,14 @@ SECTIONS
                *(.vsyscall_2)
        }
 
-       .vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) {
-               *(.vgetcpu_mode)
-       }
-       vgetcpu_mode = VVIRT(.vgetcpu_mode);
-
-       . = ALIGN(L1_CACHE_BYTES);
-       .jiffies : AT(VLOAD(.jiffies)) {
-               *(.jiffies)
-       }
-       jiffies = VVIRT(.jiffies);
-
        .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
                *(.vsyscall_3)
        }
 
+#define __VVAR_KERNEL_LDS
+#include <asm/vvar.h>
+#undef __VVAR_KERNEL_LDS
+
        . = __vsyscall_0 + PAGE_SIZE;
 
 #undef VSYSCALL_ADDR
@@ -216,6 +203,7 @@ SECTIONS
 #undef VLOAD
 #undef VVIRT_OFFSET
 #undef VVIRT
+#undef EMIT_VVAR
 
 #endif /* CONFIG_X86_64 */
 
diff --git a/arch/x86/kernel/vread_tsc_64.c b/arch/x86/kernel/vread_tsc_64.c
new file mode 100644 (file)
index 0000000..a81aa9e
--- /dev/null
@@ -0,0 +1,36 @@
+/* This code runs in userspace. */
+
+#define DISABLE_BRANCH_PROFILING
+#include <asm/vgtod.h>
+
+notrace cycle_t __vsyscall_fn vread_tsc(void)
+{
+       cycle_t ret;
+       u64 last;
+
+       /*
+        * Empirically, a fence (of type that depends on the CPU)
+        * before rdtsc is enough to ensure that rdtsc is ordered
+        * with respect to loads.  The various CPU manuals are unclear
+        * as to whether rdtsc can be reordered with later loads,
+        * but no one has ever seen it happen.
+        */
+       rdtsc_barrier();
+       ret = (cycle_t)vget_cycles();
+
+       last = VVAR(vsyscall_gtod_data).clock.cycle_last;
+
+       if (likely(ret >= last))
+               return ret;
+
+       /*
+        * GCC likes to generate cmov here, but this branch is extremely
+        * predictable (it's just a function of time and the likely is
+        * very likely) and there's a data dependence, so force GCC
+        * to generate a branch instead.  I don't barrier() because
+        * we don't actually need a barrier, and if this function
+        * ever gets inlined it will generate worse code.
+        */
+       asm volatile ("");
+       return last;
+}
index dcbb28c4b69461723147e2b863919301083b68b5..3e682184d76c7997d090430db61d52e4826270e8 100644 (file)
                __attribute__ ((unused, __section__(".vsyscall_" #nr))) notrace
 #define __syscall_clobber "r11","cx","memory"
 
-/*
- * vsyscall_gtod_data contains data that is :
- * - readonly from vsyscalls
- * - written by timer interrupt or systcl (/proc/sys/kernel/vsyscall64)
- * Try to keep this structure as small as possible to avoid cache line ping pongs
- */
-int __vgetcpu_mode __section_vgetcpu_mode;
-
-struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data =
+DEFINE_VVAR(int, vgetcpu_mode);
+DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
 {
-       .lock = SEQLOCK_UNLOCKED,
+       .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
        .sysctl_enabled = 1,
 };
 
@@ -97,7 +90,7 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
  */
 static __always_inline void do_get_tz(struct timezone * tz)
 {
-       *tz = __vsyscall_gtod_data.sys_tz;
+       *tz = VVAR(vsyscall_gtod_data).sys_tz;
 }
 
 static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
@@ -126,23 +119,24 @@ static __always_inline void do_vgettimeofday(struct timeval * tv)
        unsigned long mult, shift, nsec;
        cycle_t (*vread)(void);
        do {
-               seq = read_seqbegin(&__vsyscall_gtod_data.lock);
+               seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
 
-               vread = __vsyscall_gtod_data.clock.vread;
-               if (unlikely(!__vsyscall_gtod_data.sysctl_enabled || !vread)) {
+               vread = VVAR(vsyscall_gtod_data).clock.vread;
+               if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled ||
+                            !vread)) {
                        gettimeofday(tv,NULL);
                        return;
                }
 
                now = vread();
-               base = __vsyscall_gtod_data.clock.cycle_last;
-               mask = __vsyscall_gtod_data.clock.mask;
-               mult = __vsyscall_gtod_data.clock.mult;
-               shift = __vsyscall_gtod_data.clock.shift;
+               base = VVAR(vsyscall_gtod_data).clock.cycle_last;
+               mask = VVAR(vsyscall_gtod_data).clock.mask;
+               mult = VVAR(vsyscall_gtod_data).clock.mult;
+               shift = VVAR(vsyscall_gtod_data).clock.shift;
 
-               tv->tv_sec = __vsyscall_gtod_data.wall_time_sec;
-               nsec = __vsyscall_gtod_data.wall_time_nsec;
-       } while (read_seqretry(&__vsyscall_gtod_data.lock, seq));
+               tv->tv_sec = VVAR(vsyscall_gtod_data).wall_time_sec;
+               nsec = VVAR(vsyscall_gtod_data).wall_time_nsec;
+       } while (read_seqretry(&VVAR(vsyscall_gtod_data).lock, seq));
 
        /* calculate interval: */
        cycle_delta = (now - base) & mask;
@@ -171,15 +165,15 @@ time_t __vsyscall(1) vtime(time_t *t)
 {
        unsigned seq;
        time_t result;
-       if (unlikely(!__vsyscall_gtod_data.sysctl_enabled))
+       if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
                return time_syscall(t);
 
        do {
-               seq = read_seqbegin(&__vsyscall_gtod_data.lock);
+               seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
 
-               result = __vsyscall_gtod_data.wall_time_sec;
+               result = VVAR(vsyscall_gtod_data).wall_time_sec;
 
-       } while (read_seqretry(&__vsyscall_gtod_data.lock, seq));
+       } while (read_seqretry(&VVAR(vsyscall_gtod_data).lock, seq));
 
        if (t)
                *t = result;
@@ -208,9 +202,9 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
           We do this here because otherwise user space would do it on
           its own in a likely inferior way (no access to jiffies).
           If you don't like it pass NULL. */
-       if (tcache && tcache->blob[0] == (j = __jiffies)) {
+       if (tcache && tcache->blob[0] == (j = VVAR(jiffies))) {
                p = tcache->blob[1];
-       } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
+       } else if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) {
                /* Load per CPU data from RDTSCP */
                native_read_tscp(&p);
        } else {
index b6552b189bcdbb43b1f3627616f1f69d488f9efa..bef0bc9624006d1839dc3050ab8356d7852593f4 100644 (file)
@@ -11,7 +11,7 @@ vdso-install-$(VDSO32-y)      += $(vdso32-images)
 
 
 # files to link into the vdso
-vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vvar.o
+vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o
 
 # files to link into kernel
 obj-$(VDSO64-y)                        += vma.o vdso.o
@@ -37,11 +37,24 @@ $(obj)/%.so: OBJCOPYFLAGS := -S
 $(obj)/%.so: $(obj)/%.so.dbg FORCE
        $(call if_changed,objcopy)
 
+#
+# Don't omit frame pointers for ease of userspace debugging, but do
+# optimize sibling calls.
+#
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
-       $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector)
+       $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
+       -fno-omit-frame-pointer -foptimize-sibling-calls
 
 $(vobjs): KBUILD_CFLAGS += $(CFL)
 
+#
+# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
+#
+CFLAGS_REMOVE_vdso-note.o = -pg
+CFLAGS_REMOVE_vclock_gettime.o = -pg
+CFLAGS_REMOVE_vgetcpu.o = -pg
+CFLAGS_REMOVE_vvar.o = -pg
+
 targets += vdso-syms.lds
 obj-$(VDSO64-y)                        += vdso-syms.lds
 
index ee55754cc3c5ff378b76f2065a610b72e757f088..a724905fdae7c296bb40492505225f5cf053f35a 100644 (file)
@@ -2,7 +2,7 @@
  * Copyright 2006 Andi Kleen, SUSE Labs.
  * Subject to the GNU Public License, v.2
  *
- * Fast user context implementation of clock_gettime and gettimeofday.
+ * Fast user context implementation of clock_gettime, gettimeofday, and time.
  *
  * The code should have no internal unresolved relocations.
  * Check with readelf after changing.
@@ -22,9 +22,8 @@
 #include <asm/hpet.h>
 #include <asm/unistd.h>
 #include <asm/io.h>
-#include "vextern.h"
 
-#define gtod vdso_vsyscall_gtod_data
+#define gtod (&VVAR(vsyscall_gtod_data))
 
 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
@@ -56,22 +55,6 @@ notrace static noinline int do_realtime(struct timespec *ts)
        return 0;
 }
 
-/* Copy of the version in kernel/time.c which we cannot directly access */
-notrace static void
-vset_normalized_timespec(struct timespec *ts, long sec, long nsec)
-{
-       while (nsec >= NSEC_PER_SEC) {
-               nsec -= NSEC_PER_SEC;
-               ++sec;
-       }
-       while (nsec < 0) {
-               nsec += NSEC_PER_SEC;
-               --sec;
-       }
-       ts->tv_sec = sec;
-       ts->tv_nsec = nsec;
-}
-
 notrace static noinline int do_monotonic(struct timespec *ts)
 {
        unsigned long seq, ns, secs;
@@ -82,7 +65,17 @@ notrace static noinline int do_monotonic(struct timespec *ts)
                secs += gtod->wall_to_monotonic.tv_sec;
                ns += gtod->wall_to_monotonic.tv_nsec;
        } while (unlikely(read_seqretry(&gtod->lock, seq)));
-       vset_normalized_timespec(ts, secs, ns);
+
+       /* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
+        * are all guaranteed to be nonnegative.
+        */
+       while (ns >= NSEC_PER_SEC) {
+               ns -= NSEC_PER_SEC;
+               ++secs;
+       }
+       ts->tv_sec = secs;
+       ts->tv_nsec = ns;
+
        return 0;
 }
 
@@ -107,7 +100,17 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
                secs += gtod->wall_to_monotonic.tv_sec;
                ns += gtod->wall_to_monotonic.tv_nsec;
        } while (unlikely(read_seqretry(&gtod->lock, seq)));
-       vset_normalized_timespec(ts, secs, ns);
+
+       /* wall_time_nsec and wall_to_monotonic.tv_nsec are
+        * guaranteed to be between 0 and NSEC_PER_SEC.
+        */
+       if (ns >= NSEC_PER_SEC) {
+               ns -= NSEC_PER_SEC;
+               ++secs;
+       }
+       ts->tv_sec = secs;
+       ts->tv_nsec = ns;
+
        return 0;
 }
 
@@ -157,3 +160,32 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 }
 int gettimeofday(struct timeval *, struct timezone *)
        __attribute__((weak, alias("__vdso_gettimeofday")));
+
+/* This will break when the xtime seconds get inaccurate, but that is
+ * unlikely */
+
+static __always_inline long time_syscall(long *t)
+{
+       long secs;
+       asm volatile("syscall"
+                    : "=a" (secs)
+                    : "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory");
+       return secs;
+}
+
+notrace time_t __vdso_time(time_t *t)
+{
+       time_t result;
+
+       if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
+               return time_syscall(t);
+
+       /* This is atomic on x86_64 so we don't need any locks. */
+       result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);
+
+       if (t)
+               *t = result;
+       return result;
+}
+int time(time_t *t)
+       __attribute__((weak, alias("__vdso_time")));
index 4e5dd3b4de7f64428db97c5ef7ef9c29ceecfbf6..b96b2677cad82820207bb38a3e09bdd8c4a61eba 100644 (file)
@@ -23,15 +23,10 @@ VERSION {
                __vdso_gettimeofday;
                getcpu;
                __vdso_getcpu;
+               time;
+               __vdso_time;
        local: *;
        };
 }
 
 VDSO64_PRELINK = VDSO_PRELINK;
-
-/*
- * Define VDSO64_x for each VEXTERN(x), for use via VDSO64_SYMBOL.
- */
-#define VEXTERN(x)     VDSO64_ ## x = vdso_ ## x;
-#include "vextern.h"
-#undef VEXTERN
diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
deleted file mode 100644 (file)
index 1683ba2..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef VEXTERN
-#include <asm/vsyscall.h>
-#define VEXTERN(x) \
-       extern typeof(x) *vdso_ ## x __attribute__((visibility("hidden")));
-#endif
-
-#define VMAGIC 0xfeedbabeabcdefabUL
-
-/* Any kernel variables used in the vDSO must be exported in the main
-   kernel's vmlinux.lds.S/vsyscall.h/proper __section and
-   put into vextern.h and be referenced as a pointer with vdso prefix.
-   The main kernel later fills in the values.   */
-
-VEXTERN(jiffies)
-VEXTERN(vgetcpu_mode)
-VEXTERN(vsyscall_gtod_data)
index 9fbc6b20026b5ac4f23f9704e2cac9b1daaaf3a6..5463ad558573de5424d4d654e0cfdc20263930dd 100644 (file)
 #include <linux/time.h>
 #include <asm/vsyscall.h>
 #include <asm/vgtod.h>
-#include "vextern.h"
 
 notrace long
 __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
 {
        unsigned int p;
 
-       if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
+       if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) {
                /* Load per CPU data from RDTSCP */
                native_read_tscp(&p);
        } else {
index 4b5d26f108bbb21f9ff1ac494388b70fa610c247..7abd2be0f9b95ff847770472b3ee015e92c47197 100644 (file)
@@ -15,9 +15,6 @@
 #include <asm/proto.h>
 #include <asm/vdso.h>
 
-#include "vextern.h"           /* Just for VMAGIC.  */
-#undef VEXTERN
-
 unsigned int __read_mostly vdso_enabled = 1;
 
 extern char vdso_start[], vdso_end[];
@@ -26,20 +23,10 @@ extern unsigned short vdso_sync_cpuid;
 static struct page **vdso_pages;
 static unsigned vdso_size;
 
-static inline void *var_ref(void *p, char *name)
-{
-       if (*(void **)p != (void *)VMAGIC) {
-               printk("VDSO: variable %s broken\n", name);
-               vdso_enabled = 0;
-       }
-       return p;
-}
-
 static int __init init_vdso_vars(void)
 {
        int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
        int i;
-       char *vbase;
 
        vdso_size = npages << PAGE_SHIFT;
        vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL);
@@ -54,20 +41,6 @@ static int __init init_vdso_vars(void)
                copy_page(page_address(p), vdso_start + i*PAGE_SIZE);
        }
 
-       vbase = vmap(vdso_pages, npages, 0, PAGE_KERNEL);
-       if (!vbase)
-               goto oom;
-
-       if (memcmp(vbase, "\177ELF", 4)) {
-               printk("VDSO: I'm broken; not ELF\n");
-               vdso_enabled = 0;
-       }
-
-#define VEXTERN(x) \
-       *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
-#include "vextern.h"
-#undef VEXTERN
-       vunmap(vbase);
        return 0;
 
  oom:
diff --git a/arch/x86/vdso/vvar.c b/arch/x86/vdso/vvar.c
deleted file mode 100644 (file)
index 1b7e703..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Define pointer to external vDSO variables.
-   These are part of the vDSO. The kernel fills in the real addresses
-   at boot time. This is done because when the vdso is linked the
-   kernel isn't yet and we don't know the final addresses. */
-#include <linux/kernel.h>
-#include <linux/time.h>
-#include <asm/vsyscall.h>
-#include <asm/timex.h>
-#include <asm/vgtod.h>
-
-#define VEXTERN(x) typeof (__ ## x) *const vdso_ ## x = (void *)VMAGIC;
-#include "vextern.h"
index 02d7524603710bf1afed949598645f6ebab2413b..dc708dcc62f1e5106fefebd390bc24bb1cf97cf1 100644 (file)
 #include "mmu.h"
 #include "debugfs.h"
 
-#define MMU_UPDATE_HISTO       30
-
 /*
  * Protects atomic reservation decrease/increase against concurrent increases.
  * Also protects non-atomic updates of current_pages and balloon lists.
  */
 DEFINE_SPINLOCK(xen_reservation_lock);
 
-#ifdef CONFIG_XEN_DEBUG_FS
-
-static struct {
-       u32 pgd_update;
-       u32 pgd_update_pinned;
-       u32 pgd_update_batched;
-
-       u32 pud_update;
-       u32 pud_update_pinned;
-       u32 pud_update_batched;
-
-       u32 pmd_update;
-       u32 pmd_update_pinned;
-       u32 pmd_update_batched;
-
-       u32 pte_update;
-       u32 pte_update_pinned;
-       u32 pte_update_batched;
-
-       u32 mmu_update;
-       u32 mmu_update_extended;
-       u32 mmu_update_histo[MMU_UPDATE_HISTO];
-
-       u32 prot_commit;
-       u32 prot_commit_batched;
-
-       u32 set_pte_at;
-       u32 set_pte_at_batched;
-       u32 set_pte_at_pinned;
-       u32 set_pte_at_current;
-       u32 set_pte_at_kernel;
-} mmu_stats;
-
-static u8 zero_stats;
-
-static inline void check_zero(void)
-{
-       if (unlikely(zero_stats)) {
-               memset(&mmu_stats, 0, sizeof(mmu_stats));
-               zero_stats = 0;
-       }
-}
-
-#define ADD_STATS(elem, val)                   \
-       do { check_zero(); mmu_stats.elem += (val); } while(0)
-
-#else  /* !CONFIG_XEN_DEBUG_FS */
-
-#define ADD_STATS(elem, val)   do { (void)(val); } while(0)
-
-#endif /* CONFIG_XEN_DEBUG_FS */
-
-
 /*
  * Identity map, in addition to plain kernel map.  This needs to be
  * large enough to allocate page table pages to allocate the rest.
@@ -243,11 +188,6 @@ static bool xen_page_pinned(void *ptr)
        return PagePinned(page);
 }
 
-static bool xen_iomap_pte(pte_t pte)
-{
-       return pte_flags(pte) & _PAGE_IOMAP;
-}
-
 void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
 {
        struct multicall_space mcs;
@@ -257,7 +197,7 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
        u = mcs.args;
 
        /* ptep might be kmapped when using 32-bit HIGHPTE */
-       u->ptr = arbitrary_virt_to_machine(ptep).maddr;
+       u->ptr = virt_to_machine(ptep).maddr;
        u->val = pte_val_ma(pteval);
 
        MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);
@@ -266,11 +206,6 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
 }
 EXPORT_SYMBOL_GPL(xen_set_domain_pte);
 
-static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
-{
-       xen_set_domain_pte(ptep, pteval, DOMID_IO);
-}
-
 static void xen_extend_mmu_update(const struct mmu_update *update)
 {
        struct multicall_space mcs;
@@ -279,27 +214,17 @@ static void xen_extend_mmu_update(const struct mmu_update *update)
        mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
 
        if (mcs.mc != NULL) {
-               ADD_STATS(mmu_update_extended, 1);
-               ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1);
-
                mcs.mc->args[1]++;
-
-               if (mcs.mc->args[1] < MMU_UPDATE_HISTO)
-                       ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1);
-               else
-                       ADD_STATS(mmu_update_histo[0], 1);
        } else {
-               ADD_STATS(mmu_update, 1);
                mcs = __xen_mc_entry(sizeof(*u));
                MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
-               ADD_STATS(mmu_update_histo[1], 1);
        }
 
        u = mcs.args;
        *u = *update;
 }
 
-void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
+static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
 {
        struct mmu_update u;
 
@@ -312,17 +237,13 @@ void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
        u.val = pmd_val_ma(val);
        xen_extend_mmu_update(&u);
 
-       ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
-
        xen_mc_issue(PARAVIRT_LAZY_MMU);
 
        preempt_enable();
 }
 
-void xen_set_pmd(pmd_t *ptr, pmd_t val)
+static void xen_set_pmd(pmd_t *ptr, pmd_t val)
 {
-       ADD_STATS(pmd_update, 1);
-
        /* If page is not pinned, we can just update the entry
           directly */
        if (!xen_page_pinned(ptr)) {
@@ -330,8 +251,6 @@ void xen_set_pmd(pmd_t *ptr, pmd_t val)
                return;
        }
 
-       ADD_STATS(pmd_update_pinned, 1);
-
        xen_set_pmd_hyper(ptr, val);
 }
 
@@ -344,35 +263,34 @@ void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
        set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
 }
 
-void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
-                   pte_t *ptep, pte_t pteval)
+static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
 {
-       if (xen_iomap_pte(pteval)) {
-               xen_set_iomap_pte(ptep, pteval);
-               goto out;
-       }
+       struct mmu_update u;
 
-       ADD_STATS(set_pte_at, 1);
-//     ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
-       ADD_STATS(set_pte_at_current, mm == current->mm);
-       ADD_STATS(set_pte_at_kernel, mm == &init_mm);
+       if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
+               return false;
 
-       if (mm == current->mm || mm == &init_mm) {
-               if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
-                       struct multicall_space mcs;
-                       mcs = xen_mc_entry(0);
+       xen_mc_batch();
 
-                       MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
-                       ADD_STATS(set_pte_at_batched, 1);
-                       xen_mc_issue(PARAVIRT_LAZY_MMU);
-                       goto out;
-               } else
-                       if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
-                               goto out;
-       }
-       xen_set_pte(ptep, pteval);
+       u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
+       u.val = pte_val_ma(pteval);
+       xen_extend_mmu_update(&u);
+
+       xen_mc_issue(PARAVIRT_LAZY_MMU);
 
-out:   return;
+       return true;
+}
+
+static void xen_set_pte(pte_t *ptep, pte_t pteval)
+{
+       if (!xen_batched_set_pte(ptep, pteval))
+               native_set_pte(ptep, pteval);
+}
+
+static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
+                   pte_t *ptep, pte_t pteval)
+{
+       xen_set_pte(ptep, pteval);
 }
 
 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
@@ -389,13 +307,10 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
 
        xen_mc_batch();
 
-       u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
+       u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
        u.val = pte_val_ma(pte);
        xen_extend_mmu_update(&u);
 
-       ADD_STATS(prot_commit, 1);
-       ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
-
        xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
 
@@ -463,7 +378,7 @@ static pteval_t iomap_pte(pteval_t val)
        return val;
 }
 
-pteval_t xen_pte_val(pte_t pte)
+static pteval_t xen_pte_val(pte_t pte)
 {
        pteval_t pteval = pte.pte;
 
@@ -480,7 +395,7 @@ pteval_t xen_pte_val(pte_t pte)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
 
-pgdval_t xen_pgd_val(pgd_t pgd)
+static pgdval_t xen_pgd_val(pgd_t pgd)
 {
        return pte_mfn_to_pfn(pgd.pgd);
 }
@@ -511,7 +426,7 @@ void xen_set_pat(u64 pat)
        WARN_ON(pat != 0x0007010600070106ull);
 }
 
-pte_t xen_make_pte(pteval_t pte)
+static pte_t xen_make_pte(pteval_t pte)
 {
        phys_addr_t addr = (pte & PTE_PFN_MASK);
 
@@ -581,20 +496,20 @@ pte_t xen_make_pte_debug(pteval_t pte)
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug);
 #endif
 
-pgd_t xen_make_pgd(pgdval_t pgd)
+static pgd_t xen_make_pgd(pgdval_t pgd)
 {
        pgd = pte_pfn_to_mfn(pgd);
        return native_make_pgd(pgd);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
 
-pmdval_t xen_pmd_val(pmd_t pmd)
+static pmdval_t xen_pmd_val(pmd_t pmd)
 {
        return pte_mfn_to_pfn(pmd.pmd);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
 
-void xen_set_pud_hyper(pud_t *ptr, pud_t val)
+static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 {
        struct mmu_update u;
 
@@ -607,17 +522,13 @@ void xen_set_pud_hyper(pud_t *ptr, pud_t val)
        u.val = pud_val_ma(val);
        xen_extend_mmu_update(&u);
 
-       ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
-
        xen_mc_issue(PARAVIRT_LAZY_MMU);
 
        preempt_enable();
 }
 
-void xen_set_pud(pud_t *ptr, pud_t val)
+static void xen_set_pud(pud_t *ptr, pud_t val)
 {
-       ADD_STATS(pud_update, 1);
-
        /* If page is not pinned, we can just update the entry
           directly */
        if (!xen_page_pinned(ptr)) {
@@ -625,56 +536,28 @@ void xen_set_pud(pud_t *ptr, pud_t val)
                return;
        }
 
-       ADD_STATS(pud_update_pinned, 1);
-
        xen_set_pud_hyper(ptr, val);
 }
 
-void xen_set_pte(pte_t *ptep, pte_t pte)
-{
-       if (xen_iomap_pte(pte)) {
-               xen_set_iomap_pte(ptep, pte);
-               return;
-       }
-
-       ADD_STATS(pte_update, 1);
-//     ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
-       ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
-
 #ifdef CONFIG_X86_PAE
-       ptep->pte_high = pte.pte_high;
-       smp_wmb();
-       ptep->pte_low = pte.pte_low;
-#else
-       *ptep = pte;
-#endif
-}
-
-#ifdef CONFIG_X86_PAE
-void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
+static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
-       if (xen_iomap_pte(pte)) {
-               xen_set_iomap_pte(ptep, pte);
-               return;
-       }
-
        set_64bit((u64 *)ptep, native_pte_val(pte));
 }
 
-void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
-       ptep->pte_low = 0;
-       smp_wmb();              /* make sure low gets written first */
-       ptep->pte_high = 0;
+       if (!xen_batched_set_pte(ptep, native_make_pte(0)))
+               native_pte_clear(mm, addr, ptep);
 }
 
-void xen_pmd_clear(pmd_t *pmdp)
+static void xen_pmd_clear(pmd_t *pmdp)
 {
        set_pmd(pmdp, __pmd(0));
 }
 #endif /* CONFIG_X86_PAE */
 
-pmd_t xen_make_pmd(pmdval_t pmd)
+static pmd_t xen_make_pmd(pmdval_t pmd)
 {
        pmd = pte_pfn_to_mfn(pmd);
        return native_make_pmd(pmd);
@@ -682,13 +565,13 @@ pmd_t xen_make_pmd(pmdval_t pmd)
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
 
 #if PAGETABLE_LEVELS == 4
-pudval_t xen_pud_val(pud_t pud)
+static pudval_t xen_pud_val(pud_t pud)
 {
        return pte_mfn_to_pfn(pud.pud);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
 
-pud_t xen_make_pud(pudval_t pud)
+static pud_t xen_make_pud(pudval_t pud)
 {
        pud = pte_pfn_to_mfn(pud);
 
@@ -696,7 +579,7 @@ pud_t xen_make_pud(pudval_t pud)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
 
-pgd_t *xen_get_user_pgd(pgd_t *pgd)
+static pgd_t *xen_get_user_pgd(pgd_t *pgd)
 {
        pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
        unsigned offset = pgd - pgd_page;
@@ -728,7 +611,7 @@ static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
  *  2. It is always pinned
  *  3. It has no user pagetable attached to it
  */
-void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
+static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
 {
        preempt_disable();
 
@@ -741,12 +624,10 @@ void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
        preempt_enable();
 }
 
-void xen_set_pgd(pgd_t *ptr, pgd_t val)
+static void xen_set_pgd(pgd_t *ptr, pgd_t val)
 {
        pgd_t *user_ptr = xen_get_user_pgd(ptr);
 
-       ADD_STATS(pgd_update, 1);
-
        /* If page is not pinned, we can just update the entry
           directly */
        if (!xen_page_pinned(ptr)) {
@@ -758,9 +639,6 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val)
                return;
        }
 
-       ADD_STATS(pgd_update_pinned, 1);
-       ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
-
        /* If it's pinned, then we can at least batch the kernel and
           user updates together. */
        xen_mc_batch();
@@ -1162,14 +1040,14 @@ void xen_mm_unpin_all(void)
        spin_unlock(&pgd_lock);
 }
 
-void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
+static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
 {
        spin_lock(&next->page_table_lock);
        xen_pgd_pin(next);
        spin_unlock(&next->page_table_lock);
 }
 
-void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
+static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
        spin_lock(&mm->page_table_lock);
        xen_pgd_pin(mm);
@@ -1256,7 +1134,7 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
 * pagetable because of lazy tlb flushing.  This means we need to
  * switch all CPUs off this pagetable before we can unpin it.
  */
-void xen_exit_mmap(struct mm_struct *mm)
+static void xen_exit_mmap(struct mm_struct *mm)
 {
        get_cpu();              /* make sure we don't move around */
        xen_drop_mm_ref(mm);
@@ -2371,7 +2249,7 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
        struct remap_data *rmd = data;
        pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
 
-       rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
+       rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
        rmd->mmu_update->val = pte_val_ma(pte);
        rmd->mmu_update++;
 
@@ -2425,7 +2303,6 @@ out:
 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
 
 #ifdef CONFIG_XEN_DEBUG_FS
-
 static int p2m_dump_open(struct inode *inode, struct file *filp)
 {
        return single_open(filp, p2m_dump_show, NULL);
@@ -2437,65 +2314,4 @@ static const struct file_operations p2m_dump_fops = {
        .llseek         = seq_lseek,
        .release        = single_release,
 };
-
-static struct dentry *d_mmu_debug;
-
-static int __init xen_mmu_debugfs(void)
-{
-       struct dentry *d_xen = xen_init_debugfs();
-
-       if (d_xen == NULL)
-               return -ENOMEM;
-
-       d_mmu_debug = debugfs_create_dir("mmu", d_xen);
-
-       debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats);
-
-       debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update);
-       debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug,
-                          &mmu_stats.pgd_update_pinned);
-       debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug,
-                          &mmu_stats.pgd_update_pinned);
-
-       debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update);
-       debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug,
-                          &mmu_stats.pud_update_pinned);
-       debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug,
-                          &mmu_stats.pud_update_pinned);
-
-       debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update);
-       debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug,
-                          &mmu_stats.pmd_update_pinned);
-       debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug,
-                          &mmu_stats.pmd_update_pinned);
-
-       debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update);
-//     debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug,
-//                        &mmu_stats.pte_update_pinned);
-       debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug,
-                          &mmu_stats.pte_update_pinned);
-
-       debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update);
-       debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug,
-                          &mmu_stats.mmu_update_extended);
-       xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug,
-                                    mmu_stats.mmu_update_histo, 20);
-
-       debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at);
-       debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug,
-                          &mmu_stats.set_pte_at_batched);
-       debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug,
-                          &mmu_stats.set_pte_at_current);
-       debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug,
-                          &mmu_stats.set_pte_at_kernel);
-
-       debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit);
-       debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug,
-                          &mmu_stats.prot_commit_batched);
-
-       debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
-       return 0;
-}
-fs_initcall(xen_mmu_debugfs);
-
-#endif /* CONFIG_XEN_DEBUG_FS */
+#endif /* CONFIG_XEN_DEBUG_FS */
index 537bb9aab777ab00b446d1cec9daded81a3ed71f..73809bb951b40a7b8baf73773260276ed575c623 100644 (file)
@@ -15,43 +15,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 
 void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
 
-
-void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next);
-void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);
-void xen_exit_mmap(struct mm_struct *mm);
-
-pteval_t xen_pte_val(pte_t);
-pmdval_t xen_pmd_val(pmd_t);
-pgdval_t xen_pgd_val(pgd_t);
-
-pte_t xen_make_pte(pteval_t);
-pmd_t xen_make_pmd(pmdval_t);
-pgd_t xen_make_pgd(pgdval_t);
-
-void xen_set_pte(pte_t *ptep, pte_t pteval);
-void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
-                   pte_t *ptep, pte_t pteval);
-
-#ifdef CONFIG_X86_PAE
-void xen_set_pte_atomic(pte_t *ptep, pte_t pte);
-void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-void xen_pmd_clear(pmd_t *pmdp);
-#endif /* CONFIG_X86_PAE */
-
-void xen_set_pmd(pmd_t *pmdp, pmd_t pmdval);
-void xen_set_pud(pud_t *ptr, pud_t val);
-void xen_set_pmd_hyper(pmd_t *pmdp, pmd_t pmdval);
-void xen_set_pud_hyper(pud_t *ptr, pud_t val);
-
-#if PAGETABLE_LEVELS == 4
-pudval_t xen_pud_val(pud_t pud);
-pud_t xen_make_pud(pudval_t pudval);
-void xen_set_pgd(pgd_t *pgdp, pgd_t pgd);
-void xen_set_pgd_hyper(pgd_t *pgdp, pgd_t pgd);
-#endif
-
-pgd_t *xen_get_user_pgd(pgd_t *pgd);
-
 pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 void  xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                  pte_t *ptep, pte_t pte);
index 7c275f5d0df00603cdc4c468bb89a8470f850515..5d43c1f8ada8b1406e5b7784c063c157274f1155 100644 (file)
@@ -20,12 +20,6 @@ config XTENSA
 config RWSEM_XCHGADD_ALGORITHM
        def_bool y
 
-config GENERIC_FIND_NEXT_BIT
-       def_bool y
-
-config GENERIC_FIND_BIT_LE
-       def_bool y
-
 config GENERIC_HWEIGHT
        def_bool y
 
index 07371cfdfae607c97e7600511fe1c9e7fa13a3d1..bcaf16ee6ad1ba321836413ee8841ab0bbde8f5e 100644 (file)
@@ -30,10 +30,8 @@ EXPORT_SYMBOL_GPL(blkio_root_cgroup);
 
 static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
                                                  struct cgroup *);
-static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
-                             struct task_struct *, bool);
-static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
-                          struct cgroup *, struct task_struct *, bool);
+static int blkiocg_can_attach_task(struct cgroup *, struct task_struct *);
+static void blkiocg_attach_task(struct cgroup *, struct task_struct *);
 static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
 static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
 
@@ -46,8 +44,8 @@ static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
 struct cgroup_subsys blkio_subsys = {
        .name = "blkio",
        .create = blkiocg_create,
-       .can_attach = blkiocg_can_attach,
-       .attach = blkiocg_attach,
+       .can_attach_task = blkiocg_can_attach_task,
+       .attach_task = blkiocg_attach_task,
        .destroy = blkiocg_destroy,
        .populate = blkiocg_populate,
 #ifdef CONFIG_BLK_CGROUP
@@ -1616,9 +1614,7 @@ done:
  * of the main cic data structures.  For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
  */
-static int blkiocg_can_attach(struct cgroup_subsys *subsys,
-                               struct cgroup *cgroup, struct task_struct *tsk,
-                               bool threadgroup)
+static int blkiocg_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
        struct io_context *ioc;
        int ret = 0;
@@ -1633,9 +1629,7 @@ static int blkiocg_can_attach(struct cgroup_subsys *subsys,
        return ret;
 }
 
-static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
-                               struct cgroup *prev, struct task_struct *tsk,
-                               bool threadgroup)
+static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
        struct io_context *ioc;
 
index c8303e9d919da239ec5f04a43e11dbe1ceeef111..d2f8f4049abddbf76d3414cd85ef36d2d4027d07 100644 (file)
@@ -345,6 +345,7 @@ void blk_put_queue(struct request_queue *q)
 {
        kobject_put(&q->kobj);
 }
+EXPORT_SYMBOL(blk_put_queue);
 
 /*
  * Note: If a driver supplied the queue lock, it should not zap that lock
@@ -566,6 +567,7 @@ int blk_get_queue(struct request_queue *q)
 
        return 1;
 }
+EXPORT_SYMBOL(blk_get_queue);
 
 static inline void blk_free_request(struct request_queue *q, struct request *rq)
 {
@@ -1130,7 +1132,6 @@ static bool bio_attempt_front_merge(struct request_queue *q,
                                    struct request *req, struct bio *bio)
 {
        const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
-       sector_t sector;
 
        if (!ll_front_merge_fn(q, req, bio))
                return false;
@@ -1140,8 +1141,6 @@ static bool bio_attempt_front_merge(struct request_queue *q,
        if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
                blk_rq_set_mixed_merge(req);
 
-       sector = bio->bi_sector;
-
        bio->bi_next = req->bio;
        req->bio = bio;
 
index 2dd988723d73e98880d271b84dc77ef36b49a0a9..95822ae25cfe3213863d4a1e4d8718eea290afe8 100644 (file)
@@ -1728,7 +1728,7 @@ static void disk_add_events(struct gendisk *disk)
 {
        struct disk_events *ev;
 
-       if (!disk->fops->check_events || !(disk->events | disk->async_events))
+       if (!disk->fops->check_events)
                return;
 
        ev = kzalloc(sizeof(*ev), GFP_KERNEL);
index ffd8797faf4f0d3a10bcd49dbef7264e399347dd..471a04013fe01771bc4b4bf41396af350c50e456 100644 (file)
@@ -6,6 +6,7 @@
  */
 
 #include "bcma_private.h"
+#include <linux/slab.h>
 #include <linux/bcma/bcma.h>
 #include <linux/pci.h>
 
index b7f51e4594f8660f0a54472d264d957dbe533312..dba1c32e1ddfc48229d93d014fedaa9c370e92e3 100644 (file)
  */
 struct brd_device {
        int             brd_number;
-       int             brd_refcnt;
-       loff_t          brd_offset;
-       loff_t          brd_sizelimit;
-       unsigned        brd_blocksize;
 
        struct request_queue    *brd_queue;
        struct gendisk          *brd_disk;
@@ -440,11 +436,11 @@ static int rd_nr;
 int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
 static int max_part;
 static int part_shift;
-module_param(rd_nr, int, 0);
+module_param(rd_nr, int, S_IRUGO);
 MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
-module_param(rd_size, int, 0);
+module_param(rd_size, int, S_IRUGO);
 MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
-module_param(max_part, int, 0);
+module_param(max_part, int, S_IRUGO);
 MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
@@ -552,7 +548,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data)
        struct kobject *kobj;
 
        mutex_lock(&brd_devices_mutex);
-       brd = brd_init_one(dev & MINORMASK);
+       brd = brd_init_one(MINOR(dev) >> part_shift);
        kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
        mutex_unlock(&brd_devices_mutex);
 
@@ -575,25 +571,39 @@ static int __init brd_init(void)
         *
         * (1) if rd_nr is specified, create that many upfront, and this
         *     also becomes a hard limit.
-        * (2) if rd_nr is not specified, create 1 rd device on module
-        *     load, user can further extend brd device by create dev node
-        *     themselves and have kernel automatically instantiate actual
-        *     device on-demand.
+        * (2) if rd_nr is not specified, create CONFIG_BLK_DEV_RAM_COUNT
+        *     (default 16) rd devices on module load; the user can further
+        *     extend brd devices by creating dev nodes themselves and have
+        *     the kernel automatically instantiate the actual device on-demand.
         */
 
        part_shift = 0;
-       if (max_part > 0)
+       if (max_part > 0) {
                part_shift = fls(max_part);
 
+               /*
+                * Adjust max_part according to part_shift as it is exported
+                * to user space so that the user can decide the correct minor
+                * number if [s]he wants to create more devices.
+                *
+                * Note that -1 is required because partition 0 is reserved
+                * for the whole disk.
+                */
+               max_part = (1UL << part_shift) - 1;
+       }
+
+       if ((1UL << part_shift) > DISK_MAX_PARTS)
+               return -EINVAL;
+
        if (rd_nr > 1UL << (MINORBITS - part_shift))
                return -EINVAL;
 
        if (rd_nr) {
                nr = rd_nr;
-               range = rd_nr;
+               range = rd_nr << part_shift;
        } else {
                nr = CONFIG_BLK_DEV_RAM_COUNT;
-               range = 1UL << (MINORBITS - part_shift);
+               range = 1UL << MINORBITS;
        }
 
        if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
@@ -632,7 +642,7 @@ static void __exit brd_exit(void)
        unsigned long range;
        struct brd_device *brd, *next;
 
-       range = rd_nr ? rd_nr :  1UL << (MINORBITS - part_shift);
+       range = rd_nr ? rd_nr << part_shift : 1UL << MINORBITS;
 
        list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
                brd_del_one(brd);
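
The part_shift arithmetic introduced above (and repeated for the loop driver further down) packs each brd device into a power-of-two block of minor numbers, reserving partition 0 for the whole disk. Below is a standalone sketch of that arithmetic, not kernel code: fls() is reimplemented as a stand-in for the kernel helper, and the max_part value is a hypothetical module-parameter setting.

#include <stdio.h>

/* stand-in for the kernel's fls(): index of the highest set bit, 1-based */
static int fls(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int max_part = 10;	/* hypothetical "max_part=10" parameter */
	unsigned int index;
	int part_shift = 0;

	if (max_part > 0) {
		part_shift = fls(max_part);
		/* round up to a power of two, minus partition 0 (the whole disk) */
		max_part = (1UL << part_shift) - 1;
	}

	/*
	 * Each device owns 1 << part_shift consecutive minors; brd_probe()
	 * above recovers the device index as MINOR(dev) >> part_shift.
	 */
	for (index = 0; index < 3; index++)
		printf("device %u: first minor %u, partitions 1..%u\n",
		       index, index << part_shift, max_part);

	return 0;
}
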
index c59a672a3de0d6571fbe173a8328d12a1e03c86b..76c8da78212bffec71f5dc7af025a84e4028d456 100644 (file)
@@ -1540,9 +1540,9 @@ static const struct block_device_operations lo_fops = {
  * And now the modules code and kernel interface.
  */
 static int max_loop;
-module_param(max_loop, int, 0);
+module_param(max_loop, int, S_IRUGO);
 MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
-module_param(max_part, int, 0);
+module_param(max_part, int, S_IRUGO);
 MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
@@ -1688,9 +1688,20 @@ static int __init loop_init(void)
         */
 
        part_shift = 0;
-       if (max_part > 0)
+       if (max_part > 0) {
                part_shift = fls(max_part);
 
+               /*
+                * Adjust max_part according to part_shift as it is exported
+                * to user space so that the user can decide the correct minor
+                * number if [s]he wants to create more devices.
+                *
+                * Note that -1 is required because partition 0 is reserved
+                * for the whole disk.
+                */
+               max_part = (1UL << part_shift) - 1;
+       }
+
        if ((1UL << part_shift) > DISK_MAX_PARTS)
                return -EINVAL;
 
index 38223e93aa988f020e98a2b0d42a1a50720f6aea..58c0e6387cf73ddfd016baeb38384e08f5a6f794 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/system.h>
 #include <linux/poll.h>
 #include <linux/sched.h>
+#include <linux/seq_file.h>
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
@@ -1896,102 +1897,128 @@ int ipmi_request_supply_msgs(ipmi_user_t          user,
 EXPORT_SYMBOL(ipmi_request_supply_msgs);
 
 #ifdef CONFIG_PROC_FS
-static int ipmb_file_read_proc(char *page, char **start, off_t off,
-                              int count, int *eof, void *data)
+static int smi_ipmb_proc_show(struct seq_file *m, void *v)
 {
-       char       *out = (char *) page;
-       ipmi_smi_t intf = data;
+       ipmi_smi_t intf = m->private;
        int        i;
-       int        rv = 0;
 
-       for (i = 0; i < IPMI_MAX_CHANNELS; i++)
-               rv += sprintf(out+rv, "%x ", intf->channels[i].address);
-       out[rv-1] = '\n'; /* Replace the final space with a newline */
-       out[rv] = '\0';
-       rv++;
-       return rv;
+       seq_printf(m, "%x", intf->channels[0].address);
+       for (i = 1; i < IPMI_MAX_CHANNELS; i++)
+               seq_printf(m, " %x", intf->channels[i].address);
+       return seq_putc(m, '\n');
 }
 
-static int version_file_read_proc(char *page, char **start, off_t off,
-                                 int count, int *eof, void *data)
+static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
 {
-       char       *out = (char *) page;
-       ipmi_smi_t intf = data;
+       return single_open(file, smi_ipmb_proc_show, PDE(inode)->data);
+}
 
-       return sprintf(out, "%u.%u\n",
+static const struct file_operations smi_ipmb_proc_ops = {
+       .open           = smi_ipmb_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int smi_version_proc_show(struct seq_file *m, void *v)
+{
+       ipmi_smi_t intf = m->private;
+
+       return seq_printf(m, "%u.%u\n",
                       ipmi_version_major(&intf->bmc->id),
                       ipmi_version_minor(&intf->bmc->id));
 }
 
-static int stat_file_read_proc(char *page, char **start, off_t off,
-                              int count, int *eof, void *data)
+static int smi_version_proc_open(struct inode *inode, struct file *file)
 {
-       char       *out = (char *) page;
-       ipmi_smi_t intf = data;
+       return single_open(file, smi_version_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations smi_version_proc_ops = {
+       .open           = smi_version_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
 
-       out += sprintf(out, "sent_invalid_commands:       %u\n",
+static int smi_stats_proc_show(struct seq_file *m, void *v)
+{
+       ipmi_smi_t intf = m->private;
+
+       seq_printf(m, "sent_invalid_commands:       %u\n",
                       ipmi_get_stat(intf, sent_invalid_commands));
-       out += sprintf(out, "sent_local_commands:         %u\n",
+       seq_printf(m, "sent_local_commands:         %u\n",
                       ipmi_get_stat(intf, sent_local_commands));
-       out += sprintf(out, "handled_local_responses:     %u\n",
+       seq_printf(m, "handled_local_responses:     %u\n",
                       ipmi_get_stat(intf, handled_local_responses));
-       out += sprintf(out, "unhandled_local_responses:   %u\n",
+       seq_printf(m, "unhandled_local_responses:   %u\n",
                       ipmi_get_stat(intf, unhandled_local_responses));
-       out += sprintf(out, "sent_ipmb_commands:          %u\n",
+       seq_printf(m, "sent_ipmb_commands:          %u\n",
                       ipmi_get_stat(intf, sent_ipmb_commands));
-       out += sprintf(out, "sent_ipmb_command_errs:      %u\n",
+       seq_printf(m, "sent_ipmb_command_errs:      %u\n",
                       ipmi_get_stat(intf, sent_ipmb_command_errs));
-       out += sprintf(out, "retransmitted_ipmb_commands: %u\n",
+       seq_printf(m, "retransmitted_ipmb_commands: %u\n",
                       ipmi_get_stat(intf, retransmitted_ipmb_commands));
-       out += sprintf(out, "timed_out_ipmb_commands:     %u\n",
+       seq_printf(m, "timed_out_ipmb_commands:     %u\n",
                       ipmi_get_stat(intf, timed_out_ipmb_commands));
-       out += sprintf(out, "timed_out_ipmb_broadcasts:   %u\n",
+       seq_printf(m, "timed_out_ipmb_broadcasts:   %u\n",
                       ipmi_get_stat(intf, timed_out_ipmb_broadcasts));
-       out += sprintf(out, "sent_ipmb_responses:         %u\n",
+       seq_printf(m, "sent_ipmb_responses:         %u\n",
                       ipmi_get_stat(intf, sent_ipmb_responses));
-       out += sprintf(out, "handled_ipmb_responses:      %u\n",
+       seq_printf(m, "handled_ipmb_responses:      %u\n",
                       ipmi_get_stat(intf, handled_ipmb_responses));
-       out += sprintf(out, "invalid_ipmb_responses:      %u\n",
+       seq_printf(m, "invalid_ipmb_responses:      %u\n",
                       ipmi_get_stat(intf, invalid_ipmb_responses));
-       out += sprintf(out, "unhandled_ipmb_responses:    %u\n",
+       seq_printf(m, "unhandled_ipmb_responses:    %u\n",
                       ipmi_get_stat(intf, unhandled_ipmb_responses));
-       out += sprintf(out, "sent_lan_commands:           %u\n",
+       seq_printf(m, "sent_lan_commands:           %u\n",
                       ipmi_get_stat(intf, sent_lan_commands));
-       out += sprintf(out, "sent_lan_command_errs:       %u\n",
+       seq_printf(m, "sent_lan_command_errs:       %u\n",
                       ipmi_get_stat(intf, sent_lan_command_errs));
-       out += sprintf(out, "retransmitted_lan_commands:  %u\n",
+       seq_printf(m, "retransmitted_lan_commands:  %u\n",
                       ipmi_get_stat(intf, retransmitted_lan_commands));
-       out += sprintf(out, "timed_out_lan_commands:      %u\n",
+       seq_printf(m, "timed_out_lan_commands:      %u\n",
                       ipmi_get_stat(intf, timed_out_lan_commands));
-       out += sprintf(out, "sent_lan_responses:          %u\n",
+       seq_printf(m, "sent_lan_responses:          %u\n",
                       ipmi_get_stat(intf, sent_lan_responses));
-       out += sprintf(out, "handled_lan_responses:       %u\n",
+       seq_printf(m, "handled_lan_responses:       %u\n",
                       ipmi_get_stat(intf, handled_lan_responses));
-       out += sprintf(out, "invalid_lan_responses:       %u\n",
+       seq_printf(m, "invalid_lan_responses:       %u\n",
                       ipmi_get_stat(intf, invalid_lan_responses));
-       out += sprintf(out, "unhandled_lan_responses:     %u\n",
+       seq_printf(m, "unhandled_lan_responses:     %u\n",
                       ipmi_get_stat(intf, unhandled_lan_responses));
-       out += sprintf(out, "handled_commands:            %u\n",
+       seq_printf(m, "handled_commands:            %u\n",
                       ipmi_get_stat(intf, handled_commands));
-       out += sprintf(out, "invalid_commands:            %u\n",
+       seq_printf(m, "invalid_commands:            %u\n",
                       ipmi_get_stat(intf, invalid_commands));
-       out += sprintf(out, "unhandled_commands:          %u\n",
+       seq_printf(m, "unhandled_commands:          %u\n",
                       ipmi_get_stat(intf, unhandled_commands));
-       out += sprintf(out, "invalid_events:              %u\n",
+       seq_printf(m, "invalid_events:              %u\n",
                       ipmi_get_stat(intf, invalid_events));
-       out += sprintf(out, "events:                      %u\n",
+       seq_printf(m, "events:                      %u\n",
                       ipmi_get_stat(intf, events));
-       out += sprintf(out, "failed rexmit LAN msgs:      %u\n",
+       seq_printf(m, "failed rexmit LAN msgs:      %u\n",
                       ipmi_get_stat(intf, dropped_rexmit_lan_commands));
-       out += sprintf(out, "failed rexmit IPMB msgs:     %u\n",
+       seq_printf(m, "failed rexmit IPMB msgs:     %u\n",
                       ipmi_get_stat(intf, dropped_rexmit_ipmb_commands));
+       return 0;
+}
 
-       return (out - ((char *) page));
+static int smi_stats_proc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, smi_stats_proc_show, PDE(inode)->data);
 }
+
+static const struct file_operations smi_stats_proc_ops = {
+       .open           = smi_stats_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
 #endif /* CONFIG_PROC_FS */
 
 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
-                           read_proc_t *read_proc,
+                           const struct file_operations *proc_ops,
                            void *data)
 {
        int                    rv = 0;
@@ -2010,15 +2037,12 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
        }
        strcpy(entry->name, name);
 
-       file = create_proc_entry(name, 0, smi->proc_dir);
+       file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data);
        if (!file) {
                kfree(entry->name);
                kfree(entry);
                rv = -ENOMEM;
        } else {
-               file->data = data;
-               file->read_proc = read_proc;
-
                mutex_lock(&smi->proc_entry_lock);
                /* Stick it on the list. */
                entry->next = smi->proc_entries;
@@ -2043,17 +2067,17 @@ static int add_proc_entries(ipmi_smi_t smi, int num)
 
        if (rv == 0)
                rv = ipmi_smi_add_proc_entry(smi, "stats",
-                                            stat_file_read_proc,
+                                            &smi_stats_proc_ops,
                                             smi);
 
        if (rv == 0)
                rv = ipmi_smi_add_proc_entry(smi, "ipmb",
-                                            ipmb_file_read_proc,
+                                            &smi_ipmb_proc_ops,
                                             smi);
 
        if (rv == 0)
                rv = ipmi_smi_add_proc_entry(smi, "version",
-                                            version_file_read_proc,
+                                            &smi_version_proc_ops,
                                             smi);
 #endif /* CONFIG_PROC_FS */
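
The IPMI hunks above all follow the same read_proc-to-seq_file conversion. A minimal sketch of that pattern for a hypothetical driver is shown below; every foo_* name is illustrative, while the proc_create_data()/single_open()/PDE() calls match the interfaces used in the hunks.

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct foo_stats {
	unsigned int events;
};

static struct foo_stats foo_stats;

static int foo_stats_show(struct seq_file *m, void *v)
{
	struct foo_stats *st = m->private;	/* third argument of single_open() */

	seq_printf(m, "events: %u\n", st->events);
	return 0;
}

static int foo_stats_open(struct inode *inode, struct file *file)
{
	/* PDE(inode)->data is the pointer passed to proc_create_data() */
	return single_open(file, foo_stats_show, PDE(inode)->data);
}

static const struct file_operations foo_stats_ops = {
	.owner		= THIS_MODULE,
	.open		= foo_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init foo_proc_init(void)
{
	if (!proc_create_data("foo_stats", 0444, NULL, &foo_stats_ops,
			      &foo_stats))
		return -ENOMEM;
	return 0;
}

static void __exit foo_proc_exit(void)
{
	remove_proc_entry("foo_stats", NULL);
}

module_init(foo_proc_init);
module_exit(foo_proc_exit);
MODULE_LICENSE("GPL");
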
 
index 64c6b85306150723ad8206f11fab7f54d8dd597b..9397ab49b72e8b20f3984a55e31819fea26e20d7 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/moduleparam.h>
 #include <asm/system.h>
 #include <linux/sched.h>
+#include <linux/seq_file.h>
 #include <linux/timer.h>
 #include <linux/errno.h>
 #include <linux/spinlock.h>
@@ -2805,54 +2806,73 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
        return rv;
 }
 
-static int type_file_read_proc(char *page, char **start, off_t off,
-                              int count, int *eof, void *data)
+static int smi_type_proc_show(struct seq_file *m, void *v)
 {
-       struct smi_info *smi = data;
+       struct smi_info *smi = m->private;
 
-       return sprintf(page, "%s\n", si_to_str[smi->si_type]);
+       return seq_printf(m, "%s\n", si_to_str[smi->si_type]);
 }
 
-static int stat_file_read_proc(char *page, char **start, off_t off,
-                              int count, int *eof, void *data)
+static int smi_type_proc_open(struct inode *inode, struct file *file)
 {
-       char            *out = (char *) page;
-       struct smi_info *smi = data;
+       return single_open(file, smi_type_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations smi_type_proc_ops = {
+       .open           = smi_type_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int smi_si_stats_proc_show(struct seq_file *m, void *v)
+{
+       struct smi_info *smi = m->private;
 
-       out += sprintf(out, "interrupts_enabled:    %d\n",
+       seq_printf(m, "interrupts_enabled:    %d\n",
                       smi->irq && !smi->interrupt_disabled);
-       out += sprintf(out, "short_timeouts:        %u\n",
+       seq_printf(m, "short_timeouts:        %u\n",
                       smi_get_stat(smi, short_timeouts));
-       out += sprintf(out, "long_timeouts:         %u\n",
+       seq_printf(m, "long_timeouts:         %u\n",
                       smi_get_stat(smi, long_timeouts));
-       out += sprintf(out, "idles:                 %u\n",
+       seq_printf(m, "idles:                 %u\n",
                       smi_get_stat(smi, idles));
-       out += sprintf(out, "interrupts:            %u\n",
+       seq_printf(m, "interrupts:            %u\n",
                       smi_get_stat(smi, interrupts));
-       out += sprintf(out, "attentions:            %u\n",
+       seq_printf(m, "attentions:            %u\n",
                       smi_get_stat(smi, attentions));
-       out += sprintf(out, "flag_fetches:          %u\n",
+       seq_printf(m, "flag_fetches:          %u\n",
                       smi_get_stat(smi, flag_fetches));
-       out += sprintf(out, "hosed_count:           %u\n",
+       seq_printf(m, "hosed_count:           %u\n",
                       smi_get_stat(smi, hosed_count));
-       out += sprintf(out, "complete_transactions: %u\n",
+       seq_printf(m, "complete_transactions: %u\n",
                       smi_get_stat(smi, complete_transactions));
-       out += sprintf(out, "events:                %u\n",
+       seq_printf(m, "events:                %u\n",
                       smi_get_stat(smi, events));
-       out += sprintf(out, "watchdog_pretimeouts:  %u\n",
+       seq_printf(m, "watchdog_pretimeouts:  %u\n",
                       smi_get_stat(smi, watchdog_pretimeouts));
-       out += sprintf(out, "incoming_messages:     %u\n",
+       seq_printf(m, "incoming_messages:     %u\n",
                       smi_get_stat(smi, incoming_messages));
+       return 0;
+}
 
-       return out - page;
+static int smi_si_stats_proc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, smi_si_stats_proc_show, PDE(inode)->data);
 }
 
-static int param_read_proc(char *page, char **start, off_t off,
-                          int count, int *eof, void *data)
+static const struct file_operations smi_si_stats_proc_ops = {
+       .open           = smi_si_stats_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int smi_params_proc_show(struct seq_file *m, void *v)
 {
-       struct smi_info *smi = data;
+       struct smi_info *smi = m->private;
 
-       return sprintf(page,
+       return seq_printf(m,
                       "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
                       si_to_str[smi->si_type],
                       addr_space_to_str[smi->io.addr_type],
@@ -2864,6 +2884,18 @@ static int param_read_proc(char *page, char **start, off_t off,
                       smi->slave_addr);
 }
 
+static int smi_params_proc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, smi_params_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations smi_params_proc_ops = {
+       .open           = smi_params_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 /*
  * oem_data_avail_to_receive_msg_avail
  * @info - smi_info structure with msg_flags set
@@ -3257,7 +3289,7 @@ static int try_smi_init(struct smi_info *new_smi)
        }
 
        rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
-                                    type_file_read_proc,
+                                    &smi_type_proc_ops,
                                     new_smi);
        if (rv) {
                dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
@@ -3265,7 +3297,7 @@ static int try_smi_init(struct smi_info *new_smi)
        }
 
        rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
-                                    stat_file_read_proc,
+                                    &smi_si_stats_proc_ops,
                                     new_smi);
        if (rv) {
                dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
@@ -3273,7 +3305,7 @@ static int try_smi_init(struct smi_info *new_smi)
        }
 
        rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
-                                    param_read_proc,
+                                    &smi_params_proc_ops,
                                     new_smi);
        if (rv) {
                dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
index 966a95bc974b2c0489c7c4f33b52d1b3872238a0..25d139c9dbeddd86440847f3ced302950883ef98 100644 (file)
@@ -271,14 +271,13 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
        pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
        if (vdata_size <= PAGE_SIZE)
-               vdata = kmalloc(vdata_size, GFP_KERNEL);
+               vdata = kzalloc(vdata_size, GFP_KERNEL);
        else {
-               vdata = vmalloc(vdata_size);
+               vdata = vzalloc(vdata_size);
                flags = VMD_VMALLOCED;
        }
        if (!vdata)
                return -ENOMEM;
-       memset(vdata, 0, vdata_size);
 
        vdata->vm_start = vma->vm_start;
        vdata->vm_end = vma->vm_end;
index f176dbaeb15adfaaceda46ad6bd97e030065ccd8..3fcf80ff12f2226fce01f5749e96972d3857072b 100644 (file)
@@ -457,6 +457,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        return -ENODEV;
 
                modes = port->modes;
+               parport_put_port(port);
                if (copy_to_user (argp, &modes, sizeof (modes))) {
                        return -EFAULT;
                }
index d2c75feff7df32925bc668d01a41cc724a44c9d2..f69f90a61873819c7ff743f9afb93d80ed8c3116 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
-#include <linux/mfd/core.h>
 #include <linux/slab.h>
 
 #include <linux/timb_dma.h>
@@ -685,7 +684,7 @@ static irqreturn_t td_irq(int irq, void *devid)
 
 static int __devinit td_probe(struct platform_device *pdev)
 {
-       struct timb_dma_platform_data *pdata = mfd_get_data(pdev);
+       struct timb_dma_platform_data *pdata = pdev->dev.platform_data;
        struct timb_dma *td;
        struct resource *iomem;
        int irq;
index cace0a7b707af37b394d0e34b00afb2d9968b251..e47e73bbbcc5585ca214b3486cc57ebaaff662c8 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/edac.h>
 #include "edac_core.h"
 
-#define AMD76X_REVISION        " Ver: 2.0.2 "  __DATE__
+#define AMD76X_REVISION        " Ver: 2.0.2"
 #define EDAC_MOD_STR   "amd76x_edac"
 
 #define amd76x_printk(level, fmt, arg...) \
index 35b78d04bbfa589682efcdc744a7e4db2300ccaa..ddd890052ce2c344673d09d31e24d8436cb47e92 100644 (file)
@@ -33,7 +33,7 @@
 #include "edac_module.h"
 #include "amd8111_edac.h"
 
-#define AMD8111_EDAC_REVISION  " Ver: 1.0.0 " __DATE__
+#define AMD8111_EDAC_REVISION  " Ver: 1.0.0"
 #define AMD8111_EDAC_MOD_STR   "amd8111_edac"
 
 #define PCI_DEVICE_ID_AMD_8111_PCI     0x7460
index b432d60c622a5bc52f951bde2983d0e325bea2b4..a5c680561c73f6ec33bbe73b0059d055d4f41574 100644 (file)
@@ -33,7 +33,7 @@
 #include "edac_module.h"
 #include "amd8131_edac.h"
 
-#define AMD8131_EDAC_REVISION  " Ver: 1.0.0 " __DATE__
+#define AMD8131_EDAC_REVISION  " Ver: 1.0.0"
 #define AMD8131_EDAC_MOD_STR   "amd8131_edac"
 
 /* Wrapper functions for accessing PCI configuration space */
index 837ad8f85b48e1197dfd4e3f72a7a4340dd432bb..a687a0d169624f9f22c82fe3047c83e143ce953d 100644 (file)
@@ -30,7 +30,7 @@
 #include "edac_core.h"
 #include "edac_module.h"
 
-#define CPC925_EDAC_REVISION   " Ver: 1.0.0 " __DATE__
+#define CPC925_EDAC_REVISION   " Ver: 1.0.0"
 #define CPC925_EDAC_MOD_STR    "cpc925_edac"
 
 #define cpc925_printk(level, fmt, arg...) \
index ec302d42658919ff0afcd782fed08c6dfba0bcce..1af531a11d21f9d0c638e13d6332f9c9092085e7 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/edac.h>
 #include "edac_core.h"
 
-#define E752X_REVISION " Ver: 2.0.2 " __DATE__
+#define E752X_REVISION " Ver: 2.0.2"
 #define EDAC_MOD_STR   "e752x_edac"
 
 static int report_non_memory_errors;
index 1731d7245816fdedf681d14a5b9290acdeb11a1f..6ffb6d23281f62f4046110b6109c68ff7dfdf630 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/edac.h>
 #include "edac_core.h"
 
-#define        E7XXX_REVISION " Ver: 2.0.2 " __DATE__
+#define        E7XXX_REVISION " Ver: 2.0.2"
 #define        EDAC_MOD_STR    "e7xxx_edac"
 
 #define e7xxx_printk(level, fmt, arg...) \
index eefa3501916b60c0da2a6df740b8413da3df136a..55b8278bb172f6feda52e225581b258b3404402a 100644 (file)
@@ -421,10 +421,6 @@ struct mem_ctl_info {
        u32 ce_count;           /* Total Correctable Errors for this MC */
        unsigned long start_time;       /* mci load start time (in jiffies) */
 
-       /* this stuff is for safe removal of mc devices from global list while
-        * NMI handlers may be traversing list
-        */
-       struct rcu_head rcu;
        struct completion complete;
 
        /* edac sysfs device control */
@@ -620,10 +616,6 @@ struct edac_device_ctl_info {
 
        unsigned long start_time;       /* edac_device load start time (jiffies) */
 
-       /* these are for safe removal of mc devices from global list while
-        * NMI handlers may be traversing list
-        */
-       struct rcu_head rcu;
        struct completion removal_complete;
 
        /* sysfs top name under 'edac' directory
@@ -722,10 +714,6 @@ struct edac_pci_ctl_info {
 
        unsigned long start_time;       /* edac_pci load start time (jiffies) */
 
-       /* these are for safe removal of devices from global list while
-        * NMI handlers may be traversing list
-        */
-       struct rcu_head rcu;
        struct completion complete;
 
        /* sysfs top name under 'edac' directory
index a7408cf86f37f5ece603052bd8956cae30269777..c3f67437afb666f489cf926d573501fb79c344a4 100644 (file)
@@ -345,31 +345,19 @@ fail1:
        return 1;
 }
 
-/*
- * complete_edac_device_list_del
- *
- *     callback function when reference count is zero
- */
-static void complete_edac_device_list_del(struct rcu_head *head)
-{
-       struct edac_device_ctl_info *edac_dev;
-
-       edac_dev = container_of(head, struct edac_device_ctl_info, rcu);
-       INIT_LIST_HEAD(&edac_dev->link);
-}
-
 /*
  * del_edac_device_from_global_list
- *
- *     remove the RCU, setup for a callback call,
- *     then wait for the callback to occur
  */
 static void del_edac_device_from_global_list(struct edac_device_ctl_info
                                                *edac_device)
 {
        list_del_rcu(&edac_device->link);
-       call_rcu(&edac_device->rcu, complete_edac_device_list_del);
-       rcu_barrier();
+
+       /* these are for safe removal of devices from global list while
+        * NMI handlers may be traversing list
+        */
+       synchronize_rcu();
+       INIT_LIST_HEAD(&edac_device->link);
 }
 
 /*
index 1d8056049072698361a2eeb83db0d0a5935964e0..d69144a090435cbfdd5d883e231475e649c666d8 100644 (file)
@@ -447,20 +447,16 @@ fail1:
        return 1;
 }
 
-static void complete_mc_list_del(struct rcu_head *head)
-{
-       struct mem_ctl_info *mci;
-
-       mci = container_of(head, struct mem_ctl_info, rcu);
-       INIT_LIST_HEAD(&mci->link);
-}
-
 static void del_mc_from_global_list(struct mem_ctl_info *mci)
 {
        atomic_dec(&edac_handlers);
        list_del_rcu(&mci->link);
-       call_rcu(&mci->rcu, complete_mc_list_del);
-       rcu_barrier();
+
+       /* these are for safe removal of devices from global list while
+        * NMI handlers may be traversing list
+        */
+       synchronize_rcu();
+       INIT_LIST_HEAD(&mci->link);
 }
 
 /**
index be4b075c30984c1b408246a306664c0a6bb626f0..5ddaa86d6a6e86ef8ed0671070168d7541cd7073 100644 (file)
@@ -15,7 +15,7 @@
 #include "edac_core.h"
 #include "edac_module.h"
 
-#define EDAC_VERSION "Ver: 2.1.0 " __DATE__
+#define EDAC_VERSION "Ver: 2.1.0"
 
 #ifdef CONFIG_EDAC_DEBUG
 /* Values of 0 to 4 will generate output */
index efb5d565078304c60ea096b84bf1ae2b9761e4a4..2b378207d571b6bbd2220ac4b7deb263537b34b4 100644 (file)
@@ -163,19 +163,6 @@ fail1:
        return 1;
 }
 
-/*
- * complete_edac_pci_list_del
- *
- *     RCU completion callback to indicate item is deleted
- */
-static void complete_edac_pci_list_del(struct rcu_head *head)
-{
-       struct edac_pci_ctl_info *pci;
-
-       pci = container_of(head, struct edac_pci_ctl_info, rcu);
-       INIT_LIST_HEAD(&pci->link);
-}
-
 /*
  * del_edac_pci_from_global_list
  *
@@ -184,8 +171,12 @@ static void complete_edac_pci_list_del(struct rcu_head *head)
 static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci)
 {
        list_del_rcu(&pci->link);
-       call_rcu(&pci->rcu, complete_edac_pci_list_del);
-       rcu_barrier();
+
+       /* these are for safe removal of devices from global list while
+        * NMI handlers may be traversing list
+        */
+       synchronize_rcu();
+       INIT_LIST_HEAD(&pci->link);
 }
 
 #if 0
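
All three EDAC hunks above replace the call_rcu() + rcu_barrier() combination with a direct synchronize_rcu() before the list node is reinitialised. A generic sketch of the resulting pattern follows; struct foo_ctl, foo_list and foo_lock are hypothetical stand-ins for the EDAC control structures and their global list.

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo_ctl {
	struct list_head link;
	int id;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);	/* serialises writers only */

/* lockless reader, i.e. the traversal the EDAC comments worry about */
static bool foo_present(int id)
{
	struct foo_ctl *ctl;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(ctl, &foo_list, link) {
		if (ctl->id == id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

/* writer side, mirroring del_edac_*_from_global_list() above */
static void foo_del(struct foo_ctl *ctl)
{
	spin_lock(&foo_lock);
	list_del_rcu(&ctl->link);
	spin_unlock(&foo_lock);

	/* wait until no reader can still see the entry, then reuse the node */
	synchronize_rcu();
	INIT_LIST_HEAD(&ctl->link);
}
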
index 87f427c2ce5c56dd6b303eea470baa28b2799ace..4dc3ac25a42261946fe9b8fc6798464ffc104db1 100644 (file)
@@ -27,7 +27,7 @@
 /*
  * Alter this version for the I5000 module when modifications are made
  */
-#define I5000_REVISION    " Ver: 2.0.12 " __DATE__
+#define I5000_REVISION    " Ver: 2.0.12"
 #define EDAC_MOD_STR      "i5000_edac"
 
 #define i5000_printk(level, fmt, arg...) \
index 80a465efbae89e2739e493082a0494cfb11e4faf..74d6ec342afbf2e0f34c9d8fe808f3c8b0f6d9c5 100644 (file)
@@ -33,7 +33,7 @@
 /*
  * Alter this version for the I5400 module when modifications are made
  */
-#define I5400_REVISION    " Ver: 1.0.0 " __DATE__
+#define I5400_REVISION    " Ver: 1.0.0"
 
 #define EDAC_MOD_STR      "i5400_edac"
 
index 363cc1602944086a9bcbe6406ef1bbee0be209cb..a76fe8366b681d14cc2e6158fba49133525aeaa3 100644 (file)
@@ -31,7 +31,7 @@
 /*
  * Alter this version for the I7300 module when modifications are made
  */
-#define I7300_REVISION    " Ver: 1.0.0 " __DATE__
+#define I7300_REVISION    " Ver: 1.0.0"
 
 #define EDAC_MOD_STR      "i7300_edac"
 
index 465cbc25149fffab899997e3d133cacacd5bad5f..04f1e7ce02b196868d42253243d79d1044d9b7cc 100644 (file)
@@ -59,7 +59,7 @@ MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices");
 /*
  * Alter this version for the module when modifications are made
  */
-#define I7CORE_REVISION    " Ver: 1.0.0 " __DATE__
+#define I7CORE_REVISION    " Ver: 1.0.0"
 #define EDAC_MOD_STR      "i7core_edac"
 
 /*
index b8a95cf50718b8e6ef15f74a9c12d49b2b486568..931a057750491a80bcc300fea6efa13f8cf58703 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/edac.h>
 #include "edac_core.h"
 
-#define  I82860_REVISION " Ver: 2.0.2 " __DATE__
+#define  I82860_REVISION " Ver: 2.0.2"
 #define EDAC_MOD_STR   "i82860_edac"
 
 #define i82860_printk(level, fmt, arg...) \
index b2fd1e899142a15d0020f618e79f218e366a0676..33864c63c6840908a082121154f470daeeaf0344 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/edac.h>
 #include "edac_core.h"
 
-#define I82875P_REVISION       " Ver: 2.0.2 " __DATE__
+#define I82875P_REVISION       " Ver: 2.0.2"
 #define EDAC_MOD_STR           "i82875p_edac"
 
 #define i82875p_printk(level, fmt, arg...) \
index 92e65e7038e906b63769b441e170c38c510ecf97..a5da732fe5b2680be5a197a97e580619e9717b52 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/edac.h>
 #include "edac_core.h"
 
-#define I82975X_REVISION       " Ver: 1.0.0 " __DATE__
+#define I82975X_REVISION       " Ver: 1.0.0"
 #define EDAC_MOD_STR           "i82975x_edac"
 
 #define i82975x_printk(level, fmt, arg...) \
index cb24df839460cd59c988e07ca828d71f564a9ff3..932016f2cf06032a78f48297899a9a9c8b3c8eb2 100644 (file)
@@ -11,7 +11,7 @@
 #ifndef _MPC85XX_EDAC_H_
 #define _MPC85XX_EDAC_H_
 
-#define MPC85XX_REVISION " Ver: 2.0.0 " __DATE__
+#define MPC85XX_REVISION " Ver: 2.0.0"
 #define EDAC_MOD_STR   "MPC85xx_edac"
 
 #define mpc85xx_printk(level, fmt, arg...) \
index e042e2daa8f42b99c6b40aa2c1af89b7f1f1aa47..c7f209c92a1a4e73d34ca0b86400a19bf527a4fb 100644 (file)
@@ -12,7 +12,7 @@
 #ifndef _MV64X60_EDAC_H_
 #define _MV64X60_EDAC_H_
 
-#define MV64x60_REVISION " Ver: 2.0.0 " __DATE__
+#define MV64x60_REVISION " Ver: 2.0.0"
 #define EDAC_MOD_STR   "MV64x60_edac"
 
 #define mv64x60_printk(level, fmt, arg...) \
index af8e7b1aa290a2e0d682de5d1aefb9e8808c23e1..0de7d8770891e5421a52f7a8732b66e09dd13423 100644 (file)
 #define EDAC_OPSTATE_UNKNOWN_STR       "unknown"
 
 #define PPC4XX_EDAC_MODULE_NAME                "ppc4xx_edac"
-#define PPC4XX_EDAC_MODULE_REVISION    "v1.0.0 " __DATE__
+#define PPC4XX_EDAC_MODULE_REVISION    "v1.0.0"
 
 #define PPC4XX_EDAC_MESSAGE_SIZE       256
 
index 678513738c331fbd1497d2c772aab9d0b3dcc143..b153674431f19c7f4674a03c000da279a62ee878 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/edac.h>
 #include "edac_core.h"
 
-#define R82600_REVISION        " Ver: 2.0.2 " __DATE__
+#define R82600_REVISION        " Ver: 2.0.2"
 #define EDAC_MOD_STR   "r82600_edac"
 
 #define r82600_printk(level, fmt, arg...) \
index d3b295305542a2b71e00f5cc97d7682557cef0c4..d213646037553cabba73180bc4beb1b6d6d68440 100644 (file)
@@ -1,5 +1,5 @@
 #
-# platform-neutral GPIO infrastructure and expanders
+# GPIO infrastructure and drivers
 #
 
 config ARCH_WANT_OPTIONAL_GPIOLIB
@@ -31,7 +31,7 @@ menuconfig GPIOLIB
        help
          This enables GPIO support through the generic GPIO library.
          You only need to enable this, if you also want to enable
-         one or more of the GPIO expansion card drivers below.
+         one or more of the GPIO drivers below.
 
          If unsure, say N.
 
@@ -63,21 +63,26 @@ config GPIO_SYSFS
          Kernel drivers may also request that a particular GPIO be
          exported to userspace; this can be useful when debugging.
 
-# put expanders in the right section, in alphabetical order
+# put drivers in the right section, in alphabetical order
 
 config GPIO_MAX730X
        tristate
 
-comment "Memory mapped GPIO expanders:"
+comment "Memory mapped GPIO drivers:"
+
+config GPIO_BASIC_MMIO_CORE
+       tristate
+       help
+         Provides core functionality for basic memory-mapped GPIO controllers.
 
 config GPIO_BASIC_MMIO
        tristate "Basic memory-mapped GPIO controllers support"
+       select GPIO_BASIC_MMIO_CORE
        help
          Say yes here to support basic memory-mapped GPIO controllers.
 
 config GPIO_IT8761E
        tristate "IT8761E GPIO support"
-       depends on GPIOLIB
        help
          Say yes here to support GPIO functionality of IT8761E super I/O chip.
 
@@ -101,7 +106,7 @@ config GPIO_VR41XX
 
 config GPIO_SCH
        tristate "Intel SCH/TunnelCreek GPIO"
-       depends on GPIOLIB && PCI && X86
+       depends on PCI && X86
        select MFD_CORE
        select LPC_SCH
        help
@@ -121,7 +126,7 @@ config GPIO_SCH
 
 config GPIO_VX855
        tristate "VIA VX855/VX875 GPIO"
-       depends on GPIOLIB && MFD_SUPPORT && PCI
+       depends on MFD_SUPPORT && PCI
        select MFD_CORE
        select MFD_VX855
        help
@@ -347,13 +352,13 @@ config GPIO_ML_IOH
 
 config GPIO_TIMBERDALE
        bool "Support for timberdale GPIO IP"
-       depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM
+       depends on MFD_TIMBERDALE && HAS_IOMEM
        ---help---
        Add support for the GPIO IP in the timberdale FPGA.
 
 config GPIO_RDC321X
        tristate "RDC R-321x GPIO support"
-       depends on PCI && GPIOLIB
+       depends on PCI
        select MFD_SUPPORT
        select MFD_CORE
        select MFD_RDC321X
@@ -419,4 +424,11 @@ config AB8500_GPIO
        depends on AB8500_CORE && BROKEN
        help
          Select this to enable the AB8500 IC GPIO driver
+
+config GPIO_TPS65910
+       bool "TPS65910 GPIO"
+       depends on MFD_TPS65910
+       help
+         Select this option to enable GPIO driver for the TPS65910
+         chip family.
 endif
index becef5954356bdc6b126a5c8a1bbb208f1e6ee1f..6a3387acc0e5d71b8daae00da808f271bca6ed3f 100644 (file)
@@ -1,8 +1,4 @@
-# generic gpio support: dedicated expander chips, etc
-#
-# NOTE: platform-specific GPIO drivers don't belong in the
-# drivers/gpio directory; put them with other platform setup
-# code, IRQ controllers, board init, etc.
+# generic gpio support: platform drivers, dedicated expander chips, etc
 
 ccflags-$(CONFIG_DEBUG_GPIO)   += -DDEBUG
 
@@ -10,6 +6,7 @@ obj-$(CONFIG_GPIOLIB)          += gpiolib.o
 
 obj-$(CONFIG_GPIO_ADP5520)     += adp5520-gpio.o
 obj-$(CONFIG_GPIO_ADP5588)     += adp5588-gpio.o
+obj-$(CONFIG_GPIO_BASIC_MMIO_CORE)     += basic_mmio_gpio.o
 obj-$(CONFIG_GPIO_BASIC_MMIO)  += basic_mmio_gpio.o
 obj-$(CONFIG_GPIO_LANGWELL)    += langwell_gpio.o
 obj-$(CONFIG_GPIO_MAX730X)     += max730x.o
@@ -43,3 +40,4 @@ obj-$(CONFIG_GPIO_SX150X)     += sx150x.o
 obj-$(CONFIG_GPIO_VX855)       += vx855_gpio.o
 obj-$(CONFIG_GPIO_ML_IOH)      += ml_ioh_gpio.o
 obj-$(CONFIG_AB8500_GPIO)       += ab8500-gpio.o
+obj-$(CONFIG_GPIO_TPS65910)    += tps65910-gpio.o
index 3addea65894e47969a56687dc2d9c1f2f95180eb..8152e9f516b0786d04d358c4921f50924431ca62 100644 (file)
@@ -45,6 +45,7 @@ o        `                     ~~~~\___/~~~~    ` controller in FPGA is ,.`
  */
 
 #include <linux/init.h>
+#include <linux/err.h>
 #include <linux/bug.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -61,102 +62,101 @@ o        `                     ~~~~\___/~~~~    ` controller in FPGA is ,.`
 #include <linux/mod_devicetable.h>
 #include <linux/basic_mmio_gpio.h>
 
-struct bgpio_chip {
-       struct gpio_chip gc;
-       void __iomem *reg_dat;
-       void __iomem *reg_set;
-       void __iomem *reg_clr;
-
-       /* Number of bits (GPIOs): <register width> * 8. */
-       int bits;
-
-       /*
-        * Some GPIO controllers work with the big-endian bits notation,
-        * e.g. in a 8-bits register, GPIO7 is the least significant bit.
-        */
-       int big_endian_bits;
-
-       /*
-        * Used to lock bgpio_chip->data. Also, this is needed to keep
-        * shadowed and real data registers writes together.
-        */
-       spinlock_t lock;
-
-       /* Shadowed data register to clear/set bits safely. */
-       unsigned long data;
-};
+static void bgpio_write8(void __iomem *reg, unsigned long data)
+{
+       writeb(data, reg);
+}
 
-static struct bgpio_chip *to_bgpio_chip(struct gpio_chip *gc)
+static unsigned long bgpio_read8(void __iomem *reg)
 {
-       return container_of(gc, struct bgpio_chip, gc);
+       return readb(reg);
 }
 
-static unsigned long bgpio_in(struct bgpio_chip *bgc)
+static void bgpio_write16(void __iomem *reg, unsigned long data)
 {
-       switch (bgc->bits) {
-       case 8:
-               return __raw_readb(bgc->reg_dat);
-       case 16:
-               return __raw_readw(bgc->reg_dat);
-       case 32:
-               return __raw_readl(bgc->reg_dat);
-#if BITS_PER_LONG >= 64
-       case 64:
-               return __raw_readq(bgc->reg_dat);
-#endif
-       }
-       return -EINVAL;
+       writew(data, reg);
 }
 
-static void bgpio_out(struct bgpio_chip *bgc, void __iomem *reg,
-                     unsigned long data)
+static unsigned long bgpio_read16(void __iomem *reg)
 {
-       switch (bgc->bits) {
-       case 8:
-               __raw_writeb(data, reg);
-               return;
-       case 16:
-               __raw_writew(data, reg);
-               return;
-       case 32:
-               __raw_writel(data, reg);
-               return;
+       return readw(reg);
+}
+
+static void bgpio_write32(void __iomem *reg, unsigned long data)
+{
+       writel(data, reg);
+}
+
+static unsigned long bgpio_read32(void __iomem *reg)
+{
+       return readl(reg);
+}
+
 #if BITS_PER_LONG >= 64
-       case 64:
-               __raw_writeq(data, reg);
-               return;
-#endif
-       }
+static void bgpio_write64(void __iomem *reg, unsigned long data)
+{
+       writeq(data, reg);
 }
 
+static unsigned long bgpio_read64(void __iomem *reg)
+{
+       return readq(reg);
+}
+#endif /* BITS_PER_LONG >= 64 */
+
 static unsigned long bgpio_pin2mask(struct bgpio_chip *bgc, unsigned int pin)
 {
-       if (bgc->big_endian_bits)
-               return 1 << (bgc->bits - 1 - pin);
-       else
-               return 1 << pin;
+       return 1 << pin;
+}
+
+static unsigned long bgpio_pin2mask_be(struct bgpio_chip *bgc,
+                                      unsigned int pin)
+{
+       return 1 << (bgc->bits - 1 - pin);
 }
 
 static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
 {
        struct bgpio_chip *bgc = to_bgpio_chip(gc);
 
-       return bgpio_in(bgc) & bgpio_pin2mask(bgc, gpio);
+       return bgc->read_reg(bgc->reg_dat) & bgc->pin2mask(bgc, gpio);
 }
 
 static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
 {
        struct bgpio_chip *bgc = to_bgpio_chip(gc);
-       unsigned long mask = bgpio_pin2mask(bgc, gpio);
+       unsigned long mask = bgc->pin2mask(bgc, gpio);
        unsigned long flags;
 
-       if (bgc->reg_set) {
-               if (val)
-                       bgpio_out(bgc, bgc->reg_set, mask);
-               else
-                       bgpio_out(bgc, bgc->reg_clr, mask);
-               return;
-       }
+       spin_lock_irqsave(&bgc->lock, flags);
+
+       if (val)
+               bgc->data |= mask;
+       else
+               bgc->data &= ~mask;
+
+       bgc->write_reg(bgc->reg_dat, bgc->data);
+
+       spin_unlock_irqrestore(&bgc->lock, flags);
+}
+
+static void bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio,
+                                int val)
+{
+       struct bgpio_chip *bgc = to_bgpio_chip(gc);
+       unsigned long mask = bgc->pin2mask(bgc, gpio);
+
+       if (val)
+               bgc->write_reg(bgc->reg_set, mask);
+       else
+               bgc->write_reg(bgc->reg_clr, mask);
+}
+
+static void bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+       struct bgpio_chip *bgc = to_bgpio_chip(gc);
+       unsigned long mask = bgc->pin2mask(bgc, gpio);
+       unsigned long flags;
 
        spin_lock_irqsave(&bgc->lock, flags);
 
@@ -165,103 +165,352 @@ static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
        else
                bgc->data &= ~mask;
 
-       bgpio_out(bgc, bgc->reg_dat, bgc->data);
+       bgc->write_reg(bgc->reg_set, bgc->data);
 
        spin_unlock_irqrestore(&bgc->lock, flags);
 }
 
+static int bgpio_simple_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+       return 0;
+}
+
+static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio,
+                               int val)
+{
+       gc->set(gc, gpio, val);
+
+       return 0;
+}
+
 static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
 {
+       struct bgpio_chip *bgc = to_bgpio_chip(gc);
+       unsigned long flags;
+
+       spin_lock_irqsave(&bgc->lock, flags);
+
+       bgc->dir &= ~bgc->pin2mask(bgc, gpio);
+       bgc->write_reg(bgc->reg_dir, bgc->dir);
+
+       spin_unlock_irqrestore(&bgc->lock, flags);
+
        return 0;
 }
 
 static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 {
-       bgpio_set(gc, gpio, val);
+       struct bgpio_chip *bgc = to_bgpio_chip(gc);
+       unsigned long flags;
+
+       gc->set(gc, gpio, val);
+
+       spin_lock_irqsave(&bgc->lock, flags);
+
+       bgc->dir |= bgc->pin2mask(bgc, gpio);
+       bgc->write_reg(bgc->reg_dir, bgc->dir);
+
+       spin_unlock_irqrestore(&bgc->lock, flags);
+
        return 0;
 }
 
-static int __devinit bgpio_probe(struct platform_device *pdev)
+static int bgpio_dir_in_inv(struct gpio_chip *gc, unsigned int gpio)
 {
-       const struct platform_device_id *platid = platform_get_device_id(pdev);
-       struct device *dev = &pdev->dev;
-       struct bgpio_pdata *pdata = dev_get_platdata(dev);
-       struct bgpio_chip *bgc;
-       struct resource *res_dat;
-       struct resource *res_set;
-       struct resource *res_clr;
-       resource_size_t dat_sz;
-       int bits;
-       int ret;
+       struct bgpio_chip *bgc = to_bgpio_chip(gc);
+       unsigned long flags;
 
-       res_dat = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
-       if (!res_dat)
-               return -EINVAL;
+       spin_lock_irqsave(&bgc->lock, flags);
 
-       dat_sz = resource_size(res_dat);
-       if (!is_power_of_2(dat_sz))
-               return -EINVAL;
+       bgc->dir |= bgc->pin2mask(bgc, gpio);
+       bgc->write_reg(bgc->reg_dir, bgc->dir);
+
+       spin_unlock_irqrestore(&bgc->lock, flags);
+
+       return 0;
+}
 
-       bits = dat_sz * 8;
-       if (bits > BITS_PER_LONG)
+static int bgpio_dir_out_inv(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+       struct bgpio_chip *bgc = to_bgpio_chip(gc);
+       unsigned long flags;
+
+       gc->set(gc, gpio, val);
+
+       spin_lock_irqsave(&bgc->lock, flags);
+
+       bgc->dir &= ~bgc->pin2mask(bgc, gpio);
+       bgc->write_reg(bgc->reg_dir, bgc->dir);
+
+       spin_unlock_irqrestore(&bgc->lock, flags);
+
+       return 0;
+}
+
+static int bgpio_setup_accessors(struct device *dev,
+                                struct bgpio_chip *bgc,
+                                bool be)
+{
+
+       switch (bgc->bits) {
+       case 8:
+               bgc->read_reg   = bgpio_read8;
+               bgc->write_reg  = bgpio_write8;
+               break;
+       case 16:
+               bgc->read_reg   = bgpio_read16;
+               bgc->write_reg  = bgpio_write16;
+               break;
+       case 32:
+               bgc->read_reg   = bgpio_read32;
+               bgc->write_reg  = bgpio_write32;
+               break;
+#if BITS_PER_LONG >= 64
+       case 64:
+               bgc->read_reg   = bgpio_read64;
+               bgc->write_reg  = bgpio_write64;
+               break;
+#endif /* BITS_PER_LONG >= 64 */
+       default:
+               dev_err(dev, "unsupported data width %u bits\n", bgc->bits);
                return -EINVAL;
+       }
 
-       bgc = devm_kzalloc(dev, sizeof(*bgc), GFP_KERNEL);
-       if (!bgc)
-               return -ENOMEM;
+       bgc->pin2mask = be ? bgpio_pin2mask_be : bgpio_pin2mask;
+
+       return 0;
+}
+
+/*
+ * Create the device and allocate the resources.  For setting GPIO's there are
+ * three supported configurations:
+ *
+ *     - single input/output register resource (named "dat").
+ *     - set/clear pair (named "set" and "clr").
+ *     - single output register resource and single input resource ("set" and
+ *     "dat").
+ *
+ * For the single output register, this drives a 1 by setting a bit and a zero
+ * by clearing a bit.  For the set/clr pair, this drives a 1 by setting a bit
+ * in the set register and clears it by setting a bit in the clear register.
+ * The configuration is detected by which resources are present.
+ *
+ * For setting the GPIO direction, there are three supported configurations:
+ *
+ *     - simple bidirectional GPIO that requires no configuration.
+ *     - an output direction register (named "dirout") where a 1 bit
+ *     indicates the GPIO is an output.
+ *     - an input direction register (named "dirin") where a 1 bit indicates
+ *     the GPIO is an input.
+ */
+static int bgpio_setup_io(struct bgpio_chip *bgc,
+                         void __iomem *dat,
+                         void __iomem *set,
+                         void __iomem *clr)
+{
 
-       bgc->reg_dat = devm_ioremap(dev, res_dat->start, dat_sz);
+       bgc->reg_dat = dat;
        if (!bgc->reg_dat)
-               return -ENOMEM;
+               return -EINVAL;
+
+       if (set && clr) {
+               bgc->reg_set = set;
+               bgc->reg_clr = clr;
+               bgc->gc.set = bgpio_set_with_clear;
+       } else if (set && !clr) {
+               bgc->reg_set = set;
+               bgc->gc.set = bgpio_set_set;
+       } else {
+               bgc->gc.set = bgpio_set;
+       }
+
+       bgc->gc.get = bgpio_get;
+
+       return 0;
+}
 
-       res_set = platform_get_resource_byname(pdev, IORESOURCE_MEM, "set");
-       res_clr = platform_get_resource_byname(pdev, IORESOURCE_MEM, "clr");
-       if (res_set && res_clr) {
-               if (resource_size(res_set) != resource_size(res_clr) ||
-                               resource_size(res_set) != dat_sz)
-                       return -EINVAL;
-
-               bgc->reg_set = devm_ioremap(dev, res_set->start, dat_sz);
-               bgc->reg_clr = devm_ioremap(dev, res_clr->start, dat_sz);
-               if (!bgc->reg_set || !bgc->reg_clr)
-                       return -ENOMEM;
-       } else if (res_set || res_clr) {
+static int bgpio_setup_direction(struct bgpio_chip *bgc,
+                                void __iomem *dirout,
+                                void __iomem *dirin)
+{
+       if (dirout && dirin) {
                return -EINVAL;
+       } else if (dirout) {
+               bgc->reg_dir = dirout;
+               bgc->gc.direction_output = bgpio_dir_out;
+               bgc->gc.direction_input = bgpio_dir_in;
+       } else if (dirin) {
+               bgc->reg_dir = dirin;
+               bgc->gc.direction_output = bgpio_dir_out_inv;
+               bgc->gc.direction_input = bgpio_dir_in_inv;
+       } else {
+               bgc->gc.direction_output = bgpio_simple_dir_out;
+               bgc->gc.direction_input = bgpio_simple_dir_in;
        }
 
-       spin_lock_init(&bgc->lock);
+       return 0;
+}
 
-       bgc->bits = bits;
-       bgc->big_endian_bits = !strcmp(platid->name, "basic-mmio-gpio-be");
-       bgc->data = bgpio_in(bgc);
+int __devexit bgpio_remove(struct bgpio_chip *bgc)
+{
+       int err = gpiochip_remove(&bgc->gc);
 
-       bgc->gc.ngpio = bits;
-       bgc->gc.direction_input = bgpio_dir_in;
-       bgc->gc.direction_output = bgpio_dir_out;
-       bgc->gc.get = bgpio_get;
-       bgc->gc.set = bgpio_set;
+       kfree(bgc);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(bgpio_remove);
+
+int __devinit bgpio_init(struct bgpio_chip *bgc,
+                        struct device *dev,
+                        unsigned long sz,
+                        void __iomem *dat,
+                        void __iomem *set,
+                        void __iomem *clr,
+                        void __iomem *dirout,
+                        void __iomem *dirin,
+                        bool big_endian)
+{
+       int ret;
+
+       if (!is_power_of_2(sz))
+               return -EINVAL;
+
+       bgc->bits = sz * 8;
+       if (bgc->bits > BITS_PER_LONG)
+               return -EINVAL;
+
+       spin_lock_init(&bgc->lock);
        bgc->gc.dev = dev;
        bgc->gc.label = dev_name(dev);
+       bgc->gc.base = -1;
+       bgc->gc.ngpio = bgc->bits;
 
-       if (pdata)
-               bgc->gc.base = pdata->base;
-       else
-               bgc->gc.base = -1;
+       ret = bgpio_setup_io(bgc, dat, set, clr);
+       if (ret)
+               return ret;
 
-       dev_set_drvdata(dev, bgc);
+       ret = bgpio_setup_accessors(dev, bgc, big_endian);
+       if (ret)
+               return ret;
 
-       ret = gpiochip_add(&bgc->gc);
+       ret = bgpio_setup_direction(bgc, dirout, dirin);
        if (ret)
-               dev_err(dev, "gpiochip_add() failed: %d\n", ret);
+               return ret;
+
+       bgc->data = bgc->read_reg(bgc->reg_dat);
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(bgpio_init);
+
+#ifdef CONFIG_GPIO_BASIC_MMIO
 
-static int __devexit bgpio_remove(struct platform_device *pdev)
+static void __iomem *bgpio_map(struct platform_device *pdev,
+                              const char *name,
+                              resource_size_t sane_sz,
+                              int *err)
 {
-       struct bgpio_chip *bgc = dev_get_drvdata(&pdev->dev);
+       struct device *dev = &pdev->dev;
+       struct resource *r;
+       resource_size_t start;
+       resource_size_t sz;
+       void __iomem *ret;
+
+       *err = 0;
+
+       r = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+       if (!r)
+               return NULL;
 
-       return gpiochip_remove(&bgc->gc);
+       sz = resource_size(r);
+       if (sz != sane_sz) {
+               *err = -EINVAL;
+               return NULL;
+       }
+
+       start = r->start;
+       if (!devm_request_mem_region(dev, start, sz, r->name)) {
+               *err = -EBUSY;
+               return NULL;
+       }
+
+       ret = devm_ioremap(dev, start, sz);
+       if (!ret) {
+               *err = -ENOMEM;
+               return NULL;
+       }
+
+       return ret;
+}
+
+static int __devinit bgpio_pdev_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct resource *r;
+       void __iomem *dat;
+       void __iomem *set;
+       void __iomem *clr;
+       void __iomem *dirout;
+       void __iomem *dirin;
+       unsigned long sz;
+       bool be;
+       int err;
+       struct bgpio_chip *bgc;
+       struct bgpio_pdata *pdata = dev_get_platdata(dev);
+
+       r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
+       if (!r)
+               return -EINVAL;
+
+       sz = resource_size(r);
+
+       dat = bgpio_map(pdev, "dat", sz, &err);
+       if (!dat)
+               return err ? err : -EINVAL;
+
+       set = bgpio_map(pdev, "set", sz, &err);
+       if (err)
+               return err;
+
+       clr = bgpio_map(pdev, "clr", sz, &err);
+       if (err)
+               return err;
+
+       dirout = bgpio_map(pdev, "dirout", sz, &err);
+       if (err)
+               return err;
+
+       dirin = bgpio_map(pdev, "dirin", sz, &err);
+       if (err)
+               return err;
+
+       be = !strcmp(platform_get_device_id(pdev)->name, "basic-mmio-gpio-be");
+
+       bgc = devm_kzalloc(&pdev->dev, sizeof(*bgc), GFP_KERNEL);
+       if (!bgc)
+               return -ENOMEM;
+
+       err = bgpio_init(bgc, dev, sz, dat, set, clr, dirout, dirin, be);
+       if (err)
+               return err;
+
+       if (pdata) {
+               bgc->gc.base = pdata->base;
+               if (pdata->ngpio > 0)
+                       bgc->gc.ngpio = pdata->ngpio;
+       }
+
+       platform_set_drvdata(pdev, bgc);
+
+       return gpiochip_add(&bgc->gc);
+}
+
+static int __devexit bgpio_pdev_remove(struct platform_device *pdev)
+{
+       struct bgpio_chip *bgc = platform_get_drvdata(pdev);
+
+       return bgpio_remove(bgc);
 }
 
 static const struct platform_device_id bgpio_id_table[] = {
@@ -276,21 +525,23 @@ static struct platform_driver bgpio_driver = {
                .name = "basic-mmio-gpio",
        },
        .id_table = bgpio_id_table,
-       .probe = bgpio_probe,
-       .remove = __devexit_p(bgpio_remove),
+       .probe = bgpio_pdev_probe,
+       .remove = __devexit_p(bgpio_pdev_remove),
 };
 
-static int __init bgpio_init(void)
+static int __init bgpio_platform_init(void)
 {
        return platform_driver_register(&bgpio_driver);
 }
-module_init(bgpio_init);
+module_init(bgpio_platform_init);
 
-static void __exit bgpio_exit(void)
+static void __exit bgpio_platform_exit(void)
 {
        platform_driver_unregister(&bgpio_driver);
 }
-module_exit(bgpio_exit);
+module_exit(bgpio_platform_exit);
+
+#endif /* CONFIG_GPIO_BASIC_MMIO */
 
 MODULE_DESCRIPTION("Driver for basic memory-mapped GPIO controllers");
 MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
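
The hunks above split basic_mmio_gpio into a library plus an optional platform driver: bgpio_init() now takes already-mapped registers and wires up the accessors described in the configuration comment, while bgpio_pdev_probe() keeps the old platform-resource behaviour. Below is a minimal sketch of how another driver could reuse the library; the foo_gpio name and the register layout are assumptions for illustration only, and just the bgpio_init() signature follows this patch.

/*
 * Illustrative sketch, not part of this commit: a hypothetical SoC GPIO
 * block with a 32-bit "dat" register at offset 0x0 and a "dirout"
 * register at offset 0x4, reusing the new bgpio library.
 */
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/basic_mmio_gpio.h>

static int __devinit foo_gpio_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct bgpio_chip *bgc;
	void __iomem *base;
	int err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	bgc = devm_kzalloc(&pdev->dev, sizeof(*bgc), GFP_KERNEL);
	if (!bgc)
		return -ENOMEM;

	/* 4-byte registers, no set/clr pair, no dirin, little-endian. */
	err = bgpio_init(bgc, &pdev->dev, 4, base, NULL, NULL,
			 base + 0x4, NULL, false);
	if (err)
		return err;

	platform_set_drvdata(pdev, bgc);
	return gpiochip_add(&bgc->gc);
}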
index 36a2974815b70d4bb0346c7b14b6d84c2ac7accc..137a8ca678221f3b3c7121a481aaf9ce6d0872da 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/idr.h>
 #include <linux/slab.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/gpio.h>
 
 /* Optional implementation infrastructure for GPIO interfaces.
  *
@@ -1165,6 +1167,7 @@ struct gpio_chip *gpiochip_find(void *data,
 
        return chip;
 }
+EXPORT_SYMBOL_GPL(gpiochip_find);
 
 /* These "optional" allocation calls help prevent drivers from stomping
  * on each other, and help provide better diagnostics in debugfs.
@@ -1404,6 +1407,8 @@ int gpio_direction_input(unsigned gpio)
        status = chip->direction_input(chip, gpio);
        if (status == 0)
                clear_bit(FLAG_IS_OUT, &desc->flags);
+
+       trace_gpio_direction(chip->base + gpio, 1, status);
 lose:
        return status;
 fail:
@@ -1457,6 +1462,8 @@ int gpio_direction_output(unsigned gpio, int value)
        status = chip->direction_output(chip, gpio, value);
        if (status == 0)
                set_bit(FLAG_IS_OUT, &desc->flags);
+       trace_gpio_value(chip->base + gpio, 0, value);
+       trace_gpio_direction(chip->base + gpio, 0, status);
 lose:
        return status;
 fail:
@@ -1546,10 +1553,13 @@ EXPORT_SYMBOL_GPL(gpio_set_debounce);
 int __gpio_get_value(unsigned gpio)
 {
        struct gpio_chip        *chip;
+       int value;
 
        chip = gpio_to_chip(gpio);
        WARN_ON(chip->can_sleep);
-       return chip->get ? chip->get(chip, gpio - chip->base) : 0;
+       value = chip->get ? chip->get(chip, gpio - chip->base) : 0;
+       trace_gpio_value(gpio, 1, value);
+       return value;
 }
 EXPORT_SYMBOL_GPL(__gpio_get_value);
 
@@ -1568,6 +1578,7 @@ void __gpio_set_value(unsigned gpio, int value)
 
        chip = gpio_to_chip(gpio);
        WARN_ON(chip->can_sleep);
+       trace_gpio_value(gpio, 0, value);
        chip->set(chip, gpio - chip->base, value);
 }
 EXPORT_SYMBOL_GPL(__gpio_set_value);
@@ -1618,10 +1629,13 @@ EXPORT_SYMBOL_GPL(__gpio_to_irq);
 int gpio_get_value_cansleep(unsigned gpio)
 {
        struct gpio_chip        *chip;
+       int value;
 
        might_sleep_if(extra_checks);
        chip = gpio_to_chip(gpio);
-       return chip->get ? chip->get(chip, gpio - chip->base) : 0;
+       value = chip->get ? chip->get(chip, gpio - chip->base) : 0;
+       trace_gpio_value(gpio, 1, value);
+       return value;
 }
 EXPORT_SYMBOL_GPL(gpio_get_value_cansleep);
 
@@ -1631,6 +1645,7 @@ void gpio_set_value_cansleep(unsigned gpio, int value)
 
        might_sleep_if(extra_checks);
        chip = gpio_to_chip(gpio);
+       trace_gpio_value(gpio, 0, value);
        chip->set(chip, gpio - chip->base, value);
 }
 EXPORT_SYMBOL_GPL(gpio_set_value_cansleep);
index 2514fb075f4afbdc1bb7a0ad62a9e3dff3bd9994..813ac077e5d7d5b0d25e465c53dcb10a023ef595 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
-#include <linux/mfd/core.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
 #include <linux/slab.h>
@@ -150,7 +149,7 @@ static int __devinit ttl_probe(struct platform_device *pdev)
        struct resource *res;
        int ret;
 
-       pdata = mfd_get_data(pdev);
+       pdata = pdev->dev.platform_data;
        if (!pdata) {
                dev_err(dev, "no platform data\n");
                ret = -ENXIO;
index 7630ab7b9bec30cdad9628908d74813075413b65..78a843947d82150031cf9ed60ac6254eef3f0ab1 100644 (file)
@@ -397,7 +397,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
 
                        irq_set_chip_data(irq, chip);
                        irq_set_chip_and_handler(irq, &pca953x_irq_chip,
-                                                handle_edge_irq);
+                                                handle_simple_irq);
 #ifdef CONFIG_ARM
                        set_irq_flags(irq, IRQF_VALID);
 #else
index a9bda881935a82907ff0c5c753efee59b924cdf5..2762698e0204adc99699839137680bafc7fcc0c7 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/pci.h>
 #include <linux/gpio.h>
 #include <linux/mfd/rdc321x.h>
-#include <linux/mfd/core.h>
 #include <linux/slab.h>
 
 struct rdc321x_gpio {
@@ -136,7 +135,7 @@ static int __devinit rdc321x_gpio_probe(struct platform_device *pdev)
        struct rdc321x_gpio *rdc321x_gpio_dev;
        struct rdc321x_gpio_pdata *pdata;
 
-       pdata = mfd_get_data(pdev);
+       pdata = pdev->dev.platform_data;
        if (!pdata) {
                dev_err(&pdev->dev, "no platform data supplied\n");
                return -ENODEV;
index edbe1eae531fa6ad35394232af3de40f78425622..0265872e57d15e666b1365b2954befb9e98bf59b 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/module.h>
 #include <linux/gpio.h>
 #include <linux/platform_device.h>
-#include <linux/mfd/core.h>
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/timb_gpio.h>
@@ -229,7 +228,7 @@ static int __devinit timbgpio_probe(struct platform_device *pdev)
        struct gpio_chip *gc;
        struct timbgpio *tgpio;
        struct resource *iomem;
-       struct timbgpio_platform_data *pdata = mfd_get_data(pdev);
+       struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
        int irq = platform_get_irq(pdev, 0);
 
        if (!pdata || pdata->nr_pins > 32) {
@@ -320,13 +319,14 @@ err_mem:
 static int __devexit timbgpio_remove(struct platform_device *pdev)
 {
        int err;
+       struct timbgpio_platform_data *pdata = pdev->dev.platform_data;
        struct timbgpio *tgpio = platform_get_drvdata(pdev);
        struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        int irq = platform_get_irq(pdev, 0);
 
        if (irq >= 0 && tgpio->irq_base > 0) {
                int i;
-               for (i = 0; i < tgpio->gpio.ngpio; i++) {
+               for (i = 0; i < pdata->nr_pins; i++) {
                        irq_set_chip(tgpio->irq_base + i, NULL);
                        irq_set_chip_data(tgpio->irq_base + i, NULL);
                }
diff --git a/drivers/gpio/tps65910-gpio.c b/drivers/gpio/tps65910-gpio.c
new file mode 100644 (file)
index 0000000..8d1ddfd
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+ * tps65910-gpio.c  --  TI TPS6591x
+ *
+ * Copyright 2010 Texas Instruments Inc.
+ *
+ * Author: Graeme Gregory <gg@slimlogic.co.uk>
+ * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under  the terms of the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/mfd/tps65910.h>
+
+static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
+{
+       struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+       uint8_t val;
+
+       tps65910->read(tps65910, TPS65910_GPIO0 + offset, 1, &val);
+
+       if (val & GPIO_STS_MASK)
+               return 1;
+
+       return 0;
+}
+
+static void tps65910_gpio_set(struct gpio_chip *gc, unsigned offset,
+                             int value)
+{
+       struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+
+       if (value)
+               tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
+                                               GPIO_SET_MASK);
+       else
+               tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset,
+                                               GPIO_SET_MASK);
+}
+
+static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
+                               int value)
+{
+       struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+
+       /* Set the initial value */
+       tps65910_gpio_set(gc, offset, value);
+
+       return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
+                                               GPIO_CFG_MASK);
+}
+
+static int tps65910_gpio_input(struct gpio_chip *gc, unsigned offset)
+{
+       struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
+
+       return tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset,
+                                               GPIO_CFG_MASK);
+}
+
+void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base)
+{
+       int ret;
+
+       if (!gpio_base)
+               return;
+
+       tps65910->gpio.owner            = THIS_MODULE;
+       tps65910->gpio.label            = tps65910->i2c_client->name;
+       tps65910->gpio.dev              = tps65910->dev;
+       tps65910->gpio.base             = gpio_base;
+
+       switch (tps65910_chip_id(tps65910)) {
+       case TPS65910:
+               tps65910->gpio.ngpio    = 6;
+               break;
+       case TPS65911:
+               tps65910->gpio.ngpio    = 9;
+               break;
+       default:
+               return;
+       }
+       tps65910->gpio.can_sleep        = 1;
+
+       tps65910->gpio.direction_input  = tps65910_gpio_input;
+       tps65910->gpio.direction_output = tps65910_gpio_output;
+       tps65910->gpio.set              = tps65910_gpio_set;
+       tps65910->gpio.get              = tps65910_gpio_get;
+
+       ret = gpiochip_add(&tps65910->gpio);
+
+       if (ret)
+               dev_warn(tps65910->dev, "GPIO registration failed: %d\n", ret);
+}
index 9577c432e77ffd75ae6316b78256c2519f62124b..de3d2465fe24eeabe835b9f76cc34eeb2b106d98 100644 (file)
@@ -350,6 +350,7 @@ static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
 
 static int create_name_attr(struct platform_data *pdata, struct device *dev)
 {
+       sysfs_attr_init(&pdata->name_attr.attr);
        pdata->name_attr.attr.name = "name";
        pdata->name_attr.attr.mode = S_IRUGO;
        pdata->name_attr.show = show_name;
@@ -372,6 +373,7 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev,
        for (i = 0; i < MAX_ATTRS; i++) {
                snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i],
                        attr_no);
+               sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr);
                tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i];
                tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO;
                tdata->sd_attrs[i].dev_attr.show = rd_ptr[i];
@@ -422,7 +424,7 @@ static void update_ttarget(__u8 cpu_model, struct temp_data *tdata,
        }
 }
 
-static int chk_ucode_version(struct platform_device *pdev)
+static int __devinit chk_ucode_version(struct platform_device *pdev)
 {
        struct cpuinfo_x86 *c = &cpu_data(pdev->id);
        int err;
@@ -509,8 +511,8 @@ static int create_core_data(struct platform_data *pdata,
        /*
         * Provide a single set of attributes for all HT siblings of a core
         * to avoid duplicate sensors (the processor ID and core ID of all
-        * HT siblings of a core is the same).
-        * Skip if a HT sibling of this core is already online.
+        * HT siblings of a core are the same).
+        * Skip if a HT sibling of this core is already registered.
         * This is not an error.
         */
        if (pdata->core_data[attr_no] != NULL)
@@ -770,10 +772,10 @@ static void __cpuinit put_core_offline(unsigned int cpu)
                coretemp_remove_core(pdata, &pdev->dev, indx);
 
        /*
-        * If a core is taken offline, but a HT sibling of the same core is
-        * still online, register the alternate sibling. This ensures that
-        * exactly one set of attributes is provided as long as at least one
-        * HT sibling of a core is online.
+        * If a HT sibling of a core is taken offline, but another HT sibling
+        * of the same core is still online, register the alternate sibling.
+        * This ensures that exactly one set of attributes is provided as long
+        * as at least one HT sibling of a core is online.
         */
        for_each_sibling(i, cpu) {
                if (i != cpu) {
index 98799bab69ce44a19b61afc3ce7ab64074504da8..354770ed3186cdfa30ae07fcd829f5b46906f7e4 100644 (file)
@@ -707,6 +707,7 @@ do {                                                                        \
        struct sensor_device_attribute *a                               \
            = &data->_type##s[data->num_##_type##s].attribute;          \
        BUG_ON(data->num_attributes >= data->max_attributes);           \
+       sysfs_attr_init(&a->dev_attr.attr);                             \
        a->dev_attr.attr.name = _name;                                  \
        a->dev_attr.attr.mode = _mode;                                  \
        a->dev_attr.show = _show;                                       \
index fee1a2613861a8d1ea16d12b9e660dd0947df05c..1b46a9d9f907336fb70526ca93881ce3b3934950 100644 (file)
@@ -49,7 +49,6 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/platform_device.h>
-#include <linux/mfd/core.h>
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
 #include <linux/wait.h>
@@ -306,7 +305,7 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
                return -EIO;
        }
 
-       pdata = mfd_get_data(pdev);
+       pdata = pdev->dev.platform_data;
        if (pdata) {
                i2c->regstep = pdata->regstep;
                i2c->clock_khz = pdata->clock_khz;
index e9d5ff4d14966c2a47bb98d57277933efd24b153..4bb68f35caf2efc30fdd0466a114ad0822e27d5a 100644 (file)
@@ -34,7 +34,6 @@
 #include <linux/errno.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
-#include <linux/mfd/core.h>
 #include <linux/i2c.h>
 #include <linux/interrupt.h>
 #include <linux/wait.h>
@@ -705,7 +704,7 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev)
        if (irq < 0)
                goto resource_missing;
 
-       pdata = mfd_get_data(pdev);
+       pdata = (struct xiic_i2c_platform_data *) pdev->dev.platform_data;
        if (!pdata)
                return -EINVAL;
 
index 6e35eccc9caacb9677f771568a39583423056887..0f9a84c1046ab38776abe891ef0ff98ea0c19c98 100644 (file)
@@ -2,6 +2,7 @@ menuconfig INFINIBAND
        tristate "InfiniBand support"
        depends on PCI || BROKEN
        depends on HAS_IOMEM
+       depends on NET
        ---help---
          Core support for InfiniBand (IB).  Make sure to also select
          any protocols you wish to use as well as drivers for your
index cb1ab3ea49986f448474ce941c8e2f9cf7418b64..c8bbaef1becb1588c8051c214db9c378b39c37c3 100644 (file)
@@ -8,7 +8,7 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \
                                        $(user_access-y)
 
 ib_core-y :=                   packer.o ud_header.o verbs.o sysfs.o \
-                               device.o fmr_pool.o cache.o
+                               device.o fmr_pool.o cache.o netlink.o
 ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
 
 ib_mad-y :=                    mad.o smi.o agent.o mad_rmpp.o
index f804e28e1ebb5b9dc8ebdfdf8f4c53a81e9b8a72..f62f52fb9ece776000fb98dc830d52bf8bfb4f57 100644 (file)
@@ -3639,8 +3639,16 @@ static struct kobj_type cm_port_obj_type = {
        .release = cm_release_port_obj
 };
 
+static char *cm_devnode(struct device *dev, mode_t *mode)
+{
+       *mode = 0666;
+       return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
+}
+
 struct class cm_class = {
+       .owner   = THIS_MODULE,
        .name    = "infiniband_cm",
+       .devnode = cm_devnode,
 };
 EXPORT_SYMBOL(cm_class);
 
index 99dde874fbbdf53e93675643ac1b1e1bba372bd9..b6a33b3c516de9b8d1c12f0c20e958ff3608e9a5 100644 (file)
@@ -47,6 +47,7 @@
 
 #include <rdma/rdma_cm.h>
 #include <rdma/rdma_cm_ib.h>
+#include <rdma/rdma_netlink.h>
 #include <rdma/ib_cache.h>
 #include <rdma/ib_cm.h>
 #include <rdma/ib_sa.h>
@@ -89,20 +90,6 @@ struct cma_device {
        struct list_head        id_list;
 };
 
-enum cma_state {
-       CMA_IDLE,
-       CMA_ADDR_QUERY,
-       CMA_ADDR_RESOLVED,
-       CMA_ROUTE_QUERY,
-       CMA_ROUTE_RESOLVED,
-       CMA_CONNECT,
-       CMA_DISCONNECT,
-       CMA_ADDR_BOUND,
-       CMA_LISTEN,
-       CMA_DEVICE_REMOVAL,
-       CMA_DESTROYING
-};
-
 struct rdma_bind_list {
        struct idr              *ps;
        struct hlist_head       owners;
@@ -126,7 +113,7 @@ struct rdma_id_private {
        struct list_head        mc_list;
 
        int                     internal_id;
-       enum cma_state          state;
+       enum rdma_cm_state      state;
        spinlock_t              lock;
        struct mutex            qp_mutex;
 
@@ -146,6 +133,7 @@ struct rdma_id_private {
        u32                     seq_num;
        u32                     qkey;
        u32                     qp_num;
+       pid_t                   owner;
        u8                      srq;
        u8                      tos;
        u8                      reuseaddr;
@@ -165,8 +153,8 @@ struct cma_multicast {
 struct cma_work {
        struct work_struct      work;
        struct rdma_id_private  *id;
-       enum cma_state          old_state;
-       enum cma_state          new_state;
+       enum rdma_cm_state      old_state;
+       enum rdma_cm_state      new_state;
        struct rdma_cm_event    event;
 };
 
@@ -217,7 +205,7 @@ struct sdp_hah {
 #define CMA_VERSION 0x00
 #define SDP_MAJ_VERSION 0x2
 
-static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
+static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
 {
        unsigned long flags;
        int ret;
@@ -229,7 +217,7 @@ static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
 }
 
 static int cma_comp_exch(struct rdma_id_private *id_priv,
-                        enum cma_state comp, enum cma_state exch)
+                        enum rdma_cm_state comp, enum rdma_cm_state exch)
 {
        unsigned long flags;
        int ret;
@@ -241,11 +229,11 @@ static int cma_comp_exch(struct rdma_id_private *id_priv,
        return ret;
 }
 
-static enum cma_state cma_exch(struct rdma_id_private *id_priv,
-                              enum cma_state exch)
+static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
+                                  enum rdma_cm_state exch)
 {
        unsigned long flags;
-       enum cma_state old;
+       enum rdma_cm_state old;
 
        spin_lock_irqsave(&id_priv->lock, flags);
        old = id_priv->state;
@@ -279,11 +267,6 @@ static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
        hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
 }
 
-static inline int cma_is_ud_ps(enum rdma_port_space ps)
-{
-       return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
-}
-
 static void cma_attach_to_dev(struct rdma_id_private *id_priv,
                              struct cma_device *cma_dev)
 {
@@ -413,7 +396,7 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
 }
 
 static int cma_disable_callback(struct rdma_id_private *id_priv,
-                             enum cma_state state)
+                               enum rdma_cm_state state)
 {
        mutex_lock(&id_priv->handler_mutex);
        if (id_priv->state != state) {
@@ -429,7 +412,8 @@ static int cma_has_cm_dev(struct rdma_id_private *id_priv)
 }
 
 struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
-                                 void *context, enum rdma_port_space ps)
+                                 void *context, enum rdma_port_space ps,
+                                 enum ib_qp_type qp_type)
 {
        struct rdma_id_private *id_priv;
 
@@ -437,10 +421,12 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
        if (!id_priv)
                return ERR_PTR(-ENOMEM);
 
-       id_priv->state = CMA_IDLE;
+       id_priv->owner = task_pid_nr(current);
+       id_priv->state = RDMA_CM_IDLE;
        id_priv->id.context = context;
        id_priv->id.event_handler = event_handler;
        id_priv->id.ps = ps;
+       id_priv->id.qp_type = qp_type;
        spin_lock_init(&id_priv->lock);
        mutex_init(&id_priv->qp_mutex);
        init_completion(&id_priv->comp);
@@ -508,7 +494,7 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
        if (IS_ERR(qp))
                return PTR_ERR(qp);
 
-       if (cma_is_ud_ps(id_priv->id.ps))
+       if (id->qp_type == IB_QPT_UD)
                ret = cma_init_ud_qp(id_priv, qp);
        else
                ret = cma_init_conn_qp(id_priv, qp);
@@ -636,7 +622,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
        qp_attr->port_num = id_priv->id.port_num;
        *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
 
-       if (cma_is_ud_ps(id_priv->id.ps)) {
+       if (id_priv->id.qp_type == IB_QPT_UD) {
                ret = cma_set_qkey(id_priv);
                if (ret)
                        return ret;
@@ -659,7 +645,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
        id_priv = container_of(id, struct rdma_id_private, id);
        switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
        case RDMA_TRANSPORT_IB:
-               if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
+               if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
                        ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
                else
                        ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
@@ -858,16 +844,16 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
 }
 
 static void cma_cancel_operation(struct rdma_id_private *id_priv,
-                                enum cma_state state)
+                                enum rdma_cm_state state)
 {
        switch (state) {
-       case CMA_ADDR_QUERY:
+       case RDMA_CM_ADDR_QUERY:
                rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
                break;
-       case CMA_ROUTE_QUERY:
+       case RDMA_CM_ROUTE_QUERY:
                cma_cancel_route(id_priv);
                break;
-       case CMA_LISTEN:
+       case RDMA_CM_LISTEN:
                if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
                                && !id_priv->cma_dev)
                        cma_cancel_listens(id_priv);
@@ -918,10 +904,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
 void rdma_destroy_id(struct rdma_cm_id *id)
 {
        struct rdma_id_private *id_priv;
-       enum cma_state state;
+       enum rdma_cm_state state;
 
        id_priv = container_of(id, struct rdma_id_private, id);
-       state = cma_exch(id_priv, CMA_DESTROYING);
+       state = cma_exch(id_priv, RDMA_CM_DESTROYING);
        cma_cancel_operation(id_priv, state);
 
        /*
@@ -1015,9 +1001,9 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
        int ret = 0;
 
        if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
-               cma_disable_callback(id_priv, CMA_CONNECT)) ||
+               cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
            (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
-               cma_disable_callback(id_priv, CMA_DISCONNECT)))
+               cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
                return 0;
 
        memset(&event, 0, sizeof event);
@@ -1048,7 +1034,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
                event.status = -ETIMEDOUT; /* fall through */
        case IB_CM_DREQ_RECEIVED:
        case IB_CM_DREP_RECEIVED:
-               if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
+               if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
+                                  RDMA_CM_DISCONNECT))
                        goto out;
                event.event = RDMA_CM_EVENT_DISCONNECTED;
                break;
@@ -1075,7 +1062,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
        if (ret) {
                /* Destroy the CM ID by returning a non-zero value. */
                id_priv->cm_id.ib = NULL;
-               cma_exch(id_priv, CMA_DESTROYING);
+               cma_exch(id_priv, RDMA_CM_DESTROYING);
                mutex_unlock(&id_priv->handler_mutex);
                rdma_destroy_id(&id_priv->id);
                return ret;
@@ -1101,7 +1088,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
                goto err;
 
        id = rdma_create_id(listen_id->event_handler, listen_id->context,
-                           listen_id->ps);
+                           listen_id->ps, ib_event->param.req_rcvd.qp_type);
        if (IS_ERR(id))
                goto err;
 
@@ -1132,7 +1119,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
        rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
 
        id_priv = container_of(id, struct rdma_id_private, id);
-       id_priv->state = CMA_CONNECT;
+       id_priv->state = RDMA_CM_CONNECT;
        return id_priv;
 
 destroy_id:
@@ -1152,7 +1139,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
        int ret;
 
        id = rdma_create_id(listen_id->event_handler, listen_id->context,
-                           listen_id->ps);
+                           listen_id->ps, IB_QPT_UD);
        if (IS_ERR(id))
                return NULL;
 
@@ -1172,7 +1159,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
        }
 
        id_priv = container_of(id, struct rdma_id_private, id);
-       id_priv->state = CMA_CONNECT;
+       id_priv->state = RDMA_CM_CONNECT;
        return id_priv;
 err:
        rdma_destroy_id(id);
@@ -1201,13 +1188,13 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
        int offset, ret;
 
        listen_id = cm_id->context;
-       if (cma_disable_callback(listen_id, CMA_LISTEN))
+       if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
                return -ECONNABORTED;
 
        memset(&event, 0, sizeof event);
        offset = cma_user_data_offset(listen_id->id.ps);
        event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
-       if (cma_is_ud_ps(listen_id->id.ps)) {
+       if (listen_id->id.qp_type == IB_QPT_UD) {
                conn_id = cma_new_udp_id(&listen_id->id, ib_event);
                event.param.ud.private_data = ib_event->private_data + offset;
                event.param.ud.private_data_len =
@@ -1243,8 +1230,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
                 * while we're accessing the cm_id.
                 */
                mutex_lock(&lock);
-               if (cma_comp(conn_id, CMA_CONNECT) &&
-                   !cma_is_ud_ps(conn_id->id.ps))
+               if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
                        ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
                mutex_unlock(&lock);
                mutex_unlock(&conn_id->handler_mutex);
@@ -1257,7 +1243,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
        conn_id->cm_id.ib = NULL;
 
 release_conn_id:
-       cma_exch(conn_id, CMA_DESTROYING);
+       cma_exch(conn_id, RDMA_CM_DESTROYING);
        mutex_unlock(&conn_id->handler_mutex);
        rdma_destroy_id(&conn_id->id);
 
@@ -1328,7 +1314,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
        struct sockaddr_in *sin;
        int ret = 0;
 
-       if (cma_disable_callback(id_priv, CMA_CONNECT))
+       if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
                return 0;
 
        memset(&event, 0, sizeof event);
@@ -1371,7 +1357,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
        if (ret) {
                /* Destroy the CM ID by returning a non-zero value. */
                id_priv->cm_id.iw = NULL;
-               cma_exch(id_priv, CMA_DESTROYING);
+               cma_exch(id_priv, RDMA_CM_DESTROYING);
                mutex_unlock(&id_priv->handler_mutex);
                rdma_destroy_id(&id_priv->id);
                return ret;
@@ -1393,20 +1379,20 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
        struct ib_device_attr attr;
 
        listen_id = cm_id->context;
-       if (cma_disable_callback(listen_id, CMA_LISTEN))
+       if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
                return -ECONNABORTED;
 
        /* Create a new RDMA id for the new IW CM ID */
        new_cm_id = rdma_create_id(listen_id->id.event_handler,
                                   listen_id->id.context,
-                                  RDMA_PS_TCP);
+                                  RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(new_cm_id)) {
                ret = -ENOMEM;
                goto out;
        }
        conn_id = container_of(new_cm_id, struct rdma_id_private, id);
        mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
-       conn_id->state = CMA_CONNECT;
+       conn_id->state = RDMA_CM_CONNECT;
 
        dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
        if (!dev) {
@@ -1461,7 +1447,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
        if (ret) {
                /* User wants to destroy the CM ID */
                conn_id->cm_id.iw = NULL;
-               cma_exch(conn_id, CMA_DESTROYING);
+               cma_exch(conn_id, RDMA_CM_DESTROYING);
                mutex_unlock(&conn_id->handler_mutex);
                cma_deref_id(conn_id);
                rdma_destroy_id(&conn_id->id);
@@ -1548,13 +1534,14 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
        struct rdma_cm_id *id;
        int ret;
 
-       id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
+       id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
+                           id_priv->id.qp_type);
        if (IS_ERR(id))
                return;
 
        dev_id_priv = container_of(id, struct rdma_id_private, id);
 
-       dev_id_priv->state = CMA_ADDR_BOUND;
+       dev_id_priv->state = RDMA_CM_ADDR_BOUND;
        memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
               ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
 
@@ -1601,8 +1588,8 @@ static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
                route->num_paths = 1;
                *route->path_rec = *path_rec;
        } else {
-               work->old_state = CMA_ROUTE_QUERY;
-               work->new_state = CMA_ADDR_RESOLVED;
+               work->old_state = RDMA_CM_ROUTE_QUERY;
+               work->new_state = RDMA_CM_ADDR_RESOLVED;
                work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
                work->event.status = status;
        }
@@ -1660,7 +1647,7 @@ static void cma_work_handler(struct work_struct *_work)
                goto out;
 
        if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
-               cma_exch(id_priv, CMA_DESTROYING);
+               cma_exch(id_priv, RDMA_CM_DESTROYING);
                destroy = 1;
        }
 out:
@@ -1678,12 +1665,12 @@ static void cma_ndev_work_handler(struct work_struct *_work)
        int destroy = 0;
 
        mutex_lock(&id_priv->handler_mutex);
-       if (id_priv->state == CMA_DESTROYING ||
-           id_priv->state == CMA_DEVICE_REMOVAL)
+       if (id_priv->state == RDMA_CM_DESTROYING ||
+           id_priv->state == RDMA_CM_DEVICE_REMOVAL)
                goto out;
 
        if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
-               cma_exch(id_priv, CMA_DESTROYING);
+               cma_exch(id_priv, RDMA_CM_DESTROYING);
                destroy = 1;
        }
 
@@ -1707,8 +1694,8 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
 
        work->id = id_priv;
        INIT_WORK(&work->work, cma_work_handler);
-       work->old_state = CMA_ROUTE_QUERY;
-       work->new_state = CMA_ROUTE_RESOLVED;
+       work->old_state = RDMA_CM_ROUTE_QUERY;
+       work->new_state = RDMA_CM_ROUTE_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 
        route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
@@ -1737,7 +1724,8 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
        int ret;
 
        id_priv = container_of(id, struct rdma_id_private, id);
-       if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
+       if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+                          RDMA_CM_ROUTE_RESOLVED))
                return -EINVAL;
 
        id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
@@ -1750,7 +1738,7 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
        id->route.num_paths = num_paths;
        return 0;
 err:
-       cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
+       cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
        return ret;
 }
 EXPORT_SYMBOL(rdma_set_ib_paths);
@@ -1765,8 +1753,8 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
 
        work->id = id_priv;
        INIT_WORK(&work->work, cma_work_handler);
-       work->old_state = CMA_ROUTE_QUERY;
-       work->new_state = CMA_ROUTE_RESOLVED;
+       work->old_state = RDMA_CM_ROUTE_QUERY;
+       work->new_state = RDMA_CM_ROUTE_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
        queue_work(cma_wq, &work->work);
        return 0;
@@ -1830,8 +1818,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
                goto err2;
        }
 
-       work->old_state = CMA_ROUTE_QUERY;
-       work->new_state = CMA_ROUTE_RESOLVED;
+       work->old_state = RDMA_CM_ROUTE_QUERY;
+       work->new_state = RDMA_CM_ROUTE_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
        work->event.status = 0;
 
@@ -1853,7 +1841,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
        int ret;
 
        id_priv = container_of(id, struct rdma_id_private, id);
-       if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
+       if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
                return -EINVAL;
 
        atomic_inc(&id_priv->refcount);
@@ -1882,7 +1870,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 
        return 0;
 err:
-       cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
+       cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
        cma_deref_id(id_priv);
        return ret;
 }
@@ -1941,14 +1929,16 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 
        memset(&event, 0, sizeof event);
        mutex_lock(&id_priv->handler_mutex);
-       if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED))
+       if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
+                          RDMA_CM_ADDR_RESOLVED))
                goto out;
 
        if (!status && !id_priv->cma_dev)
                status = cma_acquire_dev(id_priv);
 
        if (status) {
-               if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
+               if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+                                  RDMA_CM_ADDR_BOUND))
                        goto out;
                event.event = RDMA_CM_EVENT_ADDR_ERROR;
                event.status = status;
@@ -1959,7 +1949,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
        }
 
        if (id_priv->id.event_handler(&id_priv->id, &event)) {
-               cma_exch(id_priv, CMA_DESTROYING);
+               cma_exch(id_priv, RDMA_CM_DESTROYING);
                mutex_unlock(&id_priv->handler_mutex);
                cma_deref_id(id_priv);
                rdma_destroy_id(&id_priv->id);
@@ -2004,8 +1994,8 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
 
        work->id = id_priv;
        INIT_WORK(&work->work, cma_work_handler);
-       work->old_state = CMA_ADDR_QUERY;
-       work->new_state = CMA_ADDR_RESOLVED;
+       work->old_state = RDMA_CM_ADDR_QUERY;
+       work->new_state = RDMA_CM_ADDR_RESOLVED;
        work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
        queue_work(cma_wq, &work->work);
        return 0;
@@ -2034,13 +2024,13 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
        int ret;
 
        id_priv = container_of(id, struct rdma_id_private, id);
-       if (id_priv->state == CMA_IDLE) {
+       if (id_priv->state == RDMA_CM_IDLE) {
                ret = cma_bind_addr(id, src_addr, dst_addr);
                if (ret)
                        return ret;
        }
 
-       if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
+       if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
                return -EINVAL;
 
        atomic_inc(&id_priv->refcount);
@@ -2056,7 +2046,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 
        return 0;
 err:
-       cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
+       cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
        cma_deref_id(id_priv);
        return ret;
 }
@@ -2070,7 +2060,7 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
 
        id_priv = container_of(id, struct rdma_id_private, id);
        spin_lock_irqsave(&id_priv->lock, flags);
-       if (id_priv->state == CMA_IDLE) {
+       if (id_priv->state == RDMA_CM_IDLE) {
                id_priv->reuseaddr = reuse;
                ret = 0;
        } else {
@@ -2177,7 +2167,7 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
                if (id_priv == cur_id)
                        continue;
 
-               if ((cur_id->state == CMA_LISTEN) ||
+               if ((cur_id->state == RDMA_CM_LISTEN) ||
                    !reuseaddr || !cur_id->reuseaddr) {
                        cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
                        if (cma_any_addr(cur_addr))
@@ -2280,14 +2270,14 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
        int ret;
 
        id_priv = container_of(id, struct rdma_id_private, id);
-       if (id_priv->state == CMA_IDLE) {
+       if (id_priv->state == RDMA_CM_IDLE) {
                ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
                ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
                if (ret)
                        return ret;
        }
 
-       if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
+       if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
                return -EINVAL;
 
        if (id_priv->reuseaddr) {
@@ -2319,7 +2309,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
        return 0;
 err:
        id_priv->backlog = 0;
-       cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
+       cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
        return ret;
 }
 EXPORT_SYMBOL(rdma_listen);
@@ -2333,7 +2323,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
                return -EAFNOSUPPORT;
 
        id_priv = container_of(id, struct rdma_id_private, id);
-       if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
+       if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
                return -EINVAL;
 
        ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
@@ -2360,7 +2350,7 @@ err2:
        if (id_priv->cma_dev)
                cma_release_dev(id_priv);
 err1:
-       cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
+       cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
        return ret;
 }
 EXPORT_SYMBOL(rdma_bind_addr);
@@ -2433,7 +2423,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
        struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
        int ret = 0;
 
-       if (cma_disable_callback(id_priv, CMA_CONNECT))
+       if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
                return 0;
 
        memset(&event, 0, sizeof event);
@@ -2479,7 +2469,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
        if (ret) {
                /* Destroy the CM ID by returning a non-zero value. */
                id_priv->cm_id.ib = NULL;
-               cma_exch(id_priv, CMA_DESTROYING);
+               cma_exch(id_priv, RDMA_CM_DESTROYING);
                mutex_unlock(&id_priv->handler_mutex);
                rdma_destroy_id(&id_priv->id);
                return ret;
@@ -2645,7 +2635,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
        int ret;
 
        id_priv = container_of(id, struct rdma_id_private, id);
-       if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
+       if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
                return -EINVAL;
 
        if (!id->qp) {
@@ -2655,7 +2645,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
        switch (rdma_node_get_transport(id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
-               if (cma_is_ud_ps(id->ps))
+               if (id->qp_type == IB_QPT_UD)
                        ret = cma_resolve_ib_udp(id_priv, conn_param);
                else
                        ret = cma_connect_ib(id_priv, conn_param);
@@ -2672,7 +2662,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
        return 0;
 err:
-       cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
+       cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
        return ret;
 }
 EXPORT_SYMBOL(rdma_connect);
@@ -2758,7 +2748,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
        int ret;
 
        id_priv = container_of(id, struct rdma_id_private, id);
-       if (!cma_comp(id_priv, CMA_CONNECT))
+
+       id_priv->owner = task_pid_nr(current);
+
+       if (!cma_comp(id_priv, RDMA_CM_CONNECT))
                return -EINVAL;
 
        if (!id->qp && conn_param) {
@@ -2768,7 +2761,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
        switch (rdma_node_get_transport(id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
-               if (cma_is_ud_ps(id->ps))
+               if (id->qp_type == IB_QPT_UD)
                        ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
                                                conn_param->private_data,
                                                conn_param->private_data_len);
@@ -2829,7 +2822,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 
        switch (rdma_node_get_transport(id->device->node_type)) {
        case RDMA_TRANSPORT_IB:
-               if (cma_is_ud_ps(id->ps))
+               if (id->qp_type == IB_QPT_UD)
                        ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
                                                private_data, private_data_len);
                else
@@ -2887,8 +2880,8 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
        int ret;
 
        id_priv = mc->id_priv;
-       if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
-           cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
+       if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
+           cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
                return 0;
 
        mutex_lock(&id_priv->qp_mutex);
@@ -2912,7 +2905,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 
        ret = id_priv->id.event_handler(&id_priv->id, &event);
        if (ret) {
-               cma_exch(id_priv, CMA_DESTROYING);
+               cma_exch(id_priv, RDMA_CM_DESTROYING);
                mutex_unlock(&id_priv->handler_mutex);
                rdma_destroy_id(&id_priv->id);
                return 0;
@@ -3095,8 +3088,8 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
        int ret;
 
        id_priv = container_of(id, struct rdma_id_private, id);
-       if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
-           !cma_comp(id_priv, CMA_ADDR_RESOLVED))
+       if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
+           !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
                return -EINVAL;
 
        mc = kmalloc(sizeof *mc, GFP_KERNEL);
@@ -3261,19 +3254,19 @@ static void cma_add_one(struct ib_device *device)
 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
 {
        struct rdma_cm_event event;
-       enum cma_state state;
+       enum rdma_cm_state state;
        int ret = 0;
 
        /* Record that we want to remove the device */
-       state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
-       if (state == CMA_DESTROYING)
+       state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
+       if (state == RDMA_CM_DESTROYING)
                return 0;
 
        cma_cancel_operation(id_priv, state);
        mutex_lock(&id_priv->handler_mutex);
 
        /* Check for destruction from another callback. */
-       if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
+       if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
                goto out;
 
        memset(&event, 0, sizeof event);
@@ -3328,6 +3321,100 @@ static void cma_remove_one(struct ib_device *device)
        kfree(cma_dev);
 }
 
+static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct nlmsghdr *nlh;
+       struct rdma_cm_id_stats *id_stats;
+       struct rdma_id_private *id_priv;
+       struct rdma_cm_id *id = NULL;
+       struct cma_device *cma_dev;
+       int i_dev = 0, i_id = 0;
+
+       /*
+        * We export all of the IDs as a sequence of messages.  Each
+        * ID gets its own netlink message.
+        */
+       mutex_lock(&lock);
+
+       list_for_each_entry(cma_dev, &dev_list, list) {
+               if (i_dev < cb->args[0]) {
+                       i_dev++;
+                       continue;
+               }
+
+               i_id = 0;
+               list_for_each_entry(id_priv, &cma_dev->id_list, list) {
+                       if (i_id < cb->args[1]) {
+                               i_id++;
+                               continue;
+                       }
+
+                       id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
+                                               sizeof *id_stats, RDMA_NL_RDMA_CM,
+                                               RDMA_NL_RDMA_CM_ID_STATS);
+                       if (!id_stats)
+                               goto out;
+
+                       memset(id_stats, 0, sizeof *id_stats);
+                       id = &id_priv->id;
+                       id_stats->node_type = id->route.addr.dev_addr.dev_type;
+                       id_stats->port_num = id->port_num;
+                       id_stats->bound_dev_if =
+                               id->route.addr.dev_addr.bound_dev_if;
+
+                       if (id->route.addr.src_addr.ss_family == AF_INET) {
+                               if (ibnl_put_attr(skb, nlh,
+                                                 sizeof(struct sockaddr_in),
+                                                 &id->route.addr.src_addr,
+                                                 RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
+                                       goto out;
+                               }
+                               if (ibnl_put_attr(skb, nlh,
+                                                 sizeof(struct sockaddr_in),
+                                                 &id->route.addr.dst_addr,
+                                                 RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
+                                       goto out;
+                               }
+                       } else if (id->route.addr.src_addr.ss_family == AF_INET6) {
+                               if (ibnl_put_attr(skb, nlh,
+                                                 sizeof(struct sockaddr_in6),
+                                                 &id->route.addr.src_addr,
+                                                 RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
+                                       goto out;
+                               }
+                               if (ibnl_put_attr(skb, nlh,
+                                                 sizeof(struct sockaddr_in6),
+                                                 &id->route.addr.dst_addr,
+                                                 RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
+                                       goto out;
+                               }
+                       }
+
+                       id_stats->pid           = id_priv->owner;
+                       id_stats->port_space    = id->ps;
+                       id_stats->cm_state      = id_priv->state;
+                       id_stats->qp_num        = id_priv->qp_num;
+                       id_stats->qp_type       = id->qp_type;
+
+                       i_id++;
+               }
+
+               cb->args[1] = 0;
+               i_dev++;
+       }
+
+out:
+       mutex_unlock(&lock);
+       cb->args[0] = i_dev;
+       cb->args[1] = i_id;
+
+       return skb->len;
+}
+
+static const struct ibnl_client_cbs cma_cb_table[] = {
+       [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats },
+};
+
 static int __init cma_init(void)
 {
        int ret;
@@ -3343,6 +3430,10 @@ static int __init cma_init(void)
        ret = ib_register_client(&cma_client);
        if (ret)
                goto err;
+
+       if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
+               printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");
+
        return 0;
 
 err:
@@ -3355,6 +3446,7 @@ err:
 
 static void __exit cma_cleanup(void)
 {
+       ibnl_remove_client(RDMA_NL_RDMA_CM);
        ib_unregister_client(&cma_client);
        unregister_netdevice_notifier(&cma_nb);
        rdma_addr_unregister_client(&addr_client);
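
For reference, a minimal userspace sketch of exercising the dump interface added above: open a NETLINK_RDMA socket, send an RDMA_NL_RDMA_CM_ID_STATS request with NLM_F_REQUEST|NLM_F_DUMP set, and walk the NLM_F_MULTI replies. The RDMA_NL_* macros, NETLINK_RDMA and struct rdma_cm_id_stats come from the headers introduced by this series (they are not sanitized uapi headers, so a real client may need to copy the definitions); a single recv() is shown for brevity, and a complete client would keep reading until NLMSG_DONE.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <rdma/rdma_cm.h>          /* struct rdma_cm_id_stats (this series) */
#include <rdma/rdma_netlink.h>     /* RDMA_NL_* definitions (this series) */

int main(void)
{
        struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
        struct nlmsghdr req = {
                .nlmsg_len   = NLMSG_LENGTH(0),
                .nlmsg_type  = RDMA_NL_GET_TYPE(RDMA_NL_RDMA_CM,
                                                RDMA_NL_RDMA_CM_ID_STATS),
                .nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
        };
        struct nlmsghdr *nlh;
        char buf[8192];
        ssize_t len;
        int fd;

        fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_RDMA);
        if (fd < 0)
                return 1;
        if (sendto(fd, &req, req.nlmsg_len, 0,
                   (struct sockaddr *)&kernel, sizeof(kernel)) < 0)
                return 1;

        /* Each rdma_cm_id is reported in its own NLM_F_MULTI message. */
        len = recv(fd, buf, sizeof(buf), 0);
        for (nlh = (struct nlmsghdr *)buf; len > 0 && NLMSG_OK(nlh, len);
             nlh = NLMSG_NEXT(nlh, len)) {
                struct rdma_cm_id_stats *s;

                if (nlh->nlmsg_type == NLMSG_DONE)
                        break;
                s = NLMSG_DATA(nlh);
                printf("pid %d ps %u cm_state %u qpn %u\n", (int)s->pid,
                       (unsigned)s->port_space, (unsigned)s->cm_state,
                       (unsigned)s->qp_num);
        }
        close(fd);
        return 0;
}
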
index f793bf2f5da7cf035d6f05da681aea7473cfd4ea..4007f721d25d2dda4bfb176802d06fb71705fe65 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
+#include <rdma/rdma_netlink.h>
 
 #include "core_priv.h"
 
@@ -725,22 +726,40 @@ static int __init ib_core_init(void)
                return -ENOMEM;
 
        ret = ib_sysfs_setup();
-       if (ret)
+       if (ret) {
                printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
+               goto err;
+       }
+
+       ret = ibnl_init();
+       if (ret) {
+               printk(KERN_WARNING "Couldn't init IB netlink interface\n");
+               goto err_sysfs;
+       }
 
        ret = ib_cache_setup();
        if (ret) {
                printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
-               ib_sysfs_cleanup();
-               destroy_workqueue(ib_wq);
+               goto err_nl;
        }
 
+       return 0;
+
+err_nl:
+       ibnl_cleanup();
+
+err_sysfs:
+       ib_sysfs_cleanup();
+
+err:
+       destroy_workqueue(ib_wq);
        return ret;
 }
 
 static void __exit ib_core_cleanup(void)
 {
        ib_cache_cleanup();
+       ibnl_cleanup();
        ib_sysfs_cleanup();
        /* Make sure that any pending umem accounting work is done. */
        destroy_workqueue(ib_wq);
index 822cfdcd9f785af9fffb9a2b2b9b9f037b328e33..b4d8672a3e4ef539481a4089cc25f8c7b8f38901 100644 (file)
@@ -276,6 +276,13 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
                goto error1;
        }
 
+       /* Verify that the requested QP is supported. For example, Ethernet
+        * devices will not have QP0. */
+       if (!port_priv->qp_info[qpn].qp) {
+               ret = ERR_PTR(-EPROTONOSUPPORT);
+               goto error1;
+       }
+
        /* Allocate structures */
        mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL);
        if (!mad_agent_priv) {
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
new file mode 100644 (file)
index 0000000..4a5abaf
--- /dev/null
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2010 Voltaire Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
+
+#include <net/netlink.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <rdma/rdma_netlink.h>
+
+struct ibnl_client {
+       struct list_head                list;
+       int                             index;
+       int                             nops;
+       const struct ibnl_client_cbs   *cb_table;
+};
+
+static DEFINE_MUTEX(ibnl_mutex);
+static struct sock *nls;
+static LIST_HEAD(client_list);
+
+int ibnl_add_client(int index, int nops,
+                   const struct ibnl_client_cbs cb_table[])
+{
+       struct ibnl_client *cur;
+       struct ibnl_client *nl_client;
+
+       nl_client = kmalloc(sizeof *nl_client, GFP_KERNEL);
+       if (!nl_client)
+               return -ENOMEM;
+
+       nl_client->index        = index;
+       nl_client->nops         = nops;
+       nl_client->cb_table     = cb_table;
+
+       mutex_lock(&ibnl_mutex);
+
+       list_for_each_entry(cur, &client_list, list) {
+               if (cur->index == index) {
+                       pr_warn("Client for %d already exists\n", index);
+                       mutex_unlock(&ibnl_mutex);
+                       kfree(nl_client);
+                       return -EINVAL;
+               }
+       }
+
+       list_add_tail(&nl_client->list, &client_list);
+
+       mutex_unlock(&ibnl_mutex);
+
+       return 0;
+}
+EXPORT_SYMBOL(ibnl_add_client);
+
+int ibnl_remove_client(int index)
+{
+       struct ibnl_client *cur, *next;
+
+       mutex_lock(&ibnl_mutex);
+       list_for_each_entry_safe(cur, next, &client_list, list) {
+               if (cur->index == index) {
+                       list_del(&(cur->list));
+                       mutex_unlock(&ibnl_mutex);
+                       kfree(cur);
+                       return 0;
+               }
+       }
+       pr_warn("Can't remove callback for client idx %d. Not found\n", index);
+       mutex_unlock(&ibnl_mutex);
+
+       return -EINVAL;
+}
+EXPORT_SYMBOL(ibnl_remove_client);
+
+void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
+                  int len, int client, int op)
+{
+       unsigned char *prev_tail;
+
+       prev_tail = skb_tail_pointer(skb);
+       *nlh = NLMSG_NEW(skb, 0, seq, RDMA_NL_GET_TYPE(client, op),
+                       len, NLM_F_MULTI);
+       (*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail;
+       return NLMSG_DATA(*nlh);
+
+nlmsg_failure:
+       nlmsg_trim(skb, prev_tail);
+       return NULL;
+}
+EXPORT_SYMBOL(ibnl_put_msg);
+
+int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
+                 int len, void *data, int type)
+{
+       unsigned char *prev_tail;
+
+       prev_tail = skb_tail_pointer(skb);
+       NLA_PUT(skb, type, len, data);
+       nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail;
+       return 0;
+
+nla_put_failure:
+       nlmsg_trim(skb, prev_tail - nlh->nlmsg_len);
+       return -EMSGSIZE;
+}
+EXPORT_SYMBOL(ibnl_put_attr);
+
+static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+       struct ibnl_client *client;
+       int type = nlh->nlmsg_type;
+       int index = RDMA_NL_GET_CLIENT(type);
+       int op = RDMA_NL_GET_OP(type);
+
+       list_for_each_entry(client, &client_list, list) {
+               if (client->index == index) {
+                       if (op < 0 || op >= client->nops ||
+                           !client->cb_table[RDMA_NL_GET_OP(op)].dump)
+                               return -EINVAL;
+                       return netlink_dump_start(nls, skb, nlh,
+                                                 client->cb_table[op].dump,
+                                                 NULL);
+               }
+       }
+
+       pr_info("Index %d wasn't found in client list\n", index);
+       return -EINVAL;
+}
+
+static void ibnl_rcv(struct sk_buff *skb)
+{
+       mutex_lock(&ibnl_mutex);
+       netlink_rcv_skb(skb, &ibnl_rcv_msg);
+       mutex_unlock(&ibnl_mutex);
+}
+
+int __init ibnl_init(void)
+{
+       nls = netlink_kernel_create(&init_net, NETLINK_RDMA, 0, ibnl_rcv,
+                                   NULL, THIS_MODULE);
+       if (!nls) {
+               pr_warn("Failed to create netlink socket\n");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+void ibnl_cleanup(void)
+{
+       struct ibnl_client *cur, *next;
+
+       mutex_lock(&ibnl_mutex);
+       list_for_each_entry_safe(cur, next, &client_list, list) {
+               list_del(&(cur->list));
+               kfree(cur);
+       }
+       mutex_unlock(&ibnl_mutex);
+
+       netlink_kernel_release(nls);
+}
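
As a usage note for the API above, an RDMA kernel module registers a table of dump callbacks against its client index and frames its replies with ibnl_put_msg()/ibnl_put_attr(). The sketch below mirrors the RDMA CM hookup earlier in this diff; FOO_NL_CLIENT, FOO_NL_OP_DUMP and the u32 payload are illustrative placeholders, not values defined by this series.

#include <linux/kernel.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <rdma/rdma_netlink.h>

#define FOO_NL_CLIENT   3       /* hypothetical client index */
#define FOO_NL_OP_DUMP  0       /* hypothetical op number */

static int foo_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct nlmsghdr *nlh;
        u32 *payload;

        if (cb->args[0])        /* everything already sent in a prior pass */
                return 0;

        payload = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
                               sizeof(*payload), FOO_NL_CLIENT,
                               FOO_NL_OP_DUMP);
        if (!payload)
                return skb->len;        /* no room, retry in the next pass */

        *payload = 42;                  /* illustrative payload */
        cb->args[0] = 1;
        return skb->len;
}

static const struct ibnl_client_cbs foo_nl_cbs[] = {
        [FOO_NL_OP_DUMP] = { .dump = foo_nl_dump },
};

static int __init foo_nl_init(void)
{
        return ibnl_add_client(FOO_NL_CLIENT, ARRAY_SIZE(foo_nl_cbs),
                               foo_nl_cbs);
}
module_init(foo_nl_init);

static void __exit foo_nl_exit(void)
{
        ibnl_remove_client(FOO_NL_CLIENT);
}
module_exit(foo_nl_exit);

MODULE_LICENSE("GPL");
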
index b3fa798525b2a1d8d3df793f41fe255eacf676dd..71be5eebd683a52605096ecf7745ee0d93af758d 100644 (file)
@@ -367,13 +367,28 @@ done:
        return ret;
 }
 
-static ssize_t ucma_create_id(struct ucma_file *file,
-                               const char __user *inbuf,
-                               int in_len, int out_len)
+static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
+{
+       switch (cmd->ps) {
+       case RDMA_PS_TCP:
+               *qp_type = IB_QPT_RC;
+               return 0;
+       case RDMA_PS_UDP:
+       case RDMA_PS_IPOIB:
+               *qp_type = IB_QPT_UD;
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
+                             int in_len, int out_len)
 {
        struct rdma_ucm_create_id cmd;
        struct rdma_ucm_create_id_resp resp;
        struct ucma_context *ctx;
+       enum ib_qp_type qp_type;
        int ret;
 
        if (out_len < sizeof(resp))
@@ -382,6 +397,10 @@ static ssize_t ucma_create_id(struct ucma_file *file,
        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;
 
+       ret = ucma_get_qp_type(&cmd, &qp_type);
+       if (ret)
+               return ret;
+
        mutex_lock(&file->mut);
        ctx = ucma_alloc_ctx(file);
        mutex_unlock(&file->mut);
@@ -389,7 +408,7 @@ static ssize_t ucma_create_id(struct ucma_file *file,
                return -ENOMEM;
 
        ctx->uid = cmd.uid;
-       ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
+       ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type);
        if (IS_ERR(ctx->cm_id)) {
                ret = PTR_ERR(ctx->cm_id);
                goto err1;
@@ -1338,9 +1357,11 @@ static const struct file_operations ucma_fops = {
 };
 
 static struct miscdevice ucma_misc = {
-       .minor  = MISC_DYNAMIC_MINOR,
-       .name   = "rdma_cm",
-       .fops   = &ucma_fops,
+       .minor          = MISC_DYNAMIC_MINOR,
+       .name           = "rdma_cm",
+       .nodename       = "infiniband/rdma_cm",
+       .mode           = 0666,
+       .fops           = &ucma_fops,
 };
 
 static ssize_t show_abi_version(struct device *dev,
index cd1996d0ad089950960ba8bb3f4eaa0bff24f7fe..8d261b6ea5feaef85091726c81df2230d38e56b0 100644 (file)
@@ -1176,6 +1176,11 @@ static void ib_umad_remove_one(struct ib_device *device)
        kref_put(&umad_dev->ref, ib_umad_release_dev);
 }
 
+static char *umad_devnode(struct device *dev, mode_t *mode)
+{
+       return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
+}
+
 static int __init ib_umad_init(void)
 {
        int ret;
@@ -1194,6 +1199,8 @@ static int __init ib_umad_init(void)
                goto out_chrdev;
        }
 
+       umad_class->devnode = umad_devnode;
+
        ret = class_create_file(umad_class, &class_attr_abi_version.attr);
        if (ret) {
                printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n");
index ec83e9fe387bd9112266d984254a9f00200e7652..e49a85f8a44debe499e0bcd2c4d489174b5aab3d 100644 (file)
@@ -824,6 +824,12 @@ static void ib_uverbs_remove_one(struct ib_device *device)
        kfree(uverbs_dev);
 }
 
+static char *uverbs_devnode(struct device *dev, mode_t *mode)
+{
+       *mode = 0666;
+       return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
+}
+
 static int __init ib_uverbs_init(void)
 {
        int ret;
@@ -842,6 +848,8 @@ static int __init ib_uverbs_init(void)
                goto out_chrdev;
        }
 
+       uverbs_class->devnode = uverbs_devnode;
+
        ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
        if (ret) {
                printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n");
index 2391841389944f914c3d4bc224f136b27f170fa3..0a5008fbebac553e90dae527f88549eb90210936 100644 (file)
@@ -914,7 +914,7 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
                goto err;
 
        if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
-               iwch_post_zb_read(ep->com.qp);
+               iwch_post_zb_read(ep);
        }
 
        goto out;
@@ -1078,6 +1078,8 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
        struct iwch_ep *ep = ctx;
        struct cpl_wr_ack *hdr = cplhdr(skb);
        unsigned int credits = ntohs(hdr->credits);
+       unsigned long flags;
+       int post_zb = 0;
 
        PDBG("%s ep %p credits %u\n", __func__, ep, credits);
 
@@ -1087,28 +1089,34 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
                return CPL_RET_BUF_DONE;
        }
 
+       spin_lock_irqsave(&ep->com.lock, flags);
        BUG_ON(credits != 1);
        dst_confirm(ep->dst);
        if (!ep->mpa_skb) {
                PDBG("%s rdma_init wr_ack ep %p state %u\n",
-                       __func__, ep, state_read(&ep->com));
+                       __func__, ep, ep->com.state);
                if (ep->mpa_attr.initiator) {
                        PDBG("%s initiator ep %p state %u\n",
-                               __func__, ep, state_read(&ep->com));
-                       if (peer2peer)
-                               iwch_post_zb_read(ep->com.qp);
+                               __func__, ep, ep->com.state);
+                       if (peer2peer && ep->com.state == FPDU_MODE)
+                               post_zb = 1;
                } else {
                        PDBG("%s responder ep %p state %u\n",
-                               __func__, ep, state_read(&ep->com));
-                       ep->com.rpl_done = 1;
-                       wake_up(&ep->com.waitq);
+                               __func__, ep, ep->com.state);
+                       if (ep->com.state == MPA_REQ_RCVD) {
+                               ep->com.rpl_done = 1;
+                               wake_up(&ep->com.waitq);
+                       }
                }
        } else {
                PDBG("%s lsm ack ep %p state %u freeing skb\n",
-                       __func__, ep, state_read(&ep->com));
+                       __func__, ep, ep->com.state);
                kfree_skb(ep->mpa_skb);
                ep->mpa_skb = NULL;
        }
+       spin_unlock_irqrestore(&ep->com.lock, flags);
+       if (post_zb)
+               iwch_post_zb_read(ep);
        return CPL_RET_BUF_DONE;
 }
 
index c5406da3f4cd2d28dc02e7480d6a4c224516095c..9a342c9b220d240afebce55e5abfac6f67ee4953 100644 (file)
@@ -332,7 +332,7 @@ int iwch_bind_mw(struct ib_qp *qp,
                             struct ib_mw_bind *mw_bind);
 int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
-int iwch_post_zb_read(struct iwch_qp *qhp);
+int iwch_post_zb_read(struct iwch_ep *ep);
 int iwch_register_device(struct iwch_dev *dev);
 void iwch_unregister_device(struct iwch_dev *dev);
 void stop_read_rep_timer(struct iwch_qp *qhp);
index 1b4cd09f74dc2517567d8595fdd416c13ddd4941..ecd313f359a45ac6d90c0184ae1ccea3eeda7a6c 100644 (file)
@@ -738,7 +738,7 @@ static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
        }
 }
 
-int iwch_post_zb_read(struct iwch_qp *qhp)
+int iwch_post_zb_read(struct iwch_ep *ep)
 {
        union t3_wr *wqe;
        struct sk_buff *skb;
@@ -761,10 +761,10 @@ int iwch_post_zb_read(struct iwch_qp *qhp)
        wqe->read.local_len = cpu_to_be32(0);
        wqe->read.local_to = cpu_to_be64(1);
        wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
-       wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)|
+       wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(ep->hwtid)|
                                                V_FW_RIWR_LEN(flit_cnt));
        skb->priority = CPL_PRIORITY_DATA;
-       return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
+       return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb);
 }
 
 /*
index 35d2a5dd9bb402d212c5f5ad5f8aa82480d7ce24..4f045375c8e27b8fb7dc93776c9b1a7e6eb7a4da 100644 (file)
@@ -35,7 +35,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/idr.h>
-#include <linux/workqueue.h>
+#include <linux/completion.h>
 #include <linux/netdevice.h>
 #include <linux/sched.h>
 #include <linux/pci.h>
@@ -131,28 +131,21 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
 
 #define C4IW_WR_TO (10*HZ)
 
-enum {
-       REPLY_READY = 0,
-};
-
 struct c4iw_wr_wait {
-       wait_queue_head_t wait;
-       unsigned long status;
+       struct completion completion;
        int ret;
 };
 
 static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
 {
        wr_waitp->ret = 0;
-       wr_waitp->status = 0;
-       init_waitqueue_head(&wr_waitp->wait);
+       init_completion(&wr_waitp->completion);
 }
 
 static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
 {
        wr_waitp->ret = ret;
-       set_bit(REPLY_READY, &wr_waitp->status);
-       wake_up(&wr_waitp->wait);
+       complete(&wr_waitp->completion);
 }
 
 static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
@@ -164,8 +157,7 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
        int ret;
 
        do {
-               ret = wait_event_timeout(wr_waitp->wait,
-                       test_and_clear_bit(REPLY_READY, &wr_waitp->status), to);
+               ret = wait_for_completion_timeout(&wr_waitp->completion, to);
                if (!ret) {
                        printk(KERN_ERR MOD "%s - Device %s not responding - "
                               "tid %u qpid %u\n", func,
index 13de1192927cc50da063562fdc8a47e8a4336422..2d668c69f6d958bcd9ba275967c8bbc1f3a93b9d 100644 (file)
@@ -1138,7 +1138,9 @@ static ssize_t nes_store_wqm_quanta(struct device_driver *ddp,
        u32 i = 0;
        struct nes_device *nesdev;
 
-       strict_strtoul(buf, 0, &wqm_quanta_value);
+       if (kstrtoul(buf, 0, &wqm_quanta_value) < 0)
+               return -EINVAL;
+
        list_for_each_entry(nesdev, &nes_dev_list, list) {
                if (i == ee_flsh_adapter) {
                        nesdev->nesadapter->wqm_quanta = wqm_quanta_value;
index 7c03a70c55a2815586c805bd17fc9fd69f21b2fc..8349f9c5064c042d568cf56619b76dd391410402 100644 (file)
@@ -1,6 +1,6 @@
 config INFINIBAND_QIB
        tristate "QLogic PCIe HCA support"
-       depends on 64BIT && NET
+       depends on 64BIT
        ---help---
        This is a low-level driver for QLogic PCIe QLE InfiniBand host
        channel adapters.  This driver does not support the QLogic
index 9876865732f76c4abd815072dbc9b5c86075c3d8..ede1475bee09cc74ef7b6269f65d001c7dc97198 100644 (file)
@@ -548,7 +548,7 @@ int iser_connect(struct iser_conn   *ib_conn,
        iser_conn_get(ib_conn); /* ref ib conn's cma id */
        ib_conn->cma_id = rdma_create_id(iser_cma_handler,
                                             (void *)ib_conn,
-                                            RDMA_PS_TCP);
+                                            RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(ib_conn->cma_id)) {
                err = PTR_ERR(ib_conn->cma_id);
                iser_err("rdma_create_id failed: %d\n", err);
index 376d640487d2ce942ff58d348a38a8a9737c5699..ee165fdcb5968431d1cf96a9cb02d56a7e338ba4 100644 (file)
@@ -1147,7 +1147,7 @@ static void srp_process_aer_req(struct srp_target_port *target,
 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 {
        struct ib_device *dev = target->srp_host->srp_dev->dev;
-       struct srp_iu *iu = (struct srp_iu *) wc->wr_id;
+       struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id;
        int res;
        u8 opcode;
 
@@ -1231,7 +1231,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
                        break;
                }
 
-               iu = (struct srp_iu *) wc.wr_id;
+               iu = (struct srp_iu *) (uintptr_t) wc.wr_id;
                list_add(&iu->list, &target->free_tx);
        }
 }
index 69badb4e06aaa9bb067520bb373055e045e787ed..b4dee9d5a0555ff76dabb1b85b7e23c92db63222 100644 (file)
@@ -412,6 +412,17 @@ config KEYBOARD_PXA930_ROTARY
          To compile this driver as a module, choose M here: the
          module will be called pxa930_rotary.
 
+config KEYBOARD_PMIC8XXX
+       tristate "Qualcomm PMIC8XXX keypad support"
+       depends on MFD_PM8XXX
+       help
+         Say Y here if you want to enable the driver for the PMIC8XXX
+         keypad provided as a reference design from Qualcomm. This is intended
+         to support up to an 18x8 matrix-based keypad design.
+
+         To compile this driver as a module, choose M here: the module will
+         be called pmic8xxx-keypad.
+
 config KEYBOARD_SAMSUNG
        tristate "Samsung keypad support"
        depends on SAMSUNG_DEV_KEYPAD
index c49cf8e04cd72d7b75f044228ce8bf30c3b2d866..ddde0fd476f744c2aa2cd250362f809c4469310b 100644 (file)
@@ -34,6 +34,7 @@ obj-$(CONFIG_KEYBOARD_NOMADIK)                += nomadik-ske-keypad.o
 obj-$(CONFIG_KEYBOARD_OMAP)            += omap-keypad.o
 obj-$(CONFIG_KEYBOARD_OMAP4)           += omap4-keypad.o
 obj-$(CONFIG_KEYBOARD_OPENCORES)       += opencores-kbd.o
+obj-$(CONFIG_KEYBOARD_PMIC8XXX)                += pmic8xxx-keypad.o
 obj-$(CONFIG_KEYBOARD_PXA27x)          += pxa27x_keypad.o
 obj-$(CONFIG_KEYBOARD_PXA930_ROTARY)   += pxa930_rotary.o
 obj-$(CONFIG_KEYBOARD_QT1070)           += qt1070.o
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
new file mode 100644 (file)
index 0000000..40b02ae
--- /dev/null
@@ -0,0 +1,799 @@
+/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/mfd/pm8xxx/gpio.h>
+#include <linux/input/pmic8xxx-keypad.h>
+
+#define PM8XXX_MAX_ROWS                18
+#define PM8XXX_MAX_COLS                8
+#define PM8XXX_ROW_SHIFT       3
+#define PM8XXX_MATRIX_MAX_SIZE (PM8XXX_MAX_ROWS * PM8XXX_MAX_COLS)
+
+#define PM8XXX_MIN_ROWS                5
+#define PM8XXX_MIN_COLS                5
+
+#define MAX_SCAN_DELAY         128
+#define MIN_SCAN_DELAY         1
+
+/* in nanoseconds */
+#define MAX_ROW_HOLD_DELAY     122000
+#define MIN_ROW_HOLD_DELAY     30500
+
+#define MAX_DEBOUNCE_TIME      20
+#define MIN_DEBOUNCE_TIME      5
+
+#define KEYP_CTRL                      0x148
+
+#define KEYP_CTRL_EVNTS                        BIT(0)
+#define KEYP_CTRL_EVNTS_MASK           0x3
+
+#define KEYP_CTRL_SCAN_COLS_SHIFT      5
+#define KEYP_CTRL_SCAN_COLS_MIN                5
+#define KEYP_CTRL_SCAN_COLS_BITS       0x3
+
+#define KEYP_CTRL_SCAN_ROWS_SHIFT      2
+#define KEYP_CTRL_SCAN_ROWS_MIN                5
+#define KEYP_CTRL_SCAN_ROWS_BITS       0x7
+
+#define KEYP_CTRL_KEYP_EN              BIT(7)
+
+#define KEYP_SCAN                      0x149
+
+#define KEYP_SCAN_READ_STATE           BIT(0)
+#define KEYP_SCAN_DBOUNCE_SHIFT                1
+#define KEYP_SCAN_PAUSE_SHIFT          3
+#define KEYP_SCAN_ROW_HOLD_SHIFT       6
+
+#define KEYP_TEST                      0x14A
+
+#define KEYP_TEST_CLEAR_RECENT_SCAN    BIT(6)
+#define KEYP_TEST_CLEAR_OLD_SCAN       BIT(5)
+#define KEYP_TEST_READ_RESET           BIT(4)
+#define KEYP_TEST_DTEST_EN             BIT(3)
+#define KEYP_TEST_ABORT_READ           BIT(0)
+
+#define KEYP_TEST_DBG_SELECT_SHIFT     1
+
+/* bits of these registers represent
+ * '0' for key press
+ * '1' for key release
+ */
+#define KEYP_RECENT_DATA               0x14B
+#define KEYP_OLD_DATA                  0x14C
+
+#define KEYP_CLOCK_FREQ                        32768
+
+/**
+ * struct pmic8xxx_kp - internal keypad data structure
+ * @pdata: keypad platform data pointer
+ * @input: input device pointer for keypad
+ * @key_sense_irq: key press/release irq number
+ * @key_stuck_irq: key stuck notification irq number
+ * @keycodes: array to hold the key codes
+ * @dev: parent device pointer
+ * @keystate: present key press/release state
+ * @stuckstate: key state recorded when the key-stuck irq fired
+ * @ctrl_reg: cached control register value
+ */
+struct pmic8xxx_kp {
+       const struct pm8xxx_keypad_platform_data *pdata;
+       struct input_dev *input;
+       int key_sense_irq;
+       int key_stuck_irq;
+
+       unsigned short keycodes[PM8XXX_MATRIX_MAX_SIZE];
+
+       struct device *dev;
+       u16 keystate[PM8XXX_MAX_ROWS];
+       u16 stuckstate[PM8XXX_MAX_ROWS];
+
+       u8 ctrl_reg;
+};
+
+static int pmic8xxx_kp_write_u8(struct pmic8xxx_kp *kp,
+                                u8 data, u16 reg)
+{
+       int rc;
+
+       rc = pm8xxx_writeb(kp->dev->parent, reg, data);
+       return rc;
+}
+
+static int pmic8xxx_kp_read(struct pmic8xxx_kp *kp,
+                                u8 *data, u16 reg, unsigned num_bytes)
+{
+       int rc;
+
+       rc = pm8xxx_read_buf(kp->dev->parent, reg, data, num_bytes);
+       return rc;
+}
+
+static int pmic8xxx_kp_read_u8(struct pmic8xxx_kp *kp,
+                                u8 *data, u16 reg)
+{
+       int rc;
+
+       rc = pmic8xxx_kp_read(kp, data, reg, 1);
+       return rc;
+}
+
+static u8 pmic8xxx_col_state(struct pmic8xxx_kp *kp, u8 col)
+{
+       /* all keys pressed on that particular row? */
+       if (col == 0x00)
+               return 1 << kp->pdata->num_cols;
+       else
+               return col & ((1 << kp->pdata->num_cols) - 1);
+}
+
+/*
+ * Synchronous read protocol for RevB0 onwards:
+ *
+ * 1. Write '1' to ReadState bit in KEYP_SCAN register
+ * 2. Wait 2*32KHz clocks, so that HW can successfully enter read mode
+ *    synchronously
+ * 3. Read rows in old array first if events are more than one
+ * 4. Read rows in recent array
+ * 5. Wait 4*32KHz clocks
+ * 6. Write '0' to ReadState bit of KEYP_SCAN register so that hw can
+ *    synchronously exit read mode.
+ */
+static int pmic8xxx_chk_sync_read(struct pmic8xxx_kp *kp)
+{
+       int rc;
+       u8 scan_val;
+
+       rc = pmic8xxx_kp_read_u8(kp, &scan_val, KEYP_SCAN);
+       if (rc < 0) {
+               dev_err(kp->dev, "Error reading KEYP_SCAN reg, rc=%d\n", rc);
+               return rc;
+       }
+
+       scan_val |= 0x1;
+
+       rc = pmic8xxx_kp_write_u8(kp, scan_val, KEYP_SCAN);
+       if (rc < 0) {
+               dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc);
+               return rc;
+       }
+
+       /* 2 * 32KHz clocks */
+       udelay((2 * DIV_ROUND_UP(USEC_PER_SEC, KEYP_CLOCK_FREQ)) + 1);
+
+       return rc;
+}
+
+static int pmic8xxx_kp_read_data(struct pmic8xxx_kp *kp, u16 *state,
+                                       u16 data_reg, int read_rows)
+{
+       int rc, row;
+       u8 new_data[PM8XXX_MAX_ROWS];
+
+       rc = pmic8xxx_kp_read(kp, new_data, data_reg, read_rows);
+       if (rc)
+               return rc;
+
+       for (row = 0; row < kp->pdata->num_rows; row++) {
+               dev_dbg(kp->dev, "new_data[%d] = %d\n", row,
+                                       new_data[row]);
+               state[row] = pmic8xxx_col_state(kp, new_data[row]);
+       }
+
+       return rc;
+}
+
+static int pmic8xxx_kp_read_matrix(struct pmic8xxx_kp *kp, u16 *new_state,
+                                        u16 *old_state)
+{
+       int rc, read_rows;
+       u8 scan_val;
+
+       if (kp->pdata->num_rows < PM8XXX_MIN_ROWS)
+               read_rows = PM8XXX_MIN_ROWS;
+       else
+               read_rows = kp->pdata->num_rows;
+
+       pmic8xxx_chk_sync_read(kp);
+
+       if (old_state) {
+               rc = pmic8xxx_kp_read_data(kp, old_state, KEYP_OLD_DATA,
+                                               read_rows);
+               if (rc < 0) {
+                       dev_err(kp->dev,
+                               "Error reading KEYP_OLD_DATA, rc=%d\n", rc);
+                       return rc;
+               }
+       }
+
+       rc = pmic8xxx_kp_read_data(kp, new_state, KEYP_RECENT_DATA,
+                                        read_rows);
+       if (rc < 0) {
+               dev_err(kp->dev,
+                       "Error reading KEYP_RECENT_DATA, rc=%d\n", rc);
+               return rc;
+       }
+
+       /* 4 * 32KHz clocks */
+       udelay((4 * DIV_ROUND_UP(USEC_PER_SEC, KEYP_CLOCK_FREQ)) + 1);
+
+       rc = pmic8xxx_kp_read_u8(kp, &scan_val, KEYP_SCAN);
+       if (rc < 0) {
+               dev_err(kp->dev, "Error reading KEYP_SCAN reg, rc=%d\n", rc);
+               return rc;
+       }
+
+       scan_val &= 0xFE;
+       rc = pmic8xxx_kp_write_u8(kp, scan_val, KEYP_SCAN);
+       if (rc < 0)
+               dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc);
+
+       return rc;
+}
+
+static void __pmic8xxx_kp_scan_matrix(struct pmic8xxx_kp *kp, u16 *new_state,
+                                        u16 *old_state)
+{
+       int row, col, code;
+
+       for (row = 0; row < kp->pdata->num_rows; row++) {
+               int bits_changed = new_state[row] ^ old_state[row];
+
+               if (!bits_changed)
+                       continue;
+
+               for (col = 0; col < kp->pdata->num_cols; col++) {
+                       if (!(bits_changed & (1 << col)))
+                               continue;
+
+                       dev_dbg(kp->dev, "key [%d:%d] %s\n", row, col,
+                                       !(new_state[row] & (1 << col)) ?
+                                       "pressed" : "released");
+
+                       code = MATRIX_SCAN_CODE(row, col, PM8XXX_ROW_SHIFT);
+
+                       input_event(kp->input, EV_MSC, MSC_SCAN, code);
+                       input_report_key(kp->input,
+                                       kp->keycodes[code],
+                                       !(new_state[row] & (1 << col)));
+
+                       input_sync(kp->input);
+               }
+       }
+}
+
+static bool pmic8xxx_detect_ghost_keys(struct pmic8xxx_kp *kp, u16 *new_state)
+{
+       int row, found_first = -1;
+       u16 check, row_state;
+
+       check = 0;
+       for (row = 0; row < kp->pdata->num_rows; row++) {
+               row_state = (~new_state[row]) &
+                                ((1 << kp->pdata->num_cols) - 1);
+
+               if (hweight16(row_state) > 1) {
+                       if (found_first == -1)
+                               found_first = row;
+                       if (check & row_state) {
+                               dev_dbg(kp->dev, "detected ghost key on row[%d]"
+                                        " and row[%d]\n", found_first, row);
+                               return true;
+                       }
+               }
+               check |= row_state;
+       }
+       return false;
+}
+
+static int pmic8xxx_kp_scan_matrix(struct pmic8xxx_kp *kp, unsigned int events)
+{
+       u16 new_state[PM8XXX_MAX_ROWS];
+       u16 old_state[PM8XXX_MAX_ROWS];
+       int rc;
+
+       switch (events) {
+       case 0x1:
+               rc = pmic8xxx_kp_read_matrix(kp, new_state, NULL);
+               if (rc < 0)
+                       return rc;
+
+               /* detecting ghost key is not an error */
+               if (pmic8xxx_detect_ghost_keys(kp, new_state))
+                       return 0;
+               __pmic8xxx_kp_scan_matrix(kp, new_state, kp->keystate);
+               memcpy(kp->keystate, new_state, sizeof(new_state));
+       break;
+       case 0x3: /* two events - eventcounter is gray-coded */
+               rc = pmic8xxx_kp_read_matrix(kp, new_state, old_state);
+               if (rc < 0)
+                       return rc;
+
+               __pmic8xxx_kp_scan_matrix(kp, old_state, kp->keystate);
+               __pmic8xxx_kp_scan_matrix(kp, new_state, old_state);
+               memcpy(kp->keystate, new_state, sizeof(new_state));
+       break;
+       case 0x2:
+               dev_dbg(kp->dev, "Some key events were lost\n");
+               rc = pmic8xxx_kp_read_matrix(kp, new_state, old_state);
+               if (rc < 0)
+                       return rc;
+               __pmic8xxx_kp_scan_matrix(kp, old_state, kp->keystate);
+               __pmic8xxx_kp_scan_matrix(kp, new_state, old_state);
+               memcpy(kp->keystate, new_state, sizeof(new_state));
+       break;
+       default:
+               rc = -EINVAL;
+       }
+       return rc;
+}
+
+/*
+ * NOTE: We read the recent and old data registers blindly whenever the
+ * key-stuck interrupt fires, because the events counter is not updated
+ * for this interrupt; a stuck key is not treated as a key state change.
+ *
+ * We do not use the old data register contents after reading them: they
+ * may report a key that was pressed before the key became stuck as the
+ * stuck key, since its pressed status is still stored in the old data
+ * register.
+ */
+static irqreturn_t pmic8xxx_kp_stuck_irq(int irq, void *data)
+{
+       u16 new_state[PM8XXX_MAX_ROWS];
+       u16 old_state[PM8XXX_MAX_ROWS];
+       int rc;
+       struct pmic8xxx_kp *kp = data;
+
+       rc = pmic8xxx_kp_read_matrix(kp, new_state, old_state);
+       if (rc < 0) {
+               dev_err(kp->dev, "failed to read keypad matrix\n");
+               return IRQ_HANDLED;
+       }
+
+       __pmic8xxx_kp_scan_matrix(kp, new_state, kp->stuckstate);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t pmic8xxx_kp_irq(int irq, void *data)
+{
+       struct pmic8xxx_kp *kp = data;
+       u8 ctrl_val, events;
+       int rc;
+
+       rc = pmic8xxx_kp_read(kp, &ctrl_val, KEYP_CTRL, 1);
+       if (rc < 0) {
+               dev_err(kp->dev, "failed to read keyp_ctrl register\n");
+               return IRQ_HANDLED;
+       }
+
+       events = ctrl_val & KEYP_CTRL_EVNTS_MASK;
+
+       rc = pmic8xxx_kp_scan_matrix(kp, events);
+       if (rc < 0)
+               dev_err(kp->dev, "failed to scan matrix\n");
+
+       return IRQ_HANDLED;
+}
+
+static int __devinit pmic8xxx_kpd_init(struct pmic8xxx_kp *kp)
+{
+       int bits, rc, cycles;
+       u8 scan_val = 0, ctrl_val = 0;
+       static const u8 row_bits[] = {
+               0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7,
+       };
+
+       /* Find column bits */
+       if (kp->pdata->num_cols < KEYP_CTRL_SCAN_COLS_MIN)
+               bits = 0;
+       else
+               bits = kp->pdata->num_cols - KEYP_CTRL_SCAN_COLS_MIN;
+       ctrl_val = (bits & KEYP_CTRL_SCAN_COLS_BITS) <<
+               KEYP_CTRL_SCAN_COLS_SHIFT;
+
+       /* Find row bits */
+       if (kp->pdata->num_rows < KEYP_CTRL_SCAN_ROWS_MIN)
+               bits = 0;
+       else
+               bits = row_bits[kp->pdata->num_rows - KEYP_CTRL_SCAN_ROWS_MIN];
+
+       ctrl_val |= (bits << KEYP_CTRL_SCAN_ROWS_SHIFT);
+
+       rc = pmic8xxx_kp_write_u8(kp, ctrl_val, KEYP_CTRL);
+       if (rc < 0) {
+               dev_err(kp->dev, "Error writing KEYP_CTRL reg, rc=%d\n", rc);
+               return rc;
+       }
+
+       bits = (kp->pdata->debounce_ms / 5) - 1;
+
+       scan_val |= (bits << KEYP_SCAN_DBOUNCE_SHIFT);
+
+       bits = fls(kp->pdata->scan_delay_ms) - 1;
+       scan_val |= (bits << KEYP_SCAN_PAUSE_SHIFT);
+
+       /* Row hold time is a multiple of 32KHz cycles. */
+       cycles = (kp->pdata->row_hold_ns * KEYP_CLOCK_FREQ) / NSEC_PER_SEC;
+
+       scan_val |= (cycles << KEYP_SCAN_ROW_HOLD_SHIFT);
+
+       rc = pmic8xxx_kp_write_u8(kp, scan_val, KEYP_SCAN);
+       if (rc)
+               dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc);
+
+       return rc;
+
+}
+
+static int  __devinit pmic8xxx_kp_config_gpio(int gpio_start, int num_gpios,
+                       struct pmic8xxx_kp *kp, struct pm_gpio *gpio_config)
+{
+       int     rc, i;
+
+       if (gpio_start < 0 || num_gpios < 0)
+               return -EINVAL;
+
+       for (i = 0; i < num_gpios; i++) {
+               rc = pm8xxx_gpio_config(gpio_start + i, gpio_config);
+               if (rc) {
+                       dev_err(kp->dev, "%s: FAIL pm8xxx_gpio_config(): "
+                                       "for PM GPIO [%d] rc=%d.\n",
+                                       __func__, gpio_start + i, rc);
+                       return rc;
+               }
+        }
+
+       return 0;
+}
+
+static int pmic8xxx_kp_enable(struct pmic8xxx_kp *kp)
+{
+       int rc;
+
+       kp->ctrl_reg |= KEYP_CTRL_KEYP_EN;
+
+       rc = pmic8xxx_kp_write_u8(kp, kp->ctrl_reg, KEYP_CTRL);
+       if (rc < 0)
+               dev_err(kp->dev, "Error writing KEYP_CTRL reg, rc=%d\n", rc);
+
+       return rc;
+}
+
+static int pmic8xxx_kp_disable(struct pmic8xxx_kp *kp)
+{
+       int rc;
+
+       kp->ctrl_reg &= ~KEYP_CTRL_KEYP_EN;
+
+       rc = pmic8xxx_kp_write_u8(kp, kp->ctrl_reg, KEYP_CTRL);
+       if (rc < 0)
+               return rc;
+
+       return rc;
+}
+
+static int pmic8xxx_kp_open(struct input_dev *dev)
+{
+       struct pmic8xxx_kp *kp = input_get_drvdata(dev);
+
+       return pmic8xxx_kp_enable(kp);
+}
+
+static void pmic8xxx_kp_close(struct input_dev *dev)
+{
+       struct pmic8xxx_kp *kp = input_get_drvdata(dev);
+
+       pmic8xxx_kp_disable(kp);
+}
+
+/*
+ * The keypad controller must be initialized in the following sequence
+ * only, otherwise its FSM can get stuck:
+ *
+ * - initialize the keypad control parameters (number of rows and
+ *   columns, timing values, etc.),
+ * - configure the row and column GPIO pull up/down settings,
+ * - set the irq edge type,
+ * - enable the keypad controller.
+ */
+static int __devinit pmic8xxx_kp_probe(struct platform_device *pdev)
+{
+       const struct pm8xxx_keypad_platform_data *pdata = mfd_get_data(pdev);
+       const struct matrix_keymap_data *keymap_data;
+       struct pmic8xxx_kp *kp;
+       int rc;
+       u8 ctrl_val;
+
+       struct pm_gpio kypd_drv = {
+               .direction      = PM_GPIO_DIR_OUT,
+               .output_buffer  = PM_GPIO_OUT_BUF_OPEN_DRAIN,
+               .output_value   = 0,
+               .pull           = PM_GPIO_PULL_NO,
+               .vin_sel        = PM_GPIO_VIN_S3,
+               .out_strength   = PM_GPIO_STRENGTH_LOW,
+               .function       = PM_GPIO_FUNC_1,
+               .inv_int_pol    = 1,
+       };
+
+       struct pm_gpio kypd_sns = {
+               .direction      = PM_GPIO_DIR_IN,
+               .pull           = PM_GPIO_PULL_UP_31P5,
+               .vin_sel        = PM_GPIO_VIN_S3,
+               .out_strength   = PM_GPIO_STRENGTH_NO,
+               .function       = PM_GPIO_FUNC_NORMAL,
+               .inv_int_pol    = 1,
+       };
+
+
+       if (!pdata || !pdata->num_cols || !pdata->num_rows ||
+               pdata->num_cols > PM8XXX_MAX_COLS ||
+               pdata->num_rows > PM8XXX_MAX_ROWS ||
+               pdata->num_cols < PM8XXX_MIN_COLS) {
+               dev_err(&pdev->dev, "invalid platform data\n");
+               return -EINVAL;
+       }
+
+       if (!pdata->scan_delay_ms ||
+               pdata->scan_delay_ms > MAX_SCAN_DELAY ||
+               pdata->scan_delay_ms < MIN_SCAN_DELAY ||
+               !is_power_of_2(pdata->scan_delay_ms)) {
+               dev_err(&pdev->dev, "invalid keypad scan time supplied\n");
+               return -EINVAL;
+       }
+
+       if (!pdata->row_hold_ns ||
+               pdata->row_hold_ns > MAX_ROW_HOLD_DELAY ||
+               pdata->row_hold_ns < MIN_ROW_HOLD_DELAY ||
+               ((pdata->row_hold_ns % MIN_ROW_HOLD_DELAY) != 0)) {
+               dev_err(&pdev->dev, "invalid keypad row hold time supplied\n");
+               return -EINVAL;
+       }
+
+       if (!pdata->debounce_ms ||
+               ((pdata->debounce_ms % 5) != 0) ||
+               pdata->debounce_ms > MAX_DEBOUNCE_TIME ||
+               pdata->debounce_ms < MIN_DEBOUNCE_TIME) {
+               dev_err(&pdev->dev, "invalid debounce time supplied\n");
+               return -EINVAL;
+       }
+
+       keymap_data = pdata->keymap_data;
+       if (!keymap_data) {
+               dev_err(&pdev->dev, "no keymap data supplied\n");
+               return -EINVAL;
+       }
+
+       kp = kzalloc(sizeof(*kp), GFP_KERNEL);
+       if (!kp)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, kp);
+
+       kp->pdata       = pdata;
+       kp->dev         = &pdev->dev;
+
+       kp->input = input_allocate_device();
+       if (!kp->input) {
+               dev_err(&pdev->dev, "unable to allocate input device\n");
+               rc = -ENOMEM;
+               goto err_alloc_device;
+       }
+
+       kp->key_sense_irq = platform_get_irq(pdev, 0);
+       if (kp->key_sense_irq < 0) {
+               dev_err(&pdev->dev, "unable to get keypad sense irq\n");
+               rc = -ENXIO;
+               goto err_get_irq;
+       }
+
+       kp->key_stuck_irq = platform_get_irq(pdev, 1);
+       if (kp->key_stuck_irq < 0) {
+               dev_err(&pdev->dev, "unable to get keypad stuck irq\n");
+               rc = -ENXIO;
+               goto err_get_irq;
+       }
+
+       kp->input->name = pdata->input_name ? : "PMIC8XXX keypad";
+       kp->input->phys = pdata->input_phys_device ? : "pmic8xxx_keypad/input0";
+
+       kp->input->dev.parent   = &pdev->dev;
+
+       kp->input->id.bustype   = BUS_I2C;
+       kp->input->id.version   = 0x0001;
+       kp->input->id.product   = 0x0001;
+       kp->input->id.vendor    = 0x0001;
+
+       kp->input->evbit[0]     = BIT_MASK(EV_KEY);
+
+       if (pdata->rep)
+               __set_bit(EV_REP, kp->input->evbit);
+
+       kp->input->keycode      = kp->keycodes;
+       kp->input->keycodemax   = PM8XXX_MATRIX_MAX_SIZE;
+       kp->input->keycodesize  = sizeof(kp->keycodes);
+       kp->input->open         = pmic8xxx_kp_open;
+       kp->input->close        = pmic8xxx_kp_close;
+
+       matrix_keypad_build_keymap(keymap_data, PM8XXX_ROW_SHIFT,
+                                       kp->input->keycode, kp->input->keybit);
+
+       input_set_capability(kp->input, EV_MSC, MSC_SCAN);
+       input_set_drvdata(kp->input, kp);
+
+       /* initialize keypad state */
+       memset(kp->keystate, 0xff, sizeof(kp->keystate));
+       memset(kp->stuckstate, 0xff, sizeof(kp->stuckstate));
+
+       rc = pmic8xxx_kpd_init(kp);
+       if (rc < 0) {
+               dev_err(&pdev->dev, "unable to initialize keypad controller\n");
+               goto err_get_irq;
+       }
+
+       rc = pmic8xxx_kp_config_gpio(pdata->cols_gpio_start,
+                                       pdata->num_cols, kp, &kypd_sns);
+       if (rc < 0) {
+               dev_err(&pdev->dev, "unable to configure keypad sense lines\n");
+               goto err_gpio_config;
+       }
+
+       rc = pmic8xxx_kp_config_gpio(pdata->rows_gpio_start,
+                                       pdata->num_rows, kp, &kypd_drv);
+       if (rc < 0) {
+               dev_err(&pdev->dev, "unable to configure keypad drive lines\n");
+               goto err_gpio_config;
+       }
+
+       rc = request_any_context_irq(kp->key_sense_irq, pmic8xxx_kp_irq,
+                                IRQF_TRIGGER_RISING, "pmic-keypad", kp);
+       if (rc < 0) {
+               dev_err(&pdev->dev, "failed to request keypad sense irq\n");
+               goto err_get_irq;
+       }
+
+       rc = request_any_context_irq(kp->key_stuck_irq, pmic8xxx_kp_stuck_irq,
+                                IRQF_TRIGGER_RISING, "pmic-keypad-stuck", kp);
+       if (rc < 0) {
+               dev_err(&pdev->dev, "failed to request keypad stuck irq\n");
+               goto err_req_stuck_irq;
+       }
+
+       rc = pmic8xxx_kp_read_u8(kp, &ctrl_val, KEYP_CTRL);
+       if (rc < 0) {
+               dev_err(&pdev->dev, "failed to read KEYP_CTRL register\n");
+               goto err_pmic_reg_read;
+       }
+
+       kp->ctrl_reg = ctrl_val;
+
+       rc = input_register_device(kp->input);
+       if (rc < 0) {
+               dev_err(&pdev->dev, "unable to register keypad input device\n");
+               goto err_pmic_reg_read;
+       }
+
+       device_init_wakeup(&pdev->dev, pdata->wakeup);
+
+       return 0;
+
+err_pmic_reg_read:
+       free_irq(kp->key_stuck_irq, NULL);
+err_req_stuck_irq:
+       free_irq(kp->key_sense_irq, NULL);
+err_gpio_config:
+err_get_irq:
+       input_free_device(kp->input);
+err_alloc_device:
+       platform_set_drvdata(pdev, NULL);
+       kfree(kp);
+       return rc;
+}
+
+static int __devexit pmic8xxx_kp_remove(struct platform_device *pdev)
+{
+       struct pmic8xxx_kp *kp = platform_get_drvdata(pdev);
+
+       device_init_wakeup(&pdev->dev, 0);
+       free_irq(kp->key_stuck_irq, NULL);
+       free_irq(kp->key_sense_irq, NULL);
+       input_unregister_device(kp->input);
+       kfree(kp);
+
+       platform_set_drvdata(pdev, NULL);
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pmic8xxx_kp_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct pmic8xxx_kp *kp = platform_get_drvdata(pdev);
+       struct input_dev *input_dev = kp->input;
+
+       if (device_may_wakeup(dev)) {
+               enable_irq_wake(kp->key_sense_irq);
+       } else {
+               mutex_lock(&input_dev->mutex);
+
+               if (input_dev->users)
+                       pmic8xxx_kp_disable(kp);
+
+               mutex_unlock(&input_dev->mutex);
+       }
+
+       return 0;
+}
+
+static int pmic8xxx_kp_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct pmic8xxx_kp *kp = platform_get_drvdata(pdev);
+       struct input_dev *input_dev = kp->input;
+
+       if (device_may_wakeup(dev)) {
+               disable_irq_wake(kp->key_sense_irq);
+       } else {
+               mutex_lock(&input_dev->mutex);
+
+               if (input_dev->users)
+                       pmic8xxx_kp_enable(kp);
+
+               mutex_unlock(&input_dev->mutex);
+       }
+
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(pm8xxx_kp_pm_ops,
+                        pmic8xxx_kp_suspend, pmic8xxx_kp_resume);
+
+static struct platform_driver pmic8xxx_kp_driver = {
+       .probe          = pmic8xxx_kp_probe,
+       .remove         = __devexit_p(pmic8xxx_kp_remove),
+       .driver         = {
+               .name = PM8XXX_KEYPAD_DEV_NAME,
+               .owner = THIS_MODULE,
+               .pm = &pm8xxx_kp_pm_ops,
+       },
+};
+
+static int __init pmic8xxx_kp_init(void)
+{
+       return platform_driver_register(&pmic8xxx_kp_driver);
+}
+module_init(pmic8xxx_kp_init);
+
+static void __exit pmic8xxx_kp_exit(void)
+{
+       platform_driver_unregister(&pmic8xxx_kp_driver);
+}
+module_exit(pmic8xxx_kp_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8XXX keypad driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pmic8xxx_keypad");
+MODULE_AUTHOR("Trilok Soni <tsoni@codeaurora.org>");
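
The probe routine above rejects platform data outside the hardware limits: the scan delay must be a power of two between 1 and 128 ms, the debounce time a multiple of 5 between 5 and 20 ms, and the row hold time a multiple of 30500 ns up to 122000 ns. A board-file sketch of platform data that satisfies those checks follows; the keymap, GPIO numbers and names are illustrative only, and how the data reaches the driver through the PM8xxx MFD core (mfd_get_data() above) is board-specific and omitted.

#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/input/matrix_keypad.h>
#include <linux/input/pmic8xxx-keypad.h>

/* Illustrative, sparse 5x5 keymap; real boards define their own codes. */
static const unsigned int board_keymap[] = {
        KEY(0, 0, KEY_VOLUMEUP),
        KEY(0, 1, KEY_VOLUMEDOWN),
        KEY(1, 0, KEY_CAMERA),
        KEY(1, 1, KEY_HOME),
};

static struct matrix_keymap_data board_keymap_data = {
        .keymap_size    = ARRAY_SIZE(board_keymap),
        .keymap         = board_keymap,
};

static struct pm8xxx_keypad_platform_data board_keypad_pdata = {
        .input_name      = "board-keypad",      /* optional override */
        .num_rows        = 5,
        .num_cols        = 5,
        .rows_gpio_start = 8,           /* hypothetical PMIC GPIO numbers */
        .cols_gpio_start = 0,
        .debounce_ms     = 15,          /* multiple of 5, within 5..20 */
        .scan_delay_ms   = 32,          /* power of two, within 1..128 */
        .row_hold_ns     = 91500,       /* multiple of 30500, <= 122000 */
        .wakeup          = 1,
        .rep             = 1,
        .keymap_data     = &board_keymap_data,
};
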
index f9cf0881b0e3cf386a3e606d462aebf4bacd5575..45dc6aa62ba4fce3715c20b504da7f02745997e5 100644 (file)
@@ -330,6 +330,17 @@ config INPUT_PWM_BEEPER
          To compile this driver as a module, choose M here: the module will be
          called pwm-beeper.
 
+config INPUT_PMIC8XXX_PWRKEY
+       tristate "PMIC8XXX power key support"
+       depends on MFD_PM8XXX
+       help
+         Say Y here if you want support for the PMIC8XXX power key.
+
+         If unsure, say N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called pmic8xxx-pwrkey.
+
 config INPUT_GPIO_ROTARY_ENCODER
        tristate "Rotary encoders connected to GPIO pins"
        depends on GPIOLIB && GENERIC_GPIO
index e3f7984e627427ead449de2d6e6452d3aac1c915..38efb2cb182b90646e7e4e7e41288e0222fca641 100644 (file)
@@ -33,6 +33,7 @@ obj-$(CONFIG_INPUT_PCF8574)           += pcf8574_keypad.o
 obj-$(CONFIG_INPUT_PCSPKR)             += pcspkr.o
 obj-$(CONFIG_INPUT_POWERMATE)          += powermate.o
 obj-$(CONFIG_INPUT_PWM_BEEPER)         += pwm-beeper.o
+obj-$(CONFIG_INPUT_PMIC8XXX_PWRKEY)    += pmic8xxx-pwrkey.o
 obj-$(CONFIG_INPUT_RB532_BUTTON)       += rb532_button.o
 obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER)        += rotary_encoder.o
 obj-$(CONFIG_INPUT_SGI_BTNS)           += sgi_btns.o
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
new file mode 100644 (file)
index 0000000..97e07e7
--- /dev/null
@@ -0,0 +1,231 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/log2.h>
+
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/input/pmic8xxx-pwrkey.h>
+
+#define PON_CNTL_1 0x1C
+#define PON_CNTL_PULL_UP BIT(7)
+#define PON_CNTL_TRIG_DELAY_MASK (0x7)
+
+/**
+ * struct pmic8xxx_pwrkey - pmic8xxx pwrkey information
+ * @pwr: power key input device
+ * @key_press_irq: key press irq number
+ */
+struct pmic8xxx_pwrkey {
+       struct input_dev *pwr;
+       int key_press_irq;
+};
+
+static irqreturn_t pwrkey_press_irq(int irq, void *_pwrkey)
+{
+       struct pmic8xxx_pwrkey *pwrkey = _pwrkey;
+
+       input_report_key(pwrkey->pwr, KEY_POWER, 1);
+       input_sync(pwrkey->pwr);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t pwrkey_release_irq(int irq, void *_pwrkey)
+{
+       struct pmic8xxx_pwrkey *pwrkey = _pwrkey;
+
+       input_report_key(pwrkey->pwr, KEY_POWER, 0);
+       input_sync(pwrkey->pwr);
+
+       return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pmic8xxx_pwrkey_suspend(struct device *dev)
+{
+       struct pmic8xxx_pwrkey *pwrkey = dev_get_drvdata(dev);
+
+       if (device_may_wakeup(dev))
+               enable_irq_wake(pwrkey->key_press_irq);
+
+       return 0;
+}
+
+static int pmic8xxx_pwrkey_resume(struct device *dev)
+{
+       struct pmic8xxx_pwrkey *pwrkey = dev_get_drvdata(dev);
+
+       if (device_may_wakeup(dev))
+               disable_irq_wake(pwrkey->key_press_irq);
+
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(pm8xxx_pwr_key_pm_ops,
+               pmic8xxx_pwrkey_suspend, pmic8xxx_pwrkey_resume);
+
+static int __devinit pmic8xxx_pwrkey_probe(struct platform_device *pdev)
+{
+       struct input_dev *pwr;
+       int key_release_irq = platform_get_irq(pdev, 0);
+       int key_press_irq = platform_get_irq(pdev, 1);
+       int err;
+       unsigned int delay;
+       u8 pon_cntl;
+       struct pmic8xxx_pwrkey *pwrkey;
+       const struct pm8xxx_pwrkey_platform_data *pdata = mfd_get_data(pdev);
+
+       if (!pdata) {
+               dev_err(&pdev->dev, "power key platform data not supplied\n");
+               return -EINVAL;
+       }
+
+       if (pdata->kpd_trigger_delay_us > 62500) {
+               dev_err(&pdev->dev, "invalid power key trigger delay\n");
+               return -EINVAL;
+       }
+
+       pwrkey = kzalloc(sizeof(*pwrkey), GFP_KERNEL);
+       if (!pwrkey)
+               return -ENOMEM;
+
+       pwr = input_allocate_device();
+       if (!pwr) {
+               dev_dbg(&pdev->dev, "Can't allocate power button\n");
+               err = -ENOMEM;
+               goto free_pwrkey;
+       }
+
+       input_set_capability(pwr, EV_KEY, KEY_POWER);
+
+       pwr->name = "pmic8xxx_pwrkey";
+       pwr->phys = "pmic8xxx_pwrkey/input0";
+       pwr->dev.parent = &pdev->dev;
+
+       delay = (pdata->kpd_trigger_delay_us << 10) / USEC_PER_SEC;
+       delay = 1 + ilog2(delay);
+
+       err = pm8xxx_readb(pdev->dev.parent, PON_CNTL_1, &pon_cntl);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed reading PON_CNTL_1 err=%d\n", err);
+               goto free_input_dev;
+       }
+
+       pon_cntl &= ~PON_CNTL_TRIG_DELAY_MASK;
+       pon_cntl |= (delay & PON_CNTL_TRIG_DELAY_MASK);
+       if (pdata->pull_up)
+               pon_cntl |= PON_CNTL_PULL_UP;
+       else
+               pon_cntl &= ~PON_CNTL_PULL_UP;
+
+       err = pm8xxx_writeb(pdev->dev.parent, PON_CNTL_1, pon_cntl);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed writing PON_CNTL_1 err=%d\n", err);
+               goto free_input_dev;
+       }
+
+       err = input_register_device(pwr);
+       if (err) {
+               dev_dbg(&pdev->dev, "Can't register power key: %d\n", err);
+               goto free_input_dev;
+       }
+
+       pwrkey->key_press_irq = key_press_irq;
+       pwrkey->pwr = pwr;
+
+       platform_set_drvdata(pdev, pwrkey);
+
+       err = request_irq(key_press_irq, pwrkey_press_irq,
+               IRQF_TRIGGER_RISING, "pmic8xxx_pwrkey_press", pwrkey);
+       if (err < 0) {
+               dev_dbg(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n",
+                                key_press_irq, err);
+               goto unreg_input_dev;
+       }
+
+       err = request_irq(key_release_irq, pwrkey_release_irq,
+                IRQF_TRIGGER_RISING, "pmic8xxx_pwrkey_release", pwrkey);
+       if (err < 0) {
+               dev_dbg(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n",
+                                key_release_irq, err);
+
+               goto free_press_irq;
+       }
+
+       device_init_wakeup(&pdev->dev, pdata->wakeup);
+
+       return 0;
+
+free_press_irq:
+       free_irq(key_press_irq, NULL);
+unreg_input_dev:
+       platform_set_drvdata(pdev, NULL);
+       input_unregister_device(pwr);
+       pwr = NULL;
+free_input_dev:
+       input_free_device(pwr);
+free_pwrkey:
+       kfree(pwrkey);
+       return err;
+}
+
+static int __devexit pmic8xxx_pwrkey_remove(struct platform_device *pdev)
+{
+       struct pmic8xxx_pwrkey *pwrkey = platform_get_drvdata(pdev);
+       int key_release_irq = platform_get_irq(pdev, 0);
+       int key_press_irq = platform_get_irq(pdev, 1);
+
+       device_init_wakeup(&pdev->dev, 0);
+
+       free_irq(key_press_irq, pwrkey);
+       free_irq(key_release_irq, pwrkey);
+       input_unregister_device(pwrkey->pwr);
+       platform_set_drvdata(pdev, NULL);
+       kfree(pwrkey);
+
+       return 0;
+}
+
+static struct platform_driver pmic8xxx_pwrkey_driver = {
+       .probe          = pmic8xxx_pwrkey_probe,
+       .remove         = __devexit_p(pmic8xxx_pwrkey_remove),
+       .driver         = {
+               .name   = PM8XXX_PWRKEY_DEV_NAME,
+               .owner  = THIS_MODULE,
+               .pm     = &pm8xxx_pwr_key_pm_ops,
+       },
+};
+
+static int __init pmic8xxx_pwrkey_init(void)
+{
+       return platform_driver_register(&pmic8xxx_pwrkey_driver);
+}
+module_init(pmic8xxx_pwrkey_init);
+
+static void __exit pmic8xxx_pwrkey_exit(void)
+{
+       platform_driver_unregister(&pmic8xxx_pwrkey_driver);
+}
+module_exit(pmic8xxx_pwrkey_exit);
+
+MODULE_ALIAS("platform:pmic8xxx_pwrkey");
+MODULE_DESCRIPTION("PMIC8XXX Power Key driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Trilok Soni <tsoni@codeaurora.org>");
index 6a11694e3fc7bb1274f59105e35f9d0b7c7d33c9..014dd4ad0d4fecede04221e5348fc78e023d76e2 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/workqueue.h>
 #include <linux/i2c/twl.h>
 #include <linux/mfd/twl4030-codec.h>
-#include <linux/mfd/core.h>
 #include <linux/input.h>
 #include <linux/slab.h>
 
@@ -197,7 +196,7 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
 
 static int __devinit twl4030_vibra_probe(struct platform_device *pdev)
 {
-       struct twl4030_codec_vibra_data *pdata = mfd_get_data(pdev);
+       struct twl4030_codec_vibra_data *pdata = pdev->dev.platform_data;
        struct vibra_info *info;
        int ret;
 
index d36a4c09e25db2b5d8ee558ab2bf31664455e508..0bbee7824d78790130b6ea4afdc42ebe8895d399 100644 (file)
@@ -113,9 +113,8 @@ void diva_xdi_didd_remove_adapter(int card)
 static void start_dbg(void)
 {
        DbgRegister("DIVAS", DRIVERRELEASE_DIVAS, (debugmask) ? debugmask : DBG_DEFAULT);
-       DBG_LOG(("DIVA ISDNXDI BUILD (%s[%s]-%s-%s)",
-                DIVA_BUILD, diva_xdi_common_code_build, __DATE__,
-                __TIME__))
+       DBG_LOG(("DIVA ISDNXDI BUILD (%s[%s])",
+                DIVA_BUILD, diva_xdi_common_code_build))
 }
 
 /*
index 1d027b475b22563c7d0c5eea64103b4e0b48d307..23f0d5e99f35705c97c165e05b05a7e5239804b7 100644 (file)
@@ -389,6 +389,16 @@ config LEDS_NETXBIG
          and 5Big Network v2 boards. The LEDs are wired to a CPLD and are
          controlled through a GPIO extension bus.
 
+config LEDS_ASIC3
+       bool "LED support for the HTC ASIC3"
+       depends on MFD_ASIC3
+       default y
+       help
+         This option enables support for the LEDs on the HTC ASIC3. The HTC
+         ASIC3 LED GPIOs are inputs, not outputs, thus the leds-gpio driver
+         cannot be used. This driver supports hardware blinking with an on+off
+         period from 62ms to 125s. Say Y to enable LEDs on the HP iPAQ hx4700.
+
 config LEDS_TRIGGERS
        bool "LED Trigger support"
        depends on LEDS_CLASS
index bccb96c9bb45b467cbf14d0bf0c4e338796f8dce..bbfd2e367dc0e8a6b917c2c94b092bfa66bea35e 100644 (file)
@@ -42,6 +42,7 @@ obj-$(CONFIG_LEDS_DELL_NETBOOKS)      += dell-led.o
 obj-$(CONFIG_LEDS_MC13783)             += leds-mc13783.o
 obj-$(CONFIG_LEDS_NS2)                 += leds-ns2.o
 obj-$(CONFIG_LEDS_NETXBIG)             += leds-netxbig.o
+obj-$(CONFIG_LEDS_ASIC3)               += leds-asic3.o
 
 # LED SPI Drivers
 obj-$(CONFIG_LEDS_DAC124S085)          += leds-dac124s085.o
index 416def84d0459e5a5c3a9708261d12fdf76280a6..0d4c16678ace37a2030f68d141bdf19cfc50d3b3 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/leds.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
-#include <linux/mfd/core.h>
 #include <linux/mfd/88pm860x.h>
 
 #define LED_PWM_SHIFT          (3)
@@ -171,7 +170,6 @@ static int pm860x_led_probe(struct platform_device *pdev)
        struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
        struct pm860x_led_pdata *pdata;
        struct pm860x_led *data;
-       struct mfd_cell *cell;
        struct resource *res;
        int ret;
 
@@ -181,10 +179,7 @@ static int pm860x_led_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       cell = pdev->dev.platform_data;
-       if (cell == NULL)
-               return -ENODEV;
-       pdata = cell->mfd_data;
+       pdata = pdev->dev.platform_data;
        if (pdata == NULL) {
                dev_err(&pdev->dev, "No platform data!\n");
                return -EINVAL;
diff --git a/drivers/leds/leds-asic3.c b/drivers/leds/leds-asic3.c
new file mode 100644 (file)
index 0000000..22f847c
--- /dev/null
@@ -0,0 +1,165 @@
+/*
+ *  Copyright (C) 2011 Paul Parsons <lost.distance@yahoo.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+
+#include <linux/mfd/asic3.h>
+#include <linux/mfd/core.h>
+
+/*
+ *     The HTC ASIC3 LED GPIOs are inputs, not outputs.
+ *     Hence we turn the LEDs on/off via the TimeBase register.
+ */
+
+/*
+ *     When TimeBase is 4 the clock resolution is about 32Hz.
+ *     This driver supports hardware blinking with an on+off
+ *     period from 62ms (2 clocks) to 125s (4000 clocks).
+ */
+#define MS_TO_CLK(ms)  DIV_ROUND_CLOSEST(((ms)*1024), 32000)
+#define CLK_TO_MS(clk) (((clk)*32000)/1024)
+#define MAX_CLK                4000            /* Fits into 12-bit Time registers */
+#define MAX_MS         CLK_TO_MS(MAX_CLK)
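+
+/*
+ *     For example, the default 500ms blink delay used below converts to
+ *     MS_TO_CLK(500) = DIV_ROUND_CLOSEST(500 * 1024, 32000) = 16 clocks,
+ *     so a 500ms-on/500ms-off blink programs a 32-clock (1s) period.
+ */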
+
+static const unsigned int led_n_base[ASIC3_NUM_LEDS] = {
+       [0] = ASIC3_LED_0_Base,
+       [1] = ASIC3_LED_1_Base,
+       [2] = ASIC3_LED_2_Base,
+};
+
+static void brightness_set(struct led_classdev *cdev,
+       enum led_brightness value)
+{
+       struct platform_device *pdev = to_platform_device(cdev->dev->parent);
+       const struct mfd_cell *cell = mfd_get_cell(pdev);
+       struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
+       u32 timebase;
+       unsigned int base;
+
+       timebase = (value == LED_OFF) ? 0 : (LED_EN|0x4);
+
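+       /*
+        * With DutyTime equal to PeriodTime the LED is lit for the whole
+        * period, so the TimeBase register alone switches it on (LED_EN)
+        * or off (0).
+        */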
+       base = led_n_base[cell->id];
+       asic3_write_register(asic, (base + ASIC3_LED_PeriodTime), 32);
+       asic3_write_register(asic, (base + ASIC3_LED_DutyTime), 32);
+       asic3_write_register(asic, (base + ASIC3_LED_AutoStopCount), 0);
+       asic3_write_register(asic, (base + ASIC3_LED_TimeBase), timebase);
+}
+
+static int blink_set(struct led_classdev *cdev,
+       unsigned long *delay_on,
+       unsigned long *delay_off)
+{
+       struct platform_device *pdev = to_platform_device(cdev->dev->parent);
+       const struct mfd_cell *cell = mfd_get_cell(pdev);
+       struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
+       u32 on;
+       u32 off;
+       unsigned int base;
+
+       if (*delay_on > MAX_MS || *delay_off > MAX_MS)
+               return -EINVAL;
+
+       if (*delay_on == 0 && *delay_off == 0) {
+               /* If both are zero then a sensible default should be chosen */
+               on = MS_TO_CLK(500);
+               off = MS_TO_CLK(500);
+       } else {
+               on = MS_TO_CLK(*delay_on);
+               off = MS_TO_CLK(*delay_off);
+               if ((on + off) > MAX_CLK)
+                       return -EINVAL;
+       }
+
+       base = led_n_base[cell->id];
+       asic3_write_register(asic, (base + ASIC3_LED_PeriodTime), (on + off));
+       asic3_write_register(asic, (base + ASIC3_LED_DutyTime), on);
+       asic3_write_register(asic, (base + ASIC3_LED_AutoStopCount), 0);
+       asic3_write_register(asic, (base + ASIC3_LED_TimeBase), (LED_EN|0x4));
+
+       *delay_on = CLK_TO_MS(on);
+       *delay_off = CLK_TO_MS(off);
+
+       return 0;
+}
+
+static int __devinit asic3_led_probe(struct platform_device *pdev)
+{
+       struct asic3_led *led = pdev->dev.platform_data;
+       int ret;
+
+       ret = mfd_cell_enable(pdev);
+       if (ret < 0)
+               goto ret0;
+
+       led->cdev = kzalloc(sizeof(struct led_classdev), GFP_KERNEL);
+       if (!led->cdev) {
+               ret = -ENOMEM;
+               goto ret1;
+       }
+
+       led->cdev->name = led->name;
+       led->cdev->default_trigger = led->default_trigger;
+       led->cdev->brightness_set = brightness_set;
+       led->cdev->blink_set = blink_set;
+
+       ret = led_classdev_register(&pdev->dev, led->cdev);
+       if (ret < 0)
+               goto ret2;
+
+       return 0;
+
+ret2:
+       kfree(led->cdev);
+ret1:
+       (void) mfd_cell_disable(pdev);
+ret0:
+       return ret;
+}
+
+static int __devexit asic3_led_remove(struct platform_device *pdev)
+{
+       struct asic3_led *led = pdev->dev.platform_data;
+
+       led_classdev_unregister(led->cdev);
+
+       kfree(led->cdev);
+
+       return mfd_cell_disable(pdev);
+}
+
+static struct platform_driver asic3_led_driver = {
+       .probe          = asic3_led_probe,
+       .remove         = __devexit_p(asic3_led_remove),
+       .driver         = {
+               .name   = "leds-asic3",
+               .owner  = THIS_MODULE,
+       },
+};
+
+MODULE_ALIAS("platform:leds-asic3");
+
+static int __init asic3_led_init(void)
+{
+       return platform_driver_register(&asic3_led_driver);
+}
+
+static void __exit asic3_led_exit(void)
+{
+       platform_driver_unregister(&asic3_led_driver);
+}
+
+module_init(asic3_led_init);
+module_exit(asic3_led_exit);
+
+MODULE_AUTHOR("Paul Parsons <lost.distance@yahoo.com>");
+MODULE_DESCRIPTION("HTC ASIC3 LED driver");
+MODULE_LICENSE("GPL");
index 126ca7955f6e16f4f43cd72ec387d4a8a0fd3637..f369e56d6547ced1e92444c8f22cad3e3e09060d 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/leds.h>
 #include <linux/workqueue.h>
 #include <linux/mfd/mc13783.h>
-#include <linux/mfd/core.h>
 #include <linux/slab.h>
 
 struct mc13783_led {
@@ -184,7 +183,7 @@ static int __devinit mc13783_led_setup(struct mc13783_led *led, int max_current)
 
 static int __devinit mc13783_leds_prepare(struct platform_device *pdev)
 {
-       struct mc13783_leds_platform_data *pdata = mfd_get_data(pdev);
+       struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent);
        int ret = 0;
        int reg = 0;
@@ -265,7 +264,7 @@ out:
 
 static int __devinit mc13783_led_probe(struct platform_device *pdev)
 {
-       struct mc13783_leds_platform_data *pdata = mfd_get_data(pdev);
+       struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct mc13783_led_platform_data *led_cur;
        struct mc13783_led *led, *led_dat;
        int ret, i;
@@ -352,7 +351,7 @@ err_free:
 
 static int __devexit mc13783_led_remove(struct platform_device *pdev)
 {
-       struct mc13783_leds_platform_data *pdata = mfd_get_data(pdev);
+       struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev);
        struct mc13783_led *led = platform_get_drvdata(pdev);
        struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent);
        int i;
index 2d8b4044be36b0ab0436f9f9dde08b487763f90b..b2b0c45f32a91892ee08f778317c12b91b17cf52 100644 (file)
@@ -20,6 +20,7 @@
  */
 
 #include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 
 #define UNSET (-1U)
 
-#define DM1105_BOARD_NOAUTO            UNSET
-#define DM1105_BOARD_UNKNOWN           0
-#define DM1105_BOARD_DVBWORLD_2002     1
-#define DM1105_BOARD_DVBWORLD_2004     2
-#define DM1105_BOARD_AXESS_DM05                3
+#define DM1105_BOARD_NOAUTO                    UNSET
+#define DM1105_BOARD_UNKNOWN                   0
+#define DM1105_BOARD_DVBWORLD_2002             1
+#define DM1105_BOARD_DVBWORLD_2004             2
+#define DM1105_BOARD_AXESS_DM05                        3
+#define DM1105_BOARD_UNBRANDED_I2C_ON_GPIO     4
 
 /* ----------------------------------------------- */
 /*
 #define DM1105_MAX                             0x04
 
 #define DRIVER_NAME                            "dm1105"
+#define DM1105_I2C_GPIO_NAME                   "dm1105-gpio"
 
 #define DM1105_DMA_PACKETS                     47
 #define DM1105_DMA_PACKET_LENGTH               (128*4)
 #define DM1105_DMA_BYTES                       (128 * 4 * DM1105_DMA_PACKETS)
 
+/* GPIO bit masks */
+#define GPIO08                                 (1 << 8)
+#define GPIO13                                 (1 << 13)
+#define GPIO14                                 (1 << 14)
+#define GPIO15                                 (1 << 15)
+#define GPIO16                                 (1 << 16)
+#define GPIO17                                 (1 << 17)
+#define GPIO_ALL                               0x03ffff
+
 /* GPIO's for LNB power control */
-#define DM1105_LNB_MASK                                0x00000000
-#define DM1105_LNB_OFF                         0x00020000
-#define DM1105_LNB_13V                         0x00010100
-#define DM1105_LNB_18V                         0x00000100
+#define DM1105_LNB_MASK                                (GPIO_ALL & ~(GPIO14 | GPIO13))
+#define DM1105_LNB_OFF                         GPIO17
+#define DM1105_LNB_13V                         (GPIO16 | GPIO08)
+#define DM1105_LNB_18V                         GPIO08
 
 /* GPIO's for LNB power control for Axess DM05 */
-#define DM05_LNB_MASK                          0x00000000
-#define DM05_LNB_OFF                           0x00020000/* actually 13v */
-#define DM05_LNB_13V                           0x00020000
-#define DM05_LNB_18V                           0x00030000
+#define DM05_LNB_MASK                          (GPIO_ALL & ~(GPIO14 | GPIO13))
+#define DM05_LNB_OFF                           GPIO17 /* actually 13V */
+#define DM05_LNB_13V                           GPIO17
+#define DM05_LNB_18V                           (GPIO17 | GPIO16)
+
+/* GPIO's for LNB power control for unbranded with I2C on GPIO */
+#define UNBR_LNB_MASK                          (GPIO17 | GPIO16)
+#define UNBR_LNB_OFF                           0
+#define UNBR_LNB_13V                           GPIO17
+#define UNBR_LNB_18V                           (GPIO17 | GPIO16)
 
 static unsigned int card[]  = {[0 ... 3] = UNSET };
 module_param_array(card,  int, NULL, 0444);
@@ -187,7 +205,11 @@ static unsigned int dm1105_devcount;
 DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
 
 struct dm1105_board {
-       char                    *name;
+       char    *name;
+       struct  {
+               u32     mask, off, v13, v18;
+       } lnb;
+       u32     gpio_scl, gpio_sda;
 };
 
 struct dm1105_subid {
@@ -199,15 +221,50 @@ struct dm1105_subid {
 static const struct dm1105_board dm1105_boards[] = {
        [DM1105_BOARD_UNKNOWN] = {
                .name           = "UNKNOWN/GENERIC",
+               .lnb = {
+                       .mask = DM1105_LNB_MASK,
+                       .off = DM1105_LNB_OFF,
+                       .v13 = DM1105_LNB_13V,
+                       .v18 = DM1105_LNB_18V,
+               },
        },
        [DM1105_BOARD_DVBWORLD_2002] = {
                .name           = "DVBWorld PCI 2002",
+               .lnb = {
+                       .mask = DM1105_LNB_MASK,
+                       .off = DM1105_LNB_OFF,
+                       .v13 = DM1105_LNB_13V,
+                       .v18 = DM1105_LNB_18V,
+               },
        },
        [DM1105_BOARD_DVBWORLD_2004] = {
                .name           = "DVBWorld PCI 2004",
+               .lnb = {
+                       .mask = DM1105_LNB_MASK,
+                       .off = DM1105_LNB_OFF,
+                       .v13 = DM1105_LNB_13V,
+                       .v18 = DM1105_LNB_18V,
+               },
        },
        [DM1105_BOARD_AXESS_DM05] = {
                .name           = "Axess/EasyTv DM05",
+               .lnb = {
+                       .mask = DM05_LNB_MASK,
+                       .off = DM05_LNB_OFF,
+                       .v13 = DM05_LNB_13V,
+                       .v18 = DM05_LNB_18V,
+               },
+       },
+       [DM1105_BOARD_UNBRANDED_I2C_ON_GPIO] = {
+               .name           = "Unbranded DM1105 with i2c on GPIOs",
+               .lnb = {
+                       .mask = UNBR_LNB_MASK,
+                       .off = UNBR_LNB_OFF,
+                       .v13 = UNBR_LNB_13V,
+                       .v18 = UNBR_LNB_18V,
+               },
+               .gpio_scl       = GPIO14,
+               .gpio_sda       = GPIO13,
        },
 };
 
@@ -293,6 +350,8 @@ struct dm1105_dev {
 
        /* i2c */
        struct i2c_adapter i2c_adap;
+       struct i2c_adapter i2c_bb_adap;
+       struct i2c_algo_bit_data i2c_bit;
 
        /* irq */
        struct work_struct work;
@@ -328,6 +387,103 @@ struct dm1105_dev {
 #define dm_setl(reg, bit)      dm_andorl((reg), (bit), (bit))
 #define dm_clearl(reg, bit)    dm_andorl((reg), (bit), 0)
 
+/* The chip has 18 GPIOs. In HOST mode the GPIOs are used as 15-bit address
+ * lines, so only the 3 GPIOs from GPIO15 to GPIO17 are usable. Whether HOST
+ * mode is enabled is not checked here, as it is not implemented yet.
+ */
+static void dm1105_gpio_set(struct dm1105_dev *dev, u32 mask)
+{
+       if (mask & 0xfffc0000)
+               printk(KERN_ERR "%s: Only 18 GPIO's are allowed\n", __func__);
+
+       if (mask & 0x0003ffff)
+               dm_setl(DM1105_GPIOVAL, mask & 0x0003ffff);
+
+}
+
+static void dm1105_gpio_clear(struct dm1105_dev *dev, u32 mask)
+{
+       if (mask & 0xfffc0000)
+               printk(KERN_ERR "%s: Only 18 GPIO's are allowed\n", __func__);
+
+       if (mask & 0x0003ffff)
+               dm_clearl(DM1105_GPIOVAL, mask & 0x0003ffff);
+
+}
+
+static void dm1105_gpio_andor(struct dm1105_dev *dev, u32 mask, u32 val)
+{
+       if (mask & 0xfffc0000)
+               printk(KERN_ERR "%s: Only 18 GPIO's are allowed\n", __func__);
+
+       if (mask & 0x0003ffff)
+               dm_andorl(DM1105_GPIOVAL, mask & 0x0003ffff, val);
+
+}
+
+static u32 dm1105_gpio_get(struct dm1105_dev *dev, u32 mask)
+{
+       if (mask & 0xfffc0000)
+               printk(KERN_ERR "%s: Only 18 GPIO's are allowed\n", __func__);
+
+       if (mask & 0x0003ffff)
+               return dm_readl(DM1105_GPIOVAL) & mask & 0x0003ffff;
+
+       return 0;
+}
+
+static void dm1105_gpio_enable(struct dm1105_dev *dev, u32 mask, int asoutput)
+{
+       if (mask & 0xfffc0000)
+               printk(KERN_ERR "%s: Only 18 GPIO's are allowed\n", __func__);
+
+       if ((mask & 0x0003ffff) && asoutput)
+               dm_clearl(DM1105_GPIOCTR, mask & 0x0003ffff);
+       else if ((mask & 0x0003ffff) && !asoutput)
+               dm_setl(DM1105_GPIOCTR, mask & 0x0003ffff);
+
+}
+
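+/*
+ * Open-drain style bit-banging: a line is released (and presumably pulled
+ * high by the bus pull-ups) by switching its GPIO to input, and driven low
+ * by switching it to output with the value cleared.
+ */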
+static void dm1105_setline(struct dm1105_dev *dev, u32 line, int state)
+{
+       if (state)
+               dm1105_gpio_enable(dev, line, 0);
+       else {
+               dm1105_gpio_enable(dev, line, 1);
+               dm1105_gpio_clear(dev, line);
+       }
+}
+
+static void dm1105_setsda(void *data, int state)
+{
+       struct dm1105_dev *dev = data;
+
+       dm1105_setline(dev, dm1105_boards[dev->boardnr].gpio_sda, state);
+}
+
+static void dm1105_setscl(void *data, int state)
+{
+       struct dm1105_dev *dev = data;
+
+       dm1105_setline(dev, dm1105_boards[dev->boardnr].gpio_scl, state);
+}
+
+static int dm1105_getsda(void *data)
+{
+       struct dm1105_dev *dev = data;
+
+       return dm1105_gpio_get(dev, dm1105_boards[dev->boardnr].gpio_sda)
+                                                                       ? 1 : 0;
+}
+
+static int dm1105_getscl(void *data)
+{
+       struct dm1105_dev *dev = data;
+
+       return dm1105_gpio_get(dev, dm1105_boards[dev->boardnr].gpio_scl)
+                                                                       ? 1 : 0;
+}
+
 static int dm1105_i2c_xfer(struct i2c_adapter *i2c_adap,
                            struct i2c_msg *msgs, int num)
 {
@@ -436,31 +592,20 @@ static inline struct dm1105_dev *frontend_to_dm1105_dev(struct dvb_frontend *fe)
 static int dm1105_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
 {
        struct dm1105_dev *dev = frontend_to_dm1105_dev(fe);
-       u32 lnb_mask, lnb_13v, lnb_18v, lnb_off;
 
-       switch (dev->boardnr) {
-       case DM1105_BOARD_AXESS_DM05:
-               lnb_mask = DM05_LNB_MASK;
-               lnb_off = DM05_LNB_OFF;
-               lnb_13v = DM05_LNB_13V;
-               lnb_18v = DM05_LNB_18V;
-               break;
-       case DM1105_BOARD_DVBWORLD_2002:
-       case DM1105_BOARD_DVBWORLD_2004:
-       default:
-               lnb_mask = DM1105_LNB_MASK;
-               lnb_off = DM1105_LNB_OFF;
-               lnb_13v = DM1105_LNB_13V;
-               lnb_18v = DM1105_LNB_18V;
-       }
-
-       dm_writel(DM1105_GPIOCTR, lnb_mask);
+       dm1105_gpio_enable(dev, dm1105_boards[dev->boardnr].lnb.mask, 1);
        if (voltage == SEC_VOLTAGE_18)
-               dm_writel(DM1105_GPIOVAL, lnb_18v);
+               dm1105_gpio_andor(dev,
+                               dm1105_boards[dev->boardnr].lnb.mask,
+                               dm1105_boards[dev->boardnr].lnb.v18);
        else if (voltage == SEC_VOLTAGE_13)
-               dm_writel(DM1105_GPIOVAL, lnb_13v);
+               dm1105_gpio_andor(dev,
+                               dm1105_boards[dev->boardnr].lnb.mask,
+                               dm1105_boards[dev->boardnr].lnb.v13);
        else
-               dm_writel(DM1105_GPIOVAL, lnb_off);
+               dm1105_gpio_andor(dev,
+                               dm1105_boards[dev->boardnr].lnb.mask,
+                               dm1105_boards[dev->boardnr].lnb.off);
 
        return 0;
 }
@@ -708,6 +853,38 @@ static int __devinit frontend_init(struct dm1105_dev *dev)
        int ret;
 
        switch (dev->boardnr) {
+       case DM1105_BOARD_UNBRANDED_I2C_ON_GPIO:
+               dm1105_gpio_enable(dev, GPIO15, 1);
+               dm1105_gpio_clear(dev, GPIO15);
+               msleep(100);
+               dm1105_gpio_set(dev, GPIO15);
+               msleep(200);
+               dev->fe = dvb_attach(
+                       stv0299_attach, &sharp_z0194a_config,
+                       &dev->i2c_bb_adap);
+               if (dev->fe) {
+                       dev->fe->ops.set_voltage = dm1105_set_voltage;
+                       dvb_attach(dvb_pll_attach, dev->fe, 0x60,
+                                       &dev->i2c_bb_adap, DVB_PLL_OPERA1);
+                       break;
+               }
+
+               dev->fe = dvb_attach(
+                       stv0288_attach, &earda_config,
+                       &dev->i2c_bb_adap);
+               if (dev->fe) {
+                       dev->fe->ops.set_voltage = dm1105_set_voltage;
+                       dvb_attach(stb6000_attach, dev->fe, 0x61,
+                                       &dev->i2c_bb_adap);
+                       break;
+               }
+
+               dev->fe = dvb_attach(
+                       si21xx_attach, &serit_config,
+                       &dev->i2c_bb_adap);
+               if (dev->fe)
+                       dev->fe->ops.set_voltage = dm1105_set_voltage;
+               break;
        case DM1105_BOARD_DVBWORLD_2004:
                dev->fe = dvb_attach(
                        cx24116_attach, &serit_sp2633_config,
@@ -870,11 +1047,32 @@ static int __devinit dm1105_probe(struct pci_dev *pdev,
        if (ret < 0)
                goto err_dm1105_hw_exit;
 
+       i2c_set_adapdata(&dev->i2c_bb_adap, dev);
+       strcpy(dev->i2c_bb_adap.name, DM1105_I2C_GPIO_NAME);
+       dev->i2c_bb_adap.owner = THIS_MODULE;
+       dev->i2c_bb_adap.dev.parent = &pdev->dev;
+       dev->i2c_bb_adap.algo_data = &dev->i2c_bit;
+       dev->i2c_bit.data = dev;
+       dev->i2c_bit.setsda = dm1105_setsda;
+       dev->i2c_bit.setscl = dm1105_setscl;
+       dev->i2c_bit.getsda = dm1105_getsda;
+       dev->i2c_bit.getscl = dm1105_getscl;
+       dev->i2c_bit.udelay = 10;
+       dev->i2c_bit.timeout = 10;
+
+       /* Raise SCL and SDA */
+       dm1105_setsda(dev, 1);
+       dm1105_setscl(dev, 1);
+
+       ret = i2c_bit_add_bus(&dev->i2c_bb_adap);
+       if (ret < 0)
+               goto err_i2c_del_adapter;
+
        /* dvb */
        ret = dvb_register_adapter(&dev->dvb_adapter, DRIVER_NAME,
                                        THIS_MODULE, &pdev->dev, adapter_nr);
        if (ret < 0)
-               goto err_i2c_del_adapter;
+               goto err_i2c_del_adapters;
 
        dvb_adapter = &dev->dvb_adapter;
 
@@ -952,6 +1150,8 @@ err_dvb_dmx_release:
        dvb_dmx_release(dvbdemux);
 err_dvb_unregister_adapter:
        dvb_unregister_adapter(dvb_adapter);
+err_i2c_del_adapters:
+       i2c_del_adapter(&dev->i2c_bb_adap);
 err_i2c_del_adapter:
        i2c_del_adapter(&dev->i2c_adap);
 err_dm1105_hw_exit:
index f36f471deae26e0327299e2469d662e938b1caa9..37b146961ae2d00d185e5b7c992096e66531a2f0 100644 (file)
@@ -207,17 +207,6 @@ static int lme2510_stream_restart(struct dvb_usb_device *d)
                        rbuff, sizeof(rbuff));
        return ret;
 }
-static int lme2510_remote_keypress(struct dvb_usb_adapter *adap, u32 keypress)
-{
-       struct dvb_usb_device *d = adap->dev;
-
-       deb_info(1, "INT Key Keypress =%04x", keypress);
-
-       if (keypress > 0)
-               rc_keydown(d->rc_dev, keypress, 0);
-
-       return 0;
-}
 
 static int lme2510_enable_pid(struct dvb_usb_device *d, u8 index, u16 pid_out)
 {
@@ -256,6 +245,7 @@ static void lme2510_int_response(struct urb *lme_urb)
        struct lme2510_state *st = adap->dev->priv;
        static u8 *ibuf, *rbuf;
        int i = 0, offset;
+       u32 key;
 
        switch (lme_urb->status) {
        case 0:
@@ -282,10 +272,16 @@ static void lme2510_int_response(struct urb *lme_urb)
 
                switch (ibuf[0]) {
                case 0xaa:
-                       debug_data_snipet(1, "INT Remote data snipet in", ibuf);
-                       lme2510_remote_keypress(adap,
-                               (u32)(ibuf[2] << 24) + (ibuf[3] << 16) +
-                               (ibuf[4] << 8) + ibuf[5]);
+                       debug_data_snipet(1, "INT Remote data snipet", ibuf);
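+                       /*
+                        * ibuf[5] appears to hold the NEC command byte with
+                        * ibuf[4] as its complement; the scancode is rebuilt
+                        * from the inverted address bytes and the command.
+                        */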
+                       if ((ibuf[4] + ibuf[5]) == 0xff) {
+                               key = ibuf[5];
+                               key += (ibuf[3] > 0)
+                                       ? (ibuf[3] ^ 0xff) << 8 : 0;
+                               key += (ibuf[2] ^ 0xff) << 16;
+                               deb_info(1, "INT Key =%08x", key);
+                               if (adap->dev->rc_dev != NULL)
+                                       rc_keydown(adap->dev->rc_dev, key, 0);
+                       }
                        break;
                case 0xbb:
                        switch (st->tuner_config) {
@@ -691,45 +687,6 @@ static int lme2510_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
        return (ret < 0) ? -ENODEV : 0;
 }
 
-static int lme2510_int_service(struct dvb_usb_adapter *adap)
-{
-       struct dvb_usb_device *d = adap->dev;
-       struct rc_dev *rc;
-       int ret;
-
-       info("STA Configuring Remote");
-
-       rc = rc_allocate_device();
-       if (!rc)
-               return -ENOMEM;
-
-       usb_make_path(d->udev, d->rc_phys, sizeof(d->rc_phys));
-       strlcat(d->rc_phys, "/ir0", sizeof(d->rc_phys));
-
-       rc->input_name = "LME2510 Remote Control";
-       rc->input_phys = d->rc_phys;
-       rc->map_name = RC_MAP_LME2510;
-       rc->driver_name = "LME 2510";
-       usb_to_input_id(d->udev, &rc->input_id);
-
-       ret = rc_register_device(rc);
-       if (ret) {
-               rc_free_device(rc);
-               return ret;
-       }
-       d->rc_dev = rc;
-
-       /* Start the Interrupt */
-       ret = lme2510_int_read(adap);
-       if (ret < 0) {
-               rc_unregister_device(rc);
-               info("INT Unable to start Interrupt Service");
-               return -ENODEV;
-       }
-
-       return 0;
-}
-
 static u8 check_sum(u8 *p, u8 len)
 {
        u8 sum = 0;
@@ -831,7 +788,7 @@ static int lme_firmware_switch(struct usb_device *udev, int cold)
 
        cold_fw = !cold;
 
-       if (udev->descriptor.idProduct == 0x1122) {
+       if (le16_to_cpu(udev->descriptor.idProduct) == 0x1122) {
                switch (dvb_usb_lme2510_firmware) {
                default:
                        dvb_usb_lme2510_firmware = TUNER_S0194;
@@ -1053,8 +1010,11 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap)
 
 
 end:   if (ret) {
-               kfree(adap->fe);
-               adap->fe = NULL;
+               if (adap->fe) {
+                       dvb_frontend_detach(adap->fe);
+                       adap->fe = NULL;
+               }
+               adap->dev->props.rc.core.rc_codes = NULL;
                return -ENODEV;
        }
 
@@ -1097,8 +1057,12 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap)
                return -ENODEV;
        }
 
-       /* Start the Interrupt & Remote*/
-       ret = lme2510_int_service(adap);
+       /* Start the Interrupt*/
+       ret = lme2510_int_read(adap);
+       if (ret < 0) {
+               info("INT Unable to start Interrupt Service");
+               return -ENODEV;
+       }
 
        return ret;
 }
@@ -1204,6 +1168,12 @@ static struct dvb_usb_device_properties lme2510_properties = {
                        }
                }
        },
+       .rc.core = {
+               .protocol       = RC_TYPE_NEC,
+               .module_name    = "LME2510 Remote Control",
+               .allowed_protos = RC_TYPE_NEC,
+               .rc_codes       = RC_MAP_LME2510,
+       },
        .power_ctrl       = lme2510_powerup,
        .identify_state   = lme2510_identify_state,
        .i2c_algo         = &lme2510_i2c_algo,
@@ -1246,6 +1216,12 @@ static struct dvb_usb_device_properties lme2510c_properties = {
                        }
                }
        },
+       .rc.core = {
+               .protocol       = RC_TYPE_NEC,
+               .module_name    = "LME2510 Remote Control",
+               .allowed_protos = RC_TYPE_NEC,
+               .rc_codes       = RC_MAP_LME2510,
+       },
        .power_ctrl       = lme2510_powerup,
        .identify_state   = lme2510_identify_state,
        .i2c_algo         = &lme2510_i2c_algo,
@@ -1269,19 +1245,21 @@ static void *lme2510_exit_int(struct dvb_usb_device *d)
                adap->feedcount = 0;
        }
 
-       if (st->lme_urb != NULL) {
+       if (st->usb_buffer != NULL) {
                st->i2c_talk_onoff = 1;
                st->signal_lock = 0;
                st->signal_level = 0;
                st->signal_sn = 0;
                buffer = st->usb_buffer;
+       }
+
+       if (st->lme_urb != NULL) {
                usb_kill_urb(st->lme_urb);
                usb_free_coherent(d->udev, 5000, st->buffer,
                                  st->lme_urb->transfer_dma);
                info("Interrupt Service Stopped");
-               rc_unregister_device(d->rc_dev);
-               info("Remote Stopped");
        }
+
        return buffer;
 }
 
@@ -1293,7 +1271,8 @@ static void lme2510_exit(struct usb_interface *intf)
        if (d != NULL) {
                usb_buffer = lme2510_exit_int(d);
                dvb_usb_device_exit(intf);
-               kfree(usb_buffer);
+               if (usb_buffer != NULL)
+                       kfree(usb_buffer);
        }
 }
 
@@ -1327,5 +1306,5 @@ module_exit(lme2510_module_exit);
 
 MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
 MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
-MODULE_VERSION("1.86");
+MODULE_VERSION("1.88");
 MODULE_LICENSE("GPL");
index 2da55ec2039259d4e05e6154502e4e9068af487a..d70eee00f33a5d33fbefef13860f20ba15c020c4 100644 (file)
@@ -23,7 +23,7 @@
 #include "stb0899_priv.h"
 #include "stb0899_reg.h"
 
-inline u32 stb0899_do_div(u64 n, u32 d)
+static inline u32 stb0899_do_div(u64 n, u32 d)
 {
        /* wrap do_div() for ease of use */
 
index 1742056a34e8c0a8270aee6a81a8ecba4917fc9e..53c7d8f1df289a373a3b96a143f87841f6fd0d43 100644 (file)
@@ -224,7 +224,6 @@ exit:
 }
 
 EXPORT_SYMBOL(tda8261_attach);
-MODULE_PARM_DESC(verbose, "Set verbosity level");
 
 MODULE_AUTHOR("Manu Abraham");
 MODULE_DESCRIPTION("TDA8261 8PSK/QPSK Tuner");
index 5c2a9058c09fd6966d4c47c8c44b8fd681e8d9a6..e83e84003025222f4a74d95024fa5df80f658ca8 100644 (file)
@@ -412,8 +412,7 @@ static int __devinit maxiradio_init_one(struct pci_dev *pdev, const struct pci_d
                goto err_out_free_region;
        }
 
-       v4l2_info(v4l2_dev, "version " DRIVER_VERSION
-                       " time " __TIME__ "  " __DATE__ "\n");
+       v4l2_info(v4l2_dev, "version " DRIVER_VERSION "\n");
 
        v4l2_info(v4l2_dev, "found Guillemot MAXI Radio device (io = 0x%x)\n",
               dev->io);
index 1e3a8dd820a4382a26272ff4557024e64adc619d..a185610b376be1a14e79b52751c7f95aab777dd0 100644 (file)
@@ -21,7 +21,6 @@
 #include <media/v4l2-ioctl.h>
 #include <media/v4l2-device.h>
 #include <linux/platform_device.h>
-#include <linux/mfd/core.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
@@ -149,7 +148,7 @@ static const struct v4l2_file_operations timbradio_fops = {
 
 static int __devinit timbradio_probe(struct platform_device *pdev)
 {
-       struct timb_radio_platform_data *pdata = mfd_get_data(pdev);
+       struct timb_radio_platform_data *pdata = pdev->dev.platform_data;
        struct timbradio *tr;
        int err;
 
index e2550dc2944f939812805d2c23778c86df2ff7e0..459f7272d32634d67143ee679a5375fc85f37f99 100644 (file)
@@ -1382,7 +1382,7 @@ static int wl1273_fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
 
        switch (ctrl->id) {
        case  V4L2_CID_TUNE_ANTENNA_CAPACITOR:
-               ctrl->val = wl1273_fm_get_tx_ctune(radio);
+               ctrl->cur.val = wl1273_fm_get_tx_ctune(radio);
                break;
 
        default:
@@ -1990,7 +1990,7 @@ static int wl1273_fm_radio_remove(struct platform_device *pdev)
 
 static int __devinit wl1273_fm_radio_probe(struct platform_device *pdev)
 {
-       struct wl1273_core **core = mfd_get_data(pdev);
+       struct wl1273_core **core = pdev->dev.platform_data;
        struct wl1273_device *radio;
        struct v4l2_ctrl *ctrl;
        int r = 0;
index d50e5ac75ab68659743be2005473db6789e89192..87010724f9147b53170dc59f1e603115ae2fd72c 100644 (file)
@@ -191,7 +191,7 @@ static int fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
 
        switch (ctrl->id) {
        case  V4L2_CID_TUNE_ANTENNA_CAPACITOR:
-               ctrl->val = fm_tx_get_tune_cap_val(fmdev);
+               ctrl->cur.val = fm_tx_get_tune_cap_val(fmdev);
                break;
        default:
                fmwarn("%s: Unknown IOCTL: %d\n", __func__, ctrl->id);
index 154c337f00fda10182e30222c37533596d1b40e5..7d4bbc226d068dd263a18b9ee69c70793245669f 100644 (file)
@@ -148,6 +148,18 @@ config IR_ITE_CIR
           To compile this driver as a module, choose M here: the
           module will be called ite-cir.
 
+config IR_FINTEK
+       tristate "Fintek Consumer Infrared Transceiver"
+       depends on PNP
+       depends on RC_CORE
+       ---help---
+          Say Y here to enable support for the integrated infrared
+          receiver/transceiver made by Fintek. This chip is found on
+          assorted Jetway motherboards (and possibly others).
+
+          To compile this driver as a module, choose M here: the
+          module will be called fintek-cir.
+
 config IR_NUVOTON
        tristate "Nuvoton w836x7hg Consumer Infrared Transceiver"
        depends on PNP
index 1f90a219a1627f98d13de3095844bd9f733d87d4..52830e5f4eaaa4ee184fc20ff0ece6e3f3809ace 100644 (file)
@@ -16,6 +16,7 @@ obj-$(CONFIG_IR_LIRC_CODEC) += ir-lirc-codec.o
 obj-$(CONFIG_IR_IMON) += imon.o
 obj-$(CONFIG_IR_ITE_CIR) += ite-cir.o
 obj-$(CONFIG_IR_MCEUSB) += mceusb.o
+obj-$(CONFIG_IR_FINTEK) += fintek-cir.o
 obj-$(CONFIG_IR_NUVOTON) += nuvoton-cir.o
 obj-$(CONFIG_IR_ENE) += ene_ir.o
 obj-$(CONFIG_IR_REDRAT3) += redrat3.o
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
new file mode 100644 (file)
index 0000000..8fa539d
--- /dev/null
@@ -0,0 +1,684 @@
+/*
+ * Driver for Feature Integration Technology Inc. (aka Fintek) LPC CIR
+ *
+ * Copyright (C) 2011 Jarod Wilson <jarod@redhat.com>
+ *
+ * Special thanks to Fintek for providing hardware and spec sheets.
+ * This driver is based upon the nuvoton, ite and ene drivers for
+ * similar hardware.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pnp.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <media/rc-core.h>
+#include <linux/pci_ids.h>
+
+#include "fintek-cir.h"
+
+/* write val to config reg */
+static inline void fintek_cr_write(struct fintek_dev *fintek, u8 val, u8 reg)
+{
+       fit_dbg("%s: reg 0x%02x, val 0x%02x  (ip/dp: %02x/%02x)",
+               __func__, reg, val, fintek->cr_ip, fintek->cr_dp);
+       outb(reg, fintek->cr_ip);
+       outb(val, fintek->cr_dp);
+}
+
+/* read val from config reg */
+static inline u8 fintek_cr_read(struct fintek_dev *fintek, u8 reg)
+{
+       u8 val;
+
+       outb(reg, fintek->cr_ip);
+       val = inb(fintek->cr_dp);
+
+       fit_dbg("%s: reg 0x%02x, val 0x%02x  (ip/dp: %02x/%02x)",
+               __func__, reg, val, fintek->cr_ip, fintek->cr_dp);
+       return val;
+}
+
+/* update config register bit without changing other bits */
+static inline void fintek_set_reg_bit(struct fintek_dev *fintek, u8 val, u8 reg)
+{
+       u8 tmp = fintek_cr_read(fintek, reg) | val;
+       fintek_cr_write(fintek, tmp, reg);
+}
+
+/* clear config register bit without changing other bits */
+static inline void fintek_clear_reg_bit(struct fintek_dev *fintek, u8 val, u8 reg)
+{
+       u8 tmp = fintek_cr_read(fintek, reg) & ~val;
+       fintek_cr_write(fintek, tmp, reg);
+}
+
+/* enter config mode */
+static inline void fintek_config_mode_enable(struct fintek_dev *fintek)
+{
+       /* Enabling Config Mode explicitly requires writing 2x */
+       outb(CONFIG_REG_ENABLE, fintek->cr_ip);
+       outb(CONFIG_REG_ENABLE, fintek->cr_ip);
+}
+
+/* exit config mode */
+static inline void fintek_config_mode_disable(struct fintek_dev *fintek)
+{
+       outb(CONFIG_REG_DISABLE, fintek->cr_ip);
+}
+
+/*
+ * When you want to address a specific logical device, write its logical
+ * device number to GCR_LOGICAL_DEV_NO
+ */
+static inline void fintek_select_logical_dev(struct fintek_dev *fintek, u8 ldev)
+{
+       fintek_cr_write(fintek, ldev, GCR_LOGICAL_DEV_NO);
+}
+
+/* write val to cir config register */
+static inline void fintek_cir_reg_write(struct fintek_dev *fintek, u8 val, u8 offset)
+{
+       outb(val, fintek->cir_addr + offset);
+}
+
+/* read val from cir config register */
+static u8 fintek_cir_reg_read(struct fintek_dev *fintek, u8 offset)
+{
+       u8 val;
+
+       val = inb(fintek->cir_addr + offset);
+
+       return val;
+}
+
+#define pr_reg(text, ...) \
+       printk(KERN_INFO KBUILD_MODNAME ": " text, ## __VA_ARGS__)
+
+/* dump current cir register contents */
+static void cir_dump_regs(struct fintek_dev *fintek)
+{
+       fintek_config_mode_enable(fintek);
+       fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+
+       pr_reg("%s: Dump CIR logical device registers:\n", FINTEK_DRIVER_NAME);
+       pr_reg(" * CR CIR BASE ADDR: 0x%x\n",
+              (fintek_cr_read(fintek, CIR_CR_BASE_ADDR_HI) << 8) |
+               fintek_cr_read(fintek, CIR_CR_BASE_ADDR_LO));
+       pr_reg(" * CR CIR IRQ NUM:   0x%x\n",
+              fintek_cr_read(fintek, CIR_CR_IRQ_SEL));
+
+       fintek_config_mode_disable(fintek);
+
+       pr_reg("%s: Dump CIR registers:\n", FINTEK_DRIVER_NAME);
+       pr_reg(" * STATUS:     0x%x\n", fintek_cir_reg_read(fintek, CIR_STATUS));
+       pr_reg(" * CONTROL:    0x%x\n", fintek_cir_reg_read(fintek, CIR_CONTROL));
+       pr_reg(" * RX_DATA:    0x%x\n", fintek_cir_reg_read(fintek, CIR_RX_DATA));
+       pr_reg(" * TX_CONTROL: 0x%x\n", fintek_cir_reg_read(fintek, CIR_TX_CONTROL));
+       pr_reg(" * TX_DATA:    0x%x\n", fintek_cir_reg_read(fintek, CIR_TX_DATA));
+}
+
+/* detect hardware features */
+static int fintek_hw_detect(struct fintek_dev *fintek)
+{
+       unsigned long flags;
+       u8 chip_major, chip_minor;
+       u8 vendor_major, vendor_minor;
+       u8 portsel, ir_class;
+       u16 vendor;
+       int ret = 0;
+
+       fintek_config_mode_enable(fintek);
+
+       /* Check if we're using config port 0x4e or 0x2e */
+       portsel = fintek_cr_read(fintek, GCR_CONFIG_PORT_SEL);
+       if (portsel == 0xff) {
+               fit_pr(KERN_INFO, "first portsel read was bunk, trying alt");
+               fintek_config_mode_disable(fintek);
+               fintek->cr_ip = CR_INDEX_PORT2;
+               fintek->cr_dp = CR_DATA_PORT2;
+               fintek_config_mode_enable(fintek);
+               portsel = fintek_cr_read(fintek, GCR_CONFIG_PORT_SEL);
+       }
+       fit_dbg("portsel reg: 0x%02x", portsel);
+
+       ir_class = fintek_cir_reg_read(fintek, CIR_CR_CLASS);
+       fit_dbg("ir_class reg: 0x%02x", ir_class);
+
+       switch (ir_class) {
+       case CLASS_RX_2TX:
+       case CLASS_RX_1TX:
+               fintek->hw_tx_capable = true;
+               break;
+       case CLASS_RX_ONLY:
+       default:
+               fintek->hw_tx_capable = false;
+               break;
+       }
+
+       chip_major = fintek_cr_read(fintek, GCR_CHIP_ID_HI);
+       chip_minor = fintek_cr_read(fintek, GCR_CHIP_ID_LO);
+
+       vendor_major = fintek_cr_read(fintek, GCR_VENDOR_ID_HI);
+       vendor_minor = fintek_cr_read(fintek, GCR_VENDOR_ID_LO);
+       vendor = vendor_major << 8 | vendor_minor;
+
+       if (vendor != VENDOR_ID_FINTEK)
+               fit_pr(KERN_WARNING, "Unknown vendor ID: 0x%04x", vendor);
+       else
+               fit_dbg("Read Fintek vendor ID from chip");
+
+       fintek_config_mode_disable(fintek);
+
+       spin_lock_irqsave(&fintek->fintek_lock, flags);
+       fintek->chip_major  = chip_major;
+       fintek->chip_minor  = chip_minor;
+       fintek->chip_vendor = vendor;
+       spin_unlock_irqrestore(&fintek->fintek_lock, flags);
+
+       return ret;
+}
+
+static void fintek_cir_ldev_init(struct fintek_dev *fintek)
+{
+       /* Select CIR logical device and enable */
+       fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+       fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN);
+
+       /* Write allocated CIR address and IRQ information to hardware */
+       fintek_cr_write(fintek, fintek->cir_addr >> 8, CIR_CR_BASE_ADDR_HI);
+       fintek_cr_write(fintek, fintek->cir_addr & 0xff, CIR_CR_BASE_ADDR_LO);
+
+       fintek_cr_write(fintek, fintek->cir_irq, CIR_CR_IRQ_SEL);
+
+       fit_dbg("CIR initialized, base io address: 0x%lx, irq: %d (len: %d)",
+               fintek->cir_addr, fintek->cir_irq, fintek->cir_port_len);
+}
+
+/* enable CIR interrupts */
+static void fintek_enable_cir_irq(struct fintek_dev *fintek)
+{
+       fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_EN, CIR_STATUS);
+}
+
+static void fintek_cir_regs_init(struct fintek_dev *fintek)
+{
+       /* clear any and all stray interrupts */
+       fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
+
+       /* and finally, enable interrupts */
+       fintek_enable_cir_irq(fintek);
+}
+
+static void fintek_enable_wake(struct fintek_dev *fintek)
+{
+       fintek_config_mode_enable(fintek);
+       fintek_select_logical_dev(fintek, LOGICAL_DEV_ACPI);
+
+       /* Allow CIR PME's to wake system */
+       fintek_set_reg_bit(fintek, ACPI_WAKE_EN_CIR_BIT, LDEV_ACPI_WAKE_EN_REG);
+       /* Enable CIR PME's */
+       fintek_set_reg_bit(fintek, ACPI_PME_CIR_BIT, LDEV_ACPI_PME_EN_REG);
+       /* Clear CIR PME status register */
+       fintek_set_reg_bit(fintek, ACPI_PME_CIR_BIT, LDEV_ACPI_PME_CLR_REG);
+       /* Save state */
+       fintek_set_reg_bit(fintek, ACPI_STATE_CIR_BIT, LDEV_ACPI_STATE_REG);
+
+       fintek_config_mode_disable(fintek);
+}
+
+static int fintek_cmdsize(u8 cmd, u8 subcmd)
+{
+       int datasize = 0;
+
+       switch (cmd) {
+       case BUF_COMMAND_NULL:
+               if (subcmd == BUF_HW_CMD_HEADER)
+                       datasize = 1;
+               break;
+       case BUF_HW_CMD_HEADER:
+               if (subcmd == BUF_CMD_G_REVISION)
+                       datasize = 2;
+               break;
+       case BUF_COMMAND_HEADER:
+               switch (subcmd) {
+               case BUF_CMD_S_CARRIER:
+               case BUF_CMD_S_TIMEOUT:
+               case BUF_RSP_PULSE_COUNT:
+                       datasize = 2;
+                       break;
+               case BUF_CMD_SIG_END:
+               case BUF_CMD_S_TXMASK:
+               case BUF_CMD_S_RXSENSOR:
+                       datasize = 1;
+                       break;
+               }
+       }
+
+       return datasize;
+}
+
+/* process ir data stored in driver buffer */
+static void fintek_process_rx_ir_data(struct fintek_dev *fintek)
+{
+       DEFINE_IR_RAW_EVENT(rawir);
+       u8 sample;
+       int i;
+
+       for (i = 0; i < fintek->pkts; i++) {
+               sample = fintek->buf[i];
+               switch (fintek->parser_state) {
+               case CMD_HEADER:
+                       fintek->cmd = sample;
+                       if ((fintek->cmd == BUF_COMMAND_HEADER) ||
+                           ((fintek->cmd & BUF_COMMAND_MASK) !=
+                            BUF_PULSE_BIT)) {
+                               fintek->parser_state = SUBCMD;
+                               continue;
+                       }
+                       fintek->rem = (fintek->cmd & BUF_LEN_MASK);
+                       fit_dbg("%s: rem: 0x%02x", __func__, fintek->rem);
+                       if (fintek->rem)
+                               fintek->parser_state = PARSE_IRDATA;
+                       else
+                               ir_raw_event_reset(fintek->rdev);
+                       break;
+               case SUBCMD:
+                       fintek->rem = fintek_cmdsize(fintek->cmd, sample);
+                       fintek->parser_state = CMD_DATA;
+                       break;
+               case CMD_DATA:
+                       fintek->rem--;
+                       break;
+               case PARSE_IRDATA:
+                       fintek->rem--;
+                       init_ir_raw_event(&rawir);
+                       rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
+                       rawir.duration = US_TO_NS((sample & BUF_SAMPLE_MASK)
+                                         * CIR_SAMPLE_PERIOD);
+
+                       fit_dbg("Storing %s with duration %d",
+                               rawir.pulse ? "pulse" : "space",
+                               rawir.duration);
+                       ir_raw_event_store_with_filter(fintek->rdev, &rawir);
+                       break;
+               }
+
+               if ((fintek->parser_state != CMD_HEADER) && !fintek->rem)
+                       fintek->parser_state = CMD_HEADER;
+       }
+
+       fintek->pkts = 0;
+
+       fit_dbg("Calling ir_raw_event_handle");
+       ir_raw_event_handle(fintek->rdev);
+}
+
+/* copy data from hardware rx register into driver buffer */
+static void fintek_get_rx_ir_data(struct fintek_dev *fintek, u8 rx_irqs)
+{
+       unsigned long flags;
+       u8 sample, status;
+
+       spin_lock_irqsave(&fintek->fintek_lock, flags);
+
+       /*
+        * We must read data from CIR_RX_DATA until the hardware IR buffer
+        * is empty and clears the RX_TIMEOUT and/or RX_RECEIVE flags in
+        * the CIR_STATUS register
+        */
+       do {
+               sample = fintek_cir_reg_read(fintek, CIR_RX_DATA);
+               fit_dbg("%s: sample: 0x%02x", __func__, sample);
+
+               fintek->buf[fintek->pkts] = sample;
+               fintek->pkts++;
+
+               status = fintek_cir_reg_read(fintek, CIR_STATUS);
+               if (!(status & CIR_STATUS_IRQ_EN))
+                       break;
+       } while (status & rx_irqs);
+
+       fintek_process_rx_ir_data(fintek);
+
+       spin_unlock_irqrestore(&fintek->fintek_lock, flags);
+}
+
+static void fintek_cir_log_irqs(u8 status)
+{
+       fit_pr(KERN_INFO, "IRQ 0x%02x:%s%s%s%s%s", status,
+               status & CIR_STATUS_IRQ_EN      ? " IRQEN"      : "",
+               status & CIR_STATUS_TX_FINISH   ? " TXF"        : "",
+               status & CIR_STATUS_TX_UNDERRUN ? " TXU"        : "",
+               status & CIR_STATUS_RX_TIMEOUT  ? " RXTO"       : "",
+               status & CIR_STATUS_RX_RECEIVE  ? " RXOK"       : "");
+}
+
+/* interrupt service routine for incoming and outgoing CIR data */
+static irqreturn_t fintek_cir_isr(int irq, void *data)
+{
+       struct fintek_dev *fintek = data;
+       u8 status, rx_irqs;
+
+       fit_dbg_verbose("%s firing", __func__);
+
+       fintek_config_mode_enable(fintek);
+       fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+       fintek_config_mode_disable(fintek);
+
+       /*
+        * Get IR Status register contents. Write 1 to ack/clear
+        *
+        * bit: reg name    - description
+        *   3: TX_FINISH   - TX is finished
+        *   2: TX_UNDERRUN - TX underrun
+        *   1: RX_TIMEOUT  - RX data timeout
+        *   0: RX_RECEIVE  - RX data received
+        */
+       status = fintek_cir_reg_read(fintek, CIR_STATUS);
+       if (!(status & CIR_STATUS_IRQ_MASK) || status == 0xff) {
+               fit_dbg_verbose("%s exiting, IRSTS 0x%02x", __func__, status);
+               fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
+               return IRQ_RETVAL(IRQ_NONE);
+       }
+
+       if (debug)
+               fintek_cir_log_irqs(status);
+
+       rx_irqs = status & (CIR_STATUS_RX_RECEIVE | CIR_STATUS_RX_TIMEOUT);
+       if (rx_irqs)
+               fintek_get_rx_ir_data(fintek, rx_irqs);
+
+       /* ack/clear all irq flags we've got */
+       fintek_cir_reg_write(fintek, status, CIR_STATUS);
+
+       fit_dbg_verbose("%s done", __func__);
+       return IRQ_RETVAL(IRQ_HANDLED);
+}
+
+static void fintek_enable_cir(struct fintek_dev *fintek)
+{
+       /* set IRQ enabled */
+       fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_EN, CIR_STATUS);
+
+       fintek_config_mode_enable(fintek);
+
+       /* enable the CIR logical device */
+       fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+       fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN);
+
+       fintek_config_mode_disable(fintek);
+
+       /* clear all pending interrupts */
+       fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
+
+       /* enable interrupts */
+       fintek_enable_cir_irq(fintek);
+}
+
+static void fintek_disable_cir(struct fintek_dev *fintek)
+{
+       fintek_config_mode_enable(fintek);
+
+       /* disable the CIR logical device */
+       fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+       fintek_cr_write(fintek, LOGICAL_DEV_DISABLE, CIR_CR_DEV_EN);
+
+       fintek_config_mode_disable(fintek);
+}
+
+static int fintek_open(struct rc_dev *dev)
+{
+       struct fintek_dev *fintek = dev->priv;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fintek->fintek_lock, flags);
+       fintek_enable_cir(fintek);
+       spin_unlock_irqrestore(&fintek->fintek_lock, flags);
+
+       return 0;
+}
+
+static void fintek_close(struct rc_dev *dev)
+{
+       struct fintek_dev *fintek = dev->priv;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fintek->fintek_lock, flags);
+       fintek_disable_cir(fintek);
+       spin_unlock_irqrestore(&fintek->fintek_lock, flags);
+}
+
+/* Allocate memory, probe hardware, and initialize everything */
+static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
+{
+       struct fintek_dev *fintek;
+       struct rc_dev *rdev;
+       int ret = -ENOMEM;
+
+       fintek = kzalloc(sizeof(struct fintek_dev), GFP_KERNEL);
+       if (!fintek)
+               return ret;
+
+       /* input device for IR remote (and tx) */
+       rdev = rc_allocate_device();
+       if (!rdev)
+               goto failure;
+
+       ret = -ENODEV;
+       /* validate pnp resources */
+       if (!pnp_port_valid(pdev, 0)) {
+               dev_err(&pdev->dev, "IR PNP Port not valid!\n");
+               goto failure;
+       }
+
+       if (!pnp_irq_valid(pdev, 0)) {
+               dev_err(&pdev->dev, "IR PNP IRQ not valid!\n");
+               goto failure;
+       }
+
+       fintek->cir_addr = pnp_port_start(pdev, 0);
+       fintek->cir_irq  = pnp_irq(pdev, 0);
+       fintek->cir_port_len = pnp_port_len(pdev, 0);
+
+       fintek->cr_ip = CR_INDEX_PORT;
+       fintek->cr_dp = CR_DATA_PORT;
+
+       spin_lock_init(&fintek->fintek_lock);
+
+       ret = -EBUSY;
+       /* now claim resources */
+       if (!request_region(fintek->cir_addr,
+                           fintek->cir_port_len, FINTEK_DRIVER_NAME))
+               goto failure;
+
+       if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED,
+                       FINTEK_DRIVER_NAME, (void *)fintek))
+               goto failure;
+
+       pnp_set_drvdata(pdev, fintek);
+       fintek->pdev = pdev;
+
+       ret = fintek_hw_detect(fintek);
+       if (ret)
+               goto failure;
+
+       /* Initialize CIR & CIR Wake Logical Devices */
+       fintek_config_mode_enable(fintek);
+       fintek_cir_ldev_init(fintek);
+       fintek_config_mode_disable(fintek);
+
+       /* Initialize CIR & CIR Wake Config Registers */
+       fintek_cir_regs_init(fintek);
+
+       /* Set up the rc device */
+       rdev->priv = fintek;
+       rdev->driver_type = RC_DRIVER_IR_RAW;
+       rdev->allowed_protos = RC_TYPE_ALL;
+       rdev->open = fintek_open;
+       rdev->close = fintek_close;
+       rdev->input_name = FINTEK_DESCRIPTION;
+       rdev->input_phys = "fintek/cir0";
+       rdev->input_id.bustype = BUS_HOST;
+       rdev->input_id.vendor = VENDOR_ID_FINTEK;
+       rdev->input_id.product = fintek->chip_major;
+       rdev->input_id.version = fintek->chip_minor;
+       rdev->dev.parent = &pdev->dev;
+       rdev->driver_name = FINTEK_DRIVER_NAME;
+       rdev->map_name = RC_MAP_RC6_MCE;
+       rdev->timeout = US_TO_NS(1000);
+       /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
+       rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
+
+       ret = rc_register_device(rdev);
+       if (ret)
+               goto failure;
+
+       device_init_wakeup(&pdev->dev, true);
+       fintek->rdev = rdev;
+       fit_pr(KERN_NOTICE, "driver has been successfully loaded\n");
+       if (debug)
+               cir_dump_regs(fintek);
+
+       return 0;
+
+failure:
+       if (fintek->cir_irq)
+               free_irq(fintek->cir_irq, fintek);
+       if (fintek->cir_addr)
+               release_region(fintek->cir_addr, fintek->cir_port_len);
+
+       rc_free_device(rdev);
+       kfree(fintek);
+
+       return ret;
+}
+
+static void __devexit fintek_remove(struct pnp_dev *pdev)
+{
+       struct fintek_dev *fintek = pnp_get_drvdata(pdev);
+       unsigned long flags;
+
+       spin_lock_irqsave(&fintek->fintek_lock, flags);
+       /* disable CIR */
+       fintek_disable_cir(fintek);
+       fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
+       /* enable CIR Wake (for IR power-on) */
+       fintek_enable_wake(fintek);
+       spin_unlock_irqrestore(&fintek->fintek_lock, flags);
+
+       /* free resources */
+       free_irq(fintek->cir_irq, fintek);
+       release_region(fintek->cir_addr, fintek->cir_port_len);
+
+       rc_unregister_device(fintek->rdev);
+
+       kfree(fintek);
+}
+
+static int fintek_suspend(struct pnp_dev *pdev, pm_message_t state)
+{
+       struct fintek_dev *fintek = pnp_get_drvdata(pdev);
+
+       fit_dbg("%s called", __func__);
+
+       /* disable all CIR interrupts */
+       fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
+
+       fintek_config_mode_enable(fintek);
+
+       /* disable cir logical dev */
+       fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+       fintek_cr_write(fintek, LOGICAL_DEV_DISABLE, CIR_CR_DEV_EN);
+
+       fintek_config_mode_disable(fintek);
+
+       /* make sure wake is enabled */
+       fintek_enable_wake(fintek);
+
+       return 0;
+}
+
+static int fintek_resume(struct pnp_dev *pdev)
+{
+       int ret = 0;
+       struct fintek_dev *fintek = pnp_get_drvdata(pdev);
+
+       fit_dbg("%s called", __func__);
+
+       /* open interrupt */
+       fintek_enable_cir_irq(fintek);
+
+       /* Enable CIR logical device */
+       fintek_config_mode_enable(fintek);
+       fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR);
+       fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN);
+
+       fintek_config_mode_disable(fintek);
+
+       fintek_cir_regs_init(fintek);
+
+       return ret;
+}
+
+static void fintek_shutdown(struct pnp_dev *pdev)
+{
+       struct fintek_dev *fintek = pnp_get_drvdata(pdev);
+       fintek_enable_wake(fintek);
+}
+
+static const struct pnp_device_id fintek_ids[] = {
+       { "FIT0002", 0 },   /* CIR */
+       { "", 0 },
+};
+
+static struct pnp_driver fintek_driver = {
+       .name           = FINTEK_DRIVER_NAME,
+       .id_table       = fintek_ids,
+       .flags          = PNP_DRIVER_RES_DO_NOT_CHANGE,
+       .probe          = fintek_probe,
+       .remove         = __devexit_p(fintek_remove),
+       .suspend        = fintek_suspend,
+       .resume         = fintek_resume,
+       .shutdown       = fintek_shutdown,
+};
+
+int fintek_init(void)
+{
+       return pnp_register_driver(&fintek_driver);
+}
+
+void fintek_exit(void)
+{
+       pnp_unregister_driver(&fintek_driver);
+}
+
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Enable debugging output");
+
+MODULE_DEVICE_TABLE(pnp, fintek_ids);
+MODULE_DESCRIPTION(FINTEK_DESCRIPTION " driver");
+
+MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
+MODULE_LICENSE("GPL");
+
+module_init(fintek_init);
+module_exit(fintek_exit);
diff --git a/drivers/media/rc/fintek-cir.h b/drivers/media/rc/fintek-cir.h
new file mode 100644 (file)
index 0000000..1b10b20
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+ * Driver for Feature Integration Technology Inc. (aka Fintek) LPC CIR
+ *
+ * Copyright (C) 2011 Jarod Wilson <jarod@redhat.com>
+ *
+ * Special thanks to Fintek for providing hardware and spec sheets.
+ * This driver is based upon the nuvoton, ite and ene drivers for
+ * similar hardware.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#include <linux/spinlock.h>
+#include <linux/ioctl.h>
+
+/* platform driver name to register */
+#define FINTEK_DRIVER_NAME     "fintek-cir"
+#define FINTEK_DESCRIPTION     "Fintek LPC SuperIO Consumer IR Transceiver"
+#define VENDOR_ID_FINTEK       0x1934
+
+
+/* debugging module parameter */
+static int debug;
+
+#define fit_pr(level, text, ...) \
+       printk(level KBUILD_MODNAME ": " text, ## __VA_ARGS__)
+
+#define fit_dbg(text, ...) \
+       if (debug) \
+               printk(KERN_DEBUG \
+                       KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__)
+
+#define fit_dbg_verbose(text, ...) \
+       if (debug > 1) \
+               printk(KERN_DEBUG \
+                       KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__)
+
+#define fit_dbg_wake(text, ...) \
+       if (debug > 2) \
+               printk(KERN_DEBUG \
+                       KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__)
+
+
+#define TX_BUF_LEN 256
+#define RX_BUF_LEN 32
+
+struct fintek_dev {
+       struct pnp_dev *pdev;
+       struct rc_dev *rdev;
+
+       spinlock_t fintek_lock;
+
+       /* for rx */
+       u8 buf[RX_BUF_LEN];
+       unsigned int pkts;
+
+       struct {
+               spinlock_t lock;
+               u8 buf[TX_BUF_LEN];
+               unsigned int buf_count;
+               unsigned int cur_buf_num;
+               wait_queue_head_t queue;
+       } tx;
+
+       /* Config register index/data port pair */
+       u8 cr_ip;
+       u8 cr_dp;
+
+       /* hardware I/O settings */
+       unsigned long cir_addr;
+       int cir_irq;
+       int cir_port_len;
+
+       /* hardware id */
+       u8 chip_major;
+       u8 chip_minor;
+       u16 chip_vendor;
+
+       /* hardware features */
+       bool hw_learning_capable;
+       bool hw_tx_capable;
+
+       /* rx settings */
+       bool learning_enabled;
+       bool carrier_detect_enabled;
+
+       enum {
+               CMD_HEADER = 0,
+               SUBCMD,
+               CMD_DATA,
+               PARSE_IRDATA,
+       } parser_state;
+
+       u8 cmd, rem;
+
+       /* carrier period = 1 / frequency */
+       u32 carrier;
+};
+
+/* buffer packet constants, largely identical to mceusb.c */
+#define BUF_PULSE_BIT          0x80
+#define BUF_LEN_MASK           0x1f
+#define BUF_SAMPLE_MASK                0x7f
+
+#define BUF_COMMAND_HEADER     0x9f
+#define BUF_COMMAND_MASK       0xe0
+#define BUF_COMMAND_NULL       0x00
+#define BUF_HW_CMD_HEADER      0xff
+#define BUF_CMD_G_REVISION     0x0b
+#define BUF_CMD_S_CARRIER      0x06
+#define BUF_CMD_S_TIMEOUT      0x0c
+#define BUF_CMD_SIG_END                0x01
+#define BUF_CMD_S_TXMASK       0x08
+#define BUF_CMD_S_RXSENSOR     0x14
+#define BUF_RSP_PULSE_COUNT    0x15
+
+#define CIR_SAMPLE_PERIOD      50
+
+/*
+ * Configuration Register:
+ *  Index Port
+ *  Data Port
+ */
+#define CR_INDEX_PORT          0x2e
+#define CR_DATA_PORT           0x2f
+
+/* Possible alternate values, depends on how the chip is wired */
+#define CR_INDEX_PORT2         0x4e
+#define CR_DATA_PORT2          0x4f
+
+/*
+ * GCR_CONFIG_PORT_SEL bit 4 specifies which Index Port value is
+ * active. 1 = 0x4e, 0 = 0x2e
+ */
+#define PORT_SEL_PORT_4E_EN    0x10
+
+/* Extended Function Mode enable/disable magic values */
+#define CONFIG_REG_ENABLE      0x87
+#define CONFIG_REG_DISABLE     0xaa
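/*
 * Editor's sketch (not part of this patch): Super I/O parts like this one
 * usually enter Extended Function Mode when the enable key is written twice
 * to the index port, and leave it on a single write of the disable value.
 * The fintek_config_mode_enable()/_disable() helpers called from the probe
 * code are assumed to follow that pattern; this fragment only illustrates it.
 */
static inline void example_config_mode_enter(struct fintek_dev *fintek)
{
	outb(CONFIG_REG_ENABLE, fintek->cr_ip);	/* key must be written twice */
	outb(CONFIG_REG_ENABLE, fintek->cr_ip);
}

static inline void example_config_mode_exit(struct fintek_dev *fintek)
{
	outb(CONFIG_REG_DISABLE, fintek->cr_ip);	/* a single write exits */
}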
+
+/* Chip IDs found in CR_CHIP_ID_{HI,LO} */
+#define CHIP_ID_HIGH_F71809U   0x04
+#define CHIP_ID_LOW_F71809U    0x08
+
+/*
+ * Global control regs we need to care about:
+ *      Global Control                  def.
+ *      Register name           addr    val. */
+#define GCR_SOFTWARE_RESET     0x02 /* 0x00 */
+#define GCR_LOGICAL_DEV_NO     0x07 /* 0x00 */
+#define GCR_CHIP_ID_HI         0x20 /* 0x04 */
+#define GCR_CHIP_ID_LO         0x21 /* 0x08 */
+#define GCR_VENDOR_ID_HI       0x23 /* 0x19 */
+#define GCR_VENDOR_ID_LO       0x24 /* 0x34 */
+#define GCR_CONFIG_PORT_SEL    0x25 /* 0x01 */
+#define GCR_KBMOUSE_WAKEUP     0x27
+
+#define LOGICAL_DEV_DISABLE    0x00
+#define LOGICAL_DEV_ENABLE     0x01
+
+/* Logical device number of the CIR function */
+#define LOGICAL_DEV_CIR                0x05
+
+/* CIR Logical Device (LDN 0x08) config registers */
+#define CIR_CR_COMMAND_INDEX   0x04
+#define CIR_CR_IRCS            0x05 /* The host must set this to 1 before
+                                       writing a command to IR, and clear it
+                                       to 0 once the write is finished. */
+#define CIR_CR_COMMAND_DATA    0x06 /* Host reads or writes command data */
+#define CIR_CR_CLASS           0x07 /* 0xff = rx-only, 0x66 = rx + 2 tx,
+                                       0x33 = rx + 1 tx */
+#define CIR_CR_DEV_EN          0x30 /* bit0 = 1 enables CIR */
+#define CIR_CR_BASE_ADDR_HI    0x60 /* MSB of CIR IO base addr */
+#define CIR_CR_BASE_ADDR_LO    0x61 /* LSB of CIR IO base addr */
+#define CIR_CR_IRQ_SEL         0x70 /* bits3-0 store CIR IRQ */
+#define CIR_CR_PSOUT_STATUS    0xf1
+#define CIR_CR_WAKE_KEY3_ADDR  0xf8
+#define CIR_CR_WAKE_KEY3_CODE  0xf9
+#define CIR_CR_WAKE_KEY3_DC    0xfa
+#define CIR_CR_WAKE_CONTROL    0xfb
+#define CIR_CR_WAKE_KEY12_ADDR 0xfc
+#define CIR_CR_WAKE_KEY4_ADDR  0xfd
+#define CIR_CR_WAKE_KEY5_ADDR  0xfe
+
+#define CLASS_RX_ONLY          0xff
+#define CLASS_RX_2TX           0x66
+#define CLASS_RX_1TX           0x33
+
+/* CIR device registers */
+#define CIR_STATUS             0x00
+#define CIR_RX_DATA            0x01
+#define CIR_TX_CONTROL         0x02
+#define CIR_TX_DATA            0x03
+#define CIR_CONTROL            0x04
+
+/* Bits to enable CIR wake */
+#define LOGICAL_DEV_ACPI       0x01
+#define LDEV_ACPI_WAKE_EN_REG  0xe8
+#define ACPI_WAKE_EN_CIR_BIT   0x04
+
+#define LDEV_ACPI_PME_EN_REG   0xf0
+#define LDEV_ACPI_PME_CLR_REG  0xf1
+#define ACPI_PME_CIR_BIT       0x02
+
+#define LDEV_ACPI_STATE_REG    0xf4
+#define ACPI_STATE_CIR_BIT     0x20
+
+/*
+ * CIR status register (0x00):
+ *   7 - CIR_IRQ_EN (1 = enable CIR IRQ, 0 = disable)
+ *   3 - TX_FINISH (1 when TX finished, write 1 to clear)
+ *   2 - TX_UNDERRUN (1 on TX underrun, write 1 to clear)
+ *   1 - RX_TIMEOUT (1 on RX timeout, write 1 to clear)
+ *   0 - RX_RECEIVE (1 on RX receive, write 1 to clear)
+ */
+#define CIR_STATUS_IRQ_EN      0x80
+#define CIR_STATUS_TX_FINISH   0x08
+#define CIR_STATUS_TX_UNDERRUN 0x04
+#define CIR_STATUS_RX_TIMEOUT  0x02
+#define CIR_STATUS_RX_RECEIVE  0x01
+#define CIR_STATUS_IRQ_MASK    0x0f
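/*
 * Editor's sketch (not part of this patch): the status bits above are
 * write-1-to-clear, so a handler typically writes back only the events it
 * observed while keeping CIR_STATUS_IRQ_EN asserted. This assumes a
 * fintek_cir_reg_read() helper mirroring the fintek_cir_reg_write(dev, val,
 * offset) call used in the probe/remove paths of this patch.
 */
static inline void example_ack_cir_events(struct fintek_dev *fintek)
{
	u8 status = fintek_cir_reg_read(fintek, CIR_STATUS);

	if (status & CIR_STATUS_IRQ_MASK)	/* ack pending events only */
		fintek_cir_reg_write(fintek,
				     (status & CIR_STATUS_IRQ_MASK) | CIR_STATUS_IRQ_EN,
				     CIR_STATUS);
}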
+
+/*
+ * CIR TX control register (0x02):
+ *   7 - TX_START (1 to indicate TX start, auto-cleared when done)
+ *   6 - TX_END (1 to indicate TX data written to TX fifo)
+ */
+#define CIR_TX_CONTROL_TX_START        0x80
+#define CIR_TX_CONTROL_TX_END  0x40
+
index afae14fd152ea26289c67135f017e09858560326..129d3f9a461de94b415b01a6bd1d77cd67da5758 100644 (file)
 
 static struct rc_map_table lme2510_rc[] = {
        /* Type 1 - 26 buttons */
-       { 0xef12ba45, KEY_0 },
-       { 0xef12a05f, KEY_1 },
-       { 0xef12af50, KEY_2 },
-       { 0xef12a25d, KEY_3 },
-       { 0xef12be41, KEY_4 },
-       { 0xef12f50a, KEY_5 },
-       { 0xef12bd42, KEY_6 },
-       { 0xef12b847, KEY_7 },
-       { 0xef12b649, KEY_8 },
-       { 0xef12fa05, KEY_9 },
-       { 0xef12bc43, KEY_POWER },
-       { 0xef12b946, KEY_SUBTITLE },
-       { 0xef12f906, KEY_PAUSE },
-       { 0xef12fc03, KEY_MEDIA_REPEAT},
-       { 0xef12fd02, KEY_PAUSE },
-       { 0xef12a15e, KEY_VOLUMEUP },
-       { 0xef12a35c, KEY_VOLUMEDOWN },
-       { 0xef12f609, KEY_CHANNELUP },
-       { 0xef12e51a, KEY_CHANNELDOWN },
-       { 0xef12e11e, KEY_PLAY },
-       { 0xef12e41b, KEY_ZOOM },
-       { 0xef12a659, KEY_MUTE },
-       { 0xef12a55a, KEY_TV },
-       { 0xef12e718, KEY_RECORD },
-       { 0xef12f807, KEY_EPG },
-       { 0xef12fe01, KEY_STOP },
+       { 0x10ed45, KEY_0 },
+       { 0x10ed5f, KEY_1 },
+       { 0x10ed50, KEY_2 },
+       { 0x10ed5d, KEY_3 },
+       { 0x10ed41, KEY_4 },
+       { 0x10ed0a, KEY_5 },
+       { 0x10ed42, KEY_6 },
+       { 0x10ed47, KEY_7 },
+       { 0x10ed49, KEY_8 },
+       { 0x10ed05, KEY_9 },
+       { 0x10ed43, KEY_POWER },
+       { 0x10ed46, KEY_SUBTITLE },
+       { 0x10ed06, KEY_PAUSE },
+       { 0x10ed03, KEY_MEDIA_REPEAT},
+       { 0x10ed02, KEY_PAUSE },
+       { 0x10ed5e, KEY_VOLUMEUP },
+       { 0x10ed5c, KEY_VOLUMEDOWN },
+       { 0x10ed09, KEY_CHANNELUP },
+       { 0x10ed1a, KEY_CHANNELDOWN },
+       { 0x10ed1e, KEY_PLAY },
+       { 0x10ed1b, KEY_ZOOM },
+       { 0x10ed59, KEY_MUTE },
+       { 0x10ed5a, KEY_TV },
+       { 0x10ed18, KEY_RECORD },
+       { 0x10ed07, KEY_EPG },
+       { 0x10ed01, KEY_STOP },
        /* Type 2 - 20 buttons */
-       { 0xff40ea15, KEY_0 },
-       { 0xff40f708, KEY_1 },
-       { 0xff40f609, KEY_2 },
-       { 0xff40f50a, KEY_3 },
-       { 0xff40f30c, KEY_4 },
-       { 0xff40f20d, KEY_5 },
-       { 0xff40f10e, KEY_6 },
-       { 0xff40ef10, KEY_7 },
-       { 0xff40ee11, KEY_8 },
-       { 0xff40ed12, KEY_9 },
-       { 0xff40ff00, KEY_POWER },
-       { 0xff40fb04, KEY_MEDIA_REPEAT}, /* Recall */
-       { 0xff40e51a, KEY_PAUSE }, /* Timeshift */
-       { 0xff40fd02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
-       { 0xff40f906, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
-       { 0xff40fe01, KEY_CHANNELUP },
-       { 0xff40fa05, KEY_CHANNELDOWN },
-       { 0xff40eb14, KEY_ZOOM },
-       { 0xff40e718, KEY_RECORD },
-       { 0xff40e916, KEY_STOP },
+       { 0xbf15, KEY_0 },
+       { 0xbf08, KEY_1 },
+       { 0xbf09, KEY_2 },
+       { 0xbf0a, KEY_3 },
+       { 0xbf0c, KEY_4 },
+       { 0xbf0d, KEY_5 },
+       { 0xbf0e, KEY_6 },
+       { 0xbf10, KEY_7 },
+       { 0xbf11, KEY_8 },
+       { 0xbf12, KEY_9 },
+       { 0xbf00, KEY_POWER },
+       { 0xbf04, KEY_MEDIA_REPEAT}, /* Recall */
+       { 0xbf1a, KEY_PAUSE }, /* Timeshift */
+       { 0xbf02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
+       { 0xbf06, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
+       { 0xbf01, KEY_CHANNELUP },
+       { 0xbf05, KEY_CHANNELDOWN },
+       { 0xbf14, KEY_ZOOM },
+       { 0xbf18, KEY_RECORD },
+       { 0xbf16, KEY_STOP },
        /* Type 3 - 20 buttons */
-       { 0xff00e31c, KEY_0 },
-       { 0xff00f807, KEY_1 },
-       { 0xff00ea15, KEY_2 },
-       { 0xff00f609, KEY_3 },
-       { 0xff00e916, KEY_4 },
-       { 0xff00e619, KEY_5 },
-       { 0xff00f20d, KEY_6 },
-       { 0xff00f30c, KEY_7 },
-       { 0xff00e718, KEY_8 },
-       { 0xff00a15e, KEY_9 },
-       { 0xff00ba45, KEY_POWER },
-       { 0xff00bb44, KEY_MEDIA_REPEAT}, /* Recall */
-       { 0xff00b54a, KEY_PAUSE }, /* Timeshift */
-       { 0xff00b847, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
-       { 0xff00bc43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
-       { 0xff00b946, KEY_CHANNELUP },
-       { 0xff00bf40, KEY_CHANNELDOWN },
-       { 0xff00f708, KEY_ZOOM },
-       { 0xff00bd42, KEY_RECORD },
-       { 0xff00a55a, KEY_STOP },
+       { 0x1c, KEY_0 },
+       { 0x07, KEY_1 },
+       { 0x15, KEY_2 },
+       { 0x09, KEY_3 },
+       { 0x16, KEY_4 },
+       { 0x19, KEY_5 },
+       { 0x0d, KEY_6 },
+       { 0x0c, KEY_7 },
+       { 0x18, KEY_8 },
+       { 0x5e, KEY_9 },
+       { 0x45, KEY_POWER },
+       { 0x44, KEY_MEDIA_REPEAT}, /* Recall */
+       { 0x4a, KEY_PAUSE }, /* Timeshift */
+       { 0x47, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */
+       { 0x43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/
+       { 0x46, KEY_CHANNELUP },
+       { 0x40, KEY_CHANNELDOWN },
+       { 0x08, KEY_ZOOM },
+       { 0x42, KEY_RECORD },
+       { 0x5a, KEY_STOP },
 };
 
 static struct rc_map_list lme2510_map = {
        .map = {
                .scan    = lme2510_rc,
                .size    = ARRAY_SIZE(lme2510_rc),
-               .rc_type = RC_TYPE_UNKNOWN,
+               .rc_type = RC_TYPE_NEC,
                .name    = RC_MAP_LME2510,
        }
 };
index 3be180b3ba274294561e83b9e48ebb60c0e7fe2c..bb53de7fe4087b227f98b63cdc15b035d28f008c 100644 (file)
@@ -687,7 +687,7 @@ config VIDEO_HEXIUM_GEMINI
 
 config VIDEO_TIMBERDALE
        tristate "Support for timberdale Video In/LogiWIN"
-       depends on VIDEO_V4L2 && I2C
+       depends on VIDEO_V4L2 && I2C && DMADEVICES
        select DMA_ENGINE
        select TIMB_DMA
        select VIDEO_ADV7180
@@ -757,6 +757,8 @@ config VIDEO_NOON010PC30
        ---help---
          This driver supports NOON010PC30 CIF camera from Siliconfile
 
+source "drivers/media/video/m5mols/Kconfig"
+
 config VIDEO_OMAP3
        tristate "OMAP 3 Camera support (EXPERIMENTAL)"
        select OMAP_IOMMU
@@ -952,7 +954,7 @@ config  VIDEO_SAMSUNG_S5P_FIMC
 
 config VIDEO_S5P_MIPI_CSIS
        tristate "Samsung S5P and EXYNOS4 MIPI CSI receiver driver"
-       depends on VIDEO_V4L2 && PM_RUNTIME && VIDEO_V4L2_SUBDEV_API
+       depends on VIDEO_V4L2 && PM_RUNTIME && PLAT_S5P && VIDEO_V4L2_SUBDEV_API
        ---help---
          This is a v4l2 driver for Samsung S5P/EXYNOS4 MIPI-CSI receiver.
 
index 9519160c2e01eb1da9b49ca4eaf2cd9ce4318b3e..f0fecd6f6a33e818c4f4713d2b62e1ecd762e4a9 100644 (file)
@@ -69,6 +69,7 @@ obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
 obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o
 obj-$(CONFIG_VIDEO_SR030PC30)  += sr030pc30.o
 obj-$(CONFIG_VIDEO_NOON010PC30)        += noon010pc30.o
+obj-$(CONFIG_VIDEO_M5MOLS)     += m5mols/
 
 obj-$(CONFIG_SOC_CAMERA_IMX074)                += imx074.o
 obj-$(CONFIG_SOC_CAMERA_MT9M001)       += mt9m001.o
index 0073a8c553361aadab0d3f214b6e9520f1838f59..40eb6326e48a8ab95ca7c476999772bd3e76ff3b 100644 (file)
@@ -438,7 +438,7 @@ static int cpia2_querycap(struct file *file, void *fh, struct v4l2_capability *v
                strcat(vc->card, " (676/");
                break;
        default:
-               strcat(vc->card, " (???/");
+               strcat(vc->card, " (XXX/");
                break;
        }
        switch (cam->params.version.sensor_flags) {
@@ -458,7 +458,7 @@ static int cpia2_querycap(struct file *file, void *fh, struct v4l2_capability *v
                strcat(vc->card, "500)");
                break;
        default:
-               strcat(vc->card, "???)");
+               strcat(vc->card, "XXX)");
                break;
        }
 
index 280df43ca446458f639879bf5e3aa18f2229579d..8d7813415760e5f1cf753368f86b0f124e642b55 100644 (file)
@@ -1354,7 +1354,7 @@ void cx231xx_dump_SC_reg(struct cx231xx *dev)
 {
        u8 value[4] = { 0, 0, 0, 0 };
        int status = 0;
-       cx231xx_info("cx231xx_dump_SC_reg %s!\n", __TIME__);
+       cx231xx_info("cx231xx_dump_SC_reg!\n");
 
        status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT,
                                 value, 4);
index 66671a4092e4e68344e0314cec0e9a984c7e49a7..26fc206f095e13f9e202c13e7754236af0daa551 100644 (file)
@@ -34,7 +34,7 @@ MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
 MODULE_DESCRIPTION("GSPCA/Kinect Sensor Device USB Camera Driver");
 MODULE_LICENSE("GPL");
 
-#ifdef DEBUG
+#ifdef GSPCA_DEBUG
 int gspca_debug = D_ERR | D_PROBE | D_CONF | D_STREAM | D_FRAM | D_PACK |
        D_USBI | D_USBO | D_V4L2;
 #endif
diff --git a/drivers/media/video/m5mols/Kconfig b/drivers/media/video/m5mols/Kconfig
new file mode 100644 (file)
index 0000000..302dc3d
--- /dev/null
@@ -0,0 +1,5 @@
+config VIDEO_M5MOLS
+       tristate "Fujitsu M-5MOLS 8MP sensor support"
+       depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+       ---help---
+         This driver supports the Fujitsu M-5MOLS camera sensor with ISP.
diff --git a/drivers/media/video/m5mols/Makefile b/drivers/media/video/m5mols/Makefile
new file mode 100644 (file)
index 0000000..0a44e02
--- /dev/null
@@ -0,0 +1,3 @@
+m5mols-objs    := m5mols_core.o m5mols_controls.o m5mols_capture.o
+
+obj-$(CONFIG_VIDEO_M5MOLS)             += m5mols.o
diff --git a/drivers/media/video/m5mols/m5mols.h b/drivers/media/video/m5mols/m5mols.h
new file mode 100644 (file)
index 0000000..10b55c8
--- /dev/null
@@ -0,0 +1,296 @@
+/*
+ * Header for M-5MOLS 8M Pixel camera sensor with ISP
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Author: HeungJun Kim, riverful.kim@samsung.com
+ *
+ * Copyright (C) 2009 Samsung Electronics Co., Ltd.
+ * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef M5MOLS_H
+#define M5MOLS_H
+
+#include <media/v4l2-subdev.h>
+#include "m5mols_reg.h"
+
+extern int m5mols_debug;
+
+#define to_m5mols(__sd)        container_of(__sd, struct m5mols_info, sd)
+
+#define to_sd(__ctrl) \
+       (&container_of(__ctrl->handler, struct m5mols_info, handle)->sd)
+
+enum m5mols_restype {
+       M5MOLS_RESTYPE_MONITOR,
+       M5MOLS_RESTYPE_CAPTURE,
+       M5MOLS_RESTYPE_MAX,
+};
+
+/**
+ * struct m5mols_resolution - structure for the resolution
+ * @type: resolution type according to the pixel code
+ * @width: width of the resolution
+ * @height: height of the resolution
+ * @reg: resolution preset register value
+ */
+struct m5mols_resolution {
+       u8 reg;
+       enum m5mols_restype type;
+       u16 width;
+       u16 height;
+};
+
+/**
+ * struct m5mols_exif - structure for the EXIF information of M-5MOLS
+ * @exposure_time: exposure time register value
+ * @shutter_speed: speed of the shutter register value
+ * @aperture: aperture register value
+ * @brightness: brightness register value
+ * @exposure_bias: exposure bias register value, also called EV bias
+ * @iso_speed: ISO register value
+ * @flash: status register value of the flash
+ * @sdr: status register value of the Subject Distance Range
+ * @qval: register value whose exact meaning is not given in the document
+ */
+struct m5mols_exif {
+       u32 exposure_time;
+       u32 shutter_speed;
+       u32 aperture;
+       u32 brightness;
+       u32 exposure_bias;
+       u16 iso_speed;
+       u16 flash;
+       u16 sdr;
+       u16 qval;
+};
+
+/**
+ * struct m5mols_capture - Structure for the capture capability
+ * @exif: EXIF information
+ * @main: size in bytes of the main image
+ * @thumb: size in bytes of the thumbnail image, if one accompanies the main image
+ * @total: total size in bytes of the produced image
+ */
+struct m5mols_capture {
+       struct m5mols_exif exif;
+       u32 main;
+       u32 thumb;
+       u32 total;
+};
+
+/**
+ * struct m5mols_scenemode - structure for the scenemode capability
+ * @metering: metering light register value
+ * @ev_bias: EV bias register value
+ * @wb_mode: whitebalance mode, either Auto or Manual
+ * @wb_preset: whitebalance preset register value in the Manual mode
+ * @chroma_en: register value controlling whether the Chroma capability is enabled
+ * @chroma_lvl: chroma's level register value
+ * @edge_en: register value controlling whether the Edge capability is enabled
+ * @edge_lvl: edge's level register value
+ * @af_range: Auto Focus's range
+ * @fd_mode: Face Detection mode
+ * @mcc: Multi-axis Color Conversion which means emotion color
+ * @light: status of the Light
+ * @flash: status of the Flash
+ * @tone: Tone color which means Contrast
+ * @iso: ISO register value
+ * @capt_mode: Image Stabilization mode used while the camera is capturing
+ * @wdr: Wide Dynamic Range register value
+ *
+ * The values used for each scenemode follow the recommendations in the documents.
+ */
+struct m5mols_scenemode {
+       u32 metering;
+       u32 ev_bias;
+       u32 wb_mode;
+       u32 wb_preset;
+       u32 chroma_en;
+       u32 chroma_lvl;
+       u32 edge_en;
+       u32 edge_lvl;
+       u32 af_range;
+       u32 fd_mode;
+       u32 mcc;
+       u32 light;
+       u32 flash;
+       u32 tone;
+       u32 iso;
+       u32 capt_mode;
+       u32 wdr;
+};
+
+/**
+ * struct m5mols_version - firmware version information
+ * @customer:  customer information
+ * @project:   version of project information according to customer
+ * @fw:                firmware revision
+ * @hw:                hardware revision
+ * @param:     version of the parameter
+ * @awb:       Auto WhiteBalance algorithm version
+ * @str:       information about manufacturer and packaging vendor
+ * @af:                Auto Focus version
+ *
+ * The register offsets run from 0x0 (customer version) to 0x09 (awb
+ * version). The customer and project fields occupy 1 byte each, while fw,
+ * hw, param and awb take 2 bytes each. The str field is a unique string
+ * associated with the firmware version; it carries information about the
+ * manufacturer and the vendor of the sensor's packaging, and its least
+ * significant 2 bytes identify the packaging manufacturer.
+ */
+#define VERSION_STRING_SIZE    22
+struct m5mols_version {
+       u8      customer;
+       u8      project;
+       u16     fw;
+       u16     hw;
+       u16     param;
+       u16     awb;
+       u8      str[VERSION_STRING_SIZE];
+       u8      af;
+};
+#define VERSION_SIZE sizeof(struct m5mols_version)
+
+/**
+ * struct m5mols_info - M-5MOLS driver data structure
+ * @pdata: platform data
+ * @sd: v4l-subdev instance
+ * @pad: media pad
+ * @ffmt: current fmt according to resolution type
+ * @res_type: current resolution type
+ * @code: current code
+ * @irq_waitq: waitqueue for the capture
+ * @work_irq: workqueue for the IRQ
+ * @flags: state variable for the interrupt handler
+ * @handle: control handler
+ * @autoexposure: Auto Exposure control
+ * @exposure: Exposure control
+ * @autowb: Auto White Balance control
+ * @colorfx: Color effect control
+ * @saturation:        Saturation control
+ * @zoom: Zoom control
+ * @ver: information of the version
+ * @cap: the capture mode attributes
+ * @power: current sensor's power status
+ * @ctrl_sync: true means all controls of the sensor are initialized
+ * @int_capture: true means the capture interrupt is issued once
+ * @lock_ae: true means the Auto Exposure is locked
+ * @lock_awb: true means the Auto WhiteBalance is locked
+ * @resolution:        register value for current resolution
+ * @interrupt: register value for current interrupt status
+ * @mode: register value for current operation mode
+ * @mode_save: register value for current operation mode for saving
+ * @set_power: optional power callback to the board code
+ */
+struct m5mols_info {
+       const struct m5mols_platform_data *pdata;
+       struct v4l2_subdev sd;
+       struct media_pad pad;
+       struct v4l2_mbus_framefmt ffmt[M5MOLS_RESTYPE_MAX];
+       int res_type;
+       enum v4l2_mbus_pixelcode code;
+       wait_queue_head_t irq_waitq;
+       struct work_struct work_irq;
+       unsigned long flags;
+
+       struct v4l2_ctrl_handler handle;
+       /* Autoexposure/exposure control cluster */
+       struct {
+               struct v4l2_ctrl *autoexposure;
+               struct v4l2_ctrl *exposure;
+       };
+       struct v4l2_ctrl *autowb;
+       struct v4l2_ctrl *colorfx;
+       struct v4l2_ctrl *saturation;
+       struct v4l2_ctrl *zoom;
+
+       struct m5mols_version ver;
+       struct m5mols_capture cap;
+       bool power;
+       bool ctrl_sync;
+       bool lock_ae;
+       bool lock_awb;
+       u8 resolution;
+       u32 interrupt;
+       u32 mode;
+       u32 mode_save;
+       int (*set_power)(struct device *dev, int on);
+};
+
+#define ST_CAPT_IRQ 0
+
+#define is_powered(__info) (__info->power)
+#define is_ctrl_synced(__info) (__info->ctrl_sync)
+#define is_available_af(__info)        (__info->ver.af)
+#define is_code(__code, __type) (__code == m5mols_default_ffmt[__type].code)
+#define is_manufacturer(__info, __manufacturer)        \
+                               (__info->ver.str[0] == __manufacturer[0] && \
+                                __info->ver.str[1] == __manufacturer[1])
+/*
+ * I2C operation of the M-5MOLS
+ *
+ * The I2C read operation of the M-5MOLS requires 2 messages. The first
+ * message sends the information about the command, command category, and total
+ * message size. The second message is used to retrieve the data specified
+ * in the first message.
+ *
+ *   1st message                                2nd message
+ *   +-------+---+----------+-----+-------+     +------+------+------+------+
+ *   | size1 | R | category | cmd | size2 |     | d[0] | d[1] | d[2] | d[3] |
+ *   +-------+---+----------+-----+-------+     +------+------+------+------+
+ *   - size1: message data size (5 in this case)
+ *   - size2: desired buffer size of the 2nd message
+ *   - d[0..3]: according to size2
+ *
+ * The I2C write operation needs just one message. The message includes
+ * category, command, total size, and desired data.
+ *
+ *   1st message
+ *   +-------+---+----------+-----+------+------+------+------+
+ *   | size1 | W | category | cmd | d[0] | d[1] | d[2] | d[3] |
+ *   +-------+---+----------+-----+------+------+------+------+
+ *   - d[0..3]: according to size1
+ */
+int m5mols_read(struct v4l2_subdev *sd, u32 reg_comb, u32 *val);
+int m5mols_write(struct v4l2_subdev *sd, u32 reg_comb, u32 val);
+int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 value);
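/*
 * Editor's sketch (not part of this patch): the helpers above take a packed
 * reg value, assumed to be built with the I2C_REG(category, cmd, size) macro
 * from m5mols_reg.h (as used by m5mols_busy() in m5mols_core.c). Reading a
 * 2-byte register therefore generates the 5-byte request message described
 * above, followed by a (size + 1)-byte reply. The 0x02/0x03 category/command
 * pair below is an arbitrary placeholder.
 */
static inline int example_m5mols_io(struct v4l2_subdev *sd)
{
	u32 val;
	int ret = m5mols_read(sd, I2C_REG(0x02, 0x03, 2), &val);

	if (!ret)	/* write it back, illustrating the single-message write */
		ret = m5mols_write(sd, I2C_REG(0x02, 0x03, 2), val);
	return ret;
}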
+
+/*
+ * Mode operation of the M-5MOLS
+ *
+ * Changing the mode of the M-5MOLS must follow the correct execution order.
+ * There are three modes (PARAMETER, MONITOR, CAPTURE) which can be changed
+ * by the user, and various categories are associated with each mode.
+ *
+ * +============================================================+
+ * | mode      | category                                      |
+ * +============================================================+
+ * | FLASH     | FLASH(only after Stand-by or Power-on)        |
+ * | SYSTEM    | SYSTEM(only after sensor arm-booting)         |
+ * | PARAMETER | PARAMETER                                     |
+ * | MONITOR   | MONITOR(preview), Auto Focus, Face Detection  |
+ * | CAPTURE   | Single CAPTURE, Preview(recording)            |
+ * +============================================================+
+ *
+ * The allowed transition order between the modes is as follows:
+ *   PARAMETER <---> MONITOR <---> CAPTURE
+ */
+int m5mols_mode(struct m5mols_info *info, u32 mode);
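/*
 * Editor's sketch (not part of this patch): PARAMETER and CAPTURE are not
 * adjacent in the order above, so reaching CAPTURE from PARAMETER requires
 * an intermediate hop through MONITOR. m5mols_mode() performs that hop
 * internally (see its implementation in m5mols_core.c), so callers only
 * request the final mode:
 */
static inline int example_enter_capture(struct m5mols_info *info)
{
	/* PARAMETER -> MONITOR -> CAPTURE handled inside m5mols_mode() */
	return m5mols_mode(info, REG_CAPTURE);
}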
+
+int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg);
+int m5mols_sync_controls(struct m5mols_info *info);
+int m5mols_start_capture(struct m5mols_info *info);
+int m5mols_do_scenemode(struct m5mols_info *info, u32 mode);
+int m5mols_lock_3a(struct m5mols_info *info, bool lock);
+int m5mols_set_ctrl(struct v4l2_ctrl *ctrl);
+
+/* The firmware function */
+int m5mols_update_fw(struct v4l2_subdev *sd,
+                    int (*set_power)(struct m5mols_info *, bool));
+
+#endif /* M5MOLS_H */
diff --git a/drivers/media/video/m5mols/m5mols_capture.c b/drivers/media/video/m5mols/m5mols_capture.c
new file mode 100644 (file)
index 0000000..d71a390
--- /dev/null
@@ -0,0 +1,191 @@
+/*
+ * The Capture code for Fujitsu M-5MOLS ISP
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Author: HeungJun Kim, riverful.kim@samsung.com
+ *
+ * Copyright (C) 2009 Samsung Electronics Co., Ltd.
+ * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/videodev2.h>
+#include <linux/version.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/m5mols.h>
+
+#include "m5mols.h"
+#include "m5mols_reg.h"
+
+static int m5mols_capture_error_handler(struct m5mols_info *info,
+                                       int timeout)
+{
+       int ret;
+
+       /* Disable all interrupts and clear relevant interrupt status bits */
+       ret = m5mols_write(&info->sd, SYSTEM_INT_ENABLE,
+                          info->interrupt & ~(REG_INT_CAPTURE));
+       if (ret)
+               return ret;
+
+       if (timeout == 0)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+/**
+ * m5mols_read_rational - I2C read of a rational number
+ *
+ * Read numerator and denominator from registers @addr_num and @addr_den
+ * respectively and return the division result in @val.
+ */
+static int m5mols_read_rational(struct v4l2_subdev *sd, u32 addr_num,
+                               u32 addr_den, u32 *val)
+{
+       u32 num, den;
+
+       int ret = m5mols_read(sd, addr_num, &num);
+       if (!ret)
+               ret = m5mols_read(sd, addr_den, &den);
+       if (ret)
+               return ret;
+       *val = den == 0 ? 0 : num / den;
+       return ret;
+}
+
+/**
+ * m5mols_capture_info - Gather captured image information
+ *
+ * For now it gathers only EXIF information and file size.
+ */
+static int m5mols_capture_info(struct m5mols_info *info)
+{
+       struct m5mols_exif *exif = &info->cap.exif;
+       struct v4l2_subdev *sd = &info->sd;
+       int ret;
+
+       ret = m5mols_read_rational(sd, EXIF_INFO_EXPTIME_NU,
+                                  EXIF_INFO_EXPTIME_DE, &exif->exposure_time);
+       if (ret)
+               return ret;
+       ret = m5mols_read_rational(sd, EXIF_INFO_TV_NU, EXIF_INFO_TV_DE,
+                                  &exif->shutter_speed);
+       if (ret)
+               return ret;
+       ret = m5mols_read_rational(sd, EXIF_INFO_AV_NU, EXIF_INFO_AV_DE,
+                                  &exif->aperture);
+       if (ret)
+               return ret;
+       ret = m5mols_read_rational(sd, EXIF_INFO_BV_NU, EXIF_INFO_BV_DE,
+                                  &exif->brightness);
+       if (ret)
+               return ret;
+       ret = m5mols_read_rational(sd, EXIF_INFO_EBV_NU, EXIF_INFO_EBV_DE,
+                                  &exif->exposure_bias);
+       if (ret)
+               return ret;
+
+       ret = m5mols_read(sd, EXIF_INFO_ISO, (u32 *)&exif->iso_speed);
+       if (!ret)
+               ret = m5mols_read(sd, EXIF_INFO_FLASH, (u32 *)&exif->flash);
+       if (!ret)
+               ret = m5mols_read(sd, EXIF_INFO_SDR, (u32 *)&exif->sdr);
+       if (!ret)
+               ret = m5mols_read(sd, EXIF_INFO_QVAL, (u32 *)&exif->qval);
+       if (ret)
+               return ret;
+
+       if (!ret)
+               ret = m5mols_read(sd, CAPC_IMAGE_SIZE, &info->cap.main);
+       if (!ret)
+               ret = m5mols_read(sd, CAPC_THUMB_SIZE, &info->cap.thumb);
+       if (!ret)
+               info->cap.total = info->cap.main + info->cap.thumb;
+
+       return ret;
+}
+
+int m5mols_start_capture(struct m5mols_info *info)
+{
+       struct v4l2_subdev *sd = &info->sd;
+       u32 resolution = info->resolution;
+       int timeout;
+       int ret;
+
+       /*
+        * Preparing capture. Setting control & interrupt before entering
+        * capture mode
+        *
+        * 1) change to MONITOR mode for operating control & interrupt
+        * 2) set controls (considering v4l2_control value & lock 3A)
+        * 3) set interrupt
+        * 4) change to CAPTURE mode
+        */
+       ret = m5mols_mode(info, REG_MONITOR);
+       if (!ret)
+               ret = m5mols_sync_controls(info);
+       if (!ret)
+               ret = m5mols_lock_3a(info, true);
+       if (!ret)
+               ret = m5mols_enable_interrupt(sd, REG_INT_CAPTURE);
+       if (!ret)
+               ret = m5mols_mode(info, REG_CAPTURE);
+       if (!ret) {
+               /* Wait for capture interrupt, after changing capture mode */
+               timeout = wait_event_interruptible_timeout(info->irq_waitq,
+                                          test_bit(ST_CAPT_IRQ, &info->flags),
+                                          msecs_to_jiffies(2000));
+               if (test_and_clear_bit(ST_CAPT_IRQ, &info->flags))
+                       ret = m5mols_capture_error_handler(info, timeout);
+       }
+       if (!ret)
+               ret = m5mols_lock_3a(info, false);
+       if (ret)
+               return ret;
+       /*
+        * Starting capture. Setting capture frame count and resolution and
+        * the format (available formats: JPEG, Bayer RAW, YUV).
+        *
+        * 1) select single or multi(enable to 25), format, size
+        * 2) set interrupt
+        * 3) start capture(for main image, now)
+        * 4) get information
+        * 5) notify the file size to the v4l2 device (e.g. the s5p-fimc v4l2 device)
+        */
+       ret = m5mols_write(sd, CAPC_SEL_FRAME, 1);
+       if (!ret)
+               ret = m5mols_write(sd, CAPP_YUVOUT_MAIN, REG_JPEG);
+       if (!ret)
+               ret = m5mols_write(sd, CAPP_MAIN_IMAGE_SIZE, resolution);
+       if (!ret)
+               ret = m5mols_enable_interrupt(sd, REG_INT_CAPTURE);
+       if (!ret)
+               ret = m5mols_write(sd, CAPC_START, REG_CAP_START_MAIN);
+       if (!ret) {
+               /* Wait for the capture completion interrupt */
+               timeout = wait_event_interruptible_timeout(info->irq_waitq,
+                                          test_bit(ST_CAPT_IRQ, &info->flags),
+                                          msecs_to_jiffies(2000));
+               if (test_and_clear_bit(ST_CAPT_IRQ, &info->flags)) {
+                       ret = m5mols_capture_info(info);
+                       if (!ret)
+                               v4l2_subdev_notify(sd, 0, &info->cap.total);
+               }
+       }
+
+       return m5mols_capture_error_handler(info, timeout);
+}
diff --git a/drivers/media/video/m5mols/m5mols_controls.c b/drivers/media/video/m5mols/m5mols_controls.c
new file mode 100644 (file)
index 0000000..817c16f
--- /dev/null
@@ -0,0 +1,299 @@
+/*
+ * Controls for M-5MOLS 8M Pixel camera sensor with ISP
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Author: HeungJun Kim, riverful.kim@samsung.com
+ *
+ * Copyright (C) 2009 Samsung Electronics Co., Ltd.
+ * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+
+#include "m5mols.h"
+#include "m5mols_reg.h"
+
+static struct m5mols_scenemode m5mols_default_scenemode[] = {
+       [REG_SCENE_NORMAL] = {
+               REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+               REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+               REG_AF_NORMAL, REG_FD_OFF,
+               REG_MCC_NORMAL, REG_LIGHT_OFF, REG_FLASH_OFF,
+               5, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+       },
+       [REG_SCENE_PORTRAIT] = {
+               REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+               REG_CHROMA_ON, 3, REG_EDGE_ON, 4,
+               REG_AF_NORMAL, BIT_FD_EN | BIT_FD_DRAW_FACE_FRAME,
+               REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+               6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+       },
+       [REG_SCENE_LANDSCAPE] = {
+               REG_AE_ALL, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+               REG_CHROMA_ON, 4, REG_EDGE_ON, 6,
+               REG_AF_NORMAL, REG_FD_OFF,
+               REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+               6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+       },
+       [REG_SCENE_SPORTS] = {
+               REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+               REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+               REG_AF_NORMAL, REG_FD_OFF,
+               REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+               6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+       },
+       [REG_SCENE_PARTY_INDOOR] = {
+               REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+               REG_CHROMA_ON, 4, REG_EDGE_ON, 5,
+               REG_AF_NORMAL, REG_FD_OFF,
+               REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+               6, REG_ISO_200, REG_CAP_NONE, REG_WDR_OFF,
+       },
+       [REG_SCENE_BEACH_SNOW] = {
+               REG_AE_CENTER, REG_AE_INDEX_10_POS, REG_AWB_AUTO, 0,
+               REG_CHROMA_ON, 4, REG_EDGE_ON, 5,
+               REG_AF_NORMAL, REG_FD_OFF,
+               REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+               6, REG_ISO_50, REG_CAP_NONE, REG_WDR_OFF,
+       },
+       [REG_SCENE_SUNSET] = {
+               REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_PRESET,
+               REG_AWB_DAYLIGHT,
+               REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+               REG_AF_NORMAL, REG_FD_OFF,
+               REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+               6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+       },
+       [REG_SCENE_DAWN_DUSK] = {
+               REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_PRESET,
+               REG_AWB_FLUORESCENT_1,
+               REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+               REG_AF_NORMAL, REG_FD_OFF,
+               REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+               6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+       },
+       [REG_SCENE_FALL] = {
+               REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+               REG_CHROMA_ON, 5, REG_EDGE_ON, 5,
+               REG_AF_NORMAL, REG_FD_OFF,
+               REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+               6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+       },
+       [REG_SCENE_NIGHT] = {
+               REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+               REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+               REG_AF_NORMAL, REG_FD_OFF,
+               REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+               6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+       },
+       [REG_SCENE_AGAINST_LIGHT] = {
+               REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+               REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+               REG_AF_NORMAL, REG_FD_OFF,
+               REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+               6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+       },
+       [REG_SCENE_FIRE] = {
+               REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+               REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+               REG_AF_NORMAL, REG_FD_OFF,
+               REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+               6, REG_ISO_50, REG_CAP_NONE, REG_WDR_OFF,
+       },
+       [REG_SCENE_TEXT] = {
+               REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+               REG_CHROMA_ON, 3, REG_EDGE_ON, 7,
+               REG_AF_MACRO, REG_FD_OFF,
+               REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+               6, REG_ISO_AUTO, REG_CAP_ANTI_SHAKE, REG_WDR_ON,
+       },
+       [REG_SCENE_CANDLE] = {
+               REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0,
+               REG_CHROMA_ON, 3, REG_EDGE_ON, 5,
+               REG_AF_NORMAL, REG_FD_OFF,
+               REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF,
+               6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF,
+       },
+};
+
+/**
+ * m5mols_do_scenemode() - Change current scenemode
+ * @mode:      Desired mode of the scenemode
+ *
+ * WARNING: The execution order is important. Do not change the order.
+ */
+int m5mols_do_scenemode(struct m5mols_info *info, u32 mode)
+{
+       struct v4l2_subdev *sd = &info->sd;
+       struct m5mols_scenemode scenemode = m5mols_default_scenemode[mode];
+       int ret;
+
+       if (mode > REG_SCENE_CANDLE)
+               return -EINVAL;
+
+       ret = m5mols_lock_3a(info, false);
+       if (!ret)
+               ret = m5mols_write(sd, AE_EV_PRESET_MONITOR, mode);
+       if (!ret)
+               ret = m5mols_write(sd, AE_EV_PRESET_CAPTURE, mode);
+       if (!ret)
+               ret = m5mols_write(sd, AE_MODE, scenemode.metering);
+       if (!ret)
+               ret = m5mols_write(sd, AE_INDEX, scenemode.ev_bias);
+       if (!ret)
+               ret = m5mols_write(sd, AWB_MODE, scenemode.wb_mode);
+       if (!ret)
+               ret = m5mols_write(sd, AWB_MANUAL, scenemode.wb_preset);
+       if (!ret)
+               ret = m5mols_write(sd, MON_CHROMA_EN, scenemode.chroma_en);
+       if (!ret)
+               ret = m5mols_write(sd, MON_CHROMA_LVL, scenemode.chroma_lvl);
+       if (!ret)
+               ret = m5mols_write(sd, MON_EDGE_EN, scenemode.edge_en);
+       if (!ret)
+               ret = m5mols_write(sd, MON_EDGE_LVL, scenemode.edge_lvl);
+       if (!ret && is_available_af(info))
+               ret = m5mols_write(sd, AF_MODE, scenemode.af_range);
+       if (!ret && is_available_af(info))
+               ret = m5mols_write(sd, FD_CTL, scenemode.fd_mode);
+       if (!ret)
+               ret = m5mols_write(sd, MON_TONE_CTL, scenemode.tone);
+       if (!ret)
+               ret = m5mols_write(sd, AE_ISO, scenemode.iso);
+       if (!ret)
+               ret = m5mols_mode(info, REG_CAPTURE);
+       if (!ret)
+               ret = m5mols_write(sd, CAPP_WDR_EN, scenemode.wdr);
+       if (!ret)
+               ret = m5mols_write(sd, CAPP_MCC_MODE, scenemode.mcc);
+       if (!ret)
+               ret = m5mols_write(sd, CAPP_LIGHT_CTRL, scenemode.light);
+       if (!ret)
+               ret = m5mols_write(sd, CAPP_FLASH_CTRL, scenemode.flash);
+       if (!ret)
+               ret = m5mols_write(sd, CAPC_MODE, scenemode.capt_mode);
+       if (!ret)
+               ret = m5mols_mode(info, REG_MONITOR);
+
+       return ret;
+}
+
+static int m5mols_lock_ae(struct m5mols_info *info, bool lock)
+{
+       int ret = 0;
+
+       if (info->lock_ae != lock)
+               ret = m5mols_write(&info->sd, AE_LOCK,
+                               lock ? REG_AE_LOCK : REG_AE_UNLOCK);
+       if (!ret)
+               info->lock_ae = lock;
+
+       return ret;
+}
+
+static int m5mols_lock_awb(struct m5mols_info *info, bool lock)
+{
+       int ret = 0;
+
+       if (info->lock_awb != lock)
+               ret = m5mols_write(&info->sd, AWB_LOCK,
+                               lock ? REG_AWB_LOCK : REG_AWB_UNLOCK);
+       if (!ret)
+               info->lock_awb = lock;
+
+       return ret;
+}
+
+/* m5mols_lock_3a() - Lock 3A(Auto Exposure, Auto Whitebalance, Auto Focus) */
+int m5mols_lock_3a(struct m5mols_info *info, bool lock)
+{
+       int ret;
+
+       ret = m5mols_lock_ae(info, lock);
+       if (!ret)
+               ret = m5mols_lock_awb(info, lock);
+       /* Don't need to handle unlocking AF */
+       if (!ret && is_available_af(info) && lock)
+               ret = m5mols_write(&info->sd, AF_EXECUTE, REG_AF_STOP);
+
+       return ret;
+}
+
+/* m5mols_set_ctrl() - The main s_ctrl worker, called via the v4l2 control handler */
+int m5mols_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+       struct v4l2_subdev *sd = to_sd(ctrl);
+       struct m5mols_info *info = to_m5mols(sd);
+       int ret;
+
+       switch (ctrl->id) {
+       case V4L2_CID_ZOOM_ABSOLUTE:
+               return m5mols_write(sd, MON_ZOOM, ctrl->val);
+
+       case V4L2_CID_EXPOSURE_AUTO:
+               ret = m5mols_lock_ae(info,
+                       ctrl->val == V4L2_EXPOSURE_AUTO ? false : true);
+               if (!ret && ctrl->val == V4L2_EXPOSURE_AUTO)
+                       ret = m5mols_write(sd, AE_MODE, REG_AE_ALL);
+               if (!ret && ctrl->val == V4L2_EXPOSURE_MANUAL) {
+                       int val = info->exposure->val;
+                       ret = m5mols_write(sd, AE_MODE, REG_AE_OFF);
+                       if (!ret)
+                               ret = m5mols_write(sd, AE_MAN_GAIN_MON, val);
+                       if (!ret)
+                               ret = m5mols_write(sd, AE_MAN_GAIN_CAP, val);
+               }
+               return ret;
+
+       case V4L2_CID_AUTO_WHITE_BALANCE:
+               ret = m5mols_lock_awb(info, ctrl->val ? false : true);
+               if (!ret)
+                       ret = m5mols_write(sd, AWB_MODE, ctrl->val ?
+                               REG_AWB_AUTO : REG_AWB_PRESET);
+               return ret;
+
+       case V4L2_CID_SATURATION:
+               ret = m5mols_write(sd, MON_CHROMA_LVL, ctrl->val);
+               if (!ret)
+                       ret = m5mols_write(sd, MON_CHROMA_EN, REG_CHROMA_ON);
+               return ret;
+
+       case V4L2_CID_COLORFX:
+               /*
+                * This control uses two kinds of registers: normal & color.
+                * The normal effect belongs to category 1, while the color
+                * one belongs to category 2.
+                *
+                * The normal effect uses one register, PARM_EFFECT, while
+                * the color effect uses three: MON_EFFECT, MON_CFIXR and
+                * MON_CFIXB, matching the writes below.
+                */
+               ret = m5mols_write(sd, PARM_EFFECT,
+                       ctrl->val == V4L2_COLORFX_NEGATIVE ? REG_EFFECT_NEGA :
+                       ctrl->val == V4L2_COLORFX_EMBOSS ? REG_EFFECT_EMBOSS :
+                       REG_EFFECT_OFF);
+               if (!ret)
+                       ret = m5mols_write(sd, MON_EFFECT,
+                               ctrl->val == V4L2_COLORFX_SEPIA ?
+                               REG_COLOR_EFFECT_ON : REG_COLOR_EFFECT_OFF);
+               if (!ret)
+                       ret = m5mols_write(sd, MON_CFIXR,
+                               ctrl->val == V4L2_COLORFX_SEPIA ?
+                               REG_CFIXR_SEPIA : 0);
+               if (!ret)
+                       ret = m5mols_write(sd, MON_CFIXB,
+                               ctrl->val == V4L2_COLORFX_SEPIA ?
+                               REG_CFIXB_SEPIA : 0);
+               return ret;
+       }
+
+       return -EINVAL;
+}
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c
new file mode 100644 (file)
index 0000000..76eac26
--- /dev/null
@@ -0,0 +1,1004 @@
+/*
+ * Driver for M-5MOLS 8M Pixel camera sensor with ISP
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Author: HeungJun Kim, riverful.kim@samsung.com
+ *
+ * Copyright (C) 2009 Samsung Electronics Co., Ltd.
+ * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/m5mols.h>
+
+#include "m5mols.h"
+#include "m5mols_reg.h"
+
+int m5mols_debug;
+module_param(m5mols_debug, int, 0644);
+
+#define MODULE_NAME            "M5MOLS"
+#define M5MOLS_I2C_CHECK_RETRY 500
+
+/* The regulator consumer names for external voltage regulators */
+static struct regulator_bulk_data supplies[] = {
+       {
+               .supply = "core",       /* ARM core power, 1.2V */
+       }, {
+               .supply = "dig_18",     /* digital power 1, 1.8V */
+       }, {
+               .supply = "d_sensor",   /* sensor power 1, 1.8V */
+       }, {
+               .supply = "dig_28",     /* digital power 2, 2.8V */
+       }, {
+               .supply = "a_sensor",   /* analog power */
+       }, {
+               .supply = "dig_12",     /* digital power 3, 1.2V */
+       },
+};
+
+static struct v4l2_mbus_framefmt m5mols_default_ffmt[M5MOLS_RESTYPE_MAX] = {
+       [M5MOLS_RESTYPE_MONITOR] = {
+               .width          = 1920,
+               .height         = 1080,
+               .code           = V4L2_MBUS_FMT_VYUY8_2X8,
+               .field          = V4L2_FIELD_NONE,
+               .colorspace     = V4L2_COLORSPACE_JPEG,
+       },
+       [M5MOLS_RESTYPE_CAPTURE] = {
+               .width          = 1920,
+               .height         = 1080,
+               .code           = V4L2_MBUS_FMT_JPEG_1X8,
+               .field          = V4L2_FIELD_NONE,
+               .colorspace     = V4L2_COLORSPACE_JPEG,
+       },
+};
+#define SIZE_DEFAULT_FFMT      ARRAY_SIZE(m5mols_default_ffmt)
+
+static const struct m5mols_resolution m5mols_reg_res[] = {
+       { 0x01, M5MOLS_RESTYPE_MONITOR, 128, 96 },      /* SUB-QCIF */
+       { 0x03, M5MOLS_RESTYPE_MONITOR, 160, 120 },     /* QQVGA */
+       { 0x05, M5MOLS_RESTYPE_MONITOR, 176, 144 },     /* QCIF */
+       { 0x06, M5MOLS_RESTYPE_MONITOR, 176, 176 },
+       { 0x08, M5MOLS_RESTYPE_MONITOR, 240, 320 },     /* QVGA */
+       { 0x09, M5MOLS_RESTYPE_MONITOR, 320, 240 },     /* QVGA */
+       { 0x0c, M5MOLS_RESTYPE_MONITOR, 240, 400 },     /* WQVGA */
+       { 0x0d, M5MOLS_RESTYPE_MONITOR, 400, 240 },     /* WQVGA */
+       { 0x0e, M5MOLS_RESTYPE_MONITOR, 352, 288 },     /* CIF */
+       { 0x13, M5MOLS_RESTYPE_MONITOR, 480, 360 },
+       { 0x15, M5MOLS_RESTYPE_MONITOR, 640, 360 },     /* qHD */
+       { 0x17, M5MOLS_RESTYPE_MONITOR, 640, 480 },     /* VGA */
+       { 0x18, M5MOLS_RESTYPE_MONITOR, 720, 480 },
+       { 0x1a, M5MOLS_RESTYPE_MONITOR, 800, 480 },     /* WVGA */
+       { 0x1f, M5MOLS_RESTYPE_MONITOR, 800, 600 },     /* SVGA */
+       { 0x21, M5MOLS_RESTYPE_MONITOR, 1280, 720 },    /* HD */
+       { 0x25, M5MOLS_RESTYPE_MONITOR, 1920, 1080 },   /* 1080p */
+       { 0x29, M5MOLS_RESTYPE_MONITOR, 3264, 2448 },   /* 2.63fps 8M */
+       { 0x39, M5MOLS_RESTYPE_MONITOR, 800, 602 },     /* AHS_MON debug */
+
+       { 0x02, M5MOLS_RESTYPE_CAPTURE, 320, 240 },     /* QVGA */
+       { 0x04, M5MOLS_RESTYPE_CAPTURE, 400, 240 },     /* WQVGA */
+       { 0x07, M5MOLS_RESTYPE_CAPTURE, 480, 360 },
+       { 0x08, M5MOLS_RESTYPE_CAPTURE, 640, 360 },     /* qHD */
+       { 0x09, M5MOLS_RESTYPE_CAPTURE, 640, 480 },     /* VGA */
+       { 0x0a, M5MOLS_RESTYPE_CAPTURE, 800, 480 },     /* WVGA */
+       { 0x10, M5MOLS_RESTYPE_CAPTURE, 1280, 720 },    /* HD */
+       { 0x14, M5MOLS_RESTYPE_CAPTURE, 1280, 960 },    /* 1M */
+       { 0x17, M5MOLS_RESTYPE_CAPTURE, 1600, 1200 },   /* 2M */
+       { 0x19, M5MOLS_RESTYPE_CAPTURE, 1920, 1080 },   /* Full-HD */
+       { 0x1a, M5MOLS_RESTYPE_CAPTURE, 2048, 1152 },   /* 3Mega */
+       { 0x1b, M5MOLS_RESTYPE_CAPTURE, 2048, 1536 },
+       { 0x1c, M5MOLS_RESTYPE_CAPTURE, 2560, 1440 },   /* 4Mega */
+       { 0x1d, M5MOLS_RESTYPE_CAPTURE, 2560, 1536 },
+       { 0x1f, M5MOLS_RESTYPE_CAPTURE, 2560, 1920 },   /* 5Mega */
+       { 0x21, M5MOLS_RESTYPE_CAPTURE, 3264, 1836 },   /* 6Mega */
+       { 0x22, M5MOLS_RESTYPE_CAPTURE, 3264, 1960 },
+       { 0x25, M5MOLS_RESTYPE_CAPTURE, 3264, 2448 },   /* 8Mega */
+};
+
+/**
+ * m5mols_swap_byte - a byte array to integer conversion function
+ * @data: byte array read from the I2C bus
+ * @length: size in bytes of the I2C packet, as defined in the M-5MOLS datasheet
+ *
+ * Convert an I2C data byte array, performing any required byte
+ * reordering to ensure proper values for each data type, regardless
+ * of the architecture endianness.
+ */
+static u32 m5mols_swap_byte(u8 *data, u8 length)
+{
+       if (length == 1)
+               return *data;
+       else if (length == 2)
+               return be16_to_cpu(*((u16 *)data));
+       else
+               return be32_to_cpu(*((u32 *)data));
+}
+
+/**
+ * m5mols_read -  I2C read function
+ * @reg: combination of size, category and command for the I2C packet
+ * @val: read value
+ */
+int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val)
+{
+       struct i2c_client *client = v4l2_get_subdevdata(sd);
+       u8 rbuf[M5MOLS_I2C_MAX_SIZE + 1];
+       u8 size = I2C_SIZE(reg);
+       u8 category = I2C_CATEGORY(reg);
+       u8 cmd = I2C_COMMAND(reg);
+       struct i2c_msg msg[2];
+       u8 wbuf[5];
+       int ret;
+
+       if (!client->adapter)
+               return -ENODEV;
+
+       if (size != 1 && size != 2 && size != 4) {
+               v4l2_err(sd, "Wrong data size\n");
+               return -EINVAL;
+       }
+
+       msg[0].addr = client->addr;
+       msg[0].flags = 0;
+       msg[0].len = 5;
+       msg[0].buf = wbuf;
+       wbuf[0] = 5;
+       wbuf[1] = M5MOLS_BYTE_READ;
+       wbuf[2] = category;
+       wbuf[3] = cmd;
+       wbuf[4] = size;
+
+       msg[1].addr = client->addr;
+       msg[1].flags = I2C_M_RD;
+       msg[1].len = size + 1;
+       msg[1].buf = rbuf;
+
+       /* minimum stabilization time */
+       usleep_range(200, 200);
+
+       ret = i2c_transfer(client->adapter, msg, 2);
+       if (ret < 0) {
+               v4l2_err(sd, "read failed: size:%d cat:%02x cmd:%02x. %d\n",
+                        size, category, cmd, ret);
+               return ret;
+       }
+
+       *val = m5mols_swap_byte(&rbuf[1], size);
+
+       return 0;
+}
+
+/**
+ * m5mols_write - I2C command write function
+ * @reg: combination of size, category and command for the I2C packet
+ * @val: value to write
+ */
+int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val)
+{
+       struct i2c_client *client = v4l2_get_subdevdata(sd);
+       u8 wbuf[M5MOLS_I2C_MAX_SIZE + 4];
+       u8 category = I2C_CATEGORY(reg);
+       u8 cmd = I2C_COMMAND(reg);
+       u8 size = I2C_SIZE(reg);
+       u32 *buf = (u32 *)&wbuf[4];
+       struct i2c_msg msg[1];
+       int ret;
+
+       if (!client->adapter)
+               return -ENODEV;
+
+       if (size != 1 && size != 2 && size != 4) {
+               v4l2_err(sd, "Wrong data size\n");
+               return -EINVAL;
+       }
+
+       msg->addr = client->addr;
+       msg->flags = 0;
+       msg->len = (u16)size + 4;
+       msg->buf = wbuf;
+       wbuf[0] = size + 4;
+       wbuf[1] = M5MOLS_BYTE_WRITE;
+       wbuf[2] = category;
+       wbuf[3] = cmd;
+
+       *buf = m5mols_swap_byte((u8 *)&val, size);
+
+       usleep_range(200, 200);
+
+       ret = i2c_transfer(client->adapter, msg, 1);
+       if (ret < 0) {
+               v4l2_err(sd, "write failed: size:%d cat:%02x cmd:%02x. %d\n",
+                       size, category, cmd, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 mask)
+{
+       u32 busy, i;
+       int ret;
+
+       for (i = 0; i < M5MOLS_I2C_CHECK_RETRY; i++) {
+               ret = m5mols_read(sd, I2C_REG(category, cmd, 1), &busy);
+               if (ret < 0)
+                       return ret;
+               if ((busy & mask) == mask)
+                       return 0;
+       }
+       return -EBUSY;
+}
+
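+/*
+ * Usage sketch (informative only): m5mols_busy() polls a one-byte status
+ * register until the given bits are set, e.g. waiting for a mode change to
+ * take effect, as m5mols_reg_mode() below does:
+ *
+ *	ret = m5mols_busy(sd, CAT_SYSTEM, CAT0_SYSMODE, REG_MONITOR);
+ */
+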
+/**
+ * m5mols_enable_interrupt - Clear interrupt pending bits and unmask interrupts
+ * @reg: interrupt sources to enable
+ *
+ * Before writing the desired interrupt mask, the INT_FACTOR register is
+ * read to clear any pending interrupts.
+ */
+int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg)
+{
+       struct m5mols_info *info = to_m5mols(sd);
+       u32 mask = is_available_af(info) ? REG_INT_AF : 0;
+       u32 dummy;
+       int ret;
+
+       ret = m5mols_read(sd, SYSTEM_INT_FACTOR, &dummy);
+       if (!ret)
+               ret = m5mols_write(sd, SYSTEM_INT_ENABLE, reg & ~mask);
+       return ret;
+}
+
+/**
+ * m5mols_reg_mode - Write the mode and check busy status
+ *
+ * Changing the M-5MOLS mode always involves a short delay, so the busy
+ * status is polled afterwards to guarantee that the requested mode has
+ * actually been entered.
+ */
+static int m5mols_reg_mode(struct v4l2_subdev *sd, u32 mode)
+{
+       int ret = m5mols_write(sd, SYSTEM_SYSMODE, mode);
+
+       return ret ? ret : m5mols_busy(sd, CAT_SYSTEM, CAT0_SYSMODE, mode);
+}
+
+/**
+ * m5mols_mode - manage the M-5MOLS's mode
+ * @mode: the required operation mode
+ *
+ * The commands of the M-5MOLS are grouped into specific modes. Each command
+ * is only guaranteed to work when the sensor is operating in the mode that
+ * the command belongs to.
+ */
+int m5mols_mode(struct m5mols_info *info, u32 mode)
+{
+       struct v4l2_subdev *sd = &info->sd;
+       int ret = -EINVAL;
+       u32 reg;
+
+       if (mode < REG_PARAMETER || mode > REG_CAPTURE)
+               return ret;
+
+       ret = m5mols_read(sd, SYSTEM_SYSMODE, &reg);
+       if ((!ret && reg == mode) || ret)
+               return ret;
+
+       switch (reg) {
+       case REG_PARAMETER:
+               ret = m5mols_reg_mode(sd, REG_MONITOR);
+               if (!ret && mode == REG_MONITOR)
+                       break;
+               if (!ret)
+                       ret = m5mols_reg_mode(sd, REG_CAPTURE);
+               break;
+
+       case REG_MONITOR:
+               if (mode == REG_PARAMETER) {
+                       ret = m5mols_reg_mode(sd, REG_PARAMETER);
+                       break;
+               }
+
+               ret = m5mols_reg_mode(sd, REG_CAPTURE);
+               break;
+
+       case REG_CAPTURE:
+               ret = m5mols_reg_mode(sd, REG_MONITOR);
+               if (!ret && mode == REG_MONITOR)
+                       break;
+               if (!ret)
+                       ret = m5mols_reg_mode(sd, REG_PARAMETER);
+               break;
+
+       default:
+               v4l2_warn(sd, "Wrong mode: %d\n", mode);
+       }
+
+       if (!ret)
+               info->mode = mode;
+
+       return ret;
+}
+
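+/*
+ * Usage sketch (informative only): callers request only the final mode; the
+ * intermediate transitions are handled above, so switching from PARAMETER to
+ * CAPTURE implicitly passes through MONITOR:
+ *
+ *	ret = m5mols_mode(info, REG_CAPTURE);
+ *	if (ret)
+ *		return ret;
+ */
+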
+/**
+ * m5mols_get_version - retrieve full revision information of the M-5MOLS
+ *
+ * The version information includes the hardware and firmware revisions,
+ * the Auto Focus algorithm version and the version string.
+ */
+static int m5mols_get_version(struct v4l2_subdev *sd)
+{
+       struct m5mols_info *info = to_m5mols(sd);
+       union {
+               struct m5mols_version ver;
+               u8 bytes[VERSION_SIZE];
+       } version;
+       u32 *value;
+       u8 cmd = CAT0_VER_CUSTOMER;
+       int ret;
+
+       do {
+               value = (u32 *)&version.bytes[cmd];
+               ret = m5mols_read(sd, SYSTEM_CMD(cmd), value);
+               if (ret)
+                       return ret;
+       } while (cmd++ != CAT0_VER_AWB);
+
+       do {
+               value = (u32 *)&version.bytes[cmd];
+               ret = m5mols_read(sd, SYSTEM_VER_STRING, value);
+               if (ret)
+                       return ret;
+               if (cmd >= VERSION_SIZE - 1)
+                       return -EINVAL;
+       } while (version.bytes[cmd++]);
+
+       value = (u32 *)&version.bytes[cmd];
+       ret = m5mols_read(sd, AF_VERSION, value);
+       if (ret)
+               return ret;
+
+       /* Store the version information byte-swapped into CPU (readable) order */
+       info->ver       = version.ver;
+       info->ver.fw    = be16_to_cpu(info->ver.fw);
+       info->ver.hw    = be16_to_cpu(info->ver.hw);
+       info->ver.param = be16_to_cpu(info->ver.param);
+       info->ver.awb   = be16_to_cpu(info->ver.awb);
+
+       v4l2_info(sd, "Manufacturer\t[%s]\n",
+                       is_manufacturer(info, REG_SAMSUNG_ELECTRO) ?
+                       "Samsung Electro-Machanics" :
+                       is_manufacturer(info, REG_SAMSUNG_OPTICS) ?
+                       "Samsung Fiber-Optics" :
+                       is_manufacturer(info, REG_SAMSUNG_TECHWIN) ?
+                       "Samsung Techwin" : "None");
+       v4l2_info(sd, "Customer/Project\t[0x%02x/0x%02x]\n",
+                       info->ver.customer, info->ver.project);
+
+       if (!is_available_af(info))
+               v4l2_info(sd, "No support Auto Focus on this firmware\n");
+
+       return ret;
+}
+
+/**
+ * __find_restype - Lookup M-5MOLS resolution type according to pixel code
+ * @code: pixel code
+ */
+static enum m5mols_restype __find_restype(enum v4l2_mbus_pixelcode code)
+{
+       enum m5mols_restype type = M5MOLS_RESTYPE_MONITOR;
+
+       do {
+               if (code == m5mols_default_ffmt[type].code)
+                       return type;
+       } while (type++ != SIZE_DEFAULT_FFMT);
+
+       return 0;
+}
+
+/**
+ * __find_resolution - Lookup preset and type of M-5MOLS's resolution
+ * @mf: pixel format to find/negotiate the resolution preset for
+ * @type: M-5MOLS resolution type
+ * @resolution:        M-5MOLS resolution preset register value
+ *
+ * Find the resolution preset that most closely matches the requested format
+ * and adjust @mf to the supported values.
+ */
+static int __find_resolution(struct v4l2_subdev *sd,
+                            struct v4l2_mbus_framefmt *mf,
+                            enum m5mols_restype *type,
+                            u32 *resolution)
+{
+       const struct m5mols_resolution *fsize = &m5mols_reg_res[0];
+       const struct m5mols_resolution *match = NULL;
+       enum m5mols_restype stype = __find_restype(mf->code);
+       int i = ARRAY_SIZE(m5mols_reg_res);
+       unsigned int min_err = ~0;
+
+       while (i--) {
+               int err;
+               if (stype == fsize->type) {
+                       err = abs(fsize->width - mf->width)
+                               + abs(fsize->height - mf->height);
+
+                       if (err < min_err) {
+                               min_err = err;
+                               match = fsize;
+                       }
+               }
+               fsize++;
+       }
+       if (match) {
+               mf->width  = match->width;
+               mf->height = match->height;
+               *resolution = match->reg;
+               *type = stype;
+               return 0;
+       }
+
+       return -EINVAL;
+}
+
+static struct v4l2_mbus_framefmt *__find_format(struct m5mols_info *info,
+                               struct v4l2_subdev_fh *fh,
+                               enum v4l2_subdev_format_whence which,
+                               enum m5mols_restype type)
+{
+       if (which == V4L2_SUBDEV_FORMAT_TRY)
+               return fh ? v4l2_subdev_get_try_format(fh, 0) : NULL;
+
+       return &info->ffmt[type];
+}
+
+static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+                         struct v4l2_subdev_format *fmt)
+{
+       struct m5mols_info *info = to_m5mols(sd);
+       struct v4l2_mbus_framefmt *format;
+
+       if (fmt->pad != 0)
+               return -EINVAL;
+
+       format = __find_format(info, fh, fmt->which, info->res_type);
+       if (!format)
+               return -EINVAL;
+
+       fmt->format = *format;
+       return 0;
+}
+
+static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
+                         struct v4l2_subdev_format *fmt)
+{
+       struct m5mols_info *info = to_m5mols(sd);
+       struct v4l2_mbus_framefmt *format = &fmt->format;
+       struct v4l2_mbus_framefmt *sfmt;
+       enum m5mols_restype type;
+       u32 resolution = 0;
+       int ret;
+
+       if (fmt->pad != 0)
+               return -EINVAL;
+
+       ret = __find_resolution(sd, format, &type, &resolution);
+       if (ret < 0)
+               return ret;
+
+       sfmt = __find_format(info, fh, fmt->which, type);
+       if (!sfmt)
+               return 0;
+
+       *sfmt           = m5mols_default_ffmt[type];
+       sfmt->width     = format->width;
+       sfmt->height    = format->height;
+
+       if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+               info->resolution = resolution;
+               info->code = format->code;
+               info->res_type = type;
+       }
+
+       return 0;
+}
+
+static int m5mols_enum_mbus_code(struct v4l2_subdev *sd,
+                                struct v4l2_subdev_fh *fh,
+                                struct v4l2_subdev_mbus_code_enum *code)
+{
+       if (!code || code->index >= SIZE_DEFAULT_FFMT)
+               return -EINVAL;
+
+       code->code = m5mols_default_ffmt[code->index].code;
+
+       return 0;
+}
+
+static struct v4l2_subdev_pad_ops m5mols_pad_ops = {
+       .enum_mbus_code = m5mols_enum_mbus_code,
+       .get_fmt        = m5mols_get_fmt,
+       .set_fmt        = m5mols_set_fmt,
+};
+
+/**
+ * m5mols_sync_controls - Apply default scene mode and the current controls
+ *
+ * This is used only when streaming starts, to synchronize the v4l2_ctrl
+ * framework state with the sensor's controls. First the default scene mode
+ * is applied to the sensor, then v4l2_ctrl_handler_setup() is called. Some
+ * controls may overlap with settings implied by the default scene mode;
+ * those controls take precedence, which is why they are applied after the
+ * scene mode.
+ */
+int m5mols_sync_controls(struct m5mols_info *info)
+{
+       int ret = 0;
+
+       if (!is_ctrl_synced(info)) {
+               ret = m5mols_do_scenemode(info, REG_SCENE_NORMAL);
+               if (ret)
+                       return ret;
+
+               v4l2_ctrl_handler_setup(&info->handle);
+               info->ctrl_sync = true;
+       }
+
+       return ret;
+}
+
+/**
+ * m5mols_start_monitor - Start the monitor mode
+ *
+ * Set up the resolution and frame rate in PARAMETER mode, then switch over
+ * to MONITOR mode and apply the controls.
+ */
+static int m5mols_start_monitor(struct m5mols_info *info)
+{
+       struct v4l2_subdev *sd = &info->sd;
+       int ret;
+
+       ret = m5mols_mode(info, REG_PARAMETER);
+       if (!ret)
+               ret = m5mols_write(sd, PARM_MON_SIZE, info->resolution);
+       if (!ret)
+               ret = m5mols_write(sd, PARM_MON_FPS, REG_FPS_30);
+       if (!ret)
+               ret = m5mols_mode(info, REG_MONITOR);
+       if (!ret)
+               ret = m5mols_sync_controls(info);
+
+       return ret;
+}
+
+static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
+{
+       struct m5mols_info *info = to_m5mols(sd);
+
+       if (enable) {
+               int ret = -EINVAL;
+
+               if (is_code(info->code, M5MOLS_RESTYPE_MONITOR))
+                       ret = m5mols_start_monitor(info);
+               if (is_code(info->code, M5MOLS_RESTYPE_CAPTURE))
+                       ret = m5mols_start_capture(info);
+
+               return ret;
+       }
+
+       return m5mols_mode(info, REG_PARAMETER);
+}
+
+static const struct v4l2_subdev_video_ops m5mols_video_ops = {
+       .s_stream       = m5mols_s_stream,
+};
+
+static int m5mols_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+       struct v4l2_subdev *sd = to_sd(ctrl);
+       struct m5mols_info *info = to_m5mols(sd);
+       int ret;
+
+       info->mode_save = info->mode;
+
+       ret = m5mols_mode(info, REG_PARAMETER);
+       if (!ret)
+               ret = m5mols_set_ctrl(ctrl);
+       if (!ret)
+               ret = m5mols_mode(info, info->mode_save);
+
+       return ret;
+}
+
+static const struct v4l2_ctrl_ops m5mols_ctrl_ops = {
+       .s_ctrl = m5mols_s_ctrl,
+};
+
+static int m5mols_sensor_power(struct m5mols_info *info, bool enable)
+{
+       struct v4l2_subdev *sd = &info->sd;
+       struct i2c_client *client = v4l2_get_subdevdata(sd);
+       const struct m5mols_platform_data *pdata = info->pdata;
+       int ret;
+
+       if (enable) {
+               if (is_powered(info))
+                       return 0;
+
+               if (info->set_power) {
+                       ret = info->set_power(&client->dev, 1);
+                       if (ret)
+                               return ret;
+               }
+
+               ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
+               if (ret) {
+                       info->set_power(&client->dev, 0);
+                       return ret;
+               }
+
+               gpio_set_value(pdata->gpio_reset, !pdata->reset_polarity);
+               usleep_range(1000, 1000);
+               info->power = true;
+
+               return ret;
+       }
+
+       if (!is_powered(info))
+               return 0;
+
+       ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
+       if (ret)
+               return ret;
+
+       if (info->set_power)
+               info->set_power(&client->dev, 0);
+
+       gpio_set_value(pdata->gpio_reset, pdata->reset_polarity);
+       usleep_range(1000, 1000);
+       info->power = false;
+
+       return ret;
+}
+
+/* m5mols_update_fw - optional firmware update routine */
+int __attribute__ ((weak)) m5mols_update_fw(struct v4l2_subdev *sd,
+               int (*set_power)(struct m5mols_info *, bool))
+{
+       return 0;
+}
+
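+/*
+ * Note (informative only): the weak stub above is a no-op default. If an
+ * object file providing a strong m5mols_update_fw() with the prototype
+ *
+ *	int m5mols_update_fw(struct v4l2_subdev *sd,
+ *			     int (*set_power)(struct m5mols_info *, bool));
+ *
+ * is linked into the driver, it overrides this definition; otherwise the
+ * firmware update step is simply skipped during ARM boot (see
+ * m5mols_sensor_armboot() below).
+ */
+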
+/**
+ * m5mols_sensor_armboot - Boot the M-5MOLS internal ARM core
+ *
+ * Booting the internal ARM core makes the M-5MOLS ready to accept I2C
+ * commands. It is the first thing to be done after the sensor is powered up.
+ * After triggering the ARM boot, the driver must wait at least the 520 ms
+ * recommended by the M-5MOLS datasheet.
+ */
+static int m5mols_sensor_armboot(struct v4l2_subdev *sd)
+{
+       int ret;
+
+       ret = m5mols_write(sd, FLASH_CAM_START, REG_START_ARM_BOOT);
+       if (ret < 0)
+               return ret;
+
+       msleep(520);
+
+       ret = m5mols_get_version(sd);
+       if (!ret)
+               ret = m5mols_update_fw(sd, m5mols_sensor_power);
+       if (ret)
+               return ret;
+
+       v4l2_dbg(1, m5mols_debug, sd, "Success ARM Booting\n");
+
+       ret = m5mols_write(sd, PARM_INTERFACE, REG_INTERFACE_MIPI);
+       if (!ret)
+               ret = m5mols_enable_interrupt(sd, REG_INT_AF);
+
+       return ret;
+}
+
+static int m5mols_init_controls(struct m5mols_info *info)
+{
+       struct v4l2_subdev *sd = &info->sd;
+       u16 max_exposure;
+       u16 step_zoom;
+       int ret;
+
+       /* Determine the control ranges and steps, which vary with the FW version */
+       ret = m5mols_read(sd, AE_MAX_GAIN_MON, (u32 *)&max_exposure);
+       if (!ret)
+               step_zoom = is_manufacturer(info, REG_SAMSUNG_OPTICS) ? 31 : 1;
+       if (ret)
+               return ret;
+
+       v4l2_ctrl_handler_init(&info->handle, 6);
+       info->autowb = v4l2_ctrl_new_std(&info->handle,
+                       &m5mols_ctrl_ops, V4L2_CID_AUTO_WHITE_BALANCE,
+                       0, 1, 1, 0);
+       info->saturation = v4l2_ctrl_new_std(&info->handle,
+                       &m5mols_ctrl_ops, V4L2_CID_SATURATION,
+                       1, 5, 1, 3);
+       info->zoom = v4l2_ctrl_new_std(&info->handle,
+                       &m5mols_ctrl_ops, V4L2_CID_ZOOM_ABSOLUTE,
+                       1, 70, step_zoom, 1);
+       info->exposure = v4l2_ctrl_new_std(&info->handle,
+                       &m5mols_ctrl_ops, V4L2_CID_EXPOSURE,
+                       0, max_exposure, 1, (int)max_exposure/2);
+       info->colorfx = v4l2_ctrl_new_std_menu(&info->handle,
+                       &m5mols_ctrl_ops, V4L2_CID_COLORFX,
+                       4, (1 << V4L2_COLORFX_BW), V4L2_COLORFX_NONE);
+       info->autoexposure = v4l2_ctrl_new_std_menu(&info->handle,
+                       &m5mols_ctrl_ops, V4L2_CID_EXPOSURE_AUTO,
+                       1, 0, V4L2_EXPOSURE_MANUAL);
+
+       sd->ctrl_handler = &info->handle;
+       if (info->handle.error) {
+               v4l2_err(sd, "Failed to initialize controls: %d\n", ret);
+               v4l2_ctrl_handler_free(&info->handle);
+               return info->handle.error;
+       }
+
+       v4l2_ctrl_cluster(2, &info->autoexposure);
+
+       return 0;
+}
+
+/**
+ * m5mols_s_power - Main sensor power control function
+ *
+ * To prevent damaging the lens when the sensor is powered off, the
+ * Soft-Landing algorithm is called where available. The availability of the
+ * Soft-Landing algorithm depends on the firmware provider.
+ */
+static int m5mols_s_power(struct v4l2_subdev *sd, int on)
+{
+       struct m5mols_info *info = to_m5mols(sd);
+       int ret;
+
+       if (on) {
+               ret = m5mols_sensor_power(info, true);
+               if (!ret)
+                       ret = m5mols_sensor_armboot(sd);
+               if (!ret)
+                       ret = m5mols_init_controls(info);
+               if (ret)
+                       return ret;
+
+               info->ffmt[M5MOLS_RESTYPE_MONITOR] =
+                       m5mols_default_ffmt[M5MOLS_RESTYPE_MONITOR];
+               info->ffmt[M5MOLS_RESTYPE_CAPTURE] =
+                       m5mols_default_ffmt[M5MOLS_RESTYPE_CAPTURE];
+               return ret;
+       }
+
+       if (is_manufacturer(info, REG_SAMSUNG_TECHWIN)) {
+               ret = m5mols_mode(info, REG_MONITOR);
+               if (!ret)
+                       ret = m5mols_write(sd, AF_EXECUTE, REG_AF_STOP);
+               if (!ret)
+                       ret = m5mols_write(sd, AF_MODE, REG_AF_POWEROFF);
+               if (!ret)
+                       ret = m5mols_busy(sd, CAT_SYSTEM, CAT0_STATUS,
+                                       REG_AF_IDLE);
+               if (!ret)
+                       v4l2_info(sd, "Success soft-landing lens\n");
+       }
+
+       ret = m5mols_sensor_power(info, false);
+       if (!ret) {
+               v4l2_ctrl_handler_free(&info->handle);
+               info->ctrl_sync = false;
+       }
+
+       return ret;
+}
+
+static int m5mols_log_status(struct v4l2_subdev *sd)
+{
+       struct m5mols_info *info = to_m5mols(sd);
+
+       v4l2_ctrl_handler_log_status(&info->handle, sd->name);
+
+       return 0;
+}
+
+static const struct v4l2_subdev_core_ops m5mols_core_ops = {
+       .s_power        = m5mols_s_power,
+       .g_ctrl         = v4l2_subdev_g_ctrl,
+       .s_ctrl         = v4l2_subdev_s_ctrl,
+       .queryctrl      = v4l2_subdev_queryctrl,
+       .querymenu      = v4l2_subdev_querymenu,
+       .g_ext_ctrls    = v4l2_subdev_g_ext_ctrls,
+       .try_ext_ctrls  = v4l2_subdev_try_ext_ctrls,
+       .s_ext_ctrls    = v4l2_subdev_s_ext_ctrls,
+       .log_status     = m5mols_log_status,
+};
+
+static const struct v4l2_subdev_ops m5mols_ops = {
+       .core           = &m5mols_core_ops,
+       .pad            = &m5mols_pad_ops,
+       .video          = &m5mols_video_ops,
+};
+
+static void m5mols_irq_work(struct work_struct *work)
+{
+       struct m5mols_info *info =
+               container_of(work, struct m5mols_info, work_irq);
+       struct v4l2_subdev *sd = &info->sd;
+       u32 reg;
+       int ret;
+
+       if (!is_powered(info) ||
+                       m5mols_read(sd, SYSTEM_INT_FACTOR, &info->interrupt))
+               return;
+
+       switch (info->interrupt & REG_INT_MASK) {
+       case REG_INT_AF:
+               if (!is_available_af(info))
+                       break;
+               ret = m5mols_read(sd, AF_STATUS, &reg);
+               v4l2_dbg(2, m5mols_debug, sd, "AF %s\n",
+                        reg == REG_AF_FAIL ? "Failed" :
+                        reg == REG_AF_SUCCESS ? "Success" :
+                        reg == REG_AF_IDLE ? "Idle" : "Busy");
+               break;
+       case REG_INT_CAPTURE:
+               if (!test_and_set_bit(ST_CAPT_IRQ, &info->flags))
+                       wake_up_interruptible(&info->irq_waitq);
+
+               v4l2_dbg(2, m5mols_debug, sd, "CAPTURE\n");
+               break;
+       default:
+               v4l2_dbg(2, m5mols_debug, sd, "Undefined: %02x\n",
+                        info->interrupt & REG_INT_MASK);
+               break;
+       }
+}
+
+static irqreturn_t m5mols_irq_handler(int irq, void *data)
+{
+       struct v4l2_subdev *sd = data;
+       struct m5mols_info *info = to_m5mols(sd);
+
+       schedule_work(&info->work_irq);
+
+       return IRQ_HANDLED;
+}
+
+static int __devinit m5mols_probe(struct i2c_client *client,
+                                 const struct i2c_device_id *id)
+{
+       const struct m5mols_platform_data *pdata = client->dev.platform_data;
+       struct m5mols_info *info;
+       struct v4l2_subdev *sd;
+       int ret;
+
+       if (pdata == NULL) {
+               dev_err(&client->dev, "No platform data\n");
+               return -EINVAL;
+       }
+
+       if (!gpio_is_valid(pdata->gpio_reset)) {
+               dev_err(&client->dev, "No valid RESET GPIO specified\n");
+               return -EINVAL;
+       }
+
+       if (!pdata->irq) {
+               dev_err(&client->dev, "Interrupt not assigned\n");
+               return -EINVAL;
+       }
+
+       info = kzalloc(sizeof(struct m5mols_info), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+
+       info->pdata = pdata;
+       info->set_power = pdata->set_power;
+
+       ret = gpio_request(pdata->gpio_reset, "M5MOLS_NRST");
+       if (ret) {
+               dev_err(&client->dev, "Failed to request gpio: %d\n", ret);
+               goto out_free;
+       }
+       gpio_direction_output(pdata->gpio_reset, pdata->reset_polarity);
+
+       ret = regulator_bulk_get(&client->dev, ARRAY_SIZE(supplies), supplies);
+       if (ret) {
+               dev_err(&client->dev, "Failed to get regulators: %d\n", ret);
+               goto out_gpio;
+       }
+
+       sd = &info->sd;
+       strlcpy(sd->name, MODULE_NAME, sizeof(sd->name));
+       v4l2_i2c_subdev_init(sd, client, &m5mols_ops);
+
+       info->pad.flags = MEDIA_PAD_FL_SOURCE;
+       ret = media_entity_init(&sd->entity, 1, &info->pad, 0);
+       if (ret < 0)
+               goto out_reg;
+       sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR;
+
+       init_waitqueue_head(&info->irq_waitq);
+       INIT_WORK(&info->work_irq, m5mols_irq_work);
+       ret = request_irq(pdata->irq, m5mols_irq_handler,
+                         IRQF_TRIGGER_RISING, MODULE_NAME, sd);
+       if (ret) {
+               dev_err(&client->dev, "Interrupt request failed: %d\n", ret);
+               goto out_me;
+       }
+       info->res_type = M5MOLS_RESTYPE_MONITOR;
+       return 0;
+out_me:
+       media_entity_cleanup(&sd->entity);
+out_reg:
+       regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
+out_gpio:
+       gpio_free(pdata->gpio_reset);
+out_free:
+       kfree(info);
+       return ret;
+}
+
+static int __devexit m5mols_remove(struct i2c_client *client)
+{
+       struct v4l2_subdev *sd = i2c_get_clientdata(client);
+       struct m5mols_info *info = to_m5mols(sd);
+
+       v4l2_device_unregister_subdev(sd);
+       free_irq(info->pdata->irq, sd);
+
+       regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
+       gpio_free(info->pdata->gpio_reset);
+       media_entity_cleanup(&sd->entity);
+       kfree(info);
+       return 0;
+}
+
+static const struct i2c_device_id m5mols_id[] = {
+       { MODULE_NAME, 0 },
+       { },
+};
+MODULE_DEVICE_TABLE(i2c, m5mols_id);
+
+static struct i2c_driver m5mols_i2c_driver = {
+       .driver = {
+               .name   = MODULE_NAME,
+       },
+       .probe          = m5mols_probe,
+       .remove         = __devexit_p(m5mols_remove),
+       .id_table       = m5mols_id,
+};
+
+static int __init m5mols_mod_init(void)
+{
+       return i2c_add_driver(&m5mols_i2c_driver);
+}
+
+static void __exit m5mols_mod_exit(void)
+{
+       i2c_del_driver(&m5mols_i2c_driver);
+}
+
+module_init(m5mols_mod_init);
+module_exit(m5mols_mod_exit);
+
+MODULE_AUTHOR("HeungJun Kim <riverful.kim@samsung.com>");
+MODULE_AUTHOR("Dongsoo Kim <dongsoo45.kim@samsung.com>");
+MODULE_DESCRIPTION("Fujitsu M-5MOLS 8M Pixel camera driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/m5mols/m5mols_reg.h b/drivers/media/video/m5mols/m5mols_reg.h
new file mode 100644 (file)
index 0000000..b83e36f
--- /dev/null
@@ -0,0 +1,399 @@
+/*
+ * Register map for M-5MOLS 8M Pixel camera sensor with ISP
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Author: HeungJun Kim, riverful.kim@samsung.com
+ *
+ * Copyright (C) 2009 Samsung Electronics Co., Ltd.
+ * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef M5MOLS_REG_H
+#define M5MOLS_REG_H
+
+#define M5MOLS_I2C_MAX_SIZE    4
+#define M5MOLS_BYTE_READ       0x01
+#define M5MOLS_BYTE_WRITE      0x02
+
+#define I2C_CATEGORY(__cat)            (((__cat) >> 16) & 0xff)
+#define I2C_COMMAND(__comm)            (((__comm) >> 8) & 0xff)
+#define I2C_SIZE(__reg_s)              ((__reg_s) & 0xff)
+#define I2C_REG(__cat, __cmd, __reg_s) (((__cat) << 16) | ((__cmd) << 8) | (__reg_s))
+
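+/*
+ * For example (informative only), SYSTEM_SYSMODE below expands to
+ * I2C_REG(CAT_SYSTEM, CAT0_SYSMODE, 1) = (0x00 << 16) | (0x0b << 8) | 1
+ * = 0x00000b01, and the accessor macros recover the fields again:
+ * I2C_CATEGORY() = 0x00, I2C_COMMAND() = 0x0b, I2C_SIZE() = 1.
+ */
+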
+/*
+ * Category section registers
+ *
+ * A category is a group of related commands in the M-5MOLS register map.
+ */
+#define CAT_SYSTEM             0x00
+#define CAT_PARAM              0x01
+#define CAT_MONITOR            0x02
+#define CAT_AE                 0x03
+#define CAT_WB                 0x06
+#define CAT_EXIF               0x07
+#define CAT_FD                 0x09
+#define CAT_LENS               0x0a
+#define CAT_CAPT_PARM          0x0b
+#define CAT_CAPT_CTRL          0x0c
+#define CAT_FLASH              0x0f    /* related to FW, revisions, booting */
+
+/*
+ * Category 0 - SYSTEM mode
+ *
+ * The SYSTEM category covers registers controlling the sensor as a whole:
+ * version information, interrupts, operating mode selection and status.
+ * The M-5MOLS sensor with its ISP varies by packaging and manufacturer, and
+ * even by customer and project code, and the functional details may differ
+ * between variants; the version information helps determine which methods
+ * the driver should use.
+ *
+ * There are many registers between the customer version address and the AWB
+ * one. For more details, see the definitions in the file m5mols.h.
+ */
+#define CAT0_VER_CUSTOMER      0x00    /* customer version */
+#define CAT0_VER_AWB           0x09    /* Auto WB version */
+#define CAT0_VER_STRING                0x0a    /* string including M-5MOLS */
+#define CAT0_SYSMODE           0x0b    /* SYSTEM mode register */
+#define CAT0_STATUS            0x0c    /* SYSTEM mode status register */
+#define CAT0_INT_FACTOR                0x10    /* interrupt pending register */
+#define CAT0_INT_ENABLE                0x11    /* interrupt enable register */
+
+#define SYSTEM_SYSMODE         I2C_REG(CAT_SYSTEM, CAT0_SYSMODE, 1)
+#define REG_SYSINIT            0x00    /* SYSTEM mode */
+#define REG_PARAMETER          0x01    /* PARAMETER mode */
+#define REG_MONITOR            0x02    /* MONITOR mode */
+#define REG_CAPTURE            0x03    /* CAPTURE mode */
+
+#define SYSTEM_CMD(__cmd)      I2C_REG(CAT_SYSTEM, __cmd, 1)
+#define SYSTEM_VER_STRING      I2C_REG(CAT_SYSTEM, CAT0_VER_STRING, 1)
+#define REG_SAMSUNG_ELECTRO    "SE"    /* Samsung Electro-Mechanics */
+#define REG_SAMSUNG_OPTICS     "OP"    /* Samsung Fiber-Optics */
+#define REG_SAMSUNG_TECHWIN    "TB"    /* Samsung Techwin */
+
+#define SYSTEM_INT_FACTOR      I2C_REG(CAT_SYSTEM, CAT0_INT_FACTOR, 1)
+#define SYSTEM_INT_ENABLE      I2C_REG(CAT_SYSTEM, CAT0_INT_ENABLE, 1)
+#define REG_INT_MODE           (1 << 0)
+#define REG_INT_AF             (1 << 1)
+#define REG_INT_ZOOM           (1 << 2)
+#define REG_INT_CAPTURE                (1 << 3)
+#define REG_INT_FRAMESYNC      (1 << 4)
+#define REG_INT_FD             (1 << 5)
+#define REG_INT_LENS_INIT      (1 << 6)
+#define REG_INT_SOUND          (1 << 7)
+#define REG_INT_MASK           0x0f
+
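+/*
+ * Sketch (informative only): SYSTEM_INT_FACTOR reports which of the sources
+ * above are pending and reading it clears them, while SYSTEM_INT_ENABLE
+ * selects which sources may raise the interrupt line, e.g.:
+ *
+ *	m5mols_read(sd, SYSTEM_INT_FACTOR, &pending);
+ *	m5mols_write(sd, SYSTEM_INT_ENABLE, REG_INT_CAPTURE);
+ */
+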
+/*
+ * Category 1 - PARAMETER mode
+ *
+ * This category configures the camera features of the M-5MOLS: the preview
+ * (MONITOR) resolution, the frame rate, the interface between the sensor and
+ * the Application Processor, and the image effects.
+ */
+#define CAT1_DATA_INTERFACE    0x00    /* interface between sensor and AP */
+#define CAT1_MONITOR_SIZE      0x01    /* resolution at the MONITOR mode */
+#define CAT1_MONITOR_FPS       0x02    /* frame per second at this mode */
+#define CAT1_EFFECT            0x0b    /* image effects */
+
+#define PARM_MON_SIZE          I2C_REG(CAT_PARAM, CAT1_MONITOR_SIZE, 1)
+
+#define PARM_MON_FPS           I2C_REG(CAT_PARAM, CAT1_MONITOR_FPS, 1)
+#define REG_FPS_30             0x02
+
+#define PARM_INTERFACE         I2C_REG(CAT_PARAM, CAT1_DATA_INTERFACE, 1)
+#define REG_INTERFACE_MIPI     0x02
+
+#define PARM_EFFECT            I2C_REG(CAT_PARAM, CAT1_EFFECT, 1)
+#define REG_EFFECT_OFF         0x00
+#define REG_EFFECT_NEGA                0x01
+#define REG_EFFECT_EMBOSS      0x06
+#define REG_EFFECT_OUTLINE     0x07
+#define REG_EFFECT_WATERCOLOR  0x08
+
+/*
+ * Category 2 - MONITOR mode
+ *
+ * The MONITOR mode is the same as the preview mode mentioned above. The
+ * M-5MOLS also has a mode named "Preview", but that one is only used for a
+ * specific video-recording case. MONITOR mode supports only the YUYV format,
+ * while the JPEG and RAW formats are supported by the CAPTURE mode. This
+ * category also provides options such as zoom, color effects (different from
+ * the effects in PARAMETER mode) and an anti hand-shaking algorithm.
+ */
+#define CAT2_ZOOM              0x01    /* set the zoom position & execute */
+#define CAT2_ZOOM_STEP         0x03    /* set the zoom step */
+#define CAT2_CFIXB             0x09    /* CB value for color effect */
+#define CAT2_CFIXR             0x0a    /* CR value for color effect */
+#define CAT2_COLOR_EFFECT      0x0b    /* set on/off of color effect */
+#define CAT2_CHROMA_LVL                0x0f    /* set chroma level */
+#define CAT2_CHROMA_EN         0x10    /* set on/off of chroma */
+#define CAT2_EDGE_LVL          0x11    /* set sharpness level */
+#define CAT2_EDGE_EN           0x12    /* set on/off sharpness */
+#define CAT2_TONE_CTL          0x25    /* set tone color(contrast) */
+
+#define MON_ZOOM               I2C_REG(CAT_MONITOR, CAT2_ZOOM, 1)
+
+#define MON_CFIXR              I2C_REG(CAT_MONITOR, CAT2_CFIXR, 1)
+#define MON_CFIXB              I2C_REG(CAT_MONITOR, CAT2_CFIXB, 1)
+#define REG_CFIXB_SEPIA                0xd8
+#define REG_CFIXR_SEPIA                0x18
+
+#define MON_EFFECT             I2C_REG(CAT_MONITOR, CAT2_COLOR_EFFECT, 1)
+#define REG_COLOR_EFFECT_OFF   0x00
+#define REG_COLOR_EFFECT_ON    0x01
+
+#define MON_CHROMA_EN          I2C_REG(CAT_MONITOR, CAT2_CHROMA_EN, 1)
+#define MON_CHROMA_LVL         I2C_REG(CAT_MONITOR, CAT2_CHROMA_LVL, 1)
+#define REG_CHROMA_OFF         0x00
+#define REG_CHROMA_ON          0x01
+
+#define MON_EDGE_EN            I2C_REG(CAT_MONITOR, CAT2_EDGE_EN, 1)
+#define MON_EDGE_LVL           I2C_REG(CAT_MONITOR, CAT2_EDGE_LVL, 1)
+#define REG_EDGE_OFF           0x00
+#define REG_EDGE_ON            0x01
+
+#define MON_TONE_CTL           I2C_REG(CAT_MONITOR, CAT2_TONE_CTL, 1)
+
+/*
+ * Category 3 - Auto Exposure
+ *
+ * The M-5MOLS exposure capabilities are similar to those of a digital camera.
+ * This category supports AE locking, several AE modes (exposure ranges), ISO,
+ * flicker compensation, EV bias, shutter control and metering. The maximum
+ * and minimum exposure gain values may differ depending on the M-5MOLS
+ * firmware, so this category also allows reading them. The MONITOR and
+ * CAPTURE modes each have their own gain, shutter and maximum exposure
+ * values.
+ */
+#define CAT3_AE_LOCK           0x00    /* locking Auto exposure */
+#define CAT3_AE_MODE           0x01    /* set AE mode, mode means range */
+#define CAT3_ISO               0x05    /* set ISO */
+#define CAT3_EV_PRESET_MONITOR 0x0a    /* EV(scenemode) preset for MONITOR */
+#define CAT3_EV_PRESET_CAPTURE 0x0b    /* EV(scenemode) preset for CAPTURE */
+#define CAT3_MANUAL_GAIN_MON   0x12    /* metering value for the MONITOR */
+#define CAT3_MAX_GAIN_MON      0x1a    /* max gain value for the MONITOR */
+#define CAT3_MANUAL_GAIN_CAP   0x26    /* metering value for the CAPTURE */
+#define CAT3_AE_INDEX          0x38    /* AE index */
+
+#define AE_LOCK                        I2C_REG(CAT_AE, CAT3_AE_LOCK, 1)
+#define REG_AE_UNLOCK          0x00
+#define REG_AE_LOCK            0x01
+
+#define AE_MODE                        I2C_REG(CAT_AE, CAT3_AE_MODE, 1)
+#define REG_AE_OFF             0x00    /* AE off */
+#define REG_AE_ALL             0x01    /* calc AE in all block integral */
+#define REG_AE_CENTER          0x03    /* calc AE in center weighted */
+#define REG_AE_SPOT            0x06    /* calc AE in specific spot */
+
+#define AE_ISO                 I2C_REG(CAT_AE, CAT3_ISO, 1)
+#define REG_ISO_AUTO           0x00
+#define REG_ISO_50             0x01
+#define REG_ISO_100            0x02
+#define REG_ISO_200            0x03
+#define REG_ISO_400            0x04
+#define REG_ISO_800            0x05
+
+#define AE_EV_PRESET_MONITOR   I2C_REG(CAT_AE, CAT3_EV_PRESET_MONITOR, 1)
+#define AE_EV_PRESET_CAPTURE   I2C_REG(CAT_AE, CAT3_EV_PRESET_CAPTURE, 1)
+#define REG_SCENE_NORMAL       0x00
+#define REG_SCENE_PORTRAIT     0x01
+#define REG_SCENE_LANDSCAPE    0x02
+#define REG_SCENE_SPORTS       0x03
+#define REG_SCENE_PARTY_INDOOR 0x04
+#define REG_SCENE_BEACH_SNOW   0x05
+#define REG_SCENE_SUNSET       0x06
+#define REG_SCENE_DAWN_DUSK    0x07
+#define REG_SCENE_FALL         0x08
+#define REG_SCENE_NIGHT                0x09
+#define REG_SCENE_AGAINST_LIGHT        0x0a
+#define REG_SCENE_FIRE         0x0b
+#define REG_SCENE_TEXT         0x0c
+#define REG_SCENE_CANDLE       0x0d
+
+#define AE_MAN_GAIN_MON                I2C_REG(CAT_AE, CAT3_MANUAL_GAIN_MON, 2)
+#define AE_MAX_GAIN_MON                I2C_REG(CAT_AE, CAT3_MAX_GAIN_MON, 2)
+#define AE_MAN_GAIN_CAP                I2C_REG(CAT_AE, CAT3_MANUAL_GAIN_CAP, 2)
+
+#define AE_INDEX               I2C_REG(CAT_AE, CAT3_AE_INDEX, 1)
+#define REG_AE_INDEX_20_NEG    0x00
+#define REG_AE_INDEX_15_NEG    0x01
+#define REG_AE_INDEX_10_NEG    0x02
+#define REG_AE_INDEX_05_NEG    0x03
+#define REG_AE_INDEX_00                0x04
+#define REG_AE_INDEX_05_POS    0x05
+#define REG_AE_INDEX_10_POS    0x06
+#define REG_AE_INDEX_15_POS    0x07
+#define REG_AE_INDEX_20_POS    0x08
+
+/*
+ * Category 6 - White Balance
+ *
+ * This category provides AWB locking, mode, preset, speed, gain bias, etc.
+ */
+#define CAT6_AWB_LOCK          0x00    /* locking Auto Whitebalance */
+#define CAT6_AWB_MODE          0x02    /* set Auto or Manual */
+#define CAT6_AWB_MANUAL                0x03    /* set Manual(preset) value */
+
+#define AWB_LOCK               I2C_REG(CAT_WB, CAT6_AWB_LOCK, 1)
+#define REG_AWB_UNLOCK         0x00
+#define REG_AWB_LOCK           0x01
+
+#define AWB_MODE               I2C_REG(CAT_WB, CAT6_AWB_MODE, 1)
+#define REG_AWB_AUTO           0x01    /* AWB off */
+#define REG_AWB_PRESET         0x02    /* AWB preset */
+
+#define AWB_MANUAL             I2C_REG(CAT_WB, CAT6_AWB_MANUAL, 1)
+#define REG_AWB_INCANDESCENT   0x01
+#define REG_AWB_FLUORESCENT_1  0x02
+#define REG_AWB_FLUORESCENT_2  0x03
+#define REG_AWB_DAYLIGHT       0x04
+#define REG_AWB_CLOUDY         0x05
+#define REG_AWB_SHADE          0x06
+#define REG_AWB_HORIZON                0x07
+#define REG_AWB_LEDLIGHT       0x09
+
+/*
+ * Category 7 - EXIF information
+ */
+#define CAT7_INFO_EXPTIME_NU   0x00
+#define CAT7_INFO_EXPTIME_DE   0x04
+#define CAT7_INFO_TV_NU                0x08
+#define CAT7_INFO_TV_DE                0x0c
+#define CAT7_INFO_AV_NU                0x10
+#define CAT7_INFO_AV_DE                0x14
+#define CAT7_INFO_BV_NU                0x18
+#define CAT7_INFO_BV_DE                0x1c
+#define CAT7_INFO_EBV_NU       0x20
+#define CAT7_INFO_EBV_DE       0x24
+#define CAT7_INFO_ISO          0x28
+#define CAT7_INFO_FLASH                0x2a
+#define CAT7_INFO_SDR          0x2c
+#define CAT7_INFO_QVAL         0x2e
+
+#define EXIF_INFO_EXPTIME_NU   I2C_REG(CAT_EXIF, CAT7_INFO_EXPTIME_NU, 4)
+#define EXIF_INFO_EXPTIME_DE   I2C_REG(CAT_EXIF, CAT7_INFO_EXPTIME_DE, 4)
+#define EXIF_INFO_TV_NU                I2C_REG(CAT_EXIF, CAT7_INFO_TV_NU, 4)
+#define EXIF_INFO_TV_DE                I2C_REG(CAT_EXIF, CAT7_INFO_TV_DE, 4)
+#define EXIF_INFO_AV_NU                I2C_REG(CAT_EXIF, CAT7_INFO_AV_NU, 4)
+#define EXIF_INFO_AV_DE                I2C_REG(CAT_EXIF, CAT7_INFO_AV_DE, 4)
+#define EXIF_INFO_BV_NU                I2C_REG(CAT_EXIF, CAT7_INFO_BV_NU, 4)
+#define EXIF_INFO_BV_DE                I2C_REG(CAT_EXIF, CAT7_INFO_BV_DE, 4)
+#define EXIF_INFO_EBV_NU       I2C_REG(CAT_EXIF, CAT7_INFO_EBV_NU, 4)
+#define EXIF_INFO_EBV_DE       I2C_REG(CAT_EXIF, CAT7_INFO_EBV_DE, 4)
+#define EXIF_INFO_ISO          I2C_REG(CAT_EXIF, CAT7_INFO_ISO, 2)
+#define EXIF_INFO_FLASH                I2C_REG(CAT_EXIF, CAT7_INFO_FLASH, 2)
+#define EXIF_INFO_SDR          I2C_REG(CAT_EXIF, CAT7_INFO_SDR, 2)
+#define EXIF_INFO_QVAL         I2C_REG(CAT_EXIF, CAT7_INFO_QVAL, 2)
+
+/*
+ * Category 9 - Face Detection
+ */
+#define CAT9_FD_CTL            0x00
+
+#define FD_CTL                 I2C_REG(CAT_FD, CAT9_FD_CTL, 1)
+#define BIT_FD_EN              0
+#define BIT_FD_DRAW_FACE_FRAME 4
+#define BIT_FD_DRAW_SMILE_LVL  6
+#define REG_FD(shift)          (1 << shift)
+#define REG_FD_OFF             0x0
+
+/*
+ * Category A - Lens Parameter
+ */
+#define CATA_AF_MODE           0x01
+#define CATA_AF_EXECUTE                0x02
+#define CATA_AF_STATUS         0x03
+#define CATA_AF_VERSION                0x0a
+
+#define AF_MODE                        I2C_REG(CAT_LENS, CATA_AF_MODE, 1)
+#define REG_AF_NORMAL          0x00    /* Normal AF, one time */
+#define REG_AF_MACRO           0x01    /* Macro AF, one time */
+#define REG_AF_POWEROFF                0x07
+
+#define AF_EXECUTE             I2C_REG(CAT_LENS, CATA_AF_EXECUTE, 1)
+#define REG_AF_STOP            0x00
+#define REG_AF_EXE_AUTO                0x01
+#define REG_AF_EXE_CAF         0x02
+
+#define AF_STATUS              I2C_REG(CAT_LENS, CATA_AF_STATUS, 1)
+#define REG_AF_FAIL            0x00
+#define REG_AF_SUCCESS         0x02
+#define REG_AF_IDLE            0x04
+#define REG_AF_BUSY            0x05
+
+#define AF_VERSION             I2C_REG(CAT_LENS, CATA_AF_VERSION, 1)
+
+/*
+ * Category B - CAPTURE Parameter
+ */
+#define CATB_YUVOUT_MAIN       0x00
+#define CATB_MAIN_IMAGE_SIZE   0x01
+#define CATB_MCC_MODE          0x1d
+#define CATB_WDR_EN            0x2c
+#define CATB_LIGHT_CTRL                0x40
+#define CATB_FLASH_CTRL                0x41
+
+#define CAPP_YUVOUT_MAIN       I2C_REG(CAT_CAPT_PARM, CATB_YUVOUT_MAIN, 1)
+#define REG_YUV422             0x00
+#define REG_BAYER10            0x05
+#define REG_BAYER8             0x06
+#define REG_JPEG               0x10
+
+#define CAPP_MAIN_IMAGE_SIZE   I2C_REG(CAT_CAPT_PARM, CATB_MAIN_IMAGE_SIZE, 1)
+
+#define CAPP_MCC_MODE          I2C_REG(CAT_CAPT_PARM, CATB_MCC_MODE, 1)
+#define REG_MCC_OFF            0x00
+#define REG_MCC_NORMAL         0x01
+
+#define CAPP_WDR_EN            I2C_REG(CAT_CAPT_PARM, CATB_WDR_EN, 1)
+#define REG_WDR_OFF            0x00
+#define REG_WDR_ON             0x01
+#define REG_WDR_AUTO           0x02
+
+#define CAPP_LIGHT_CTRL                I2C_REG(CAT_CAPT_PARM, CATB_LIGHT_CTRL, 1)
+#define REG_LIGHT_OFF          0x00
+#define REG_LIGHT_ON           0x01
+#define REG_LIGHT_AUTO         0x02
+
+#define CAPP_FLASH_CTRL                I2C_REG(CAT_CAPT_PARM, CATB_FLASH_CTRL, 1)
+#define REG_FLASH_OFF          0x00
+#define REG_FLASH_ON           0x01
+#define REG_FLASH_AUTO         0x02
+
+/*
+ * Category C - CAPTURE Control
+ */
+#define CATC_CAP_MODE          0x00
+#define CATC_CAP_SEL_FRAME     0x06    /* It determines Single or Multi */
+#define CATC_CAP_START         0x09
+#define CATC_CAP_IMAGE_SIZE    0x0d
+#define CATC_CAP_THUMB_SIZE    0x11
+
+#define CAPC_MODE              I2C_REG(CAT_CAPT_CTRL, CATC_CAP_MODE, 1)
+#define REG_CAP_NONE           0x00
+#define REG_CAP_ANTI_SHAKE     0x02
+
+#define CAPC_SEL_FRAME         I2C_REG(CAT_CAPT_CTRL, CATC_CAP_SEL_FRAME, 1)
+
+#define CAPC_START             I2C_REG(CAT_CAPT_CTRL, CATC_CAP_START, 1)
+#define REG_CAP_START_MAIN     0x01
+#define REG_CAP_START_THUMB    0x03
+
+#define CAPC_IMAGE_SIZE                I2C_REG(CAT_CAPT_CTRL, CATC_CAP_IMAGE_SIZE, 1)
+#define CAPC_THUMB_SIZE                I2C_REG(CAT_CAPT_CTRL, CATC_CAP_THUMB_SIZE, 1)
+
+/*
+ * Category F - Flash
+ *
+ * This category provides functions related to the internal flash memory
+ * (firmware storage) and system startup.
+ */
+#define CATF_CAM_START         0x12    /* It starts internal ARM core booting
+                                        * after power-up */
+
+#define FLASH_CAM_START                I2C_REG(CAT_FLASH, CATF_CAM_START, 1)
+#define REG_START_ARM_BOOT     0x01
+
+#endif /* M5MOLS_REG_H */
index 84d4c7c83435caa355971c1bd0536d27253e0bd3..fc611ebeb82c3a550b3d2425c58927813dfb448d 100644 (file)
@@ -24,7 +24,6 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/dmaengine.h>
-#include <linux/mfd/core.h>
 #include <linux/scatterlist.h>
 #include <linux/interrupt.h>
 #include <linux/list.h>
@@ -791,7 +790,7 @@ static int __devinit timblogiw_probe(struct platform_device *pdev)
 {
        int err;
        struct timblogiw *lw = NULL;
-       struct timb_video_platform_data *pdata = mfd_get_data(pdev);
+       struct timb_video_platform_data *pdata = pdev->dev.platform_data;
 
        if (!pdata) {
                dev_err(&pdev->dev, "No platform data\n");
index 968c1994eda01f8ec87b2a95c803fc30a927bbde..2071ca8a2f031d86bb876fde6e3dcd79af31b92a 100644 (file)
@@ -1,3 +1,6 @@
 uvcvideo-objs  := uvc_driver.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_ctrl.o \
                  uvc_status.o uvc_isight.o
+ifeq ($(CONFIG_MEDIA_CONTROLLER),y)
+uvcvideo-objs  += uvc_entity.o
+endif
 obj-$(CONFIG_USB_VIDEO_CLASS) += uvcvideo.o
index 823f4b3897459c7dc335c7ba3babb678c02261bb..b6eae48d7fb802f53a950b90ef3f9107f96e9271 100644 (file)
@@ -248,7 +248,7 @@ uint32_t uvc_fraction_to_interval(uint32_t numerator, uint32_t denominator)
  * Terminal and unit management
  */
 
-static struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id)
+struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id)
 {
        struct uvc_entity *entity;
 
@@ -795,9 +795,12 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
        struct uvc_entity *entity;
        unsigned int num_inputs;
        unsigned int size;
+       unsigned int i;
 
+       extra_size = ALIGN(extra_size, sizeof(*entity->pads));
        num_inputs = (type & UVC_TERM_OUTPUT) ? num_pads : num_pads - 1;
-       size = sizeof(*entity) + extra_size + num_inputs;
+       size = sizeof(*entity) + extra_size + sizeof(*entity->pads) * num_pads
+            + num_inputs;
        entity = kzalloc(size, GFP_KERNEL);
        if (entity == NULL)
                return NULL;
@@ -805,8 +808,17 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id,
        entity->id = id;
        entity->type = type;
 
+       entity->num_links = 0;
+       entity->num_pads = num_pads;
+       entity->pads = ((void *)(entity + 1)) + extra_size;
+
+       for (i = 0; i < num_inputs; ++i)
+               entity->pads[i].flags = MEDIA_PAD_FL_SINK;
+       if (!UVC_ENTITY_IS_OTERM(entity))
+               entity->pads[num_pads-1].flags = MEDIA_PAD_FL_SOURCE;
+
        entity->bNrInPins = num_inputs;
-       entity->baSourceID = ((__u8 *)entity) + sizeof(*entity) + extra_size;
+       entity->baSourceID = (__u8 *)(&entity->pads[num_pads]);
 
        return entity;
 }
@@ -1585,6 +1597,13 @@ static void uvc_delete(struct uvc_device *dev)
        uvc_status_cleanup(dev);
        uvc_ctrl_cleanup_device(dev);
 
+       if (dev->vdev.dev)
+               v4l2_device_unregister(&dev->vdev);
+#ifdef CONFIG_MEDIA_CONTROLLER
+       if (media_devnode_is_registered(&dev->mdev.devnode))
+               media_device_unregister(&dev->mdev);
+#endif
+
        list_for_each_safe(p, n, &dev->chains) {
                struct uvc_video_chain *chain;
                chain = list_entry(p, struct uvc_video_chain, list);
@@ -1594,6 +1613,13 @@ static void uvc_delete(struct uvc_device *dev)
        list_for_each_safe(p, n, &dev->entities) {
                struct uvc_entity *entity;
                entity = list_entry(p, struct uvc_entity, list);
+#ifdef CONFIG_MEDIA_CONTROLLER
+               uvc_mc_cleanup_entity(entity);
+#endif
+               if (entity->vdev) {
+                       video_device_release(entity->vdev);
+                       entity->vdev = NULL;
+               }
                kfree(entity);
        }
 
@@ -1616,8 +1642,6 @@ static void uvc_release(struct video_device *vdev)
        struct uvc_streaming *stream = video_get_drvdata(vdev);
        struct uvc_device *dev = stream->dev;
 
-       video_device_release(vdev);
-
        /* Decrement the registered streams count and delete the device when it
         * reaches zero.
         */
@@ -1682,7 +1706,7 @@ static int uvc_register_video(struct uvc_device *dev,
         * unregistered before the reference is released, so we don't need to
         * get another one.
         */
-       vdev->parent = &dev->intf->dev;
+       vdev->v4l2_dev = &dev->vdev;
        vdev->fops = &uvc_fops;
        vdev->release = uvc_release;
        strlcpy(vdev->name, dev->name, sizeof vdev->name);
@@ -1731,6 +1755,8 @@ static int uvc_register_terms(struct uvc_device *dev,
                ret = uvc_register_video(dev, stream);
                if (ret < 0)
                        return ret;
+
+               term->vdev = stream->vdev;
        }
 
        return 0;
@@ -1745,6 +1771,14 @@ static int uvc_register_chains(struct uvc_device *dev)
                ret = uvc_register_terms(dev, chain);
                if (ret < 0)
                        return ret;
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+               ret = uvc_mc_register_entities(chain);
+               if (ret < 0) {
+                       uvc_printk(KERN_INFO, "Failed to register entites "
+                               "(%d).\n", ret);
+               }
+#endif
        }
 
        return 0;
@@ -1814,6 +1848,24 @@ static int uvc_probe(struct usb_interface *intf,
                        "linux-uvc-devel mailing list.\n");
        }
 
+       /* Register the media and V4L2 devices. */
+#ifdef CONFIG_MEDIA_CONTROLLER
+       dev->mdev.dev = &intf->dev;
+       strlcpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model));
+       if (udev->serial)
+               strlcpy(dev->mdev.serial, udev->serial,
+                       sizeof(dev->mdev.serial));
+       strcpy(dev->mdev.bus_info, udev->devpath);
+       dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
+       dev->mdev.driver_version = DRIVER_VERSION_NUMBER;
+       if (media_device_register(&dev->mdev) < 0)
+               goto error;
+
+       dev->vdev.mdev = &dev->mdev;
+#endif
+       if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
+               goto error;
+
        /* Initialize controls. */
        if (uvc_ctrl_init_device(dev) < 0)
                goto error;
@@ -1822,7 +1874,7 @@ static int uvc_probe(struct usb_interface *intf,
        if (uvc_scan_device(dev) < 0)
                goto error;
 
-       /* Register video devices. */
+       /* Register video device nodes. */
        if (uvc_register_chains(dev) < 0)
                goto error;
 
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c
new file mode 100644 (file)
index 0000000..ede7852
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ *      uvc_entity.c  --  USB Video Class driver
+ *
+ *      Copyright (C) 2005-2011
+ *          Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ *      This program is free software; you can redistribute it and/or modify
+ *      it under the terms of the GNU General Public License as published by
+ *      the Free Software Foundation; either version 2 of the License, or
+ *      (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-common.h>
+
+#include "uvcvideo.h"
+
+/* ------------------------------------------------------------------------
+ * Video subdevices registration and unregistration
+ */
+
+static int uvc_mc_register_entity(struct uvc_video_chain *chain,
+       struct uvc_entity *entity)
+{
+       const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
+       struct uvc_entity *remote;
+       unsigned int i;
+       u8 remote_pad;
+       int ret;
+
+       for (i = 0; i < entity->num_pads; ++i) {
+               struct media_entity *source;
+               struct media_entity *sink;
+
+               if (!(entity->pads[i].flags & MEDIA_PAD_FL_SINK))
+                       continue;
+
+               remote = uvc_entity_by_id(chain->dev, entity->baSourceID[i]);
+               if (remote == NULL)
+                       return -EINVAL;
+
+               source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
+                      ? &remote->vdev->entity : &remote->subdev.entity;
+               sink = (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
+                    ? &entity->vdev->entity : &entity->subdev.entity;
+
+               remote_pad = remote->num_pads - 1;
+               ret = media_entity_create_link(source, remote_pad,
+                                              sink, i, flags);
+               if (ret < 0)
+                       return ret;
+       }
+
+       if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING)
+               ret = v4l2_device_register_subdev(&chain->dev->vdev,
+                                                 &entity->subdev);
+
+       return ret;
+}
+
+static struct v4l2_subdev_ops uvc_subdev_ops = {
+};
+
+void uvc_mc_cleanup_entity(struct uvc_entity *entity)
+{
+       if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING)
+               media_entity_cleanup(&entity->subdev.entity);
+       else if (entity->vdev != NULL)
+               media_entity_cleanup(&entity->vdev->entity);
+}
+
+static int uvc_mc_init_entity(struct uvc_entity *entity)
+{
+       int ret;
+
+       if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) {
+               v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops);
+               strlcpy(entity->subdev.name, entity->name,
+                       sizeof(entity->subdev.name));
+
+               ret = media_entity_init(&entity->subdev.entity,
+                                       entity->num_pads, entity->pads, 0);
+       } else
+               ret = media_entity_init(&entity->vdev->entity,
+                                       entity->num_pads, entity->pads, 0);
+
+       return ret;
+}
+
+int uvc_mc_register_entities(struct uvc_video_chain *chain)
+{
+       struct uvc_entity *entity;
+       int ret;
+
+       list_for_each_entry(entity, &chain->entities, chain) {
+               ret = uvc_mc_init_entity(entity);
+               if (ret < 0) {
+                       uvc_printk(KERN_INFO, "Failed to initialize entity for "
+                                  "entity %u\n", entity->id);
+                       return ret;
+               }
+       }
+
+       list_for_each_entry(entity, &chain->entities, chain) {
+               ret = uvc_mc_register_entity(chain, entity);
+               if (ret < 0) {
+                       uvc_printk(KERN_INFO, "Failed to register entity for "
+                                  "entity %u\n", entity->id);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
index 7cf224bae2e527eb2d14a3baf53f6d31168d241c..20107fd3574da44508f69835e5fc098b3eec86a6 100644 (file)
@@ -98,8 +98,11 @@ struct uvc_xu_control {
 #ifdef __KERNEL__
 
 #include <linux/poll.h>
+#include <linux/usb.h>
 #include <linux/usb/video.h>
 #include <linux/uvcvideo.h>
+#include <media/media-device.h>
+#include <media/v4l2-device.h>
 
 /* --------------------------------------------------------------------------
  * UVC constants
@@ -301,6 +304,13 @@ struct uvc_entity {
        __u16 type;
        char name[64];
 
+       /* Media controller-related fields. */
+       struct video_device *vdev;
+       struct v4l2_subdev subdev;
+       unsigned int num_pads;
+       unsigned int num_links;
+       struct media_pad *pads;
+
        union {
                struct {
                        __u16 wObjectiveFocalLengthMin;
@@ -504,6 +514,10 @@ struct uvc_device {
        atomic_t nmappings;
 
        /* Video control interface */
+#ifdef CONFIG_MEDIA_CONTROLLER
+       struct media_device mdev;
+#endif
+       struct v4l2_device vdev;
        __u16 uvc_version;
        __u32 clock_frequency;
 
@@ -583,6 +597,8 @@ extern unsigned int uvc_timeout_param;
 /* Core driver */
 extern struct uvc_driver uvc_driver;
 
+extern struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id);
+
 /* Video buffers queue management. */
 extern void uvc_queue_init(struct uvc_video_queue *queue,
                enum v4l2_buf_type type, int drop_corrupted);
@@ -616,6 +632,10 @@ static inline int uvc_queue_streaming(struct uvc_video_queue *queue)
 /* V4L2 interface */
 extern const struct v4l2_file_operations uvc_fops;
 
+/* Media controller */
+extern int uvc_mc_register_entities(struct uvc_video_chain *chain);
+extern void uvc_mc_cleanup_entity(struct uvc_entity *entity);
+
 /* Video */
 extern int uvc_video_init(struct uvc_streaming *stream);
 extern int uvc_video_suspend(struct uvc_streaming *stream);
index 011cb6ce861bcd8f4d820f6b1e77bff51ec9cd99..17dfe9bb6d2745c2bb5f6123b1afbc6c6d997ca6 100644 (file)
 
 #define INT_STATUS_NUM                 3
 
-static struct resource bk_resources[] __initdata = {
+static struct resource bk_resources[] __devinitdata = {
        {PM8606_BACKLIGHT1, PM8606_BACKLIGHT1, "backlight-0", IORESOURCE_IO,},
        {PM8606_BACKLIGHT2, PM8606_BACKLIGHT2, "backlight-1", IORESOURCE_IO,},
        {PM8606_BACKLIGHT3, PM8606_BACKLIGHT3, "backlight-2", IORESOURCE_IO,},
 };
 
-static struct resource led_resources[] __initdata = {
+static struct resource led_resources[] __devinitdata = {
        {PM8606_LED1_RED,   PM8606_LED1_RED,   "led0-red",   IORESOURCE_IO,},
        {PM8606_LED1_GREEN, PM8606_LED1_GREEN, "led0-green", IORESOURCE_IO,},
        {PM8606_LED1_BLUE,  PM8606_LED1_BLUE,  "led0-blue",  IORESOURCE_IO,},
@@ -36,7 +36,7 @@ static struct resource led_resources[] __initdata = {
        {PM8606_LED2_BLUE,  PM8606_LED2_BLUE,  "led1-blue",  IORESOURCE_IO,},
 };
 
-static struct resource regulator_resources[] __initdata = {
+static struct resource regulator_resources[] __devinitdata = {
        {PM8607_ID_BUCK1, PM8607_ID_BUCK1, "buck-1", IORESOURCE_IO,},
        {PM8607_ID_BUCK2, PM8607_ID_BUCK2, "buck-2", IORESOURCE_IO,},
        {PM8607_ID_BUCK3, PM8607_ID_BUCK3, "buck-3", IORESOURCE_IO,},
@@ -57,15 +57,15 @@ static struct resource regulator_resources[] __initdata = {
        {PM8607_ID_LDO15, PM8607_ID_LDO15, "ldo-15", IORESOURCE_IO,},
 };
 
-static struct resource touch_resources[] __initdata = {
+static struct resource touch_resources[] __devinitdata = {
        {PM8607_IRQ_PEN, PM8607_IRQ_PEN, "touch", IORESOURCE_IRQ,},
 };
 
-static struct resource onkey_resources[] __initdata = {
+static struct resource onkey_resources[] __devinitdata = {
        {PM8607_IRQ_ONKEY, PM8607_IRQ_ONKEY, "onkey", IORESOURCE_IRQ,},
 };
 
-static struct resource codec_resources[] __initdata = {
+static struct resource codec_resources[] __devinitdata = {
        /* Headset microphone insertion or removal */
        {PM8607_IRQ_MICIN,   PM8607_IRQ_MICIN,   "micin",   IORESOURCE_IRQ,},
        /* Hook-switch press or release */
@@ -76,12 +76,12 @@ static struct resource codec_resources[] __initdata = {
        {PM8607_IRQ_AUDIO_SHORT, PM8607_IRQ_AUDIO_SHORT, "audio-short", IORESOURCE_IRQ,},
 };
 
-static struct resource battery_resources[] __initdata = {
+static struct resource battery_resources[] __devinitdata = {
        {PM8607_IRQ_CC,  PM8607_IRQ_CC,  "columb counter", IORESOURCE_IRQ,},
        {PM8607_IRQ_BAT, PM8607_IRQ_BAT, "battery",        IORESOURCE_IRQ,},
 };
 
-static struct resource charger_resources[] __initdata = {
+static struct resource charger_resources[] __devinitdata = {
        {PM8607_IRQ_CHG,  PM8607_IRQ_CHG,  "charger detect",  IORESOURCE_IRQ,},
        {PM8607_IRQ_CHG_DONE,  PM8607_IRQ_CHG_DONE,  "charging done",       IORESOURCE_IRQ,},
        {PM8607_IRQ_CHG_FAULT, PM8607_IRQ_CHG_FAULT, "charging timeout",    IORESOURCE_IRQ,},
@@ -90,13 +90,17 @@ static struct resource charger_resources[] __initdata = {
        {PM8607_IRQ_VCHG, PM8607_IRQ_VCHG, "vchg voltage",    IORESOURCE_IRQ,},
 };
 
-static struct mfd_cell bk_devs[] __initdata = {
+static struct resource rtc_resources[] __devinitdata = {
+       {PM8607_IRQ_RTC, PM8607_IRQ_RTC, "rtc", IORESOURCE_IRQ,},
+};
+
+static struct mfd_cell bk_devs[] = {
        {"88pm860x-backlight", 0,},
        {"88pm860x-backlight", 1,},
        {"88pm860x-backlight", 2,},
 };
 
-static struct mfd_cell led_devs[] __initdata = {
+static struct mfd_cell led_devs[] = {
        {"88pm860x-led", 0,},
        {"88pm860x-led", 1,},
        {"88pm860x-led", 2,},
@@ -105,7 +109,7 @@ static struct mfd_cell led_devs[] __initdata = {
        {"88pm860x-led", 5,},
 };
 
-static struct mfd_cell regulator_devs[] __initdata = {
+static struct mfd_cell regulator_devs[] = {
        {"88pm860x-regulator", 0,},
        {"88pm860x-regulator", 1,},
        {"88pm860x-regulator", 2,},
@@ -126,15 +130,15 @@ static struct mfd_cell regulator_devs[] __initdata = {
        {"88pm860x-regulator", 17,},
 };
 
-static struct mfd_cell touch_devs[] __initdata = {
+static struct mfd_cell touch_devs[] = {
        {"88pm860x-touch", -1,},
 };
 
-static struct mfd_cell onkey_devs[] __initdata = {
+static struct mfd_cell onkey_devs[] = {
        {"88pm860x-onkey", -1,},
 };
 
-static struct mfd_cell codec_devs[] __initdata = {
+static struct mfd_cell codec_devs[] = {
        {"88pm860x-codec", -1,},
 };
 
@@ -143,11 +147,10 @@ static struct mfd_cell power_devs[] = {
        {"88pm860x-charger", -1,},
 };
 
-static struct pm860x_backlight_pdata bk_pdata[ARRAY_SIZE(bk_devs)];
-static struct pm860x_led_pdata led_pdata[ARRAY_SIZE(led_devs)];
-static struct regulator_init_data regulator_pdata[ARRAY_SIZE(regulator_devs)];
-static struct pm860x_touch_pdata touch_pdata;
-static struct pm860x_power_pdata power_pdata;
+static struct mfd_cell rtc_devs[] = {
+       {"88pm860x-rtc", -1,},
+};
+
 
 struct pm860x_irq_data {
        int     reg;
@@ -501,7 +504,6 @@ static void device_irq_exit(struct pm860x_chip *chip)
 }
 
 static void __devinit device_bk_init(struct pm860x_chip *chip,
-                                    struct i2c_client *i2c,
                                     struct pm860x_platform_data *pdata)
 {
        int ret;
@@ -514,13 +516,12 @@ static void __devinit device_bk_init(struct pm860x_chip *chip,
                pdata->num_backlights = ARRAY_SIZE(bk_devs);
 
        for (i = 0; i < pdata->num_backlights; i++) {
-               memcpy(&bk_pdata[i], &pdata->backlight[i],
-                       sizeof(struct pm860x_backlight_pdata));
-               bk_devs[i].mfd_data = &bk_pdata[i];
+               bk_devs[i].platform_data = &pdata->backlight[i];
+               bk_devs[i].pdata_size = sizeof(struct pm860x_backlight_pdata);
 
                for (j = 0; j < ARRAY_SIZE(bk_devs); j++) {
                        id = bk_resources[j].start;
-                       if (bk_pdata[i].flags != id)
+                       if (pdata->backlight[i].flags != id)
                                continue;
 
                        bk_devs[i].num_resources = 1;
@@ -538,7 +539,6 @@ static void __devinit device_bk_init(struct pm860x_chip *chip,
 }
 
 static void __devinit device_led_init(struct pm860x_chip *chip,
-                                     struct i2c_client *i2c,
                                      struct pm860x_platform_data *pdata)
 {
        int ret;
@@ -551,13 +551,12 @@ static void __devinit device_led_init(struct pm860x_chip *chip,
                pdata->num_leds = ARRAY_SIZE(led_devs);
 
        for (i = 0; i < pdata->num_leds; i++) {
-               memcpy(&led_pdata[i], &pdata->led[i],
-                       sizeof(struct pm860x_led_pdata));
-               led_devs[i].mfd_data = &led_pdata[i];
+               led_devs[i].platform_data = &pdata->led[i];
+               led_devs[i].pdata_size = sizeof(struct pm860x_led_pdata);
 
                for (j = 0; j < ARRAY_SIZE(led_devs); j++) {
                        id = led_resources[j].start;
-                       if (led_pdata[i].flags != id)
+                       if (pdata->led[i].flags != id)
                                continue;
 
                        led_devs[i].num_resources = 1;
@@ -575,12 +574,11 @@ static void __devinit device_led_init(struct pm860x_chip *chip,
 }
 
 static void __devinit device_regulator_init(struct pm860x_chip *chip,
-                                           struct i2c_client *i2c,
                                            struct pm860x_platform_data *pdata)
 {
        struct regulator_init_data *initdata;
        int ret;
-       int i, j;
+       int i, seq;
 
        if ((pdata == NULL) || (pdata->regulator == NULL))
                return;
@@ -588,41 +586,21 @@ static void __devinit device_regulator_init(struct pm860x_chip *chip,
        if (pdata->num_regulators > ARRAY_SIZE(regulator_devs))
                pdata->num_regulators = ARRAY_SIZE(regulator_devs);
 
-       for (i = 0, j = -1; i < pdata->num_regulators; i++) {
+       for (i = 0, seq = -1; i < pdata->num_regulators; i++) {
                initdata = &pdata->regulator[i];
-               if (strstr(initdata->constraints.name, "BUCK")) {
-                       sscanf(initdata->constraints.name, "BUCK%d", &j);
-                       /* BUCK1 ~ BUCK3 */
-                       if ((j < 1) || (j > 3)) {
-                               dev_err(chip->dev, "Failed to add constraint "
-                                       "(%s)\n", initdata->constraints.name);
-                               goto out;
-                       }
-                       j = (j - 1) + PM8607_ID_BUCK1;
-               }
-               if (strstr(initdata->constraints.name, "LDO")) {
-                       sscanf(initdata->constraints.name, "LDO%d", &j);
-                       /* LDO1 ~ LDO15 */
-                       if ((j < 1) || (j > 15)) {
-                               dev_err(chip->dev, "Failed to add constraint "
-                                       "(%s)\n", initdata->constraints.name);
-                               goto out;
-                       }
-                       j = (j - 1) + PM8607_ID_LDO1;
-               }
-               if (j == -1) {
-                       dev_err(chip->dev, "Failed to add constraint (%s)\n",
-                               initdata->constraints.name);
+               seq = *(unsigned int *)initdata->driver_data;
+               if ((seq < 0) || (seq > PM8607_ID_RG_MAX)) {
+                       dev_err(chip->dev, "Wrong ID(%d) on regulator(%s)\n",
+                               seq, initdata->constraints.name);
                        goto out;
                }
-               memcpy(&regulator_pdata[i], &pdata->regulator[i],
-                       sizeof(struct regulator_init_data));
-               regulator_devs[i].mfd_data = &regulator_pdata[i];
+               regulator_devs[i].platform_data = &pdata->regulator[i];
+               regulator_devs[i].pdata_size = sizeof(struct regulator_init_data);
                regulator_devs[i].num_resources = 1;
-               regulator_devs[i].resources = &regulator_resources[j];
+               regulator_devs[i].resources = &regulator_resources[seq];
 
                ret = mfd_add_devices(chip->dev, 0, &regulator_devs[i], 1,
-                                     &regulator_resources[j], 0);
+                                     &regulator_resources[seq], 0);
                if (ret < 0) {
                        dev_err(chip->dev, "Failed to add regulator subdev\n");
                        goto out;
@@ -632,17 +610,35 @@ out:
        return;
 }
 
+static void __devinit device_rtc_init(struct pm860x_chip *chip,
+                                     struct pm860x_platform_data *pdata)
+{
+       int ret;
+
+       if (pdata == NULL)
+               return;
+
+       rtc_devs[0].platform_data = pdata->rtc;
+       rtc_devs[0].pdata_size = sizeof(struct pm860x_rtc_pdata);
+       rtc_devs[0].num_resources = ARRAY_SIZE(rtc_resources);
+       rtc_devs[0].resources = &rtc_resources[0];
+       ret = mfd_add_devices(chip->dev, 0, &rtc_devs[0],
+                             ARRAY_SIZE(rtc_devs), &rtc_resources[0],
+                             chip->irq_base);
+       if (ret < 0)
+               dev_err(chip->dev, "Failed to add rtc subdev\n");
+}
+
 static void __devinit device_touch_init(struct pm860x_chip *chip,
-                                       struct i2c_client *i2c,
                                        struct pm860x_platform_data *pdata)
 {
        int ret;
 
-       if ((pdata == NULL) || (pdata->touch == NULL))
+       if (pdata == NULL)
                return;
 
-       memcpy(&touch_pdata, pdata->touch, sizeof(struct pm860x_touch_pdata));
-       touch_devs[0].mfd_data = &touch_pdata;
+       touch_devs[0].platform_data = pdata->touch;
+       touch_devs[0].pdata_size = sizeof(struct pm860x_touch_pdata);
        touch_devs[0].num_resources = ARRAY_SIZE(touch_resources);
        touch_devs[0].resources = &touch_resources[0];
        ret = mfd_add_devices(chip->dev, 0, &touch_devs[0],
@@ -653,16 +649,15 @@ static void __devinit device_touch_init(struct pm860x_chip *chip,
 }
 
 static void __devinit device_power_init(struct pm860x_chip *chip,
-                                       struct i2c_client *i2c,
                                        struct pm860x_platform_data *pdata)
 {
        int ret;
 
-       if ((pdata == NULL) || (pdata->power == NULL))
+       if (pdata == NULL)
                return;
 
-       memcpy(&power_pdata, pdata->power, sizeof(struct pm860x_power_pdata));
-       power_devs[0].mfd_data = &power_pdata;
+       power_devs[0].platform_data = pdata->power;
+       power_devs[0].pdata_size = sizeof(struct pm860x_power_pdata);
        power_devs[0].num_resources = ARRAY_SIZE(battery_resources);
        power_devs[0].resources = &battery_resources[0],
        ret = mfd_add_devices(chip->dev, 0, &power_devs[0], 1,
@@ -670,7 +665,8 @@ static void __devinit device_power_init(struct pm860x_chip *chip,
        if (ret < 0)
                dev_err(chip->dev, "Failed to add battery subdev\n");
 
-       power_devs[1].mfd_data = &power_pdata;
+       power_devs[1].platform_data = pdata->power;
+       power_devs[1].pdata_size = sizeof(struct pm860x_power_pdata);
        power_devs[1].num_resources = ARRAY_SIZE(charger_resources);
        power_devs[1].resources = &charger_resources[0],
        ret = mfd_add_devices(chip->dev, 0, &power_devs[1], 1,
@@ -680,7 +676,6 @@ static void __devinit device_power_init(struct pm860x_chip *chip,
 }
 
 static void __devinit device_onkey_init(struct pm860x_chip *chip,
-                                       struct i2c_client *i2c,
                                        struct pm860x_platform_data *pdata)
 {
        int ret;
@@ -695,7 +690,6 @@ static void __devinit device_onkey_init(struct pm860x_chip *chip,
 }
 
 static void __devinit device_codec_init(struct pm860x_chip *chip,
-                                       struct i2c_client *i2c,
                                        struct pm860x_platform_data *pdata)
 {
        int ret;
@@ -763,11 +757,12 @@ static void __devinit device_8607_init(struct pm860x_chip *chip,
        if (ret < 0)
                goto out;
 
-       device_regulator_init(chip, i2c, pdata);
-       device_onkey_init(chip, i2c, pdata);
-       device_touch_init(chip, i2c, pdata);
-       device_power_init(chip, i2c, pdata);
-       device_codec_init(chip, i2c, pdata);
+       device_regulator_init(chip, pdata);
+       device_rtc_init(chip, pdata);
+       device_onkey_init(chip, pdata);
+       device_touch_init(chip, pdata);
+       device_power_init(chip, pdata);
+       device_codec_init(chip, pdata);
 out:
        return;
 }
@@ -779,8 +774,8 @@ int __devinit pm860x_device_init(struct pm860x_chip *chip,
 
        switch (chip->id) {
        case CHIP_PM8606:
-               device_bk_init(chip, chip->client, pdata);
-               device_led_init(chip, chip->client, pdata);
+               device_bk_init(chip, pdata);
+               device_led_init(chip, pdata);
                break;
        case CHIP_PM8607:
                device_8607_init(chip, chip->client, pdata);
@@ -790,8 +785,8 @@ int __devinit pm860x_device_init(struct pm860x_chip *chip,
        if (chip->companion) {
                switch (chip->id) {
                case CHIP_PM8607:
-                       device_bk_init(chip, chip->companion, pdata);
-                       device_led_init(chip, chip->companion, pdata);
+                       device_bk_init(chip, pdata);
+                       device_led_init(chip, pdata);
                        break;
                case CHIP_PM8606:
                        device_8607_init(chip, chip->companion, pdata);
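
Since the cells above now point straight at the caller's platform data and let the mfd core copy it (see the mfd-core.c hunk further down), a child driver reads it back as ordinary platform data. A minimal sketch of that consumer side, for illustration only; the probe function here is hypothetical and not part of this diff.

	#include <linux/init.h>
	#include <linux/platform_device.h>
	#include <linux/mfd/88pm860x.h>

	/* Hypothetical consumer: the 88pm860x-rtc cell's platform data is
	 * duplicated into the child device by the mfd core. */
	static int __devinit pm860x_rtc_probe(struct platform_device *pdev)
	{
		struct pm860x_rtc_pdata *pdata = pdev->dev.platform_data;

		if (pdata == NULL)
			return -EINVAL;

		/* ... set up the RTC using *pdata ... */
		return 0;
	}
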
index 481770ab271618cda7507576358ad21708bfcb02..b6c267724e14b1dabb9b08b6daac2821351f5817 100644 (file)
@@ -157,6 +157,20 @@ config TPS6507X
          This driver can also be built as a module.  If so, the module
          will be called tps6507x.
 
+config MFD_TPS6586X
+       bool "TPS6586x Power Management chips"
+       depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
+       select MFD_CORE
+       help
+         If you say yes here you get support for the TPS6586X series of
+         Power Management chips.
+         This driver provides common support for accessing the device,
+         additional drivers must be enabled in order to use the
+         functionality of the device.
+
+         This driver can also be built as a module.  If so, the module
+         will be called tps6586x.
+
 config MENELAUS
        bool "Texas Instruments TWL92330/Menelaus PM chip"
        depends on I2C=y && ARCH_OMAP2
@@ -455,6 +469,20 @@ config MFD_PCF50633
          facilities, and registers devices for the various functions
          so that function-specific drivers can bind to them.
 
+config PCF50633_ADC
+       tristate "Support for NXP PCF50633 ADC"
+       depends on MFD_PCF50633
+       help
+        Say yes here if you want to include support for ADC in the
+        NXP PCF50633 chip.
+
+config PCF50633_GPIO
+       tristate "Support for NXP PCF50633 GPIO"
+       depends on MFD_PCF50633
+       help
+        Say yes here if you want to include support for the GPIO pins on
+        the PCF50633 chip.
+
 config MFD_MC13783
        tristate
 
@@ -470,20 +498,6 @@ config MFD_MC13XXX
          additional drivers must be enabled in order to use the
          functionality of the device.
 
-config PCF50633_ADC
-       tristate "Support for NXP PCF50633 ADC"
-       depends on MFD_PCF50633
-       help
-        Say yes here if you want to include support for ADC in the
-        NXP PCF50633 chip.
-
-config PCF50633_GPIO
-       tristate "Support for NXP PCF50633 GPIO"
-       depends on MFD_PCF50633
-       help
-        Say yes here if you want to include support GPIO for pins on
-        the PCF50633 chip.
-
 config ABX500_CORE
        bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
        default y if ARCH_U300 || ARCH_U8500
@@ -649,20 +663,6 @@ config MFD_JZ4740_ADC
          Say yes here if you want support for the ADC unit in the JZ4740 SoC.
          This driver is necessary for jz4740-battery and jz4740-hwmon driver.
 
-config MFD_TPS6586X
-       bool "TPS6586x Power Management chips"
-       depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS
-       select MFD_CORE
-       help
-         If you say yes here you get support for the TPS6586X series of
-         Power Management chips.
-         This driver provides common support for accessing the device,
-         additional drivers must be enabled in order to use the
-         functionality of the device.
-
-         This driver can also be built as a module.  If so, the module
-         will be called tps6586x.
-
 config MFD_VX855
        tristate "Support for VIA VX855/VX875 integrated south bridge"
        depends on PCI
@@ -691,6 +691,43 @@ config MFD_OMAP_USB_HOST
          This MFD driver does the required setup functionalities for
          OMAP USB Host drivers.
 
+config MFD_PM8XXX
+       tristate
+
+config MFD_PM8921_CORE
+       tristate "Qualcomm PM8921 PMIC chip"
+       depends on MSM_SSBI
+       select MFD_CORE
+       select MFD_PM8XXX
+       help
+         If you say yes to this option, support will be included for the
+         built-in PM8921 PMIC chip.
+
+         This is required if your board has a PM8921 and uses its features,
+         such as: MPPs, GPIOs, regulators, interrupts, and PWM.
+
+         Say M here if you want to include support for PM8921 chip as a module.
+         This will build a module called "pm8921-core".
+
+config MFD_PM8XXX_IRQ
+       bool "Support for Qualcomm PM8xxx IRQ features"
+       depends on MFD_PM8XXX
+       default y if MFD_PM8XXX
+       help
+         This is the IRQ driver for Qualcomm PM 8xxx PMIC chips.
+
+         This is required to use certain other PM 8xxx features, such as GPIO
+         and MPP.
+
+config MFD_TPS65910
+       bool "TPS65910 Power Management chip"
+       depends on I2C=y
+       select MFD_CORE
+       select GPIO_TPS65910
+       help
+         If you say yes here you get support for the TPS65910 series of
+         Power Management chips.
+
 endif # MFD_SUPPORT
 
 menu "Multimedia Capabilities Port drivers"
index 24aa44448daf5043f926512023d9cb06ff3a5648..efe3cc33ed92ed83a7a8ece78f32686c87bb6170 100644 (file)
@@ -91,3 +91,6 @@ obj-$(CONFIG_MFD_VX855)               += vx855.o
 obj-$(CONFIG_MFD_WL1273_CORE)  += wl1273-core.o
 obj-$(CONFIG_MFD_CS5535)       += cs5535-mfd.o
 obj-$(CONFIG_MFD_OMAP_USB_HOST)        += omap-usb-host.o
+obj-$(CONFIG_MFD_PM8921_CORE)  += pm8921-core.o
+obj-$(CONFIG_MFD_PM8XXX_IRQ)   += pm8xxx-irq.o
+obj-$(CONFIG_MFD_TPS65910)     += tps65910.o tps65910-irq.o
index a751927047ac967b6111359c3d8942253382be12..a20e1c41bed2f67c7fcdf65958ef83e62dde98e2 100644 (file)
@@ -949,8 +949,10 @@ static int __devinit ab3100_probe(struct i2c_client *client,
                goto exit_no_ops;
 
        /* Set up and register the platform devices. */
-       for (i = 0; i < ARRAY_SIZE(ab3100_devs); i++)
-               ab3100_devs[i].mfd_data = ab3100_plf_data;
+       for (i = 0; i < ARRAY_SIZE(ab3100_devs); i++) {
+               ab3100_devs[i].platform_data = ab3100_plf_data;
+               ab3100_devs[i].pdata_size = sizeof(struct ab3100_platform_data);
+       }
 
        err = mfd_add_devices(&client->dev, 0, ab3100_devs,
                ARRAY_SIZE(ab3100_devs), NULL, 0);
index ff86acf3e6bde2b16f6e95f2f8c6af90e92bcd64..3d7dce671b936a1affb6429c2469d56a436e8cf2 100644 (file)
@@ -1320,8 +1320,10 @@ static int __init ab3550_probe(struct i2c_client *client,
                goto exit_no_ops;
 
        /* Set up and register the platform devices. */
-       for (i = 0; i < AB3550_NUM_DEVICES; i++)
-               ab3550_devs[i].mfd_data = ab3550_plf_data->dev_data[i];
+       for (i = 0; i < AB3550_NUM_DEVICES; i++) {
+               ab3550_devs[i].platform_data = ab3550_plf_data->dev_data[i];
+               ab3550_devs[i].pdata_size = ab3550_plf_data->dev_data_sz[i];
+       }
 
        err = mfd_add_devices(&client->dev, 0, ab3550_devs,
                ARRAY_SIZE(ab3550_devs), NULL,
index 67d01c9382844f9e1b6c793181e82cd2865e58c4..fc0c1af1566e08d01b7351de9e1a00466d2c0614 100644 (file)
@@ -254,8 +254,9 @@ static void ab8500_irq_sync_unlock(struct irq_data *data)
                if (new == old)
                        continue;
 
-               /* Interrupt register 12 does'nt exist prior to version 0x20 */
-               if (ab8500_irq_regoffset[i] == 11 && ab8500->chip_id < 0x20)
+               /* Interrupt register 12 doesn't exist prior to version 2.0 */
+               if (ab8500_irq_regoffset[i] == 11 &&
+                       ab8500->chip_id < AB8500_CUT2P0)
                        continue;
 
                ab8500->oldmask[i] = new;
@@ -307,8 +308,8 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
                int status;
                u8 value;
 
-               /* Interrupt register 12 does'nt exist prior to version 0x20 */
-               if (regoffset == 11 && ab8500->chip_id < 0x20)
+               /* Interrupt register 12 doesn't exist prior to version 2.0 */
+               if (regoffset == 11 && ab8500->chip_id < AB8500_CUT2P0)
                        continue;
 
                status = get_register_interruptible(ab8500, AB8500_INTERRUPT,
@@ -724,17 +725,15 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
        if (ret < 0)
                return ret;
 
-       /*
-        * 0x0 - Early Drop
-        * 0x10 - Cut 1.0
-        * 0x11 - Cut 1.1
-        * 0x20 - Cut 2.0
-        * 0x30 - Cut 3.0
-        */
-       if (value == 0x0 || value == 0x10 || value == 0x11 || value == 0x20 ||
-               value == 0x30) {
+       switch (value) {
+       case AB8500_CUTEARLY:
+       case AB8500_CUT1P0:
+       case AB8500_CUT1P1:
+       case AB8500_CUT2P0:
+       case AB8500_CUT3P0:
                dev_info(ab8500->dev, "detected chip, revision: %#x\n", value);
-       } else {
+               break;
+       default:
                dev_err(ab8500->dev, "unknown chip, revision: %#x\n", value);
                return -EINVAL;
        }
@@ -763,8 +762,9 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
 
        /* Clear and mask all interrupts */
        for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) {
-               /* Interrupt register 12 does'nt exist prior to version 0x20 */
-               if (ab8500_irq_regoffset[i] == 11 && ab8500->chip_id < 0x20)
+               /* Interrupt register 12 doesn't exist prior to version 2.0 */
+               if (ab8500_irq_regoffset[i] == 11 &&
+                       ab8500->chip_id < AB8500_CUT2P0)
                        continue;
 
                get_register_interruptible(ab8500, AB8500_INTERRUPT,
index 6421ad1160de2084c2a9a03b69a628c55681f893..f16afb234ff98a250d0ded3a9aac0d1768ab679b 100644 (file)
@@ -57,6 +57,7 @@
 #define SW_AVG_16                      0x60
 #define ADC_SW_CONV                    0x04
 #define EN_ICHAR                       0x80
+#define BTEMP_PULL_UP                  0x08
 #define EN_BUF                         0x40
 #define DIS_ZERO                       0x00
 #define GPADC_BUSY                     0x01
@@ -101,6 +102,7 @@ struct adc_cal_data {
 
 /**
  * struct ab8500_gpadc - AB8500 GPADC device information
+ * @chip_id:                   ABB chip id
  * @dev:                       pointer to the struct device
  * @node:                      a list of AB8500 GPADCs, hence prepared for
                                reentrance
@@ -112,6 +114,7 @@ struct adc_cal_data {
  * @cal_data                   array of ADC calibration data structs
  */
 struct ab8500_gpadc {
+       u8 chip_id;
        struct device *dev;
        struct list_head node;
        struct completion ab8500_gpadc_complete;
@@ -274,6 +277,7 @@ int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 input)
                dev_err(gpadc->dev, "gpadc_conversion: enable gpadc failed\n");
                goto out;
        }
+
        /* Select the input source and set average samples to 16 */
        ret = abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC,
                AB8500_GPADC_CTRL2_REG, (input | SW_AVG_16));
@@ -282,9 +286,11 @@ int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 input)
                        "gpadc_conversion: set avg samples failed\n");
                goto out;
        }
+
        /*
         * Enable ADC, buffering, select rising edge and enable ADC path
-        * charging current sense if it needed
+        * charging current sense if needed; ABB 3.0 needs some special
+        * treatment as well.
         */
        switch (input) {
        case MAIN_CHARGER_C:
@@ -294,6 +300,23 @@ int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 input)
                        EN_BUF | EN_ICHAR,
                        EN_BUF | EN_ICHAR);
                break;
+       case BTEMP_BALL:
+               if (gpadc->chip_id >= AB8500_CUT3P0) {
+                       /* Turn on btemp pull-up on ABB 3.0 */
+                       ret = abx500_mask_and_set_register_interruptible(
+                               gpadc->dev,
+                               AB8500_GPADC, AB8500_GPADC_CTRL1_REG,
+                               EN_BUF | BTEMP_PULL_UP,
+                               EN_BUF | BTEMP_PULL_UP);
+
+                       /*
+                        * A delay might be needed for AB8500 cut 3.0; if not,
+                        * remove this once hardware is available.
+                        */
+                       msleep(1);
+                       break;
+               }
+               /* Intentional fallthrough */
        default:
                ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
                        AB8500_GPADC, AB8500_GPADC_CTRL1_REG, EN_BUF, EN_BUF);
@@ -304,6 +327,7 @@ int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 input)
                        "gpadc_conversion: select falling edge failed\n");
                goto out;
        }
+
        ret = abx500_mask_and_set_register_interruptible(gpadc->dev,
                AB8500_GPADC, AB8500_GPADC_CTRL1_REG, ADC_SW_CONV, ADC_SW_CONV);
        if (ret < 0) {
@@ -552,6 +576,14 @@ static int __devinit ab8500_gpadc_probe(struct platform_device *pdev)
                goto fail;
        }
 
+       /* Get Chip ID of the ABB ASIC  */
+       ret = abx500_get_chip_id(gpadc->dev);
+       if (ret < 0) {
+               dev_err(gpadc->dev, "failed to get chip ID\n");
+               goto fail_irq;
+       }
+       gpadc->chip_id = (u8) ret;
+
        /* VTVout LDO used to power up ab8500-GPADC */
        gpadc->regu = regulator_get(&pdev->dev, "vddadc");
        if (IS_ERR(gpadc->regu)) {
index 0b4d5b23bec9b36d212fa068c6663b8f23365638..c27fd1fc3b86789a73cf725ce57a5863e5175d6f 100644 (file)
@@ -88,19 +88,19 @@ struct asic3 {
 
 static int asic3_gpio_get(struct gpio_chip *chip, unsigned offset);
 
-static inline void asic3_write_register(struct asic3 *asic,
-                                unsigned int reg, u32 value)
+void asic3_write_register(struct asic3 *asic, unsigned int reg, u32 value)
 {
        iowrite16(value, asic->mapping +
                  (reg >> asic->bus_shift));
 }
+EXPORT_SYMBOL_GPL(asic3_write_register);
 
-static inline u32 asic3_read_register(struct asic3 *asic,
-                              unsigned int reg)
+u32 asic3_read_register(struct asic3 *asic, unsigned int reg)
 {
        return ioread16(asic->mapping +
                        (reg >> asic->bus_shift));
 }
+EXPORT_SYMBOL_GPL(asic3_read_register);
 
 static void asic3_set_register(struct asic3 *asic, u32 reg, u32 bits, bool set)
 {
@@ -676,7 +676,8 @@ static struct mfd_cell asic3_cell_ds1wm = {
        .name          = "ds1wm",
        .enable        = ds1wm_enable,
        .disable       = ds1wm_disable,
-       .mfd_data      = &ds1wm_pdata,
+       .platform_data = &ds1wm_pdata,
+       .pdata_size    = sizeof(ds1wm_pdata),
        .num_resources = ARRAY_SIZE(ds1wm_resources),
        .resources     = ds1wm_resources,
 };
@@ -777,12 +778,61 @@ static struct mfd_cell asic3_cell_mmc = {
        .name          = "tmio-mmc",
        .enable        = asic3_mmc_enable,
        .disable       = asic3_mmc_disable,
-       .mfd_data      = &asic3_mmc_data,
+       .platform_data = &asic3_mmc_data,
+       .pdata_size    = sizeof(asic3_mmc_data),
        .num_resources = ARRAY_SIZE(asic3_mmc_resources),
        .resources     = asic3_mmc_resources,
 };
 
+static const int clock_ledn[ASIC3_NUM_LEDS] = {
+       [0] = ASIC3_CLOCK_LED0,
+       [1] = ASIC3_CLOCK_LED1,
+       [2] = ASIC3_CLOCK_LED2,
+};
+
+static int asic3_leds_enable(struct platform_device *pdev)
+{
+       const struct mfd_cell *cell = mfd_get_cell(pdev);
+       struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
+
+       asic3_clk_enable(asic, &asic->clocks[clock_ledn[cell->id]]);
+
+       return 0;
+}
+
+static int asic3_leds_disable(struct platform_device *pdev)
+{
+       const struct mfd_cell *cell = mfd_get_cell(pdev);
+       struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
+
+       asic3_clk_disable(asic, &asic->clocks[clock_ledn[cell->id]]);
+
+       return 0;
+}
+
+static struct mfd_cell asic3_cell_leds[ASIC3_NUM_LEDS] = {
+       [0] = {
+               .name          = "leds-asic3",
+               .id            = 0,
+               .enable        = asic3_leds_enable,
+               .disable       = asic3_leds_disable,
+       },
+       [1] = {
+               .name          = "leds-asic3",
+               .id            = 1,
+               .enable        = asic3_leds_enable,
+               .disable       = asic3_leds_disable,
+       },
+       [2] = {
+               .name          = "leds-asic3",
+               .id            = 2,
+               .enable        = asic3_leds_enable,
+               .disable       = asic3_leds_disable,
+       },
+};
+
 static int __init asic3_mfd_probe(struct platform_device *pdev,
+                                 struct asic3_platform_data *pdata,
                                  struct resource *mem)
 {
        struct asic3 *asic = platform_get_drvdata(pdev);
@@ -806,7 +856,8 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
 
        /* MMC */
        asic->tmio_cnf = ioremap((ASIC3_SD_CONFIG_BASE >> asic->bus_shift) +
-                                mem_sdio->start, 0x400 >> asic->bus_shift);
+                                mem_sdio->start,
+                                ASIC3_SD_CONFIG_SIZE >> asic->bus_shift);
        if (!asic->tmio_cnf) {
                ret = -ENOMEM;
                dev_dbg(asic->dev, "Couldn't ioremap SD_CONFIG\n");
@@ -820,9 +871,23 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
        if (ret < 0)
                goto out;
 
-       if (mem_sdio && (irq >= 0))
+       if (mem_sdio && (irq >= 0)) {
                ret = mfd_add_devices(&pdev->dev, pdev->id,
                        &asic3_cell_mmc, 1, mem_sdio, irq);
+               if (ret < 0)
+                       goto out;
+       }
+
+       if (pdata->leds) {
+               int i;
+
+               for (i = 0; i < ASIC3_NUM_LEDS; ++i) {
+                       asic3_cell_leds[i].platform_data = &pdata->leds[i];
+                       asic3_cell_leds[i].pdata_size = sizeof(pdata->leds[i]);
+               }
+               ret = mfd_add_devices(&pdev->dev, 0,
+                       asic3_cell_leds, ASIC3_NUM_LEDS, NULL, 0);
+       }
 
  out:
        return ret;
@@ -903,7 +968,7 @@ static int __init asic3_probe(struct platform_device *pdev)
         */
        memcpy(asic->clocks, asic3_clk_init, sizeof(asic3_clk_init));
 
-       asic3_mfd_probe(pdev, mem);
+       asic3_mfd_probe(pdev, pdata, mem);
 
        dev_info(asic->dev, "ASIC3 Core driver\n");
 
index 414783b048490b7a03a021781c79acd29d32367c..4e2af2cb2d26a76534c884c3cc57fe97f3c30d52 100644 (file)
@@ -119,12 +119,14 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
        /* Voice codec interface client */
        cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL];
        cell->name = "davinci-vcif";
-       cell->mfd_data = davinci_vc;
+       cell->platform_data = davinci_vc;
+       cell->pdata_size = sizeof(*davinci_vc);
 
        /* Voice codec CQ93VC client */
        cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL];
        cell->name = "cq93vc-codec";
-       cell->mfd_data = davinci_vc;
+       cell->platform_data = davinci_vc;
+       cell->pdata_size = sizeof(*davinci_vc);
 
        ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
                              DAVINCI_VC_CELLS, NULL, 0);
index fb9770b39a3253edcc92a83a3e77fb020b72b93f..2808bd125d1352303ee49ed968aca8913dc2d6c8 100644 (file)
@@ -117,7 +117,8 @@ static struct mfd_cell ds1wm_cell __initdata = {
        .name          = "ds1wm",
        .enable        = ds1wm_enable,
        .disable       = ds1wm_disable,
-       .mfd_data      = &ds1wm_pdata,
+       .platform_data = &ds1wm_pdata,
+       .pdata_size    = sizeof(ds1wm_pdata),
        .num_resources = 2,
        .resources     = ds1wm_resources,
 };
@@ -172,6 +173,8 @@ static int __init pasic3_probe(struct platform_device *pdev)
        }
 
        if (pdata && pdata->led_pdata) {
+               led_cell.platform_data = pdata->led_pdata;
+               led_cell.pdata_size = sizeof(struct pasic3_leds_machinfo);
                ret = mfd_add_devices(&pdev->dev, pdev->id, &led_cell, 1, r, 0);
                if (ret < 0)
                        dev_warn(dev, "failed to register LED device\n");
index fc4191137e9069f20cfd4c87ac76987e8eec3435..5c2a06acb77fce5d226c183e2197602eff8f434d 100644 (file)
@@ -86,7 +86,8 @@ static int __devinit cmodio_setup_subdevice(struct cmodio_device *priv,
 
        /* Add platform data */
        pdata->modno = modno;
-       cell->mfd_data = pdata;
+       cell->platform_data = pdata;
+       cell->pdata_size = sizeof(*pdata);
 
        /* MODULbus registers -- PCI BAR3 is big-endian MODULbus access */
        res->flags = IORESOURCE_MEM;
index 58cc5fdde01647be4260f4736a55596d5c684242..e1e59c92f7588592e141839f2af5c0c4298d53d5 100644 (file)
@@ -627,7 +627,7 @@ int __devinit max8925_device_init(struct max8925_chip *chip,
                goto out_dev;
        }
 
-       if (pdata && pdata->regulator[0]) {
+       if (pdata) {
                ret = mfd_add_devices(chip->dev, 0, &regulator_devs[0],
                                      ARRAY_SIZE(regulator_devs),
                                      &regulator_resources[0], 0);
index 668634e89e81e1e616bbdab7562db258de72c8a0..7e4d44bf92ab90b10725ac14310782b1b6143e31 100644 (file)
@@ -683,13 +683,14 @@ out:
 EXPORT_SYMBOL_GPL(mc13783_adc_do_conversion);
 
 static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx,
-               const char *format, void *pdata)
+               const char *format, void *pdata, size_t pdata_size)
 {
        char buf[30];
        const char *name = mc13xxx_get_chipname(mc13xxx);
 
        struct mfd_cell cell = {
-               .mfd_data = pdata,
+               .platform_data = pdata,
+               .pdata_size = pdata_size,
        };
 
        /* there is no asnprintf in the kernel :-( */
@@ -705,7 +706,7 @@ static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx,
 
 static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
 {
-       return mc13xxx_add_subdevice_pdata(mc13xxx, format, NULL);
+       return mc13xxx_add_subdevice_pdata(mc13xxx, format, NULL, 0);
 }
 
 static int mc13xxx_probe(struct spi_device *spi)
@@ -764,7 +765,7 @@ err_revision:
 
        if (pdata->flags & MC13XXX_USE_REGULATOR) {
                mc13xxx_add_subdevice_pdata(mc13xxx, "%s-regulator",
-                               &pdata->regulators);
+                               &pdata->regulators, sizeof(pdata->regulators));
        }
 
        if (pdata->flags & MC13XXX_USE_RTC)
@@ -774,7 +775,8 @@ err_revision:
                mc13xxx_add_subdevice(mc13xxx, "%s-ts");
 
        if (pdata->flags & MC13XXX_USE_LED)
-               mc13xxx_add_subdevice_pdata(mc13xxx, "%s-led", pdata->leds);
+               mc13xxx_add_subdevice_pdata(mc13xxx, "%s-led",
+                               pdata->leds, sizeof(*pdata->leds));
 
        return 0;
 }
index f4c8c844b913060c6e0bb12f35db8cba983a81dc..0902523af62d47e33e139c1feab75e5b0ef2a49c 100644 (file)
@@ -88,6 +88,13 @@ static int mfd_add_device(struct device *parent, int id,
 
        pdev->dev.parent = parent;
 
+       if (cell->pdata_size) {
+               ret = platform_device_add_data(pdev,
+                                       cell->platform_data, cell->pdata_size);
+               if (ret)
+                       goto fail_res;
+       }
+
        ret = mfd_platform_add_cell(pdev, cell);
        if (ret)
                goto fail_res;
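
With .pdata_size set, mfd_add_device() now duplicates the buffer through platform_device_add_data(), which is what lets the drivers above (88pm860x, ab3100, asic3, mc13xxx, ...) drop their file-scope *_pdata copies. A minimal sketch of the resulting convention, with illustrative names only; the stack-allocated cell mirrors what mc13xxx-core.c does in this series.

	#include <linux/device.h>
	#include <linux/mfd/core.h>

	struct example_pdata {
		int threshold;			/* illustrative payload */
	};

	static int example_add_child(struct device *parent,
				     struct example_pdata *pdata)
	{
		struct mfd_cell cell = {
			.name          = "example-subdev",
			.platform_data = pdata,
			.pdata_size    = sizeof(*pdata),
		};

		/* The core copies *pdata, so it may live on the probe path. */
		return mfd_add_devices(parent, 0, &cell, 1, NULL, 0);
	}
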
index 3ab9ffa00aadf8c805477e2c323c2f1f889da2a8..855219526ccb9ec390afe9e2a74b138b1b415f31 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/spinlock.h>
 #include <linux/gpio.h>
 #include <plat/usb.h>
+#include <linux/pm_runtime.h>
 
 #define USBHS_DRIVER_NAME      "usbhs-omap"
 #define OMAP_EHCI_DEVICE       "ehci-omap"
 
 
 struct usbhs_hcd_omap {
-       struct clk                      *usbhost_ick;
-       struct clk                      *usbhost_hs_fck;
-       struct clk                      *usbhost_fs_fck;
        struct clk                      *xclk60mhsp1_ck;
        struct clk                      *xclk60mhsp2_ck;
        struct clk                      *utmi_p1_fck;
@@ -158,8 +156,6 @@ struct usbhs_hcd_omap {
        struct clk                      *usbhost_p2_fck;
        struct clk                      *usbtll_p2_fck;
        struct clk                      *init_60m_fclk;
-       struct clk                      *usbtll_fck;
-       struct clk                      *usbtll_ick;
 
        void __iomem                    *uhh_base;
        void __iomem                    *tll_base;
@@ -281,6 +277,7 @@ static int omap_usbhs_alloc_children(struct platform_device *pdev)
 
        if (!ehci) {
                dev_err(dev, "omap_usbhs_alloc_child failed\n");
+               ret = -ENOMEM;
                goto err_end;
        }
 
@@ -304,13 +301,14 @@ static int omap_usbhs_alloc_children(struct platform_device *pdev)
                sizeof(*ohci_data), dev);
        if (!ohci) {
                dev_err(dev, "omap_usbhs_alloc_child failed\n");
+               ret = -ENOMEM;
                goto err_ehci;
        }
 
        return 0;
 
 err_ehci:
-       platform_device_put(ehci);
+       platform_device_unregister(ehci);
 
 err_end:
        return ret;
@@ -351,46 +349,13 @@ static int __devinit usbhs_omap_probe(struct platform_device *pdev)
        omap->platdata.ehci_data = pdata->ehci_data;
        omap->platdata.ohci_data = pdata->ohci_data;
 
-       omap->usbhost_ick = clk_get(dev, "usbhost_ick");
-       if (IS_ERR(omap->usbhost_ick)) {
-               ret =  PTR_ERR(omap->usbhost_ick);
-               dev_err(dev, "usbhost_ick failed error:%d\n", ret);
-               goto err_end;
-       }
-
-       omap->usbhost_hs_fck = clk_get(dev, "hs_fck");
-       if (IS_ERR(omap->usbhost_hs_fck)) {
-               ret = PTR_ERR(omap->usbhost_hs_fck);
-               dev_err(dev, "usbhost_hs_fck failed error:%d\n", ret);
-               goto err_usbhost_ick;
-       }
-
-       omap->usbhost_fs_fck = clk_get(dev, "fs_fck");
-       if (IS_ERR(omap->usbhost_fs_fck)) {
-               ret = PTR_ERR(omap->usbhost_fs_fck);
-               dev_err(dev, "usbhost_fs_fck failed error:%d\n", ret);
-               goto err_usbhost_hs_fck;
-       }
-
-       omap->usbtll_fck = clk_get(dev, "usbtll_fck");
-       if (IS_ERR(omap->usbtll_fck)) {
-               ret = PTR_ERR(omap->usbtll_fck);
-               dev_err(dev, "usbtll_fck failed error:%d\n", ret);
-               goto err_usbhost_fs_fck;
-       }
-
-       omap->usbtll_ick = clk_get(dev, "usbtll_ick");
-       if (IS_ERR(omap->usbtll_ick)) {
-               ret = PTR_ERR(omap->usbtll_ick);
-               dev_err(dev, "usbtll_ick failed error:%d\n", ret);
-               goto err_usbtll_fck;
-       }
+       pm_runtime_enable(&pdev->dev);
 
        omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
        if (IS_ERR(omap->utmi_p1_fck)) {
                ret = PTR_ERR(omap->utmi_p1_fck);
                dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
-               goto err_usbtll_ick;
+               goto err_end;
        }
 
        omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
@@ -520,22 +485,8 @@ err_xclk60mhsp1_ck:
 err_utmi_p1_fck:
        clk_put(omap->utmi_p1_fck);
 
-err_usbtll_ick:
-       clk_put(omap->usbtll_ick);
-
-err_usbtll_fck:
-       clk_put(omap->usbtll_fck);
-
-err_usbhost_fs_fck:
-       clk_put(omap->usbhost_fs_fck);
-
-err_usbhost_hs_fck:
-       clk_put(omap->usbhost_hs_fck);
-
-err_usbhost_ick:
-       clk_put(omap->usbhost_ick);
-
 err_end:
+       pm_runtime_disable(&pdev->dev);
        kfree(omap);
 
 end_probe:
@@ -569,11 +520,7 @@ static int __devexit usbhs_omap_remove(struct platform_device *pdev)
        clk_put(omap->utmi_p2_fck);
        clk_put(omap->xclk60mhsp1_ck);
        clk_put(omap->utmi_p1_fck);
-       clk_put(omap->usbtll_ick);
-       clk_put(omap->usbtll_fck);
-       clk_put(omap->usbhost_fs_fck);
-       clk_put(omap->usbhost_hs_fck);
-       clk_put(omap->usbhost_ick);
+       pm_runtime_disable(&pdev->dev);
        kfree(omap);
 
        return 0;
@@ -693,7 +640,6 @@ static int usbhs_enable(struct device *dev)
        struct usbhs_omap_platform_data *pdata = &omap->platdata;
        unsigned long                   flags = 0;
        int                             ret = 0;
-       unsigned long                   timeout;
        unsigned                        reg;
 
        dev_dbg(dev, "starting TI HSUSB Controller\n");
@@ -706,11 +652,7 @@ static int usbhs_enable(struct device *dev)
        if (omap->count > 0)
                goto end_count;
 
-       clk_enable(omap->usbhost_ick);
-       clk_enable(omap->usbhost_hs_fck);
-       clk_enable(omap->usbhost_fs_fck);
-       clk_enable(omap->usbtll_fck);
-       clk_enable(omap->usbtll_ick);
+       pm_runtime_get_sync(dev);
 
        if (pdata->ehci_data->phy_reset) {
                if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) {
@@ -734,50 +676,6 @@ static int usbhs_enable(struct device *dev)
        omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
        dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
 
-       /* perform TLL soft reset, and wait until reset is complete */
-       usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
-                       OMAP_USBTLL_SYSCONFIG_SOFTRESET);
-
-       /* Wait for TLL reset to complete */
-       timeout = jiffies + msecs_to_jiffies(1000);
-       while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
-                       & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
-               cpu_relax();
-
-               if (time_after(jiffies, timeout)) {
-                       dev_dbg(dev, "operation timed out\n");
-                       ret = -EINVAL;
-                       goto err_tll;
-               }
-       }
-
-       dev_dbg(dev, "TLL RESET DONE\n");
-
-       /* (1<<3) = no idle mode only for initial debugging */
-       usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
-                       OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
-                       OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
-                       OMAP_USBTLL_SYSCONFIG_AUTOIDLE);
-
-       /* Put UHH in NoIdle/NoStandby mode */
-       reg = usbhs_read(omap->uhh_base, OMAP_UHH_SYSCONFIG);
-       if (is_omap_usbhs_rev1(omap)) {
-               reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
-                               | OMAP_UHH_SYSCONFIG_SIDLEMODE
-                               | OMAP_UHH_SYSCONFIG_CACTIVITY
-                               | OMAP_UHH_SYSCONFIG_MIDLEMODE);
-               reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
-
-
-       } else if (is_omap_usbhs_rev2(omap)) {
-               reg &= ~OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR;
-               reg |= OMAP4_UHH_SYSCONFIG_NOIDLE;
-               reg &= ~OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR;
-               reg |= OMAP4_UHH_SYSCONFIG_NOSTDBY;
-       }
-
-       usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
-
        reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
        /* setup ULPI bypass and burst configurations */
        reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
@@ -917,6 +815,8 @@ end_count:
        return 0;
 
 err_tll:
+       pm_runtime_put_sync(dev);
+       spin_unlock_irqrestore(&omap->lock, flags);
        if (pdata->ehci_data->phy_reset) {
                if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
                        gpio_free(pdata->ehci_data->reset_gpio_port[0]);
@@ -924,13 +824,6 @@ err_tll:
                if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
                        gpio_free(pdata->ehci_data->reset_gpio_port[1]);
        }
-
-       clk_disable(omap->usbtll_ick);
-       clk_disable(omap->usbtll_fck);
-       clk_disable(omap->usbhost_fs_fck);
-       clk_disable(omap->usbhost_hs_fck);
-       clk_disable(omap->usbhost_ick);
-       spin_unlock_irqrestore(&omap->lock, flags);
        return ret;
 }
 
@@ -994,6 +887,20 @@ static void usbhs_disable(struct device *dev)
                        dev_dbg(dev, "operation timed out\n");
        }
 
+       if (is_omap_usbhs_rev2(omap)) {
+               if (is_ehci_tll_mode(pdata->port_mode[0]))
+                       clk_enable(omap->usbtll_p1_fck);
+               if (is_ehci_tll_mode(pdata->port_mode[1]))
+                       clk_enable(omap->usbtll_p2_fck);
+               clk_disable(omap->utmi_p2_fck);
+               clk_disable(omap->utmi_p1_fck);
+       }
+
+       pm_runtime_put_sync(dev);
+
+       /* The gpio_free might sleep, so unlock the spinlock */
+       spin_unlock_irqrestore(&omap->lock, flags);
+
        if (pdata->ehci_data->phy_reset) {
                if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
                        gpio_free(pdata->ehci_data->reset_gpio_port[0]);
@@ -1001,14 +908,7 @@ static void usbhs_disable(struct device *dev)
                if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
                        gpio_free(pdata->ehci_data->reset_gpio_port[1]);
        }
-
-       clk_disable(omap->utmi_p2_fck);
-       clk_disable(omap->utmi_p1_fck);
-       clk_disable(omap->usbtll_ick);
-       clk_disable(omap->usbtll_fck);
-       clk_disable(omap->usbhost_fs_fck);
-       clk_disable(omap->usbhost_hs_fck);
-       clk_disable(omap->usbhost_ick);
+       return;
 
 end_disble:
        spin_unlock_irqrestore(&omap->lock, flags);
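
The omap-usb-host changes above swap the hand-rolled usbhost/usbtll clock handling for runtime PM. A minimal sketch of that shape, with illustrative function names only (the real probe and enable paths are in the hunks above).

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int example_probe(struct platform_device *pdev)
	{
		pm_runtime_enable(&pdev->dev);	/* replaces the clk_get() block */
		return 0;
	}

	static void example_touch_hw(struct device *dev)
	{
		pm_runtime_get_sync(dev);	/* replaces the clk_enable() calls */
		/* ... access the UHH/TLL registers ... */
		pm_runtime_put_sync(dev);	/* replaces the clk_disable() calls */
	}
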
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c
new file mode 100644 (file)
index 0000000..e873b15
--- /dev/null
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/msm_ssbi.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/pm8xxx/pm8921.h>
+#include <linux/mfd/pm8xxx/core.h>
+
+#define REG_HWREV              0x002  /* PMIC4 revision */
+#define REG_HWREV_2            0x0E8  /* PMIC4 revision 2 */
+
+struct pm8921 {
+       struct device                   *dev;
+       struct pm_irq_chip              *irq_chip;
+};
+
+static int pm8921_readb(const struct device *dev, u16 addr, u8 *val)
+{
+       const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev);
+       const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data;
+
+       return msm_ssbi_read(pmic->dev->parent, addr, val, 1);
+}
+
+static int pm8921_writeb(const struct device *dev, u16 addr, u8 val)
+{
+       const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev);
+       const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data;
+
+       return msm_ssbi_write(pmic->dev->parent, addr, &val, 1);
+}
+
+static int pm8921_read_buf(const struct device *dev, u16 addr, u8 *buf,
+                                                                       int cnt)
+{
+       const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev);
+       const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data;
+
+       return msm_ssbi_read(pmic->dev->parent, addr, buf, cnt);
+}
+
+static int pm8921_write_buf(const struct device *dev, u16 addr, u8 *buf,
+                                                                       int cnt)
+{
+       const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev);
+       const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data;
+
+       return msm_ssbi_write(pmic->dev->parent, addr, buf, cnt);
+}
+
+static int pm8921_read_irq_stat(const struct device *dev, int irq)
+{
+       const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev);
+       const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data;
+
+       return pm8xxx_get_irq_stat(pmic->irq_chip, irq);
+}
+
+static struct pm8xxx_drvdata pm8921_drvdata = {
+       .pmic_readb             = pm8921_readb,
+       .pmic_writeb            = pm8921_writeb,
+       .pmic_read_buf          = pm8921_read_buf,
+       .pmic_write_buf         = pm8921_write_buf,
+       .pmic_read_irq_stat     = pm8921_read_irq_stat,
+};
+
+static int __devinit pm8921_add_subdevices(const struct pm8921_platform_data
+                                          *pdata,
+                                          struct pm8921 *pmic,
+                                          u32 rev)
+{
+       int ret = 0, irq_base = 0;
+       struct pm_irq_chip *irq_chip;
+
+       if (pdata->irq_pdata) {
+               pdata->irq_pdata->irq_cdata.nirqs = PM8921_NR_IRQS;
+               pdata->irq_pdata->irq_cdata.rev = rev;
+               irq_base = pdata->irq_pdata->irq_base;
+               irq_chip = pm8xxx_irq_init(pmic->dev, pdata->irq_pdata);
+
+               if (IS_ERR(irq_chip)) {
+                       pr_err("Failed to init interrupts ret=%ld\n",
+                                       PTR_ERR(irq_chip));
+                       return PTR_ERR(irq_chip);
+               }
+               pmic->irq_chip = irq_chip;
+       }
+       return ret;
+}
+
+static int __devinit pm8921_probe(struct platform_device *pdev)
+{
+       const struct pm8921_platform_data *pdata = pdev->dev.platform_data;
+       struct pm8921 *pmic;
+       int rc;
+       u8 val;
+       u32 rev;
+
+       if (!pdata) {
+               pr_err("missing platform data\n");
+               return -EINVAL;
+       }
+
+       pmic = kzalloc(sizeof(struct pm8921), GFP_KERNEL);
+       if (!pmic) {
+               pr_err("Cannot alloc pm8921 struct\n");
+               return -ENOMEM;
+       }
+
+       /* Read PMIC chip revision */
+       rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV, &val, sizeof(val));
+       if (rc) {
+               pr_err("Failed to read hw rev reg %d:rc=%d\n", REG_HWREV, rc);
+               goto err_read_rev;
+       }
+       pr_info("PMIC revision 1: %02X\n", val);
+       rev = val;
+
+       /* Read PMIC chip revision 2 */
+       rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV_2, &val, sizeof(val));
+       if (rc) {
+               pr_err("Failed to read hw rev 2 reg %d:rc=%d\n",
+                       REG_HWREV_2, rc);
+               goto err_read_rev;
+       }
+       pr_info("PMIC revision 2: %02X\n", val);
+       rev |= val << BITS_PER_BYTE;
+
+       pmic->dev = &pdev->dev;
+       pm8921_drvdata.pm_chip_data = pmic;
+       platform_set_drvdata(pdev, &pm8921_drvdata);
+
+       rc = pm8921_add_subdevices(pdata, pmic, rev);
+       if (rc) {
+               pr_err("Cannot add subdevices rc=%d\n", rc);
+               goto err;
+       }
+
+       /* gpio might not work if no irq device is found */
+       WARN_ON(pmic->irq_chip == NULL);
+
+       return 0;
+
+err:
+       mfd_remove_devices(pmic->dev);
+       platform_set_drvdata(pdev, NULL);
+err_read_rev:
+       kfree(pmic);
+       return rc;
+}
+
+static int __devexit pm8921_remove(struct platform_device *pdev)
+{
+       struct pm8xxx_drvdata *drvdata;
+       struct pm8921 *pmic = NULL;
+
+       drvdata = platform_get_drvdata(pdev);
+       if (drvdata)
+               pmic = drvdata->pm_chip_data;
+       if (pmic) {
+               mfd_remove_devices(pmic->dev);
+               if (pmic->irq_chip) {
+                       pm8xxx_irq_exit(pmic->irq_chip);
+                       pmic->irq_chip = NULL;
+               }
+       }
+       platform_set_drvdata(pdev, NULL);
+       kfree(pmic);
+
+       return 0;
+}
+
+static struct platform_driver pm8921_driver = {
+       .probe          = pm8921_probe,
+       .remove         = __devexit_p(pm8921_remove),
+       .driver         = {
+               .name   = "pm8921-core",
+               .owner  = THIS_MODULE,
+       },
+};
+
+static int __init pm8921_init(void)
+{
+       return platform_driver_register(&pm8921_driver);
+}
+subsys_initcall(pm8921_init);
+
+static void __exit pm8921_exit(void)
+{
+       platform_driver_unregister(&pm8921_driver);
+}
+module_exit(pm8921_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC 8921 core driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pm8921-core");
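
Sub-devices of pm8921-core reach the SSBI bus through the pm8xxx_drvdata ops registered above. A minimal sketch of that consumer side, assuming the pm8xxx_readb() helper from <linux/mfd/pm8xxx/core.h> that pm8xxx-irq.c below also uses; the function name and register constant here are illustrative only.

	#include <linux/platform_device.h>
	#include <linux/mfd/pm8xxx/core.h>

	/* Hypothetical pm8xxx sub-driver helper: the parent device is the
	 * pm8921-core platform device whose drvdata carries the ops. */
	static int example_read_hwrev(struct platform_device *pdev)
	{
		u8 rev;
		int rc;

		rc = pm8xxx_readb(pdev->dev.parent, 0x002 /* REG_HWREV */, &rev);
		if (rc)
			return rc;

		dev_info(&pdev->dev, "PMIC revision 1: %02X\n", rev);
		return 0;
	}
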
diff --git a/drivers/mfd/pm8xxx-irq.c b/drivers/mfd/pm8xxx-irq.c
new file mode 100644 (file)
index 0000000..d452dd0
--- /dev/null
@@ -0,0 +1,371 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)    "%s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/mfd/pm8xxx/irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* PMIC8xxx IRQ */
+
+#define        SSBI_REG_ADDR_IRQ_BASE          0x1BB
+
+#define        SSBI_REG_ADDR_IRQ_ROOT          (SSBI_REG_ADDR_IRQ_BASE + 0)
+#define        SSBI_REG_ADDR_IRQ_M_STATUS1     (SSBI_REG_ADDR_IRQ_BASE + 1)
+#define        SSBI_REG_ADDR_IRQ_M_STATUS2     (SSBI_REG_ADDR_IRQ_BASE + 2)
+#define        SSBI_REG_ADDR_IRQ_M_STATUS3     (SSBI_REG_ADDR_IRQ_BASE + 3)
+#define        SSBI_REG_ADDR_IRQ_M_STATUS4     (SSBI_REG_ADDR_IRQ_BASE + 4)
+#define        SSBI_REG_ADDR_IRQ_BLK_SEL       (SSBI_REG_ADDR_IRQ_BASE + 5)
+#define        SSBI_REG_ADDR_IRQ_IT_STATUS     (SSBI_REG_ADDR_IRQ_BASE + 6)
+#define        SSBI_REG_ADDR_IRQ_CONFIG        (SSBI_REG_ADDR_IRQ_BASE + 7)
+#define        SSBI_REG_ADDR_IRQ_RT_STATUS     (SSBI_REG_ADDR_IRQ_BASE + 8)
+
+#define        PM_IRQF_LVL_SEL                 0x01    /* level select */
+#define        PM_IRQF_MASK_FE                 0x02    /* mask falling edge */
+#define        PM_IRQF_MASK_RE                 0x04    /* mask rising edge */
+#define        PM_IRQF_CLR                     0x08    /* clear interrupt */
+#define        PM_IRQF_BITS_MASK               0x70
+#define        PM_IRQF_BITS_SHIFT              4
+#define        PM_IRQF_WRITE                   0x80
+
+#define        PM_IRQF_MASK_ALL                (PM_IRQF_MASK_FE | \
+                                       PM_IRQF_MASK_RE)
+
+struct pm_irq_chip {
+       struct device           *dev;
+       spinlock_t              pm_irq_lock;
+       unsigned int            devirq;
+       unsigned int            irq_base;
+       unsigned int            num_irqs;
+       unsigned int            num_blocks;
+       unsigned int            num_masters;
+       u8                      config[0];
+};
+
+static int pm8xxx_read_root_irq(const struct pm_irq_chip *chip, u8 *rp)
+{
+       return pm8xxx_readb(chip->dev, SSBI_REG_ADDR_IRQ_ROOT, rp);
+}
+
+static int pm8xxx_read_master_irq(const struct pm_irq_chip *chip, u8 m, u8 *bp)
+{
+       return pm8xxx_readb(chip->dev,
+                       SSBI_REG_ADDR_IRQ_M_STATUS1 + m, bp);
+}
+
+static int pm8xxx_read_block_irq(struct pm_irq_chip *chip, u8 bp, u8 *ip)
+{
+       int     rc;
+
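+       /* pair the block-select write with the status read atomically */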
+       spin_lock(&chip->pm_irq_lock);
+       rc = pm8xxx_writeb(chip->dev, SSBI_REG_ADDR_IRQ_BLK_SEL, bp);
+       if (rc) {
+               pr_err("Failed Selecting Block %d rc=%d\n", bp, rc);
+               goto bail;
+       }
+
+       rc = pm8xxx_readb(chip->dev, SSBI_REG_ADDR_IRQ_IT_STATUS, ip);
+       if (rc)
+               pr_err("Failed Reading Status rc=%d\n", rc);
+bail:
+       spin_unlock(&chip->pm_irq_lock);
+       return rc;
+}
+
+static int pm8xxx_config_irq(struct pm_irq_chip *chip, u8 bp, u8 cp)
+{
+       int     rc;
+
+       spin_lock(&chip->pm_irq_lock);
+       rc = pm8xxx_writeb(chip->dev, SSBI_REG_ADDR_IRQ_BLK_SEL, bp);
+       if (rc) {
+               pr_err("Failed Selecting Block %d rc=%d\n", bp, rc);
+               goto bail;
+       }
+
+       cp |= PM_IRQF_WRITE;
+       rc = pm8xxx_writeb(chip->dev, SSBI_REG_ADDR_IRQ_CONFIG, cp);
+       if (rc)
+               pr_err("Failed Configuring IRQ rc=%d\n", rc);
+bail:
+       spin_unlock(&chip->pm_irq_lock);
+       return rc;
+}
+
+static int pm8xxx_irq_block_handler(struct pm_irq_chip *chip, int block)
+{
+       int pmirq, irq, i, ret = 0;
+       u8 bits;
+
+       ret = pm8xxx_read_block_irq(chip, block, &bits);
+       if (ret) {
+               pr_err("Failed reading block %d ret=%d\n", block, ret);
+               return ret;
+       }
+       if (!bits) {
+               pr_err("block bit set in master but no irqs: %d\n", block);
+               return 0;
+       }
+
+       /* Check IRQ bits */
+       for (i = 0; i < 8; i++) {
+               if (bits & (1 << i)) {
+                       pmirq = block * 8 + i;
+                       irq = pmirq + chip->irq_base;
+                       generic_handle_irq(irq);
+               }
+       }
+       return 0;
+}
+
+static int pm8xxx_irq_master_handler(struct pm_irq_chip *chip, int master)
+{
+       u8 blockbits;
+       int block_number, i, ret = 0;
+
+       ret = pm8xxx_read_master_irq(chip, master, &blockbits);
+       if (ret) {
+               pr_err("Failed to read master %d ret=%d\n", master, ret);
+               return ret;
+       }
+       if (!blockbits) {
+               pr_err("master bit set in root but no blocks: %d\n", master);
+               return 0;
+       }
+
+       for (i = 0; i < 8; i++)
+               if (blockbits & (1 << i)) {
+                       block_number = master * 8 + i;  /* block # */
+                       ret |= pm8xxx_irq_block_handler(chip, block_number);
+               }
+       return ret;
+}
+
+static void pm8xxx_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+       struct pm_irq_chip *chip = irq_desc_get_handler_data(desc);
+       struct irq_chip *irq_chip = irq_desc_get_chip(desc);
+       u8      root;
+       int     i, ret, masters = 0;
+
+       ret = pm8xxx_read_root_irq(chip, &root);
+       if (ret) {
+               pr_err("Can't read root status ret=%d\n", ret);
+               return;
+       }
+
+       /* on pm8xxx series masters start from bit 1 of the root */
+       masters = root >> 1;
+
+       /* Read allowed masters for blocks. */
+       for (i = 0; i < chip->num_masters; i++)
+               if (masters & (1 << i))
+                       pm8xxx_irq_master_handler(chip, i);
+
+       irq_chip->irq_ack(&desc->irq_data);
+}
+
+static void pm8xxx_irq_mask_ack(struct irq_data *d)
+{
+       struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
+       unsigned int pmirq = d->irq - chip->irq_base;
+       int     master, irq_bit;
+       u8      block, config;
+
+       block = pmirq / 8;
+       master = block / 8;
+       irq_bit = pmirq % 8;
+
+       config = chip->config[pmirq] | PM_IRQF_MASK_ALL | PM_IRQF_CLR;
+       pm8xxx_config_irq(chip, block, config);
+}
+
+static void pm8xxx_irq_unmask(struct irq_data *d)
+{
+       struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
+       unsigned int pmirq = d->irq - chip->irq_base;
+       int     master, irq_bit;
+       u8      block, config;
+
+       block = pmirq / 8;
+       master = block / 8;
+       irq_bit = pmirq % 8;
+
+       config = chip->config[pmirq];
+       pm8xxx_config_irq(chip, block, config);
+}
+
+static int pm8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+       struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d);
+       unsigned int pmirq = d->irq - chip->irq_base;
+       int master, irq_bit;
+       u8 block, config;
+
+       block = pmirq / 8;
+       master = block / 8;
+       irq_bit  = pmirq % 8;
+
+       chip->config[pmirq] = (irq_bit << PM_IRQF_BITS_SHIFT)
+                                                       | PM_IRQF_MASK_ALL;
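+       /*
+        * Edge triggers unmask the requested edge(s).  Level triggers select
+        * level mode and reuse the RE/FE mask bits to pick active-high or
+        * active-low polarity.
+        */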
+       if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+               if (flow_type & IRQF_TRIGGER_RISING)
+                       chip->config[pmirq] &= ~PM_IRQF_MASK_RE;
+               if (flow_type & IRQF_TRIGGER_FALLING)
+                       chip->config[pmirq] &= ~PM_IRQF_MASK_FE;
+       } else {
+               chip->config[pmirq] |= PM_IRQF_LVL_SEL;
+
+               if (flow_type & IRQF_TRIGGER_HIGH)
+                       chip->config[pmirq] &= ~PM_IRQF_MASK_RE;
+               else
+                       chip->config[pmirq] &= ~PM_IRQF_MASK_FE;
+       }
+
+       config = chip->config[pmirq] | PM_IRQF_CLR;
+       return pm8xxx_config_irq(chip, block, config);
+}
+
+static int pm8xxx_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+       return 0;
+}
+
+static struct irq_chip pm8xxx_irq_chip = {
+       .name           = "pm8xxx",
+       .irq_mask_ack   = pm8xxx_irq_mask_ack,
+       .irq_unmask     = pm8xxx_irq_unmask,
+       .irq_set_type   = pm8xxx_irq_set_type,
+       .irq_set_wake   = pm8xxx_irq_set_wake,
+       .flags          = IRQCHIP_MASK_ON_SUSPEND,
+};
+
+/**
+ * pm8xxx_get_irq_stat - get the status of the irq line
+ * @chip: pointer to identify a pmic irq controller
+ * @irq: the irq number
+ *
+ * The pm8xxx gpio and mpp blocks rely on the interrupt block to read
+ * the values on their pins. This function facilitates reading the
+ * status of a gpio or an mpp line. The caller has to convert the
+ * gpio number to the corresponding irq number first.
+ *
+ * RETURNS:
+ * 0 or 1 reflecting the real-time state of the line, or a negative
+ * errno on failure
+ */
+int pm8xxx_get_irq_stat(struct pm_irq_chip *chip, int irq)
+{
+       int pmirq, rc;
+       u8  block, bits, bit;
+       unsigned long flags;
+
+       if (chip == NULL || irq < chip->irq_base ||
+                       irq >= chip->irq_base + chip->num_irqs)
+               return -EINVAL;
+
+       pmirq = irq - chip->irq_base;
+
+       block = pmirq / 8;
+       bit = pmirq % 8;
+
+       spin_lock_irqsave(&chip->pm_irq_lock, flags);
+
+       rc = pm8xxx_writeb(chip->dev, SSBI_REG_ADDR_IRQ_BLK_SEL, block);
+       if (rc) {
+               pr_err("Failed Selecting block irq=%d pmirq=%d blk=%d rc=%d\n",
+                       irq, pmirq, block, rc);
+               goto bail_out;
+       }
+
+       rc = pm8xxx_readb(chip->dev, SSBI_REG_ADDR_IRQ_RT_STATUS, &bits);
+       if (rc) {
+               pr_err("Failed Configuring irq=%d pmirq=%d blk=%d rc=%d\n",
+                       irq, pmirq, block, rc);
+               goto bail_out;
+       }
+
+       rc = (bits & (1 << bit)) ? 1 : 0;
+
+bail_out:
+       spin_unlock_irqrestore(&chip->pm_irq_lock, flags);
+
+       return rc;
+}
+EXPORT_SYMBOL_GPL(pm8xxx_get_irq_stat);
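+
+/*
+ * Usage sketch (illustrative only): a gpio or mpp driver converts its pin
+ * offset into the Linux IRQ number it was assigned and then queries the
+ * real-time line state, roughly:
+ *
+ *     level = pm8xxx_get_irq_stat(chip, pm_gpio_irq_base + pin);
+ *
+ * "pm_gpio_irq_base" and "pin" are hypothetical names used only here.
+ */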
+
+struct pm_irq_chip * __devinit pm8xxx_irq_init(struct device *dev,
+                               const struct pm8xxx_irq_platform_data *pdata)
+{
+       struct pm_irq_chip  *chip;
+       int devirq;
+       unsigned int pmirq;
+
+       if (!pdata) {
+               pr_err("No platform data\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       devirq = pdata->devirq;
+       if (devirq < 0) {
+               pr_err("missing devirq\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       chip = kzalloc(sizeof(struct pm_irq_chip)
+                       + sizeof(u8) * pdata->irq_cdata.nirqs, GFP_KERNEL);
+       if (!chip) {
+               pr_err("Cannot alloc pm_irq_chip struct\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       chip->dev = dev;
+       chip->devirq = devirq;
+       chip->irq_base = pdata->irq_base;
+       chip->num_irqs = pdata->irq_cdata.nirqs;
+       chip->num_blocks = DIV_ROUND_UP(chip->num_irqs, 8);
+       chip->num_masters = DIV_ROUND_UP(chip->num_blocks, 8);
+       spin_lock_init(&chip->pm_irq_lock);
+
+       for (pmirq = 0; pmirq < chip->num_irqs; pmirq++) {
+               irq_set_chip_and_handler(chip->irq_base + pmirq,
+                               &pm8xxx_irq_chip,
+                               handle_level_irq);
+               irq_set_chip_data(chip->irq_base + pmirq, chip);
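+               /* ARM needs us to explicitly flag the IRQ as valid
+                * and will set it noprobe when we do so. */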
+#ifdef CONFIG_ARM
+               set_irq_flags(chip->irq_base + pmirq, IRQF_VALID);
+#else
+               irq_set_noprobe(chip->irq_base + pmirq);
+#endif
+       }
+
+       irq_set_irq_type(devirq, pdata->irq_trigger_flag);
+       irq_set_handler_data(devirq, chip);
+       irq_set_chained_handler(devirq, pm8xxx_irq_handler);
+       irq_set_irq_wake(devirq, 1);
+
+       return chip;
+}
+
+int __devexit pm8xxx_irq_exit(struct pm_irq_chip *chip)
+{
+       irq_set_chained_handler(chip->devirq, NULL);
+       kfree(chip);
+       return 0;
+}
index 10dbe6374a89328b00da7014e4eccfa886906f2f..809bd4a610895c75baf469ceb1d3bec417aa64c8 100644 (file)
@@ -61,12 +61,14 @@ static struct mfd_cell rdc321x_sb_cells[] = {
                .name           = "rdc321x-wdt",
                .resources      = rdc321x_wdt_resource,
                .num_resources  = ARRAY_SIZE(rdc321x_wdt_resource),
-               .mfd_data       = &rdc321x_wdt_pdata,
+               .platform_data  = &rdc321x_wdt_pdata,
+               .pdata_size     = sizeof(rdc321x_wdt_pdata),
        }, {
                .name           = "rdc321x-gpio",
                .resources      = rdc321x_gpio_resources,
                .num_resources  = ARRAY_SIZE(rdc321x_gpio_resources),
-               .mfd_data       = &rdc321x_gpio_pdata,
+               .platform_data  = &rdc321x_gpio_pdata,
+               .pdata_size     = sizeof(rdc321x_gpio_pdata),
        },
 };
 
index 42830e6929649ae99e729479eb76dd1dd4bd917c..91ad21ef7721cddd335e5470717f3f878191bf0d 100644 (file)
@@ -170,7 +170,8 @@ static struct mfd_cell t7l66xb_cells[] = {
                .name = "tmio-mmc",
                .enable = t7l66xb_mmc_enable,
                .disable = t7l66xb_mmc_disable,
-               .mfd_data = &t7166xb_mmc_data,
+               .platform_data = &t7166xb_mmc_data,
+               .pdata_size    = sizeof(t7166xb_mmc_data),
                .num_resources = ARRAY_SIZE(t7l66xb_mmc_resources),
                .resources = t7l66xb_mmc_resources,
        },
@@ -382,7 +383,8 @@ static int t7l66xb_probe(struct platform_device *dev)
 
        t7l66xb_attach_irq(dev);
 
-       t7l66xb_cells[T7L66XB_CELL_NAND].mfd_data = pdata->nand_data;
+       t7l66xb_cells[T7L66XB_CELL_NAND].platform_data = pdata->nand_data;
+       t7l66xb_cells[T7L66XB_CELL_NAND].pdata_size = sizeof(*pdata->nand_data);
 
        ret = mfd_add_devices(&dev->dev, dev->id,
                              t7l66xb_cells, ARRAY_SIZE(t7l66xb_cells),
index b006f7cee9521673acf6e65e9b599dfc98d73a26..ad715bf49cac916fa86b15179c3f6415ecec265f 100644 (file)
@@ -131,7 +131,8 @@ static struct mfd_cell tc6387xb_cells[] = {
                .name = "tmio-mmc",
                .enable = tc6387xb_mmc_enable,
                .disable = tc6387xb_mmc_disable,
-               .mfd_data = &tc6387xb_mmc_data,
+               .platform_data = &tc6387xb_mmc_data,
+               .pdata_size    = sizeof(tc6387xb_mmc_data),
                .num_resources = ARRAY_SIZE(tc6387xb_mmc_resources),
                .resources = tc6387xb_mmc_resources,
        },
index fc53ce287601b84062940d80428fdf3dcb358c16..9612264f0e6dcf7832ebf2f4736815b4eabc6a4b 100644 (file)
@@ -393,7 +393,8 @@ static struct mfd_cell __devinitdata tc6393xb_cells[] = {
                .name = "tmio-mmc",
                .enable = tc6393xb_mmc_enable,
                .resume = tc6393xb_mmc_resume,
-               .mfd_data = &tc6393xb_mmc_data,
+               .platform_data = &tc6393xb_mmc_data,
+               .pdata_size    = sizeof(tc6393xb_mmc_data),
                .num_resources = ARRAY_SIZE(tc6393xb_mmc_resources),
                .resources = tc6393xb_mmc_resources,
        },
@@ -692,8 +693,11 @@ static int __devinit tc6393xb_probe(struct platform_device *dev)
                        goto err_setup;
        }
 
-       tc6393xb_cells[TC6393XB_CELL_NAND].mfd_data = tcpd->nand_data;
-       tc6393xb_cells[TC6393XB_CELL_FB].mfd_data = tcpd->fb_data;
+       tc6393xb_cells[TC6393XB_CELL_NAND].platform_data = tcpd->nand_data;
+       tc6393xb_cells[TC6393XB_CELL_NAND].pdata_size =
+                                               sizeof(*tcpd->nand_data);
+       tc6393xb_cells[TC6393XB_CELL_FB].platform_data = tcpd->fb_data;
+       tc6393xb_cells[TC6393XB_CELL_FB].pdata_size = sizeof(*tcpd->fb_data);
 
        ret = mfd_add_devices(&dev->dev, dev->id,
                        tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells),
index 94c6c8afad12e5abe9e0728b833db5c70f0d23aa..69272e4e34596a60b059a4d30f6eadddd4a473c3 100644 (file)
@@ -384,7 +384,8 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
                .name = "timb-dma",
                .num_resources = ARRAY_SIZE(timberdale_dma_resources),
                .resources = timberdale_dma_resources,
-               .mfd_data = &timb_dma_platform_data,
+               .platform_data = &timb_dma_platform_data,
+               .pdata_size = sizeof(timb_dma_platform_data),
        },
        {
                .name = "timb-uart",
@@ -395,37 +396,43 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = {
                .name = "xiic-i2c",
                .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
                .resources = timberdale_xiic_resources,
-               .mfd_data = &timberdale_xiic_platform_data,
+               .platform_data = &timberdale_xiic_platform_data,
+               .pdata_size = sizeof(timberdale_xiic_platform_data),
        },
        {
                .name = "timb-gpio",
                .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
                .resources = timberdale_gpio_resources,
-               .mfd_data = &timberdale_gpio_platform_data,
+               .platform_data = &timberdale_gpio_platform_data,
+               .pdata_size = sizeof(timberdale_gpio_platform_data),
        },
        {
                .name = "timb-video",
                .num_resources = ARRAY_SIZE(timberdale_video_resources),
                .resources = timberdale_video_resources,
-               .mfd_data = &timberdale_video_platform_data,
+               .platform_data = &timberdale_video_platform_data,
+               .pdata_size = sizeof(timberdale_video_platform_data),
        },
        {
                .name = "timb-radio",
                .num_resources = ARRAY_SIZE(timberdale_radio_resources),
                .resources = timberdale_radio_resources,
-               .mfd_data = &timberdale_radio_platform_data,
+               .platform_data = &timberdale_radio_platform_data,
+               .pdata_size = sizeof(timberdale_radio_platform_data),
        },
        {
                .name = "xilinx_spi",
                .num_resources = ARRAY_SIZE(timberdale_spi_resources),
                .resources = timberdale_spi_resources,
-               .mfd_data = &timberdale_xspi_platform_data,
+               .platform_data = &timberdale_xspi_platform_data,
+               .pdata_size = sizeof(timberdale_xspi_platform_data),
        },
        {
                .name = "ks8842",
                .num_resources = ARRAY_SIZE(timberdale_eth_resources),
                .resources = timberdale_eth_resources,
-               .mfd_data = &timberdale_ks8842_platform_data,
+               .platform_data = &timberdale_ks8842_platform_data,
+               .pdata_size = sizeof(timberdale_ks8842_platform_data),
        },
 };
 
@@ -434,7 +441,8 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
                .name = "timb-dma",
                .num_resources = ARRAY_SIZE(timberdale_dma_resources),
                .resources = timberdale_dma_resources,
-               .mfd_data = &timb_dma_platform_data,
+               .platform_data = &timb_dma_platform_data,
+               .pdata_size = sizeof(timb_dma_platform_data),
        },
        {
                .name = "timb-uart",
@@ -450,13 +458,15 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
                .name = "xiic-i2c",
                .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
                .resources = timberdale_xiic_resources,
-               .mfd_data = &timberdale_xiic_platform_data,
+               .platform_data = &timberdale_xiic_platform_data,
+               .pdata_size = sizeof(timberdale_xiic_platform_data),
        },
        {
                .name = "timb-gpio",
                .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
                .resources = timberdale_gpio_resources,
-               .mfd_data = &timberdale_gpio_platform_data,
+               .platform_data = &timberdale_gpio_platform_data,
+               .pdata_size = sizeof(timberdale_gpio_platform_data),
        },
        {
                .name = "timb-mlogicore",
@@ -467,25 +477,29 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = {
                .name = "timb-video",
                .num_resources = ARRAY_SIZE(timberdale_video_resources),
                .resources = timberdale_video_resources,
-               .mfd_data = &timberdale_video_platform_data,
+               .platform_data = &timberdale_video_platform_data,
+               .pdata_size = sizeof(timberdale_video_platform_data),
        },
        {
                .name = "timb-radio",
                .num_resources = ARRAY_SIZE(timberdale_radio_resources),
                .resources = timberdale_radio_resources,
-               .mfd_data = &timberdale_radio_platform_data,
+               .platform_data = &timberdale_radio_platform_data,
+               .pdata_size = sizeof(timberdale_radio_platform_data),
        },
        {
                .name = "xilinx_spi",
                .num_resources = ARRAY_SIZE(timberdale_spi_resources),
                .resources = timberdale_spi_resources,
-               .mfd_data = &timberdale_xspi_platform_data,
+               .platform_data = &timberdale_xspi_platform_data,
+               .pdata_size = sizeof(timberdale_xspi_platform_data),
        },
        {
                .name = "ks8842",
                .num_resources = ARRAY_SIZE(timberdale_eth_resources),
                .resources = timberdale_eth_resources,
-               .mfd_data = &timberdale_ks8842_platform_data,
+               .platform_data = &timberdale_ks8842_platform_data,
+               .pdata_size = sizeof(timberdale_ks8842_platform_data),
        },
 };
 
@@ -494,7 +508,8 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
                .name = "timb-dma",
                .num_resources = ARRAY_SIZE(timberdale_dma_resources),
                .resources = timberdale_dma_resources,
-               .mfd_data = &timb_dma_platform_data,
+               .platform_data = &timb_dma_platform_data,
+               .pdata_size = sizeof(timb_dma_platform_data),
        },
        {
                .name = "timb-uart",
@@ -505,31 +520,36 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = {
                .name = "xiic-i2c",
                .num_resources = ARRAY_SIZE(timberdale_xiic_resources),
                .resources = timberdale_xiic_resources,
-               .mfd_data = &timberdale_xiic_platform_data,
+               .platform_data = &timberdale_xiic_platform_data,
+               .pdata_size = sizeof(timberdale_xiic_platform_data),
        },
        {
                .name = "timb-gpio",
                .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
                .resources = timberdale_gpio_resources,
-               .mfd_data = &timberdale_gpio_platform_data,
+               .platform_data = &timberdale_gpio_platform_data,
+               .pdata_size = sizeof(timberdale_gpio_platform_data),
        },
        {
                .name = "timb-video",
                .num_resources = ARRAY_SIZE(timberdale_video_resources),
                .resources = timberdale_video_resources,
-               .mfd_data = &timberdale_video_platform_data,
+               .platform_data = &timberdale_video_platform_data,
+               .pdata_size = sizeof(timberdale_video_platform_data),
        },
        {
                .name = "timb-radio",
                .num_resources = ARRAY_SIZE(timberdale_radio_resources),
                .resources = timberdale_radio_resources,
-               .mfd_data = &timberdale_radio_platform_data,
+               .platform_data = &timberdale_radio_platform_data,
+               .pdata_size = sizeof(timberdale_radio_platform_data),
        },
        {
                .name = "xilinx_spi",
                .num_resources = ARRAY_SIZE(timberdale_spi_resources),
                .resources = timberdale_spi_resources,
-               .mfd_data = &timberdale_xspi_platform_data,
+               .platform_data = &timberdale_xspi_platform_data,
+               .pdata_size = sizeof(timberdale_xspi_platform_data),
        },
 };
 
@@ -538,7 +558,8 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
                .name = "timb-dma",
                .num_resources = ARRAY_SIZE(timberdale_dma_resources),
                .resources = timberdale_dma_resources,
-               .mfd_data = &timb_dma_platform_data,
+               .platform_data = &timb_dma_platform_data,
+               .pdata_size = sizeof(timb_dma_platform_data),
        },
        {
                .name = "timb-uart",
@@ -549,37 +570,43 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = {
                .name = "ocores-i2c",
                .num_resources = ARRAY_SIZE(timberdale_ocores_resources),
                .resources = timberdale_ocores_resources,
-               .mfd_data = &timberdale_ocores_platform_data,
+               .platform_data = &timberdale_ocores_platform_data,
+               .pdata_size = sizeof(timberdale_ocores_platform_data),
        },
        {
                .name = "timb-gpio",
                .num_resources = ARRAY_SIZE(timberdale_gpio_resources),
                .resources = timberdale_gpio_resources,
-               .mfd_data = &timberdale_gpio_platform_data,
+               .platform_data = &timberdale_gpio_platform_data,
+               .pdata_size = sizeof(timberdale_gpio_platform_data),
        },
        {
                .name = "timb-video",
                .num_resources = ARRAY_SIZE(timberdale_video_resources),
                .resources = timberdale_video_resources,
-               .mfd_data = &timberdale_video_platform_data,
+               .platform_data = &timberdale_video_platform_data,
+               .pdata_size = sizeof(timberdale_video_platform_data),
        },
        {
                .name = "timb-radio",
                .num_resources = ARRAY_SIZE(timberdale_radio_resources),
                .resources = timberdale_radio_resources,
-               .mfd_data = &timberdale_radio_platform_data,
+               .platform_data = &timberdale_radio_platform_data,
+               .pdata_size = sizeof(timberdale_radio_platform_data),
        },
        {
                .name = "xilinx_spi",
                .num_resources = ARRAY_SIZE(timberdale_spi_resources),
                .resources = timberdale_spi_resources,
-               .mfd_data = &timberdale_xspi_platform_data,
+               .platform_data = &timberdale_xspi_platform_data,
+               .pdata_size = sizeof(timberdale_xspi_platform_data),
        },
        {
                .name = "ks8842",
                .num_resources = ARRAY_SIZE(timberdale_eth_resources),
                .resources = timberdale_eth_resources,
-               .mfd_data = &timberdale_ks8842_platform_data,
+               .platform_data = &timberdale_ks8842_platform_data,
+               .pdata_size = sizeof(timberdale_ks8842_platform_data),
        },
 };
 
index 46d8205646b6cfdd93b90d1ee02c91b8bac5746b..a293b978e27ce19b116025e3ab4e87be6ae0dd74 100644 (file)
@@ -183,7 +183,8 @@ static int __devinit tps6105x_probe(struct i2c_client *client,
        /* Set up and register the platform devices. */
        for (i = 0; i < ARRAY_SIZE(tps6105x_cells); i++) {
                /* One state holder for all drivers, this is simple */
-               tps6105x_cells[i].mfd_data = tps6105x;
+               tps6105x_cells[i].platform_data = tps6105x;
+               tps6105x_cells[i].pdata_size = sizeof(*tps6105x);
        }
 
        ret = mfd_add_devices(&client->dev, 0, tps6105x_cells,
index b600808690c1092e1d9946f848e537647dbd1def..bba26d96c24075a3cbca9a320fabe3177b4de1a8 100644 (file)
@@ -270,8 +270,8 @@ static void tps6586x_gpio_set(struct gpio_chip *chip, unsigned offset,
 {
        struct tps6586x *tps6586x = container_of(chip, struct tps6586x, gpio);
 
-       __tps6586x_write(tps6586x->client, TPS6586X_GPIOSET2,
-                        value << offset);
+       tps6586x_update(tps6586x->dev, TPS6586X_GPIOSET2,
+                       value << offset, 1 << offset);
 }
 
 static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset,
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
new file mode 100644 (file)
index 0000000..2bfad5c
--- /dev/null
@@ -0,0 +1,218 @@
+/*
+ * tps65910-irq.c  --  TI TPS6591x
+ *
+ * Copyright 2010 Texas Instruments Inc.
+ *
+ * Author: Graeme Gregory <gg@slimlogic.co.uk>
+ * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under  the terms of the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/mfd/tps65910.h>
+
+static inline int irq_to_tps65910_irq(struct tps65910 *tps65910,
+                                                       int irq)
+{
+       return (irq - tps65910->irq_base);
+}
+
+/*
+ * This is a threaded IRQ handler so can access I2C/SPI.  Since all
+ * interrupts are clear on read the IRQ line will be reasserted and
+ * the physical IRQ will be handled again if another interrupt is
+ * asserted while we run - in the normal course of events this is a
+ * rare occurrence so we save I2C/SPI reads.  We're also assuming that
+ * it's rare to get lots of interrupts firing simultaneously so try to
+ * minimise I/O.
+ */
+static irqreturn_t tps65910_irq(int irq, void *irq_data)
+{
+       struct tps65910 *tps65910 = irq_data;
+       u32 irq_sts;
+       u32 irq_mask;
+       u8 reg;
+       int i;
+
+       tps65910->read(tps65910, TPS65910_INT_STS, 1, &reg);
+       irq_sts = reg;
+       tps65910->read(tps65910, TPS65910_INT_STS2, 1, &reg);
+       irq_sts |= reg << 8;
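+       /* the TPS65911 variant has a third bank of status/mask registers */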
+       switch (tps65910_chip_id(tps65910)) {
+       case TPS65911:
+               tps65910->read(tps65910, TPS65910_INT_STS3, 1, &reg);
+               irq_sts |= reg << 16;
+       }
+
+       tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg);
+       irq_mask = reg;
+       tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg);
+       irq_mask |= reg << 8;
+       switch (tps65910_chip_id(tps65910)) {
+       case TPS65911:
+               tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg);
+               irq_mask |= reg << 16;
+       }
+
+       irq_sts &= ~irq_mask;
+
+       if (!irq_sts)
+               return IRQ_NONE;
+
+       for (i = 0; i < tps65910->irq_num; i++) {
+
+               if (!(irq_sts & (1 << i)))
+                       continue;
+
+               handle_nested_irq(tps65910->irq_base + i);
+       }
+
+       /* Write the STS register back to clear IRQs we handled */
+       reg = irq_sts & 0xFF;
+       irq_sts >>= 8;
+       tps65910->write(tps65910, TPS65910_INT_STS, 1, &reg);
+       reg = irq_sts & 0xFF;
+       tps65910->write(tps65910, TPS65910_INT_STS2, 1, &reg);
+       switch (tps65910_chip_id(tps65910)) {
+       case TPS65911:
+               reg = irq_sts >> 8;
+               tps65910->write(tps65910, TPS65910_INT_STS3, 1, &reg);
+       }
+
+       return IRQ_HANDLED;
+}
+
+static void tps65910_irq_lock(struct irq_data *data)
+{
+       struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
+
+       mutex_lock(&tps65910->irq_lock);
+}
+
+static void tps65910_irq_sync_unlock(struct irq_data *data)
+{
+       struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
+       u32 reg_mask;
+       u8 reg;
+
+       tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg);
+       reg_mask = reg;
+       tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg);
+       reg_mask |= reg << 8;
+       switch (tps65910_chip_id(tps65910)) {
+       case TPS65911:
+               tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg);
+               reg_mask |= reg << 16;
+       }
+
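+       /* only write the hardware mask back if the cached value changed */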
+       if (tps65910->irq_mask != reg_mask) {
+               reg = tps65910->irq_mask & 0xFF;
+               tps65910->write(tps65910, TPS65910_INT_MSK, 1, &reg);
+               reg = tps65910->irq_mask >> 8 & 0xFF;
+               tps65910->write(tps65910, TPS65910_INT_MSK2, 1, &reg);
+               switch (tps65910_chip_id(tps65910)) {
+               case TPS65911:
+                       reg = tps65910->irq_mask >> 16;
+                       tps65910->write(tps65910, TPS65910_INT_MSK3, 1, &reg);
+               }
+       }
+       mutex_unlock(&tps65910->irq_lock);
+}
+
+static void tps65910_irq_enable(struct irq_data *data)
+{
+       struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
+
+       tps65910->irq_mask &= ~(1 << irq_to_tps65910_irq(tps65910, data->irq));
+}
+
+static void tps65910_irq_disable(struct irq_data *data)
+{
+       struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
+
+       tps65910->irq_mask |= (1 << irq_to_tps65910_irq(tps65910, data->irq));
+}
+
+static struct irq_chip tps65910_irq_chip = {
+       .name = "tps65910",
+       .irq_bus_lock = tps65910_irq_lock,
+       .irq_bus_sync_unlock = tps65910_irq_sync_unlock,
+       .irq_disable = tps65910_irq_disable,
+       .irq_enable = tps65910_irq_enable,
+};
+
+int tps65910_irq_init(struct tps65910 *tps65910, int irq,
+                   struct tps65910_platform_data *pdata)
+{
+       int ret, cur_irq;
+       int flags = IRQF_ONESHOT;
+
+       if (!irq) {
+               dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
+               return -EINVAL;
+       }
+
+       if (!pdata || !pdata->irq_base) {
+               dev_warn(tps65910->dev, "No interrupt support, no IRQ base\n");
+               return -EINVAL;
+       }
+
+       tps65910->irq_mask = 0xFFFFFF;
+
+       mutex_init(&tps65910->irq_lock);
+       tps65910->chip_irq = irq;
+       tps65910->irq_base = pdata->irq_base;
+
+       switch (tps65910_chip_id(tps65910)) {
+       case TPS65910:
+               tps65910->irq_num = TPS65910_NUM_IRQ;
+               break;
+       case TPS65911:
+               tps65910->irq_num = TPS65911_NUM_IRQ;
+               break;
+       }
+
+       /* Register with genirq */
+       for (cur_irq = tps65910->irq_base;
+            cur_irq < tps65910->irq_num + tps65910->irq_base;
+            cur_irq++) {
+               irq_set_chip_data(cur_irq, tps65910);
+               irq_set_chip_and_handler(cur_irq, &tps65910_irq_chip,
+                                        handle_edge_irq);
+               irq_set_nested_thread(cur_irq, 1);
+
+               /* ARM needs us to explicitly flag the IRQ as valid
+                * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+               set_irq_flags(cur_irq, IRQF_VALID);
+#else
+               irq_set_noprobe(cur_irq);
+#endif
+       }
+
+       ret = request_threaded_irq(irq, NULL, tps65910_irq, flags,
+                                  "tps65910", tps65910);
+
+       irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
+
+       if (ret != 0)
+               dev_err(tps65910->dev, "Failed to request IRQ: %d\n", ret);
+
+       return ret;
+}
+
+int tps65910_irq_exit(struct tps65910 *tps65910)
+{
+       free_irq(tps65910->chip_irq, tps65910);
+       return 0;
+}
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
new file mode 100644 (file)
index 0000000..2229e66
--- /dev/null
@@ -0,0 +1,229 @@
+/*
+ * tps65910.c  --  TI TPS6591x
+ *
+ * Copyright 2010 Texas Instruments Inc.
+ *
+ * Author: Graeme Gregory <gg@slimlogic.co.uk>
+ * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under  the terms of the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tps65910.h>
+
+static struct mfd_cell tps65910s[] = {
+       {
+               .name = "tps65910-pmic",
+       },
+       {
+               .name = "tps65910-rtc",
+       },
+       {
+               .name = "tps65910-power",
+       },
+};
+
+
+static int tps65910_i2c_read(struct tps65910 *tps65910, u8 reg,
+                                 int bytes, void *dest)
+{
+       struct i2c_client *i2c = tps65910->i2c_client;
+       struct i2c_msg xfer[2];
+       int ret;
+
+       /* Write register */
+       xfer[0].addr = i2c->addr;
+       xfer[0].flags = 0;
+       xfer[0].len = 1;
+       xfer[0].buf = &reg;
+
+       /* Read data */
+       xfer[1].addr = i2c->addr;
+       xfer[1].flags = I2C_M_RD;
+       xfer[1].len = bytes;
+       xfer[1].buf = dest;
+
+       ret = i2c_transfer(i2c->adapter, xfer, 2);
+       if (ret == 2)
+               ret = 0;
+       else if (ret >= 0)
+               ret = -EIO;
+
+       return ret;
+}
+
+static int tps65910_i2c_write(struct tps65910 *tps65910, u8 reg,
+                                  int bytes, void *src)
+{
+       struct i2c_client *i2c = tps65910->i2c_client;
+       /* we add 1 byte for device register */
+       u8 msg[TPS65910_MAX_REGISTER + 1];
+       int ret;
+
+       if (bytes > TPS65910_MAX_REGISTER)
+               return -EINVAL;
+
+       msg[0] = reg;
+       memcpy(&msg[1], src, bytes);
+
+       ret = i2c_master_send(i2c, msg, bytes + 1);
+       if (ret < 0)
+               return ret;
+       if (ret != bytes + 1)
+               return -EIO;
+       return 0;
+}
+
+int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
+{
+       u8 data;
+       int err;
+
+       mutex_lock(&tps65910->io_mutex);
+       err = tps65910_i2c_read(tps65910, reg, 1, &data);
+       if (err) {
+               dev_err(tps65910->dev, "read from reg %x failed\n", reg);
+               goto out;
+       }
+
+       data |= mask;
+       err = tps65910_i2c_write(tps65910, reg, 1, &data);
+       if (err)
+               dev_err(tps65910->dev, "write to reg %x failed\n", reg);
+
+out:
+       mutex_unlock(&tps65910->io_mutex);
+       return err;
+}
+EXPORT_SYMBOL_GPL(tps65910_set_bits);
+
+int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
+{
+       u8 data;
+       int err;
+
+       mutex_lock(&tps65910->io_mutex);
+       err = tps65910_i2c_read(tps65910, reg, 1, &data);
+       if (err) {
+               dev_err(tps65910->dev, "read from reg %x failed\n", reg);
+               goto out;
+       }
+
+       data &= ~mask;
+       err = tps65910_i2c_write(tps65910, reg, 1, &data);
+       if (err)
+               dev_err(tps65910->dev, "write to reg %x failed\n", reg);
+
+out:
+       mutex_unlock(&tps65910->io_mutex);
+       return err;
+}
+EXPORT_SYMBOL_GPL(tps65910_clear_bits);
+
+static int tps65910_i2c_probe(struct i2c_client *i2c,
+                           const struct i2c_device_id *id)
+{
+       struct tps65910 *tps65910;
+       struct tps65910_board *pmic_plat_data;
+       struct tps65910_platform_data *init_data;
+       int ret = 0;
+
+       pmic_plat_data = dev_get_platdata(&i2c->dev);
+       if (!pmic_plat_data)
+               return -EINVAL;
+
+       init_data = kzalloc(sizeof(struct tps65910_platform_data), GFP_KERNEL);
+       if (init_data == NULL)
+               return -ENOMEM;
+
+       init_data->irq = pmic_plat_data->irq;
+       init_data->irq_base = pmic_plat_data->irq;
+
+       tps65910 = kzalloc(sizeof(struct tps65910), GFP_KERNEL);
+       if (tps65910 == NULL) {
+               kfree(init_data);
+               return -ENOMEM;
+       }
+
+       i2c_set_clientdata(i2c, tps65910);
+       tps65910->dev = &i2c->dev;
+       tps65910->i2c_client = i2c;
+       tps65910->id = id->driver_data;
+       tps65910->read = tps65910_i2c_read;
+       tps65910->write = tps65910_i2c_write;
+       mutex_init(&tps65910->io_mutex);
+
+       ret = mfd_add_devices(tps65910->dev, -1,
+                             tps65910s, ARRAY_SIZE(tps65910s),
+                             NULL, 0);
+       if (ret < 0)
+               goto err;
+
+       tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base);
+
+       ret = tps65910_irq_init(tps65910, init_data->irq, init_data);
+       if (ret < 0)
+               goto err;
+
+       kfree(init_data);
+
+       return ret;
+
+err:
+       mfd_remove_devices(tps65910->dev);
+       kfree(tps65910);
+       kfree(init_data);
+       return ret;
+}
+
+static int tps65910_i2c_remove(struct i2c_client *i2c)
+{
+       struct tps65910 *tps65910 = i2c_get_clientdata(i2c);
+
+       mfd_remove_devices(tps65910->dev);
+       kfree(tps65910);
+
+       return 0;
+}
+
+static const struct i2c_device_id tps65910_i2c_id[] = {
+       { "tps65910", TPS65910 },
+       { "tps65911", TPS65911 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, tps65910_i2c_id);
+
+
+static struct i2c_driver tps65910_i2c_driver = {
+       .driver = {
+                  .name = "tps65910",
+                  .owner = THIS_MODULE,
+       },
+       .probe = tps65910_i2c_probe,
+       .remove = tps65910_i2c_remove,
+       .id_table = tps65910_i2c_id,
+};
+
+static int __init tps65910_i2c_init(void)
+{
+       return i2c_add_driver(&tps65910_i2c_driver);
+}
+/* init early so consumer devices can complete system boot */
+subsys_initcall(tps65910_i2c_init);
+
+static void __exit tps65910_i2c_exit(void)
+{
+       i2c_del_driver(&tps65910_i2c_driver);
+}
+module_exit(tps65910_i2c_exit);
+
+MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
+MODULE_AUTHOR("Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>");
+MODULE_DESCRIPTION("TPS6591x chip family multi-function driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/tps65911-comparator.c b/drivers/mfd/tps65911-comparator.c
new file mode 100644 (file)
index 0000000..3d2dc56
--- /dev/null
@@ -0,0 +1,188 @@
+/*
+ * tps65910.c  --  TI TPS6591x
+ *
+ * Copyright 2010 Texas Instruments Inc.
+ *
+ * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under  the terms of the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <linux/mfd/tps65910.h>
+
+#define COMP                                   0
+#define COMP1                                  1
+#define COMP2                                  2
+
+/* Comparator 1 voltage selection table in millivolts */
+static const u16 COMP_VSEL_TABLE[] = {
+       0, 2500, 2500, 2500, 2500, 2550, 2600, 2650,
+       2700, 2750, 2800, 2850, 2900, 2950, 3000, 3050,
+       3100, 3150, 3200, 3250, 3300, 3350, 3400, 3450,
+       3500,
+};
+
+struct comparator {
+       const char *name;
+       int reg;
+       int uV_max;
+       const u16 *vsel_table;
+};
+
+static struct comparator tps_comparators[] = {
+       {
+               .name = "COMP1",
+               .reg = TPS65911_VMBCH,
+               .uV_max = 3500,
+               .vsel_table = COMP_VSEL_TABLE,
+       },
+       {
+               .name = "COMP2",
+               .reg = TPS65911_VMBCH2,
+               .uV_max = 3500,
+               .vsel_table = COMP_VSEL_TABLE,
+       },
+};
+
+static int comp_threshold_set(struct tps65910 *tps65910, int id, int voltage)
+{
+       struct comparator tps_comp = tps_comparators[id];
+       int curr_voltage = 0;
+       int ret;
+       u8 index = 0, val;
+
+       if (id == COMP)
+               return 0;
+
+       while (curr_voltage < tps_comp.uV_max) {
+               curr_voltage = tps_comp.vsel_table[index];
+               if (curr_voltage >= voltage)
+                       break;
+               else if (curr_voltage < voltage)
+                       index++;
+       }
+
+       if (curr_voltage > tps_comp.uV_max)
+               return -EINVAL;
+
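+       /* the threshold select value is stored left-shifted by one bit */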
+       val = index << 1;
+       ret = tps65910->write(tps65910, tps_comp.reg, 1, &val);
+
+       return ret;
+}
+
+static int comp_threshold_get(struct tps65910 *tps65910, int id)
+{
+       struct comparator tps_comp = tps_comparators[id];
+       int ret;
+       u8 val;
+
+       if (id == COMP)
+               return 0;
+
+       ret = tps65910->read(tps65910, tps_comp.reg, 1, &val);
+       if (ret < 0)
+               return ret;
+
+       val >>= 1;
+       return tps_comp.vsel_table[val];
+}
+
+static ssize_t comp_threshold_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct tps65910 *tps65910 = dev_get_drvdata(dev->parent);
+       struct attribute comp_attr = attr->attr;
+       int id, uVolt;
+
+       if (!strcmp(comp_attr.name, "comp1_threshold"))
+               id = COMP1;
+       else if (!strcmp(comp_attr.name, "comp2_threshold"))
+               id = COMP2;
+       else
+               return -EINVAL;
+
+       uVolt = comp_threshold_get(tps65910, id);
+
+       return sprintf(buf, "%d\n", uVolt);
+}
+
+static DEVICE_ATTR(comp1_threshold, S_IRUGO, comp_threshold_show, NULL);
+static DEVICE_ATTR(comp2_threshold, S_IRUGO, comp_threshold_show, NULL);
+
+static __devinit int tps65911_comparator_probe(struct platform_device *pdev)
+{
+       struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
+       struct tps65910_platform_data *pdata = dev_get_platdata(tps65910->dev);
+       int ret;
+
+       ret = comp_threshold_set(tps65910, COMP1,  pdata->vmbch_threshold);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "cannot set COMP1 threshold\n");
+               return ret;
+       }
+
+       ret = comp_threshold_set(tps65910, COMP2, pdata->vmbch2_threshold);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "cannot set COMP2 threshold\n");
+               return ret;
+       }
+
+       /* Create sysfs entry */
+       ret = device_create_file(&pdev->dev, &dev_attr_comp1_threshold);
+       if (ret < 0)
+               dev_err(&pdev->dev, "failed to add COMP1 sysfs file\n");
+
+       ret = device_create_file(&pdev->dev, &dev_attr_comp2_threshold);
+       if (ret < 0)
+               dev_err(&pdev->dev, "failed to add COMP2 sysfs file\n");
+
+       return ret;
+}
+
+static __devexit int tps65911_comparator_remove(struct platform_device *pdev)
+{
+       device_remove_file(&pdev->dev, &dev_attr_comp2_threshold);
+       device_remove_file(&pdev->dev, &dev_attr_comp1_threshold);
+
+       return 0;
+}
+
+static struct platform_driver tps65911_comparator_driver = {
+       .driver = {
+               .name = "tps65911-comparator",
+               .owner = THIS_MODULE,
+       },
+       .probe = tps65911_comparator_probe,
+       .remove = __devexit_p(tps65911_comparator_remove),
+};
+
+static int __init tps65911_comparator_init(void)
+{
+       return platform_driver_register(&tps65911_comparator_driver);
+}
+subsys_initcall(tps65911_comparator_init);
+
+static void __exit tps65911_comparator_exit(void)
+{
+       platform_driver_unregister(&tps65911_comparator_driver);
+}
+module_exit(tps65911_comparator_exit);
+
+MODULE_AUTHOR("Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>");
+MODULE_DESCRIPTION("TPS65911 comparator driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps65911-comparator");
index 960b5bed7f5237afdd769583db90908ed7a45fca..b8f2a4e7f6e767f47da933edd805ffd0bdba2120 100644 (file)
 #define TWL6030_BASEADD_GASGAUGE       0x00C0
 #define TWL6030_BASEADD_PIH            0x00D0
 #define TWL6030_BASEADD_CHARGER                0x00E0
+#define TWL6025_BASEADD_CHARGER                0x00DA
 
 /* subchip/slave 2 0x4A - DFT */
 #define TWL6030_BASEADD_DIEID          0x00C0
 /* is driver active, bound to a chip? */
 static bool inuse;
 
+/* TWL IDCODE Register value */
+static u32 twl_idcode;
+
 static unsigned int twl_id;
 unsigned int twl_rev(void)
 {
@@ -328,6 +332,7 @@ static struct twl_mapping twl6030_map[] = {
 
        { SUB_CHIP_ID0, TWL6030_BASEADD_RTC },
        { SUB_CHIP_ID0, TWL6030_BASEADD_MEM },
+       { SUB_CHIP_ID1, TWL6025_BASEADD_CHARGER },
 };
 
 /*----------------------------------------------------------------------*/
@@ -487,6 +492,58 @@ EXPORT_SYMBOL(twl_i2c_read_u8);
 
 /*----------------------------------------------------------------------*/
 
+/**
+ * twl_read_idcode_register - API to read the IDCODE register.
+ *
+ * Unlocks the IDCODE register and reads the 32-bit value.
+ */
+static int twl_read_idcode_register(void)
+{
+       int err;
+
+       err = twl_i2c_write_u8(TWL4030_MODULE_INTBR, TWL_EEPROM_R_UNLOCK,
+                                               REG_UNLOCK_TEST_REG);
+       if (err) {
+               pr_err("TWL4030 Unable to unlock IDCODE registers -%d\n", err);
+               goto fail;
+       }
+
+       err = twl_i2c_read(TWL4030_MODULE_INTBR, (u8 *)(&twl_idcode),
+                                               REG_IDCODE_7_0, 4);
+       if (err) {
+               pr_err("TWL4030: unable to read IDCODE -%d\n", err);
+               goto fail;
+       }
+
+       err = twl_i2c_write_u8(TWL4030_MODULE_INTBR, 0x0, REG_UNLOCK_TEST_REG);
+       if (err)
+               pr_err("TWL4030 Unable to relock IDCODE registers -%d\n", err);
+fail:
+       return err;
+}
+
+/**
+ * twl_get_type - API to get TWL Si type.
+ *
+ * API to get the TWL Si type from the IDCODE value.
+ */
+int twl_get_type(void)
+{
+       return TWL_SIL_TYPE(twl_idcode);
+}
+EXPORT_SYMBOL_GPL(twl_get_type);
+
+/**
+ * twl_get_version - API to get TWL Si version.
+ *
+ * API to get the TWL Si version from the IDCODE value.
+ */
+int twl_get_version(void)
+{
+       return TWL_SIL_REV(twl_idcode);
+}
+EXPORT_SYMBOL_GPL(twl_get_version);
+
 static struct device *
 add_numbered_child(unsigned chip, const char *name, int num,
                void *pdata, unsigned pdata_len,
@@ -549,7 +606,7 @@ static inline struct device *add_child(unsigned chip, const char *name,
 static struct device *
 add_regulator_linked(int num, struct regulator_init_data *pdata,
                struct regulator_consumer_supply *consumers,
-               unsigned num_consumers)
+               unsigned num_consumers, unsigned long features)
 {
        unsigned sub_chip_id;
        /* regulator framework demands init_data ... */
@@ -561,6 +618,8 @@ add_regulator_linked(int num, struct regulator_init_data *pdata,
                pdata->num_consumer_supplies = num_consumers;
        }
 
+       pdata->driver_data = (void *)features;
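+       /* hand the silicon feature flags to the twl regulator driver */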
+
        /* NOTE:  we currently ignore regulator IRQs, e.g. for short circuits */
        sub_chip_id = twl_map[TWL_MODULE_PM_MASTER].sid;
        return add_numbered_child(sub_chip_id, "twl_reg", num,
@@ -568,9 +627,10 @@ add_regulator_linked(int num, struct regulator_init_data *pdata,
 }
 
 static struct device *
-add_regulator(int num, struct regulator_init_data *pdata)
+add_regulator(int num, struct regulator_init_data *pdata,
+               unsigned long features)
 {
-       return add_regulator_linked(num, pdata, NULL, 0);
+       return add_regulator_linked(num, pdata, NULL, 0, features);
 }
 
 /*
@@ -650,17 +710,20 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
                        };
 
                        child = add_regulator_linked(TWL4030_REG_VUSB1V5,
-                                                     &usb_fixed, &usb1v5, 1);
+                                                     &usb_fixed, &usb1v5, 1,
+                                                     features);
                        if (IS_ERR(child))
                                return PTR_ERR(child);
 
                        child = add_regulator_linked(TWL4030_REG_VUSB1V8,
-                                                     &usb_fixed, &usb1v8, 1);
+                                                     &usb_fixed, &usb1v8, 1,
+                                                     features);
                        if (IS_ERR(child))
                                return PTR_ERR(child);
 
                        child = add_regulator_linked(TWL4030_REG_VUSB3V1,
-                                                     &usb_fixed, &usb3v1, 1);
+                                                     &usb_fixed, &usb3v1, 1,
+                                                     features);
                        if (IS_ERR(child))
                                return PTR_ERR(child);
 
@@ -685,9 +748,8 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
        }
        if (twl_has_usb() && pdata->usb && twl_class_is_6030()) {
 
-               static struct regulator_consumer_supply usb3v3 = {
-                       .supply =       "vusb",
-               };
+               static struct regulator_consumer_supply usb3v3;
+               int regulator;
 
                if (twl_has_regulator()) {
                        /* this is a template that gets copied */
@@ -700,12 +762,22 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
                                        | REGULATOR_CHANGE_STATUS,
                        };
 
-                       child = add_regulator_linked(TWL6030_REG_VUSB,
-                                                     &usb_fixed, &usb3v3, 1);
+                       if (features & TWL6025_SUBCLASS) {
+                               usb3v3.supply = "ldousb";
+                               regulator = TWL6025_REG_LDOUSB;
+                       } else {
+                               usb3v3.supply = "vusb";
+                               regulator = TWL6030_REG_VUSB;
+                       }
+                       child = add_regulator_linked(regulator, &usb_fixed,
+                                                       &usb3v3, 1,
+                                                       features);
                        if (IS_ERR(child))
                                return PTR_ERR(child);
                }
 
+               pdata->usb->features = features;
+
                child = add_child(0, "twl6030_usb",
                        pdata->usb, sizeof(*pdata->usb),
                        true,
@@ -718,7 +790,16 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
                /* we need to connect regulators to this transceiver */
                if (twl_has_regulator() && child)
                        usb3v3.dev = child;
+       } else if (twl_has_regulator() && twl_class_is_6030()) {
+               if (features & TWL6025_SUBCLASS)
+                       child = add_regulator(TWL6025_REG_LDOUSB,
+                                               pdata->ldousb, features);
+               else
+                       child = add_regulator(TWL6030_REG_VUSB,
+                                               pdata->vusb, features);
 
+               if (IS_ERR(child))
+                       return PTR_ERR(child);
        }
 
        if (twl_has_watchdog() && twl_class_is_4030()) {
@@ -755,46 +836,55 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
 
        /* twl4030 regulators */
        if (twl_has_regulator() && twl_class_is_4030()) {
-               child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1);
+               child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL4030_REG_VIO, pdata->vio);
+               child = add_regulator(TWL4030_REG_VIO, pdata->vio,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL4030_REG_VDD1, pdata->vdd1);
+               child = add_regulator(TWL4030_REG_VDD1, pdata->vdd1,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL4030_REG_VDD2, pdata->vdd2);
+               child = add_regulator(TWL4030_REG_VDD2, pdata->vdd2,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL4030_REG_VMMC1, pdata->vmmc1);
+               child = add_regulator(TWL4030_REG_VMMC1, pdata->vmmc1,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL4030_REG_VDAC, pdata->vdac);
+               child = add_regulator(TWL4030_REG_VDAC, pdata->vdac,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
                child = add_regulator((features & TWL4030_VAUX2)
                                        ? TWL4030_REG_VAUX2_4030
                                        : TWL4030_REG_VAUX2,
-                               pdata->vaux2);
+                               pdata->vaux2, features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL4030_REG_VINTANA1, pdata->vintana1);
+               child = add_regulator(TWL4030_REG_VINTANA1, pdata->vintana1,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL4030_REG_VINTANA2, pdata->vintana2);
+               child = add_regulator(TWL4030_REG_VINTANA2, pdata->vintana2,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL4030_REG_VINTDIG, pdata->vintdig);
+               child = add_regulator(TWL4030_REG_VINTDIG, pdata->vintdig,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
        }
@@ -802,72 +892,152 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
        /* maybe add LDOs that are omitted on cost-reduced parts */
        if (twl_has_regulator() && !(features & TPS_SUBSET)
          && twl_class_is_4030()) {
-               child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2);
+               child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL4030_REG_VMMC2, pdata->vmmc2);
+               child = add_regulator(TWL4030_REG_VMMC2, pdata->vmmc2,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL4030_REG_VSIM, pdata->vsim);
+               child = add_regulator(TWL4030_REG_VSIM, pdata->vsim,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL4030_REG_VAUX1, pdata->vaux1);
+               child = add_regulator(TWL4030_REG_VAUX1, pdata->vaux1,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL4030_REG_VAUX3, pdata->vaux3);
+               child = add_regulator(TWL4030_REG_VAUX3, pdata->vaux3,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL4030_REG_VAUX4, pdata->vaux4);
+               child = add_regulator(TWL4030_REG_VAUX4, pdata->vaux4,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
        }
 
        /* twl6030 regulators */
+       if (twl_has_regulator() && twl_class_is_6030() &&
+                       !(features & TWL6025_SUBCLASS)) {
+               child = add_regulator(TWL6030_REG_VMMC, pdata->vmmc,
+                                       features);
+               if (IS_ERR(child))
+                       return PTR_ERR(child);
+
+               child = add_regulator(TWL6030_REG_VPP, pdata->vpp,
+                                       features);
+               if (IS_ERR(child))
+                       return PTR_ERR(child);
+
+               child = add_regulator(TWL6030_REG_VUSIM, pdata->vusim,
+                                       features);
+               if (IS_ERR(child))
+                       return PTR_ERR(child);
+
+               child = add_regulator(TWL6030_REG_VCXIO, pdata->vcxio,
+                                       features);
+               if (IS_ERR(child))
+                       return PTR_ERR(child);
+
+               child = add_regulator(TWL6030_REG_VDAC, pdata->vdac,
+                                       features);
+               if (IS_ERR(child))
+                       return PTR_ERR(child);
+
+               child = add_regulator(TWL6030_REG_VAUX1_6030, pdata->vaux1,
+                                       features);
+               if (IS_ERR(child))
+                       return PTR_ERR(child);
+
+               child = add_regulator(TWL6030_REG_VAUX2_6030, pdata->vaux2,
+                                       features);
+               if (IS_ERR(child))
+                       return PTR_ERR(child);
+
+               child = add_regulator(TWL6030_REG_VAUX3_6030, pdata->vaux3,
+                                       features);
+               if (IS_ERR(child))
+                       return PTR_ERR(child);
+
+               child = add_regulator(TWL6030_REG_CLK32KG, pdata->clk32kg,
+                                       features);
+               if (IS_ERR(child))
+                       return PTR_ERR(child);
+       }
+
+       /* 6030 and 6025 share this regulator */
        if (twl_has_regulator() && twl_class_is_6030()) {
-               child = add_regulator(TWL6030_REG_VMMC, pdata->vmmc);
+               child = add_regulator(TWL6030_REG_VANA, pdata->vana,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
+       }
 
-               child = add_regulator(TWL6030_REG_VPP, pdata->vpp);
+       /* twl6025 regulators */
+       if (twl_has_regulator() && twl_class_is_6030() &&
+                       (features & TWL6025_SUBCLASS)) {
+               child = add_regulator(TWL6025_REG_LDO5, pdata->ldo5,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL6030_REG_VUSIM, pdata->vusim);
+               child = add_regulator(TWL6025_REG_LDO1, pdata->ldo1,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL6030_REG_VANA, pdata->vana);
+               child = add_regulator(TWL6025_REG_LDO7, pdata->ldo7,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL6030_REG_VCXIO, pdata->vcxio);
+               child = add_regulator(TWL6025_REG_LDO6, pdata->ldo6,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL6030_REG_VDAC, pdata->vdac);
+               child = add_regulator(TWL6025_REG_LDOLN, pdata->ldoln,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL6030_REG_VAUX1_6030, pdata->vaux1);
+               child = add_regulator(TWL6025_REG_LDO2, pdata->ldo2,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL6030_REG_VAUX2_6030, pdata->vaux2);
+               child = add_regulator(TWL6025_REG_LDO4, pdata->ldo4,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL6030_REG_VAUX3_6030, pdata->vaux3);
+               child = add_regulator(TWL6025_REG_LDO3, pdata->ldo3,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
 
-               child = add_regulator(TWL6030_REG_CLK32KG, pdata->clk32kg);
+               child = add_regulator(TWL6025_REG_SMPS3, pdata->smps3,
+                                       features);
                if (IS_ERR(child))
                        return PTR_ERR(child);
+
+               child = add_regulator(TWL6025_REG_SMPS4, pdata->smps4,
+                                       features);
+               if (IS_ERR(child))
+                       return PTR_ERR(child);
+
+               child = add_regulator(TWL6025_REG_VIO, pdata->vio6025,
+                                       features);
+               if (IS_ERR(child))
+                       return PTR_ERR(child);
+
        }
 
        if (twl_has_bci() && pdata->bci &&
@@ -1014,6 +1184,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
        unsigned                        i;
        struct twl4030_platform_data    *pdata = client->dev.platform_data;
        u8 temp;
+       int ret = 0;
 
        if (!pdata) {
                dev_dbg(&client->dev, "no platform data?\n");
@@ -1060,6 +1231,12 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
        /* setup clock framework */
        clocks_init(&client->dev, pdata->clock);
 
+       /* read TWL IDCODE Register */
+       if (twl_id == TWL4030_CLASS_ID) {
+               ret = twl_read_idcode_register();
+               WARN(ret < 0, "Error: reading twl_idcode register value\n");
+       }
+
        /* load power event scripts */
        if (twl_has_power() && pdata->power)
                twl4030_power_init(pdata->power);
@@ -1108,6 +1285,7 @@ static const struct i2c_device_id twl_ids[] = {
        { "tps65930", TPS_SUBSET },     /* fewer LDOs and DACs; no charger */
        { "tps65920", TPS_SUBSET },     /* fewer LDOs; no codec or charger */
        { "twl6030", TWL6030_CLASS },   /* "Phoenix power chip" */
+       { "twl6025", TWL6030_CLASS | TWL6025_SUBCLASS }, /* "Phoenix lite" */
        { /* end of list */ },
 };
 MODULE_DEVICE_TABLE(i2c, twl_ids);
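
The hunks above thread the board's features bitmask through every add_regulator() call so regulator children can be registered per chip variant: plain TWL6030 parts get the fixed VMMC/VPP/VUSIM/VCXIO/VDAC/VAUX set, parts flagged with TWL6025_SUBCLASS get the LDO/SMPS bank instead, and VANA is shared by both. A minimal sketch of that variant test, using a hypothetical helper name and assuming the feature flags are exported via <linux/i2c/twl.h> alongside the other board flags:

/*
 * Minimal sketch of the variant check (twl6025_has_ldo_bank() is a
 * hypothetical name; TWL6025_SUBCLASS is assumed to come from
 * <linux/i2c/twl.h>, where the other feature flags live).
 */
#include <linux/types.h>
#include <linux/i2c/twl.h>

static bool twl6025_has_ldo_bank(unsigned long features)
{
	/* TWL6025 ("Phoenix lite") is a TWL6030-class part plus this flag. */
	return (features & TWL6025_SUBCLASS) != 0;
}
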
index c02fded316c9f1ce1ee8fd2343b1bfb476399a9f..2bf4136464c1f30c2428638de1b7f1fddaf3b3f0 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * MFD driver for twl4030 codec submodule
  *
- * Author:     Peter Ujfalusi <peter.ujfalusi@nokia.com>
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
  *
  * Copyright:   (C) 2009 Nokia Corporation
  *
@@ -208,13 +208,15 @@ static int __devinit twl4030_codec_probe(struct platform_device *pdev)
        if (pdata->audio) {
                cell = &codec->cells[childs];
                cell->name = "twl4030-codec";
-               cell->mfd_data = pdata->audio;
+               cell->platform_data = pdata->audio;
+               cell->pdata_size = sizeof(*pdata->audio);
                childs++;
        }
        if (pdata->vibra) {
                cell = &codec->cells[childs];
                cell->name = "twl4030-vibra";
-               cell->mfd_data = pdata->vibra;
+               cell->platform_data = pdata->vibra;
+               cell->pdata_size = sizeof(*pdata->vibra);
                childs++;
        }
 
@@ -270,6 +272,6 @@ static void __devexit twl4030_codec_exit(void)
 }
 module_exit(twl4030_codec_exit);
 
-MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@nokia.com>");
+MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
 MODULE_LICENSE("GPL");
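
The twl4030-codec hunk above, and several hunks below (wl1273-core, wm8400-core, tmio_mmc, tmio_nand, janz-ican3, ks8842), all make the same migration: instead of stashing a pointer in mfd_cell.mfd_data and retrieving it with mfd_get_data(), the parent fills in .platform_data and .pdata_size, and the child reads ordinary platform data from its device. A hedged sketch of the new shape, with made-up names ("example-child", struct example_child_pdata):

/*
 * Sketch only: a hypothetical parent/child pair showing the convention.
 */
#include <linux/errno.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>

struct example_child_pdata {
	int clock_khz;
};

static struct example_child_pdata example_pdata = {
	.clock_khz = 19200,
};

static struct mfd_cell example_cells[] = {
	{
		.name		= "example-child",
		.platform_data	= &example_pdata,
		.pdata_size	= sizeof(example_pdata),
	},
};

/*
 * The parent registers the cells, e.g.
 *   mfd_add_devices(parent_dev, -1, example_cells,
 *                   ARRAY_SIZE(example_cells), NULL, 0);
 * and the MFD core copies .platform_data into each child device, so the
 * child driver reads plain platform data:
 */
static int example_child_probe(struct platform_device *pdev)
{
	struct example_child_pdata *pdata = pdev->dev.platform_data;

	if (!pdata)
		return -ENODEV;
	return 0;
}
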
 
index 2c0d4d16491ab4c3ef35d881c3fb11334b25ed7d..a764676f09220146fb0ebc84addfe5e79ad9943e 100644 (file)
@@ -120,7 +120,7 @@ static u8 res_config_addrs[] = {
        [RES_HFCLKOUT]  = 0x8b,
        [RES_32KCLKOUT] = 0x8e,
        [RES_RESET]     = 0x91,
-       [RES_Main_Ref]  = 0x94,
+       [RES_MAIN_REF]  = 0x94,
 };
 
 static int __init twl4030_write_script_byte(u8 address, u8 byte)
@@ -448,7 +448,7 @@ static int __init load_twl4030_script(struct twl4030_script *tscript,
                        goto out;
        }
        if (tscript->flags & TWL4030_SLEEP_SCRIPT) {
-               if (order)
+               if (!order)
                        pr_warning("TWL4030: Bad order of scripts (sleep "\
                                        "script before wakeup) Leads to boot"\
                                        "failure on some boards\n");
@@ -485,9 +485,9 @@ int twl4030_remove_script(u8 flags)
                        return err;
        }
        if (flags & TWL4030_WAKEUP12_SCRIPT) {
-               if (err)
                err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT,
                                R_SEQ_ADD_S2A12);
+               if (err)
                        return err;
        }
        if (flags & TWL4030_WAKEUP3_SCRIPT) {
index dfbae34e18046c69d28f04fa502172a9346cb585..eb3b5f88e566c73e6599705649c83ec9b2758f2d 100644 (file)
@@ -76,8 +76,8 @@ static int twl6030_interrupt_mapping[24] = {
        USBOTG_INTR_OFFSET,     /* Bit 18       ID                      */
        USB_PRES_INTR_OFFSET,   /* Bit 19       VBUS                    */
        CHARGER_INTR_OFFSET,    /* Bit 20       CHRG_CTRL               */
-       CHARGER_INTR_OFFSET,    /* Bit 21       EXT_CHRG                */
-       CHARGER_INTR_OFFSET,    /* Bit 22       INT_CHRG                */
+       CHARGERFAULT_INTR_OFFSET,       /* Bit 21       EXT_CHRG        */
+       CHARGERFAULT_INTR_OFFSET,       /* Bit 22       INT_CHRG        */
        RSV_INTR_OFFSET,        /* Bit 23       Reserved                */
 };
 /*----------------------------------------------------------------------*/
index 04914f2836c0ad3d22a97d2787d789a1f34e1ae9..d97a86945174519a6d333bf0527875b63537384d 100644 (file)
@@ -153,7 +153,6 @@ out:
  */
 static int wl1273_fm_set_volume(struct wl1273_core *core, unsigned int volume)
 {
-       u16 val;
        int r;
 
        if (volume > WL1273_MAX_VOLUME)
@@ -217,7 +216,8 @@ static int __devinit wl1273_core_probe(struct i2c_client *client,
 
        cell = &core->cells[children];
        cell->name = "wl1273_fm_radio";
-       cell->mfd_data = &core;
+       cell->platform_data = &core;
+       cell->pdata_size = sizeof(core);
        children++;
 
        core->read = wl1273_fm_read_reg;
@@ -231,7 +231,8 @@ static int __devinit wl1273_core_probe(struct i2c_client *client,
 
                dev_dbg(&client->dev, "%s: Have codec.\n", __func__);
                cell->name = "wl1273-codec";
-               cell->mfd_data = &core;
+               cell->platform_data = &core;
+               cell->pdata_size = sizeof(core);
                children++;
        }
 
index 3fe9a58fe6c76a304a2eb61e93c375840d4b3d2d..265f75fc6a25f404a60d0954f2228585bb149938 100644 (file)
@@ -1442,7 +1442,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
        struct wm831x_pdata *pdata = wm831x->dev->platform_data;
        int rev;
        enum wm831x_parent parent;
-       int ret;
+       int ret, i;
 
        mutex_init(&wm831x->io_lock);
        mutex_init(&wm831x->key_lock);
@@ -1581,6 +1581,17 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
                }
        }
 
+       if (pdata) {
+               for (i = 0; i < ARRAY_SIZE(pdata->gpio_defaults); i++) {
+                       if (!pdata->gpio_defaults[i])
+                               continue;
+
+                       wm831x_reg_write(wm831x,
+                                        WM831X_GPIO1_CONTROL + i,
+                                        pdata->gpio_defaults[i] & 0xffff);
+               }
+       }
+
        ret = wm831x_irq_init(wm831x, irq);
        if (ret != 0)
                goto err;
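
The new loop above programs any non-zero gpio_defaults[] entry into the corresponding WM831X_GPIOn_CONTROL register, masking the value with 0xffff. Reading between the lines of that mask (an inference, not something stated in the hunk), a board can force a register to 0x0000 by passing a value with a bit set above bit 15, while a plain 0 means "leave the hardware default alone". A hypothetical board-data sketch under that assumption:

/*
 * Hypothetical board data (values are illustrative, not from a real
 * board).  A zero entry is skipped by the loop above; the 0xffff mask
 * suggests a value such as 0x10000 programs an all-zero register --
 * that is an assumption read from the mask, not from the changelog.
 */
#include <linux/mfd/wm831x/pdata.h>

static struct wm831x_pdata example_wm831x_pdata = {
	.gpio_defaults = {
		[0] = 0x8406,	/* program WM831X_GPIO1_CONTROL to 0x8406 */
		[1] = 0x10000,	/* program WM831X_GPIO2_CONTROL to 0x0000 */
		/* remaining entries stay 0 -> registers left untouched */
	},
};
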
index 23e66af89dea12d0871aa44fe2b824e692aee8b3..42b928ec891e6b4b2a60a6323ed5d6265659e978 100644 (file)
@@ -515,12 +515,6 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
                                 0xffff);
        }
 
-       if (!irq) {
-               dev_warn(wm831x->dev,
-                        "No interrupt specified - functionality limited\n");
-               return 0;
-       }
-
        if (!pdata || !pdata->irq_base) {
                dev_err(wm831x->dev,
                        "No interrupt base specified, no interrupts\n");
@@ -567,15 +561,22 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
 #endif
        }
 
-       ret = request_threaded_irq(irq, NULL, wm831x_irq_thread,
-                                  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
-                                  "wm831x", wm831x);
-       if (ret != 0) {
-               dev_err(wm831x->dev, "Failed to request IRQ %d: %d\n",
-                       irq, ret);
-               return ret;
+       if (irq) {
+               ret = request_threaded_irq(irq, NULL, wm831x_irq_thread,
+                                          IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+                                          "wm831x", wm831x);
+               if (ret != 0) {
+                       dev_err(wm831x->dev, "Failed to request IRQ %d: %d\n",
+                               irq, ret);
+                       return ret;
+               }
+       } else {
+               dev_warn(wm831x->dev,
+                        "No interrupt specified - functionality limited\n");
        }
 
+
+
        /* Enable top level interrupts, we mask at secondary level */
        wm831x_reg_write(wm831x, WM831X_SYSTEM_INTERRUPTS_MASK, 0);
 
index 3a6e78cb038458936e4679483ba564ac98f25936..597f82edacaa8cc010aaa2b3e28d3e5985a17f81 100644 (file)
@@ -245,7 +245,8 @@ static int wm8400_register_codec(struct wm8400 *wm8400)
 {
        struct mfd_cell cell = {
                .name = "wm8400-codec",
-               .mfd_data = wm8400,
+               .platform_data = wm8400,
+               .pdata_size = sizeof(*wm8400),
        };
 
        return mfd_add_devices(wm8400->dev, -1, &cell, 1, NULL, 0);
index 74f16f167b8e1dcce5faaa41d75b1a2452f0eb5f..b0c56313dbbb4928826c08f535f3305490c7c61e 100644 (file)
@@ -285,33 +285,26 @@ static void hw_break_val_write(void)
 static int check_and_rewind_pc(char *put_str, char *arg)
 {
        unsigned long addr = lookup_addr(arg);
+       unsigned long ip;
        int offset = 0;
 
        kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs,
                 NUMREGBYTES);
        gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs);
-       v2printk("Stopped at IP: %lx\n", instruction_pointer(&kgdbts_regs));
-#ifdef CONFIG_X86
-       /* On x86 a breakpoint stop requires it to be decremented */
-       if (addr + 1 == kgdbts_regs.ip)
-               offset = -1;
-#elif defined(CONFIG_SUPERH)
-       /* On SUPERH a breakpoint stop requires it to be decremented */
-       if (addr + 2 == kgdbts_regs.pc)
-               offset = -2;
+       ip = instruction_pointer(&kgdbts_regs);
+       v2printk("Stopped at IP: %lx\n", ip);
+#ifdef GDB_ADJUSTS_BREAK_OFFSET
+       /* On some arches, a breakpoint stop requires it to be decremented */
+       if (addr + BREAK_INSTR_SIZE == ip)
+               offset = -BREAK_INSTR_SIZE;
 #endif
-       if (strcmp(arg, "silent") &&
-               instruction_pointer(&kgdbts_regs) + offset != addr) {
+       if (strcmp(arg, "silent") && ip + offset != addr) {
                eprintk("kgdbts: BP mismatch %lx expected %lx\n",
-                          instruction_pointer(&kgdbts_regs) + offset, addr);
+                          ip + offset, addr);
                return 1;
        }
-#ifdef CONFIG_X86
-       /* On x86 adjust the instruction pointer if needed */
-       kgdbts_regs.ip += offset;
-#elif defined(CONFIG_SUPERH)
-       kgdbts_regs.pc += offset;
-#endif
+       /* Readjust the instruction pointer if needed */
+       instruction_pointer_set(&kgdbts_regs, ip + offset);
        return 0;
 }
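
The per-architecture #ifdef blocks are replaced by the generic instruction_pointer()/instruction_pointer_set() accessors plus the architecture's BREAK_INSTR_SIZE, keyed off GDB_ADJUSTS_BREAK_OFFSET. The rewind logic in isolation, as a sketch built on the same accessors the test now uses:

/*
 * Sketch of the arch-neutral rewind on its own, using the accessors the
 * test relies on (instruction_pointer(), instruction_pointer_set(),
 * BREAK_INSTR_SIZE from the architecture's kgdb support).
 */
#include <linux/ptrace.h>
#include <linux/kgdb.h>

static void example_rewind_pc(struct pt_regs *regs, unsigned long bp_addr)
{
	unsigned long ip = instruction_pointer(regs);

#ifdef GDB_ADJUSTS_BREAK_OFFSET
	/* Some arches stop with the PC just past the break instruction. */
	if (bp_addr + BREAK_INSTR_SIZE == ip)
		instruction_pointer_set(regs, bp_addr);
#endif
}
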
 
index 14479f9ef53f93cb514e54974650af5a901f3996..8d185de90d207ded1a256b32cdd69ae11d6ec2c6 100644 (file)
@@ -69,7 +69,7 @@ static int __devinit tmio_mmc_probe(struct platform_device *pdev)
        if (pdev->num_resources != 2)
                goto out;
 
-       pdata = mfd_get_data(pdev);
+       pdata = pdev->dev.platform_data;
        if (!pdata || !pdata->hclk)
                goto out;
 
index 14c578707824b5e598ce5216da991ac52183e64f..c004e474631b4c3deca7ae4ffff18333e5384c04 100644 (file)
@@ -372,7 +372,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
 
 static int tmio_probe(struct platform_device *dev)
 {
-       struct tmio_nand_data *data = mfd_get_data(dev);
+       struct tmio_nand_data *data = dev->dev.platform_data;
        struct resource *fcr = platform_get_resource(dev,
                        IORESOURCE_MEM, 0);
        struct resource *ccr = platform_get_resource(dev,
index 6141667c5fb76b678619057bf9de5588a8a5b396..17b4dd94da907f388e66e9e5bd6ec6d5daf50148 100644 (file)
@@ -113,9 +113,11 @@ MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
 module_param(tx_queues, int, 0);
 MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
 module_param_named(num_grat_arp, num_peer_notif, int, 0644);
-MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on failover event (alias of num_unsol_na)");
+MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
+                              "failover event (alias of num_unsol_na)");
 module_param_named(num_unsol_na, num_peer_notif, int, 0644);
-MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on failover event (alias of num_grat_arp)");
+MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
+                              "failover event (alias of num_grat_arp)");
 module_param(miimon, int, 0);
 MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
 module_param(updelay, int, 0);
@@ -127,7 +129,7 @@ module_param(use_carrier, int, 0);
 MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
                              "0 for off, 1 for on (default)");
 module_param(mode, charp, 0);
-MODULE_PARM_DESC(mode, "Mode of operation : 0 for balance-rr, "
+MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
                       "1 for active-backup, 2 for balance-xor, "
                       "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
                       "6 for balance-alb");
@@ -142,27 +144,35 @@ MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
                                   "2 for only on active slave "
                                   "failure");
 module_param(lacp_rate, charp, 0);
-MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner "
-                           "(slow/fast)");
+MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
+                           "0 for slow, 1 for fast");
 module_param(ad_select, charp, 0);
-MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic: stable (0, default), bandwidth (1), count (2)");
+MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic; "
+                           "0 for stable (default), 1 for bandwidth, "
+                           "2 for count");
 module_param(xmit_hash_policy, charp, 0);
-MODULE_PARM_DESC(xmit_hash_policy, "XOR hashing method: 0 for layer 2 (default)"
-                                  ", 1 for layer 3+4");
+MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; "
+                                  "0 for layer 2 (default), 1 for layer 3+4, "
+                                  "2 for layer 2+3");
 module_param(arp_interval, int, 0);
 MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
 module_param_array(arp_ip_target, charp, NULL, 0);
 MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
 module_param(arp_validate, charp, 0);
-MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all");
+MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
+                              "0 for none (default), 1 for active, "
+                              "2 for backup, 3 for all");
 module_param(fail_over_mac, charp, 0);
-MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC.  none (default), active or follow");
+MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
+                               "the same MAC; 0 for none (default), "
+                               "1 for active, 2 for follow");
 module_param(all_slaves_active, int, 0);
 MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface"
-                                    "by setting active flag for all slaves "
+                                    "by setting active flag for all slaves; "
                                     "0 for never (default), 1 for always.");
 module_param(resend_igmp, int, 0);
-MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link failure");
+MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
+                             "link failure");
 
 /*----------------------------- Global variables ----------------------------*/
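
The reworked MODULE_PARM_DESC strings rely on C's adjacent-string-literal concatenation, so each fragment must carry its own separating space; the all_slaves_active description above still lacks one between "interface" and "by". A small stand-alone demonstration of the pitfall:

/* Stand-alone C demonstration of adjacent-literal concatenation. */
#include <stdio.h>

int main(void)
{
	const char *ok  = "Number of peer notifications to send on "
			  "failover event (alias of num_unsol_na)";
	const char *bad = "Keep all frames received on an interface"
			  "by setting active flag for all slaves";

	printf("%s\n", ok);	/* ...send on failover event... */
	printf("%s\n", bad);	/* ...on an interfaceby setting... */
	return 0;
}
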
 
index 587fba48cdd9594af60827a4626196ee1483a695..f1942cab35f65cbc237cffe9c797690d7ad5b6ac 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
-#include <linux/mfd/core.h>
 
 #include <linux/netdevice.h>
 #include <linux/can.h>
@@ -1644,7 +1643,7 @@ static int __devinit ican3_probe(struct platform_device *pdev)
        struct device *dev;
        int ret;
 
-       pdata = mfd_get_data(pdev);
+       pdata = pdev->dev.platform_data;
        if (!pdata)
                return -ENXIO;
 
index 807b6bb200eb28da5658b7cf8497dba7b8ab4821..29a4f06fbfcf0004c53e96d51cc5d6da0aaaf76b 100644 (file)
@@ -1772,7 +1772,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
        /* obtain emac clock from kernel */
        emac_clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(emac_clk)) {
-               printk(KERN_ERR "DaVinci EMAC: Failed to get EMAC clock\n");
+               dev_err(&pdev->dev, "failed to get EMAC clock\n");
                return -EBUSY;
        }
        emac_bus_frequency = clk_get_rate(emac_clk);
@@ -1780,7 +1780,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
 
        ndev = alloc_etherdev(sizeof(struct emac_priv));
        if (!ndev) {
-               printk(KERN_ERR "DaVinci EMAC: Error allocating net_device\n");
+               dev_err(&pdev->dev, "error allocating net_device\n");
                clk_put(emac_clk);
                return -ENOMEM;
        }
@@ -1795,7 +1795,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
 
        pdata = pdev->dev.platform_data;
        if (!pdata) {
-               printk(KERN_ERR "DaVinci EMAC: No platform data\n");
+               dev_err(&pdev->dev, "no platform data\n");
                return -ENODEV;
        }
 
@@ -1814,7 +1814,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
        /* Get EMAC platform data */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
-               dev_err(emac_dev, "DaVinci EMAC: Error getting res\n");
+               dev_err(&pdev->dev, "error getting res\n");
                rc = -ENOENT;
                goto probe_quit;
        }
@@ -1822,14 +1822,14 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
        priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
        size = res->end - res->start + 1;
        if (!request_mem_region(res->start, size, ndev->name)) {
-               dev_err(emac_dev, "DaVinci EMAC: failed request_mem_region() for regs\n");
+               dev_err(&pdev->dev, "failed request_mem_region() for regs\n");
                rc = -ENXIO;
                goto probe_quit;
        }
 
        priv->remap_addr = ioremap(res->start, size);
        if (!priv->remap_addr) {
-               dev_err(emac_dev, "Unable to map IO\n");
+               dev_err(&pdev->dev, "unable to map IO\n");
                rc = -ENOMEM;
                release_mem_region(res->start, size);
                goto probe_quit;
@@ -1863,7 +1863,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
 
        priv->dma = cpdma_ctlr_create(&dma_params);
        if (!priv->dma) {
-               dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n");
+               dev_err(&pdev->dev, "error initializing DMA\n");
                rc = -ENOMEM;
                goto no_dma;
        }
@@ -1879,7 +1879,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!res) {
-               dev_err(emac_dev, "DaVinci EMAC: Error getting irq res\n");
+               dev_err(&pdev->dev, "error getting irq res\n");
                rc = -ENOENT;
                goto no_irq_res;
        }
@@ -1888,8 +1888,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
        if (!is_valid_ether_addr(priv->mac_addr)) {
                /* Use random MAC if none passed */
                random_ether_addr(priv->mac_addr);
-               printk(KERN_WARNING "%s: using random MAC addr: %pM\n",
-                               __func__, priv->mac_addr);
+               dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
+                                                       priv->mac_addr);
        }
 
        ndev->netdev_ops = &emac_netdev_ops;
@@ -1902,7 +1902,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
        SET_NETDEV_DEV(ndev, &pdev->dev);
        rc = register_netdev(ndev);
        if (rc) {
-               dev_err(emac_dev, "DaVinci EMAC: Error in register_netdev\n");
+               dev_err(&pdev->dev, "error in register_netdev\n");
                rc = -ENODEV;
                goto netdev_reg_err;
        }
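
Switching from printk(KERN_ERR "DaVinci EMAC: ...") to dev_err(&pdev->dev, ...) drops the hand-written prefix because the dev_* helpers already print the driver and device names. A minimal sketch of the convention in a hypothetical probe:

/* Sketch of the dev_* convention in a hypothetical probe, not EMAC code. */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct clk *clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk)) {
		/* Logged as "<driver name> <device name>: failed to get clock" */
		dev_err(&pdev->dev, "failed to get clock\n");
		return PTR_ERR(clk);
	}

	clk_put(clk);
	return 0;
}
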
index a3c0dc9d8b98305ecbeda73f2bf61eb5a2c82b8c..9537aaa50c2f5f9633922b582ea4e9d6d67f3572 100644 (file)
@@ -69,7 +69,7 @@ static const char paranoia_str[] = KERN_ERR
 
 static const char bc_drvname[] = "baycom_epp";
 static const char bc_drvinfo[] = KERN_INFO "baycom_epp: (C) 1998-2000 Thomas Sailer, HB9JNX/AE4WA\n"
-"baycom_epp: version 0.7 compiled " __TIME__ " " __DATE__ "\n";
+"baycom_epp: version 0.7\n";
 
 /* --------------------------------------------------------------------- */
 
index 5f5af9a606f8aaddbb4b8724a651db14e5ecee05..279d2296290a46d5567eb0647dcca528ca3d18f4 100644 (file)
 
 static const char bc_drvname[] = "baycom_par";
 static const char bc_drvinfo[] = KERN_INFO "baycom_par: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
-"baycom_par: version 0.9 compiled " __TIME__ " " __DATE__ "\n";
+"baycom_par: version 0.9\n";
 
 /* --------------------------------------------------------------------- */
 
index 3e25f10cabd60e9c93bc0898d23dfe1bf5f0351a..99cdce33df8b944b4575085b0327315460cf67da 100644 (file)
@@ -92,7 +92,7 @@
 
 static const char bc_drvname[] = "baycom_ser_fdx";
 static const char bc_drvinfo[] = KERN_INFO "baycom_ser_fdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
-"baycom_ser_fdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n";
+"baycom_ser_fdx: version 0.10\n";
 
 /* --------------------------------------------------------------------- */
 
index 1686f6dcbbce849525968a57152009faa24e2b63..d92fe6ca788f902dfa5b40cb47a1fcb65c6c9ab7 100644 (file)
@@ -80,7 +80,7 @@
 
 static const char bc_drvname[] = "baycom_ser_hdx";
 static const char bc_drvinfo[] = KERN_INFO "baycom_ser_hdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n"
-"baycom_ser_hdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n";
+"baycom_ser_hdx: version 0.10\n";
 
 /* --------------------------------------------------------------------- */
 
index 5b37579e84b788952988ce5f4b5d48738e16e472..a4a3516b6bbf87d05545a48d8e46627a5f81ee7c 100644 (file)
@@ -749,7 +749,7 @@ EXPORT_SYMBOL(hdlcdrv_unregister);
 static int __init hdlcdrv_init_driver(void)
 {
        printk(KERN_INFO "hdlcdrv: (C) 1996-2000 Thomas Sailer HB9JNX/AE4WA\n");
-       printk(KERN_INFO "hdlcdrv: version 0.8 compiled " __TIME__ " " __DATE__ "\n");
+       printk(KERN_INFO "hdlcdrv: version 0.8\n");
        return 0;
 }
 
index f0d8346d0fa5d2d3dcf62c402a4d65d5dc9729d6..4d40626b3bfa989b7051a7ea5872faa06ba61601 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
-#include <linux/mfd/core.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
@@ -1146,7 +1145,7 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
        struct resource *iomem;
        struct net_device *netdev;
        struct ks8842_adapter *adapter;
-       struct ks8842_platform_data *pdata = mfd_get_data(pdev);
+       struct ks8842_platform_data *pdata = pdev->dev.platform_data;
        u16 id;
        unsigned i;
 
index 737b59f1a8dc4a28f535434ca207128fad1d4f76..9617d3d0ee39e055a94150d6ee0cf8b2014cf0e1 100644 (file)
@@ -3242,8 +3242,7 @@ static inline void show_version(void)
        rcsdate++;
        tmp = strrchr(rcsdate, ' ');
        *tmp = '\0';
-       printk(KERN_INFO "Cyclades-PC300 driver %s %s (built %s %s)\n", 
-               rcsvers, rcsdate, __DATE__, __TIME__);
+       printk(KERN_INFO "Cyclades-PC300 driver %s %s\n", rcsvers, rcsdate);
 }                              /* show_version */
 
 static const struct net_device_ops cpc_netdev_ops = {
index d3d7809af8bf5287653ebe4b3e968489eba2c4cd..0dc34f12f92e5865f22ad503c582f8cf6ba4f88a 100644 (file)
@@ -2203,7 +2203,6 @@ static __exit void parport_ip32_unregister_port(struct parport *p)
 static int __init parport_ip32_init(void)
 {
        pr_info(PPIP32 "SGI IP32 built-in parallel port driver v0.6\n");
-       pr_debug1(PPIP32 "Compiled on %s, %s\n", __DATE__, __TIME__);
        this_port = parport_ip32_probe_port();
        return IS_ERR(this_port) ? PTR_ERR(this_port) : 0;
 }
index 52a462fc6b84467f3aafca8d09cf5630b7d500bd..e57b50b385655d24eb5aeb94866a68cef42813e2 100644 (file)
@@ -68,6 +68,13 @@ config BATTERY_DS2760
        help
          Say Y here to enable support for batteries with ds2760 chip.
 
+config BATTERY_DS2780
+       tristate "DS2780 battery driver"
+       select W1
+       select W1_SLAVE_DS2780
+       help
+         Say Y here to enable support for batteries with ds2780 chip.
+
 config BATTERY_DS2782
        tristate "DS2782/DS2786 standalone gas-gauge"
        depends on I2C
@@ -203,6 +210,15 @@ config CHARGER_ISP1704
          Say Y to enable support for USB Charger Detection with
          ISP1707/ISP1704 USB transceivers.
 
+config CHARGER_MAX8903
+       tristate "MAX8903 Battery DC-DC Charger for USB and Adapter Power"
+       depends on GENERIC_HARDIRQS
+       help
+         Say Y to enable support for the MAX8903 DC-DC charger and sysfs.
+         The driver supports controlling charger-enable and current-limit
+         pins based on the status of charger connections with interrupt
+         handlers.
+
 config CHARGER_TWL4030
        tristate "OMAP TWL4030 BCI charger driver"
        depends on TWL4030_CORE
index 8385bfae872836bf367e937b1f627cbe82c39421..009a90fa8ac9538e320ec1f0f9196b44e87054f1 100644 (file)
@@ -15,6 +15,7 @@ obj-$(CONFIG_WM8350_POWER)    += wm8350_power.o
 obj-$(CONFIG_TEST_POWER)       += test_power.o
 
 obj-$(CONFIG_BATTERY_DS2760)   += ds2760_battery.o
+obj-$(CONFIG_BATTERY_DS2780)   += ds2780_battery.o
 obj-$(CONFIG_BATTERY_DS2782)   += ds2782_battery.o
 obj-$(CONFIG_BATTERY_PMU)      += pmu_battery.o
 obj-$(CONFIG_BATTERY_OLPC)     += olpc_battery.o
@@ -32,5 +33,6 @@ obj-$(CONFIG_CHARGER_PCF50633)        += pcf50633-charger.o
 obj-$(CONFIG_BATTERY_JZ4740)   += jz4740-battery.o
 obj-$(CONFIG_BATTERY_INTEL_MID)        += intel_mid_battery.o
 obj-$(CONFIG_CHARGER_ISP1704)  += isp1704_charger.o
+obj-$(CONFIG_CHARGER_MAX8903)  += max8903_charger.o
 obj-$(CONFIG_CHARGER_TWL4030)  += twl4030_charger.o
 obj-$(CONFIG_CHARGER_GPIO)     += gpio-charger.o
index 59e68dbd028b31e4bf8e17f5e33bc19e0deb7492..bb16f5b7e167490519d2793cbaa1546192e447f5 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it>
  * Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it>
  * Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de>
+ * Copyright (C) 2011 Pali Rohár <pali.rohar@gmail.com>
  *
  * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc.
  *
@@ -76,7 +77,7 @@ struct bq27x00_reg_cache {
        int time_to_empty_avg;
        int time_to_full;
        int charge_full;
-       int charge_counter;
+       int cycle_count;
        int capacity;
        int flags;
 
@@ -115,7 +116,7 @@ static enum power_supply_property bq27x00_battery_props[] = {
        POWER_SUPPLY_PROP_CHARGE_FULL,
        POWER_SUPPLY_PROP_CHARGE_NOW,
        POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
-       POWER_SUPPLY_PROP_CHARGE_COUNTER,
+       POWER_SUPPLY_PROP_CYCLE_COUNT,
        POWER_SUPPLY_PROP_ENERGY_NOW,
 };
 
@@ -267,7 +268,7 @@ static void bq27x00_update(struct bq27x00_device_info *di)
                cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP);
                cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF);
                cache.charge_full = bq27x00_battery_read_lmd(di);
-               cache.charge_counter = bq27x00_battery_read_cyct(di);
+               cache.cycle_count = bq27x00_battery_read_cyct(di);
 
                if (!is_bq27500)
                        cache.current_now = bq27x00_read(di, BQ27x00_REG_AI, false);
@@ -496,8 +497,8 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
        case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
                ret = bq27x00_simple_value(di->charge_design_full, val);
                break;
-       case POWER_SUPPLY_PROP_CHARGE_COUNTER:
-               ret = bq27x00_simple_value(di->cache.charge_counter, val);
+       case POWER_SUPPLY_PROP_CYCLE_COUNT:
+               ret = bq27x00_simple_value(di->cache.cycle_count, val);
                break;
        case POWER_SUPPLY_PROP_ENERGY_NOW:
                ret = bq27x00_battery_energy(di, val);
index e534290f32561d6d8322375f6baccf086f0c6de1..f2c9cc33c0f9f795a81125368c1dc758c2dd81c8 100644 (file)
@@ -86,7 +86,11 @@ static int rated_capacities[] = {
        920,    /* NEC */
        1440,   /* Samsung */
        1440,   /* BYD */
+#ifdef CONFIG_MACH_H4700
+       1800,   /* HP iPAQ hx4700 3.7V 1800mAh (359113-001) */
+#else
        1440,   /* Lishen */
+#endif
        1440,   /* NEC */
        2880,   /* Samsung */
        2880,   /* BYD */
@@ -186,7 +190,7 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di)
 
        scale[0] = di->full_active_uAh;
        for (i = 1; i < 5; i++)
-               scale[i] = scale[i - 1] + di->raw[DS2760_ACTIVE_FULL + 2 + i];
+               scale[i] = scale[i - 1] + di->raw[DS2760_ACTIVE_FULL + 1 + i];
 
        di->full_active_uAh = battery_interpolate(scale, di->temp_C / 10);
        di->full_active_uAh *= 1000; /* convert to µAh */
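
The scale[] table above is built cumulatively from consecutive EEPROM bytes starting just past DS2760_ACTIVE_FULL; with the old "+ 2 + i" index the byte immediately after the base entry was skipped and every later entry read one register too far. A stand-alone illustration of the index change (the 0x20 base is made up; the real DS2760_ACTIVE_FULL value is not shown in this hunk):

/* Stand-alone illustration of the index fix; 0x20 is a made-up base. */
#include <stdio.h>

#define ACTIVE_FULL 0x20	/* stand-in for DS2760_ACTIVE_FULL */

int main(void)
{
	int i;

	for (i = 1; i < 5; i++)
		printf("scale[%d]: old read raw[0x%02x], fixed read raw[0x%02x]\n",
		       i, ACTIVE_FULL + 2 + i, ACTIVE_FULL + 1 + i);
	/* old: 0x23..0x26 (skips 0x22); fixed: 0x22..0x25 */
	return 0;
}
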
diff --git a/drivers/power/ds2780_battery.c b/drivers/power/ds2780_battery.c
new file mode 100644 (file)
index 0000000..1fefe82
--- /dev/null
@@ -0,0 +1,853 @@
+/*
+ * 1-wire client/driver for the Maxim/Dallas DS2780 Stand-Alone Fuel Gauge IC
+ *
+ * Copyright (C) 2010 Indesign, LLC
+ *
+ * Author: Clifton Barnes <cabarnes@indesign-llc.com>
+ *
+ * Based on ds2760_battery and ds2782_battery drivers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/param.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/idr.h>
+
+#include "../w1/w1.h"
+#include "../w1/slaves/w1_ds2780.h"
+
+/* Current unit measurement in uA for a 1 milli-ohm sense resistor */
+#define DS2780_CURRENT_UNITS   1563
+/* Charge unit measurement in uAh for a 1 milli-ohm sense resistor */
+#define DS2780_CHARGE_UNITS            6250
+/* Number of bytes in user EEPROM space */
+#define DS2780_USER_EEPROM_SIZE                (DS2780_EEPROM_BLOCK0_END - \
+                                       DS2780_EEPROM_BLOCK0_START + 1)
+/* Number of bytes in parameter EEPROM space */
+#define DS2780_PARAM_EEPROM_SIZE       (DS2780_EEPROM_BLOCK1_END - \
+                                       DS2780_EEPROM_BLOCK1_START + 1)
+
+struct ds2780_device_info {
+       struct device *dev;
+       struct power_supply bat;
+       struct device *w1_dev;
+};
+
+enum current_types {
+       CURRENT_NOW,
+       CURRENT_AVG,
+};
+
+static const char model[] = "DS2780";
+static const char manufacturer[] = "Maxim/Dallas";
+
+static inline struct ds2780_device_info *to_ds2780_device_info(
+       struct power_supply *psy)
+{
+       return container_of(psy, struct ds2780_device_info, bat);
+}
+
+static inline struct power_supply *to_power_supply(struct device *dev)
+{
+       return dev_get_drvdata(dev);
+}
+
+static inline int ds2780_read8(struct device *dev, u8 *val, int addr)
+{
+       return w1_ds2780_io(dev, val, addr, sizeof(u8), 0);
+}
+
+static int ds2780_read16(struct device *dev, s16 *val, int addr)
+{
+       int ret;
+       u8 raw[2];
+
+       ret = w1_ds2780_io(dev, raw, addr, sizeof(u8) * 2, 0);
+       if (ret < 0)
+               return ret;
+
+       *val = (raw[0] << 8) | raw[1];
+
+       return 0;
+}
+
+static inline int ds2780_read_block(struct device *dev, u8 *val, int addr,
+       size_t count)
+{
+       return w1_ds2780_io(dev, val, addr, count, 0);
+}
+
+static inline int ds2780_write(struct device *dev, u8 *val, int addr,
+       size_t count)
+{
+       return w1_ds2780_io(dev, val, addr, count, 1);
+}
+
+static inline int ds2780_store_eeprom(struct device *dev, int addr)
+{
+       return w1_ds2780_eeprom_cmd(dev, addr, W1_DS2780_COPY_DATA);
+}
+
+static inline int ds2780_recall_eeprom(struct device *dev, int addr)
+{
+       return w1_ds2780_eeprom_cmd(dev, addr, W1_DS2780_RECALL_DATA);
+}
+
+static int ds2780_save_eeprom(struct ds2780_device_info *dev_info, int reg)
+{
+       int ret;
+
+       ret = ds2780_store_eeprom(dev_info->w1_dev, reg);
+       if (ret < 0)
+               return ret;
+
+       ret = ds2780_recall_eeprom(dev_info->w1_dev, reg);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+/* Set sense resistor value in mhos */
+static int ds2780_set_sense_register(struct ds2780_device_info *dev_info,
+       u8 conductance)
+{
+       int ret;
+
+       ret = ds2780_write(dev_info->w1_dev, &conductance,
+                               DS2780_RSNSP_REG, sizeof(u8));
+       if (ret < 0)
+               return ret;
+
+       return ds2780_save_eeprom(dev_info, DS2780_RSNSP_REG);
+}
+
+/* Get RSGAIN value from 0 to 1.999 in steps of 0.001 */
+static int ds2780_get_rsgain_register(struct ds2780_device_info *dev_info,
+       u16 *rsgain)
+{
+       return ds2780_read16(dev_info->w1_dev, rsgain, DS2780_RSGAIN_MSB_REG);
+}
+
+/* Set RSGAIN value from 0 to 1.999 in steps of 0.001 */
+static int ds2780_set_rsgain_register(struct ds2780_device_info *dev_info,
+       u16 rsgain)
+{
+       int ret;
+       u8 raw[] = {rsgain >> 8, rsgain & 0xFF};
+
+       ret = ds2780_write(dev_info->w1_dev, raw,
+                               DS2780_RSGAIN_MSB_REG, sizeof(u8) * 2);
+       if (ret < 0)
+               return ret;
+
+       return ds2780_save_eeprom(dev_info, DS2780_RSGAIN_MSB_REG);
+}
+
+static int ds2780_get_voltage(struct ds2780_device_info *dev_info,
+       int *voltage_uV)
+{
+       int ret;
+       s16 voltage_raw;
+
+       /*
+        * The voltage value is located in 10 bits across the voltage MSB
+        * and LSB registers in two's complement form
+        * Sign bit of the voltage value is in bit 7 of the voltage MSB register
+        * Bits 9 - 3 of the voltage value are in bits 6 - 0 of the
+        * voltage MSB register
+        * Bits 2 - 0 of the voltage value are in bits 7 - 5 of the
+        * voltage LSB register
+        */
+       ret = ds2780_read16(dev_info->w1_dev, &voltage_raw,
+                               DS2780_VOLT_MSB_REG);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * DS2780 reports voltage in units of 4.88mV, but the battery class
+        * reports in units of uV, so convert by multiplying by 4880.
+        */
+       *voltage_uV = (voltage_raw / 32) * 4880;
+       return 0;
+}
+
+static int ds2780_get_temperature(struct ds2780_device_info *dev_info,
+       int *temperature)
+{
+       int ret;
+       s16 temperature_raw;
+
+       /*
+        * The temperature value is located in 10 bits across the temperature
+        * MSB and LSB registers in two's complement form
+        * Sign bit of the temperature value is in bit 7 of the temperature
+        * MSB register
+        * Bits 9 - 3 of the temperature value are in bits 6 - 0 of the
+        * temperature MSB register
+        * Bits 2 - 0 of the temperature value are in bits 7 - 5 of the
+        * temperature LSB register
+        */
+       ret = ds2780_read16(dev_info->w1_dev, &temperature_raw,
+                               DS2780_TEMP_MSB_REG);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Temperature is measured in units of 0.125 degrees Celsius, while
+        * the power_supply class reports temperature in tenths of a degree
+        * Celsius. The temperature value is stored as a 10 bit number, plus
+        * sign in the upper bits of a 16 bit register.
+        */
+       *temperature = ((temperature_raw / 32) * 125) / 100;
+       return 0;
+}
+
+static int ds2780_get_current(struct ds2780_device_info *dev_info,
+       enum current_types type, int *current_uA)
+{
+       int ret, sense_res;
+       s16 current_raw;
+       u8 sense_res_raw, reg_msb;
+
+       /*
+        * The units of measurement for current are dependent on the value of
+        * the sense resistor.
+        */
+       ret = ds2780_read8(dev_info->w1_dev, &sense_res_raw, DS2780_RSNSP_REG);
+       if (ret < 0)
+               return ret;
+
+       if (sense_res_raw == 0) {
+               dev_err(dev_info->dev, "sense resistor value is 0\n");
+               return -ENXIO;
+       }
+       sense_res = 1000 / sense_res_raw;
+
+       if (type == CURRENT_NOW)
+               reg_msb = DS2780_CURRENT_MSB_REG;
+       else if (type == CURRENT_AVG)
+               reg_msb = DS2780_IAVG_MSB_REG;
+       else
+               return -EINVAL;
+
+       /*
+        * The current value is located in 16 bits across the current MSB
+        * and LSB registers in two's complement form
+        * Sign bit of the current value is in bit 7 of the current MSB register
+        * Bits 14 - 8 of the current value are in bits 6 - 0 of the current
+        * MSB register
+        * Bits 7 - 0 of the current value are in bits 7 - 0 of the current
+        * LSB register
+        */
+       ret = ds2780_read16(dev_info->w1_dev, &current_raw, reg_msb);
+       if (ret < 0)
+               return ret;
+
+       *current_uA = current_raw * (DS2780_CURRENT_UNITS / sense_res);
+       return 0;
+}
+
+static int ds2780_get_accumulated_current(struct ds2780_device_info *dev_info,
+       int *accumulated_current)
+{
+       int ret, sense_res;
+       s16 current_raw;
+       u8 sense_res_raw;
+
+       /*
+        * The units of measurement for accumulated current are dependent on
+        * the value of the sense resistor.
+        */
+       ret = ds2780_read8(dev_info->w1_dev, &sense_res_raw, DS2780_RSNSP_REG);
+       if (ret < 0)
+               return ret;
+
+       if (sense_res_raw == 0) {
+               dev_err(dev_info->dev, "sense resistor value is 0\n");
+               return -ENXIO;
+       }
+       sense_res = 1000 / sense_res_raw;
+
+       /*
+        * The ACR value is located in 16 bits across the ACR MSB and
+        * LSB registers
+        * Bits 15 - 8 of the ACR value are in bits 7 - 0 of the ACR
+        * MSB register
+        * Bits 7 - 0 of the ACR value are in bits 7 - 0 of the ACR
+        * LSB register
+        */
+       ret = ds2780_read16(dev_info->w1_dev, &current_raw, DS2780_ACR_MSB_REG);
+       if (ret < 0)
+               return ret;
+
+       *accumulated_current = current_raw * (DS2780_CHARGE_UNITS / sense_res);
+       return 0;
+}
+
+static int ds2780_get_capacity(struct ds2780_device_info *dev_info,
+       int *capacity)
+{
+       int ret;
+       u8 raw;
+
+       ret = ds2780_read8(dev_info->w1_dev, &raw, DS2780_RARC_REG);
+       if (ret < 0)
+               return ret;
+
+       *capacity = raw;
+       return raw;
+}
+
+static int ds2780_get_status(struct ds2780_device_info *dev_info, int *status)
+{
+       int ret, current_uA, capacity;
+
+       ret = ds2780_get_current(dev_info, CURRENT_NOW, &current_uA);
+       if (ret < 0)
+               return ret;
+
+       ret = ds2780_get_capacity(dev_info, &capacity);
+       if (ret < 0)
+               return ret;
+
+       if (capacity == 100)
+               *status = POWER_SUPPLY_STATUS_FULL;
+       else if (current_uA == 0)
+               *status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+       else if (current_uA < 0)
+               *status = POWER_SUPPLY_STATUS_DISCHARGING;
+       else
+               *status = POWER_SUPPLY_STATUS_CHARGING;
+
+       return 0;
+}
+
+static int ds2780_get_charge_now(struct ds2780_device_info *dev_info,
+       int *charge_now)
+{
+       int ret;
+       u16 charge_raw;
+
+       /*
+        * The RAAC value is located in 16 bits across the RAAC MSB and
+        * LSB registers
+        * Bits 15 - 8 of the RAAC value are in bits 7 - 0 of the RAAC
+        * MSB register
+        * Bits 7 - 0 of the RAAC value are in bits 7 - 0 of the RAAC
+        * LSB register
+        */
+       ret = ds2780_read16(dev_info->w1_dev, &charge_raw, DS2780_RAAC_MSB_REG);
+       if (ret < 0)
+               return ret;
+
+       *charge_now = charge_raw * 1600;
+       return 0;
+}
+
+static int ds2780_get_control_register(struct ds2780_device_info *dev_info,
+       u8 *control_reg)
+{
+       return ds2780_read8(dev_info->w1_dev, control_reg, DS2780_CONTROL_REG);
+}
+
+static int ds2780_set_control_register(struct ds2780_device_info *dev_info,
+       u8 control_reg)
+{
+       int ret;
+
+       ret = ds2780_write(dev_info->w1_dev, &control_reg,
+                               DS2780_CONTROL_REG, sizeof(u8));
+       if (ret < 0)
+               return ret;
+
+       return ds2780_save_eeprom(dev_info, DS2780_CONTROL_REG);
+}
+
+static int ds2780_battery_get_property(struct power_supply *psy,
+       enum power_supply_property psp,
+       union power_supply_propval *val)
+{
+       int ret = 0;
+       struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+               ret = ds2780_get_voltage(dev_info, &val->intval);
+               break;
+
+       case POWER_SUPPLY_PROP_TEMP:
+               ret = ds2780_get_temperature(dev_info, &val->intval);
+               break;
+
+       case POWER_SUPPLY_PROP_MODEL_NAME:
+               val->strval = model;
+               break;
+
+       case POWER_SUPPLY_PROP_MANUFACTURER:
+               val->strval = manufacturer;
+               break;
+
+       case POWER_SUPPLY_PROP_CURRENT_NOW:
+               ret = ds2780_get_current(dev_info, CURRENT_NOW, &val->intval);
+               break;
+
+       case POWER_SUPPLY_PROP_CURRENT_AVG:
+               ret = ds2780_get_current(dev_info, CURRENT_AVG, &val->intval);
+               break;
+
+       case POWER_SUPPLY_PROP_STATUS:
+               ret = ds2780_get_status(dev_info, &val->intval);
+               break;
+
+       case POWER_SUPPLY_PROP_CAPACITY:
+               ret = ds2780_get_capacity(dev_info, &val->intval);
+               break;
+
+       case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+               ret = ds2780_get_accumulated_current(dev_info, &val->intval);
+               break;
+
+       case POWER_SUPPLY_PROP_CHARGE_NOW:
+               ret = ds2780_get_charge_now(dev_info, &val->intval);
+               break;
+
+       default:
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
+static enum power_supply_property ds2780_battery_props[] = {
+       POWER_SUPPLY_PROP_STATUS,
+       POWER_SUPPLY_PROP_VOLTAGE_NOW,
+       POWER_SUPPLY_PROP_TEMP,
+       POWER_SUPPLY_PROP_MODEL_NAME,
+       POWER_SUPPLY_PROP_MANUFACTURER,
+       POWER_SUPPLY_PROP_CURRENT_NOW,
+       POWER_SUPPLY_PROP_CURRENT_AVG,
+       POWER_SUPPLY_PROP_CAPACITY,
+       POWER_SUPPLY_PROP_CHARGE_COUNTER,
+       POWER_SUPPLY_PROP_CHARGE_NOW,
+};
+
+static ssize_t ds2780_get_pmod_enabled(struct device *dev,
+       struct device_attribute *attr,
+       char *buf)
+{
+       int ret;
+       u8 control_reg;
+       struct power_supply *psy = to_power_supply(dev);
+       struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
+
+       /* Get power mode */
+       ret = ds2780_get_control_register(dev_info, &control_reg);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%d\n",
+                !!(control_reg & DS2780_CONTROL_REG_PMOD));
+}
+
+static ssize_t ds2780_set_pmod_enabled(struct device *dev,
+       struct device_attribute *attr,
+       const char *buf,
+       size_t count)
+{
+       int ret;
+       u8 control_reg, new_setting;
+       struct power_supply *psy = to_power_supply(dev);
+       struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
+
+       /* Set power mode */
+       ret = ds2780_get_control_register(dev_info, &control_reg);
+       if (ret < 0)
+               return ret;
+
+       ret = kstrtou8(buf, 0, &new_setting);
+       if (ret < 0)
+               return ret;
+
+       if ((new_setting != 0) && (new_setting != 1)) {
+               dev_err(dev_info->dev, "Invalid pmod setting (0 or 1)\n");
+               return -EINVAL;
+       }
+
+       if (new_setting)
+               control_reg |= DS2780_CONTROL_REG_PMOD;
+       else
+               control_reg &= ~DS2780_CONTROL_REG_PMOD;
+
+       ret = ds2780_set_control_register(dev_info, control_reg);
+       if (ret < 0)
+               return ret;
+
+       return count;
+}
+
+static ssize_t ds2780_get_sense_resistor_value(struct device *dev,
+       struct device_attribute *attr,
+       char *buf)
+{
+       int ret;
+       u8 sense_resistor;
+       struct power_supply *psy = to_power_supply(dev);
+       struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
+
+       ret = ds2780_read8(dev_info->w1_dev, &sense_resistor, DS2780_RSNSP_REG);
+       if (ret < 0)
+               return ret;
+
+       ret = sprintf(buf, "%d\n", sense_resistor);
+       return ret;
+}
+
+static ssize_t ds2780_set_sense_resistor_value(struct device *dev,
+       struct device_attribute *attr,
+       const char *buf,
+       size_t count)
+{
+       int ret;
+       u8 new_setting;
+       struct power_supply *psy = to_power_supply(dev);
+       struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
+
+       ret = kstrtou8(buf, 0, &new_setting);
+       if (ret < 0)
+               return ret;
+
+       ret = ds2780_set_sense_register(dev_info, new_setting);
+       if (ret < 0)
+               return ret;
+
+       return count;
+}
+
+static ssize_t ds2780_get_rsgain_setting(struct device *dev,
+       struct device_attribute *attr,
+       char *buf)
+{
+       int ret;
+       u16 rsgain;
+       struct power_supply *psy = to_power_supply(dev);
+       struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
+
+       ret = ds2780_get_rsgain_register(dev_info, &rsgain);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%d\n", rsgain);
+}
+
+static ssize_t ds2780_set_rsgain_setting(struct device *dev,
+       struct device_attribute *attr,
+       const char *buf,
+       size_t count)
+{
+       int ret;
+       u16 new_setting;
+       struct power_supply *psy = to_power_supply(dev);
+       struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
+
+       ret = kstrtou16(buf, 0, &new_setting);
+       if (ret < 0)
+               return ret;
+
+       /* Gain can only be from 0 to 1.999 in steps of .001 */
+       if (new_setting > 1999) {
+               dev_err(dev_info->dev, "Invalid rsgain setting (0 - 1999)\n");
+               return -EINVAL;
+       }
+
+       ret = ds2780_set_rsgain_register(dev_info, new_setting);
+       if (ret < 0)
+               return ret;
+
+       return count;
+}
+
+static ssize_t ds2780_get_pio_pin(struct device *dev,
+       struct device_attribute *attr,
+       char *buf)
+{
+       int ret;
+       u8 sfr;
+       struct power_supply *psy = to_power_supply(dev);
+       struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
+
+       ret = ds2780_read8(dev_info->w1_dev, &sfr, DS2780_SFR_REG);
+       if (ret < 0)
+               return ret;
+
+       ret = sprintf(buf, "%d\n", sfr & DS2780_SFR_REG_PIOSC);
+       return ret;
+}
+
+static ssize_t ds2780_set_pio_pin(struct device *dev,
+       struct device_attribute *attr,
+       const char *buf,
+       size_t count)
+{
+       int ret;
+       u8 new_setting;
+       struct power_supply *psy = to_power_supply(dev);
+       struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
+
+       ret = kstrtou8(buf, 0, &new_setting);
+       if (ret < 0)
+               return ret;
+
+       if ((new_setting != 0) && (new_setting != 1)) {
+               dev_err(dev_info->dev, "Invalid pio_pin setting (0 or 1)\n");
+               return -EINVAL;
+       }
+
+       ret = ds2780_write(dev_info->w1_dev, &new_setting,
+                               DS2780_SFR_REG, sizeof(u8));
+       if (ret < 0)
+               return ret;
+
+       return count;
+}
+
+static ssize_t ds2780_read_param_eeprom_bin(struct file *filp,
+                               struct kobject *kobj,
+                               struct bin_attribute *bin_attr,
+                               char *buf, loff_t off, size_t count)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct power_supply *psy = to_power_supply(dev);
+       struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
+
+       count = min_t(loff_t, count,
+               DS2780_EEPROM_BLOCK1_END -
+               DS2780_EEPROM_BLOCK1_START + 1 - off);
+
+       return ds2780_read_block(dev_info->w1_dev, buf,
+                               DS2780_EEPROM_BLOCK1_START + off, count);
+}
+
+static ssize_t ds2780_write_param_eeprom_bin(struct file *filp,
+                               struct kobject *kobj,
+                               struct bin_attribute *bin_attr,
+                               char *buf, loff_t off, size_t count)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct power_supply *psy = to_power_supply(dev);
+       struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
+       int ret;
+
+       count = min_t(loff_t, count,
+               DS2780_EEPROM_BLOCK1_END -
+               DS2780_EEPROM_BLOCK1_START + 1 - off);
+
+       ret = ds2780_write(dev_info->w1_dev, buf,
+                               DS2780_EEPROM_BLOCK1_START + off, count);
+       if (ret < 0)
+               return ret;
+
+       ret = ds2780_save_eeprom(dev_info, DS2780_EEPROM_BLOCK1_START);
+       if (ret < 0)
+               return ret;
+
+       return count;
+}
+
+static struct bin_attribute ds2780_param_eeprom_bin_attr = {
+       .attr = {
+               .name = "param_eeprom",
+               .mode = S_IRUGO | S_IWUSR,
+       },
+       .size = DS2780_EEPROM_BLOCK1_END - DS2780_EEPROM_BLOCK1_START + 1,
+       .read = ds2780_read_param_eeprom_bin,
+       .write = ds2780_write_param_eeprom_bin,
+};
+
+static ssize_t ds2780_read_user_eeprom_bin(struct file *filp,
+                               struct kobject *kobj,
+                               struct bin_attribute *bin_attr,
+                               char *buf, loff_t off, size_t count)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct power_supply *psy = to_power_supply(dev);
+       struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
+
+       count = min_t(loff_t, count,
+               DS2780_EEPROM_BLOCK0_END -
+               DS2780_EEPROM_BLOCK0_START + 1 - off);
+
+       return ds2780_read_block(dev_info->w1_dev, buf,
+                               DS2780_EEPROM_BLOCK0_START + off, count);
+
+}
+
+static ssize_t ds2780_write_user_eeprom_bin(struct file *filp,
+                               struct kobject *kobj,
+                               struct bin_attribute *bin_attr,
+                               char *buf, loff_t off, size_t count)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct power_supply *psy = to_power_supply(dev);
+       struct ds2780_device_info *dev_info = to_ds2780_device_info(psy);
+       int ret;
+
+       count = min_t(loff_t, count,
+               DS2780_EEPROM_BLOCK0_END -
+               DS2780_EEPROM_BLOCK0_START + 1 - off);
+
+       ret = ds2780_write(dev_info->w1_dev, buf,
+                               DS2780_EEPROM_BLOCK0_START + off, count);
+       if (ret < 0)
+               return ret;
+
+       ret = ds2780_save_eeprom(dev_info, DS2780_EEPROM_BLOCK0_START);
+       if (ret < 0)
+               return ret;
+
+       return count;
+}
+
+static struct bin_attribute ds2780_user_eeprom_bin_attr = {
+       .attr = {
+               .name = "user_eeprom",
+               .mode = S_IRUGO | S_IWUSR,
+       },
+       .size = DS2780_EEPROM_BLOCK0_END - DS2780_EEPROM_BLOCK0_START + 1,
+       .read = ds2780_read_user_eeprom_bin,
+       .write = ds2780_write_user_eeprom_bin,
+};
+
+static DEVICE_ATTR(pmod_enabled, S_IRUGO | S_IWUSR, ds2780_get_pmod_enabled,
+       ds2780_set_pmod_enabled);
+static DEVICE_ATTR(sense_resistor_value, S_IRUGO | S_IWUSR,
+       ds2780_get_sense_resistor_value, ds2780_set_sense_resistor_value);
+static DEVICE_ATTR(rsgain_setting, S_IRUGO | S_IWUSR, ds2780_get_rsgain_setting,
+       ds2780_set_rsgain_setting);
+static DEVICE_ATTR(pio_pin, S_IRUGO | S_IWUSR, ds2780_get_pio_pin,
+       ds2780_set_pio_pin);
+
+static struct attribute *ds2780_attributes[] = {
+       &dev_attr_pmod_enabled.attr,
+       &dev_attr_sense_resistor_value.attr,
+       &dev_attr_rsgain_setting.attr,
+       &dev_attr_pio_pin.attr,
+       NULL
+};
+
+static const struct attribute_group ds2780_attr_group = {
+       .attrs = ds2780_attributes,
+};
+
+static int __devinit ds2780_battery_probe(struct platform_device *pdev)
+{
+       int ret = 0;
+       struct ds2780_device_info *dev_info;
+
+       dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
+       if (!dev_info) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+
+       platform_set_drvdata(pdev, dev_info);
+
+       dev_info->dev                   = &pdev->dev;
+       dev_info->w1_dev                = pdev->dev.parent;
+       dev_info->bat.name              = dev_name(&pdev->dev);
+       dev_info->bat.type              = POWER_SUPPLY_TYPE_BATTERY;
+       dev_info->bat.properties        = ds2780_battery_props;
+       dev_info->bat.num_properties    = ARRAY_SIZE(ds2780_battery_props);
+       dev_info->bat.get_property      = ds2780_battery_get_property;
+
+       ret = power_supply_register(&pdev->dev, &dev_info->bat);
+       if (ret) {
+               dev_err(dev_info->dev, "failed to register battery\n");
+               goto fail_free_info;
+       }
+
+       ret = sysfs_create_group(&dev_info->bat.dev->kobj, &ds2780_attr_group);
+       if (ret) {
+               dev_err(dev_info->dev, "failed to create sysfs group\n");
+               goto fail_unregister;
+       }
+
+       ret = sysfs_create_bin_file(&dev_info->bat.dev->kobj,
+                                       &ds2780_param_eeprom_bin_attr);
+       if (ret) {
+               dev_err(dev_info->dev,
+                               "failed to create param eeprom bin file");
+               goto fail_remove_group;
+       }
+
+       ret = sysfs_create_bin_file(&dev_info->bat.dev->kobj,
+                                       &ds2780_user_eeprom_bin_attr);
+       if (ret) {
+               dev_err(dev_info->dev,
+                               "failed to create user eeprom bin file");
+               goto fail_remove_bin_file;
+       }
+
+       return 0;
+
+fail_remove_bin_file:
+       sysfs_remove_bin_file(&dev_info->bat.dev->kobj,
+                               &ds2780_param_eeprom_bin_attr);
+fail_remove_group:
+       sysfs_remove_group(&dev_info->bat.dev->kobj, &ds2780_attr_group);
+fail_unregister:
+       power_supply_unregister(&dev_info->bat);
+fail_free_info:
+       kfree(dev_info);
+fail:
+       return ret;
+}
+
+static int __devexit ds2780_battery_remove(struct platform_device *pdev)
+{
+       struct ds2780_device_info *dev_info = platform_get_drvdata(pdev);
+
+       /* remove attributes */
+       sysfs_remove_group(&dev_info->bat.dev->kobj, &ds2780_attr_group);
+
+       power_supply_unregister(&dev_info->bat);
+
+       kfree(dev_info);
+       return 0;
+}
+
+MODULE_ALIAS("platform:ds2780-battery");
+
+static struct platform_driver ds2780_battery_driver = {
+       .driver = {
+               .name = "ds2780-battery",
+       },
+       .probe    = ds2780_battery_probe,
+       .remove   = ds2780_battery_remove,
+};
+
+static int __init ds2780_battery_init(void)
+{
+       return platform_driver_register(&ds2780_battery_driver);
+}
+
+static void __exit ds2780_battery_exit(void)
+{
+       platform_driver_unregister(&ds2780_battery_driver);
+}
+
+module_init(ds2780_battery_init);
+module_exit(ds2780_battery_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>");
+MODULE_DESCRIPTION("Maxim/Dallas DS2780 Stand-Alone Fuel Gauage IC driver");
index 25b88ac1d44c78eb3ff5258b05f0c68965dabdcb..718f2c537827a5a38ae90617bc91efb3eba9c8e0 100644 (file)
@@ -161,12 +161,27 @@ static int __devexit gpio_charger_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int gpio_charger_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct gpio_charger *gpio_charger = platform_get_drvdata(pdev);
+
+       power_supply_changed(&gpio_charger->charger);
+
+       return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(gpio_charger_pm_ops, NULL, gpio_charger_resume);
+
 static struct platform_driver gpio_charger_driver = {
        .probe = gpio_charger_probe,
        .remove = __devexit_p(gpio_charger_remove),
        .driver = {
                .name = "gpio-charger",
                .owner = THIS_MODULE,
+               .pm = &gpio_charger_pm_ops,
        },
 };
 
index 2ad9b14a5ce37c4ec15f8cb8b319c7845e982c4f..f6d72b402a8e3dc48d37319f6ebac236df71d60a 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/usb/ulpi.h>
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
+#include <linux/power/isp1704_charger.h>
 
 /* Vendor specific Power Control register */
 #define ISP1704_PWR_CTRL               0x3d
@@ -70,6 +71,18 @@ struct isp1704_charger {
        unsigned                max_power;
 };
 
+/*
+ * Disable/enable the power from the isp1704 if a function for it
+ * has been provided with platform data.
+ */
+static void isp1704_charger_set_power(struct isp1704_charger *isp, bool on)
+{
+       struct isp1704_charger_data     *board = isp->dev->platform_data;
+
+       if (board->set_power)
+               board->set_power(on);
+}
+
 /*
  * Determine is the charging port DCP (dedicated charger) or CDP (Host/HUB
  * chargers).
@@ -222,6 +235,9 @@ static void isp1704_charger_work(struct work_struct *data)
 
        mutex_lock(&lock);
 
+       if (event != USB_EVENT_NONE)
+               isp1704_charger_set_power(isp, 1);
+
        switch (event) {
        case USB_EVENT_VBUS:
                isp->online = true;
@@ -269,6 +285,8 @@ static void isp1704_charger_work(struct work_struct *data)
                 */
                if (isp->otg->gadget)
                        usb_gadget_disconnect(isp->otg->gadget);
+
+               isp1704_charger_set_power(isp, 0);
                break;
        case USB_EVENT_ENUMERATED:
                if (isp->present)
@@ -394,6 +412,8 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
        isp->dev = &pdev->dev;
        platform_set_drvdata(pdev, isp);
 
+       isp1704_charger_set_power(isp, 1);
+
        ret = isp1704_test_ulpi(isp);
        if (ret < 0)
                goto fail1;
@@ -434,6 +454,7 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
 
        /* Detect charger if VBUS is valid (the cable was already plugged). */
        ret = otg_io_read(isp->otg, ULPI_USB_INT_STS);
+       isp1704_charger_set_power(isp, 0);
        if ((ret & ULPI_INT_VBUS_VALID) && !isp->otg->default_a) {
                isp->event = USB_EVENT_VBUS;
                schedule_work(&isp->work);
@@ -459,6 +480,7 @@ static int __devexit isp1704_charger_remove(struct platform_device *pdev)
        otg_unregister_notifier(isp->otg, &isp->nb);
        power_supply_unregister(&isp->psy);
        otg_put_transceiver(isp->otg);
+       isp1704_charger_set_power(isp, 0);
        kfree(isp);
 
        return 0;
diff --git a/drivers/power/max8903_charger.c b/drivers/power/max8903_charger.c
new file mode 100644 (file)
index 0000000..33ff0e3
--- /dev/null
@@ -0,0 +1,391 @@
+/*
+ * max8903_charger.c - Maxim 8903 USB/Adapter Charger Driver
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/power_supply.h>
+#include <linux/platform_device.h>
+#include <linux/power/max8903_charger.h>
+
+struct max8903_data {
+       struct max8903_pdata *pdata;
+       struct device *dev;
+       struct power_supply psy;
+       bool fault;
+       bool usb_in;
+       bool ta_in;
+};
+
+static enum power_supply_property max8903_charger_props[] = {
+       POWER_SUPPLY_PROP_STATUS, /* Charger status output */
+       POWER_SUPPLY_PROP_ONLINE, /* External power source */
+       POWER_SUPPLY_PROP_HEALTH, /* Fault or OK */
+};
+
+static int max8903_get_property(struct power_supply *psy,
+               enum power_supply_property psp,
+               union power_supply_propval *val)
+{
+       struct max8903_data *data = container_of(psy,
+                       struct max8903_data, psy);
+
+       switch (psp) {
+       case POWER_SUPPLY_PROP_STATUS:
+               val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
+               if (data->pdata->chg) {
+                       if (gpio_get_value(data->pdata->chg) == 0)
+                               val->intval = POWER_SUPPLY_STATUS_CHARGING;
+                       else if (data->usb_in || data->ta_in)
+                               val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+                       else
+                               val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+               }
+               break;
+       case POWER_SUPPLY_PROP_ONLINE:
+               val->intval = 0;
+               if (data->usb_in || data->ta_in)
+                       val->intval = 1;
+               break;
+       case POWER_SUPPLY_PROP_HEALTH:
+               val->intval = POWER_SUPPLY_HEALTH_GOOD;
+               if (data->fault)
+                       val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static irqreturn_t max8903_dcin(int irq, void *_data)
+{
+       struct max8903_data *data = _data;
+       struct max8903_pdata *pdata = data->pdata;
+       bool ta_in;
+       enum power_supply_type old_type;
+
+       ta_in = gpio_get_value(pdata->dok) ? false : true;
+
+       if (ta_in == data->ta_in)
+               return IRQ_HANDLED;
+
+       data->ta_in = ta_in;
+
+       /* Set Current-Limit-Mode 1:DC 0:USB */
+       if (pdata->dcm)
+               gpio_set_value(pdata->dcm, ta_in ? 1 : 0);
+
+       /* Charger Enable / Disable (cen is negated) */
+       if (pdata->cen)
+               gpio_set_value(pdata->cen, ta_in ? 0 :
+                               (data->usb_in ? 0 : 1));
+
+       dev_dbg(data->dev, "TA(DC-IN) Charger %s.\n", ta_in ?
+                       "Connected" : "Disconnected");
+
+       old_type = data->psy.type;
+
+       if (data->ta_in)
+               data->psy.type = POWER_SUPPLY_TYPE_MAINS;
+       else if (data->usb_in)
+               data->psy.type = POWER_SUPPLY_TYPE_USB;
+       else
+               data->psy.type = POWER_SUPPLY_TYPE_BATTERY;
+
+       if (old_type != data->psy.type)
+               power_supply_changed(&data->psy);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t max8903_usbin(int irq, void *_data)
+{
+       struct max8903_data *data = _data;
+       struct max8903_pdata *pdata = data->pdata;
+       bool usb_in;
+       enum power_supply_type old_type;
+
+       usb_in = gpio_get_value(pdata->uok) ? false : true;
+
+       if (usb_in == data->usb_in)
+               return IRQ_HANDLED;
+
+       data->usb_in = usb_in;
+
+       /* Do not touch Current-Limit-Mode */
+
+       /* Charger Enable / Disable (cen is negated) */
+       if (pdata->cen)
+               gpio_set_value(pdata->cen, usb_in ? 0 :
+                               (data->ta_in ? 0 : 1));
+
+       dev_dbg(data->dev, "USB Charger %s.\n", usb_in ?
+                       "Connected" : "Disconnected");
+
+       old_type = data->psy.type;
+
+       if (data->ta_in)
+               data->psy.type = POWER_SUPPLY_TYPE_MAINS;
+       else if (data->usb_in)
+               data->psy.type = POWER_SUPPLY_TYPE_USB;
+       else
+               data->psy.type = POWER_SUPPLY_TYPE_BATTERY;
+
+       if (old_type != data->psy.type)
+               power_supply_changed(&data->psy);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t max8903_fault(int irq, void *_data)
+{
+       struct max8903_data *data = _data;
+       struct max8903_pdata *pdata = data->pdata;
+       bool fault;
+
+       fault = gpio_get_value(pdata->flt) ? false : true;
+
+       if (fault == data->fault)
+               return IRQ_HANDLED;
+
+       data->fault = fault;
+
+       if (fault)
+               dev_err(data->dev, "Charger suffers a fault and stops.\n");
+       else
+               dev_err(data->dev, "Charger recovered from a fault.\n");
+
+       return IRQ_HANDLED;
+}
+
+static __devinit int max8903_probe(struct platform_device *pdev)
+{
+       struct max8903_data *data;
+       struct device *dev = &pdev->dev;
+       struct max8903_pdata *pdata = pdev->dev.platform_data;
+       int ret = 0;
+       int gpio;
+       int ta_in = 0;
+       int usb_in = 0;
+
+       data = kzalloc(sizeof(struct max8903_data), GFP_KERNEL);
+       if (data == NULL) {
+               dev_err(dev, "Cannot allocate memory.\n");
+               return -ENOMEM;
+       }
+       data->pdata = pdata;
+       data->dev = dev;
+       platform_set_drvdata(pdev, data);
+
+       if (pdata->dc_valid == false && pdata->usb_valid == false) {
+               dev_err(dev, "No valid power sources.\n");
+               ret = -EINVAL;
+               goto err;
+       }
+
+       if (pdata->dc_valid) {
+               if (pdata->dok && gpio_is_valid(pdata->dok) &&
+                               pdata->dcm && gpio_is_valid(pdata->dcm)) {
+                       gpio = pdata->dok; /* PULL_UPed Interrupt */
+                       ta_in = gpio_get_value(gpio) ? 0 : 1;
+
+                       gpio = pdata->dcm; /* Output */
+                       gpio_set_value(gpio, ta_in);
+               } else {
+                       dev_err(dev, "When DC is wired, DOK and DCM should"
+                                       " be wired as well.\n");
+                       ret = -EINVAL;
+                       goto err;
+               }
+       } else {
+               if (pdata->dcm) {
+                       if (gpio_is_valid(pdata->dcm))
+                               gpio_set_value(pdata->dcm, 0);
+                       else {
+                               dev_err(dev, "Invalid pin: dcm.\n");
+                               ret = -EINVAL;
+                               goto err;
+                       }
+               }
+       }
+
+       if (pdata->usb_valid) {
+               if (pdata->uok && gpio_is_valid(pdata->uok)) {
+                       gpio = pdata->uok;
+                       usb_in = gpio_get_value(gpio) ? 0 : 1;
+               } else {
+                       dev_err(dev, "When USB is wired, UOK should be wired"
+                                       " as well.\n");
+                       ret = -EINVAL;
+                       goto err;
+               }
+       }
+
+       if (pdata->cen) {
+               if (gpio_is_valid(pdata->cen)) {
+                       gpio_set_value(pdata->cen, (ta_in || usb_in) ? 0 : 1);
+               } else {
+                       dev_err(dev, "Invalid pin: cen.\n");
+                       ret = -EINVAL;
+                       goto err;
+               }
+       }
+
+       if (pdata->chg) {
+               if (!gpio_is_valid(pdata->chg)) {
+                       dev_err(dev, "Invalid pin: chg.\n");
+                       ret = -EINVAL;
+                       goto err;
+               }
+       }
+
+       if (pdata->flt) {
+               if (!gpio_is_valid(pdata->flt)) {
+                       dev_err(dev, "Invalid pin: flt.\n");
+                       ret = -EINVAL;
+                       goto err;
+               }
+       }
+
+       if (pdata->usus) {
+               if (!gpio_is_valid(pdata->usus)) {
+                       dev_err(dev, "Invalid pin: usus.\n");
+                       ret = -EINVAL;
+                       goto err;
+               }
+       }
+
+       data->fault = false;
+       data->ta_in = ta_in;
+       data->usb_in = usb_in;
+
+       data->psy.name = "max8903_charger";
+       data->psy.type = (ta_in) ? POWER_SUPPLY_TYPE_MAINS :
+                       ((usb_in) ? POWER_SUPPLY_TYPE_USB :
+                        POWER_SUPPLY_TYPE_BATTERY);
+       data->psy.get_property = max8903_get_property;
+       data->psy.properties = max8903_charger_props;
+       data->psy.num_properties = ARRAY_SIZE(max8903_charger_props);
+
+       ret = power_supply_register(dev, &data->psy);
+       if (ret) {
+               dev_err(dev, "failed: power supply register.\n");
+               goto err;
+       }
+
+       if (pdata->dc_valid) {
+               ret = request_threaded_irq(gpio_to_irq(pdata->dok),
+                               NULL, max8903_dcin,
+                               IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+                               "MAX8903 DC IN", data);
+               if (ret) {
+                       dev_err(dev, "Cannot request irq %d for DC (%d)\n",
+                                       gpio_to_irq(pdata->dok), ret);
+                       goto err_psy;
+               }
+       }
+
+       if (pdata->usb_valid) {
+               ret = request_threaded_irq(gpio_to_irq(pdata->uok),
+                               NULL, max8903_usbin,
+                               IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+                               "MAX8903 USB IN", data);
+               if (ret) {
+                       dev_err(dev, "Cannot request irq %d for USB (%d)\n",
+                                       gpio_to_irq(pdata->uok), ret);
+                       goto err_dc_irq;
+               }
+       }
+
+       if (pdata->flt) {
+               ret = request_threaded_irq(gpio_to_irq(pdata->flt),
+                               NULL, max8903_fault,
+                               IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+                               "MAX8903 Fault", data);
+               if (ret) {
+                       dev_err(dev, "Cannot request irq %d for Fault (%d)\n",
+                                       gpio_to_irq(pdata->flt), ret);
+                       goto err_usb_irq;
+               }
+       }
+
+       return 0;
+
+err_usb_irq:
+       if (pdata->usb_valid)
+               free_irq(gpio_to_irq(pdata->uok), data);
+err_dc_irq:
+       if (pdata->dc_valid)
+               free_irq(gpio_to_irq(pdata->dok), data);
+err_psy:
+       power_supply_unregister(&data->psy);
+err:
+       kfree(data);
+       return ret;
+}
+
+static __devexit int max8903_remove(struct platform_device *pdev)
+{
+       struct max8903_data *data = platform_get_drvdata(pdev);
+
+       if (data) {
+               struct max8903_pdata *pdata = data->pdata;
+
+               if (pdata->flt)
+                       free_irq(gpio_to_irq(pdata->flt), data);
+               if (pdata->usb_valid)
+                       free_irq(gpio_to_irq(pdata->uok), data);
+               if (pdata->dc_valid)
+                       free_irq(gpio_to_irq(pdata->dok), data);
+               power_supply_unregister(&data->psy);
+               kfree(data);
+       }
+
+       return 0;
+}
+
+static struct platform_driver max8903_driver = {
+       .probe  = max8903_probe,
+       .remove = __devexit_p(max8903_remove),
+       .driver = {
+               .name   = "max8903-charger",
+               .owner  = THIS_MODULE,
+       },
+};
+
+static int __init max8903_init(void)
+{
+       return platform_driver_register(&max8903_driver);
+}
+module_init(max8903_init);
+
+static void __exit max8903_exit(void)
+{
+       platform_driver_unregister(&max8903_driver);
+}
+module_exit(max8903_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MAX8903 Charger Driver");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_ALIAS("max8903-charger");
index 8e5aec26086681c03908a91cfd3e3ea4a314e9f5..a70e16d3a3dc1d31942ccf6cf788d06b1c0ea3b1 100644 (file)
@@ -425,16 +425,11 @@ static __devexit int max8925_deinit_charger(struct max8925_power_info *info)
 static __devinit int max8925_power_probe(struct platform_device *pdev)
 {
        struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
-       struct max8925_platform_data *max8925_pdata;
        struct max8925_power_pdata *pdata = NULL;
        struct max8925_power_info *info;
        int ret;
 
-       if (pdev->dev.parent->platform_data) {
-               max8925_pdata = pdev->dev.parent->platform_data;
-               pdata = max8925_pdata->power;
-       }
-
+       pdata = pdev->dev.platform_data;
        if (!pdata) {
                dev_err(&pdev->dev, "platform data isn't assigned to "
                        "power supply\n");
@@ -447,6 +442,7 @@ static __devinit int max8925_power_probe(struct platform_device *pdev)
        info->chip = chip;
        info->gpm = chip->i2c;
        info->adc = chip->adc;
+       platform_set_drvdata(pdev, info);
 
        info->ac.name = "max8925-ac";
        info->ac.type = POWER_SUPPLY_TYPE_MAINS;
@@ -482,8 +478,6 @@ static __devinit int max8925_power_probe(struct platform_device *pdev)
        info->topoff_threshold = pdata->topoff_threshold;
        info->fast_charge = pdata->fast_charge;
        info->set_charger = pdata->set_charger;
-       dev_set_drvdata(&pdev->dev, info);
-       platform_set_drvdata(pdev, info);
 
        max8925_init_charger(chip, info);
        return 0;
index 0cd9f67d33e543a0e0a06cea9a2496e12211adff..b527c93bf2f3fbb11869333aa0cb36cad22ffea5 100644 (file)
@@ -3,6 +3,12 @@
  *
  * Copyright 2010  Anton Vorontsov <cbouatmailru@gmail.com>
  *
+ * Dynamic module parameter code from the Virtual Battery Driver
+ * Copyright (C) 2008 Pylone, Inc.
+ * By: Masashi YOKOTA <yokota@pylone.jp>
+ * Originally found here:
+ * http://downloads.pylone.jp/src/virtual_battery/virtual_battery-0.0.1.tar.bz2
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
 #include <linux/delay.h>
 #include <linux/vermagic.h>
 
-static int test_power_ac_online = 1;
-static int test_power_battery_status = POWER_SUPPLY_STATUS_CHARGING;
+static int ac_online                   = 1;
+static int battery_status              = POWER_SUPPLY_STATUS_DISCHARGING;
+static int battery_health              = POWER_SUPPLY_HEALTH_GOOD;
+static int battery_present             = 1; /* true */
+static int battery_technology          = POWER_SUPPLY_TECHNOLOGY_LION;
+static int battery_capacity            = 50;
 
 static int test_power_get_ac_property(struct power_supply *psy,
                                      enum power_supply_property psp,
@@ -24,7 +34,7 @@ static int test_power_get_ac_property(struct power_supply *psy,
 {
        switch (psp) {
        case POWER_SUPPLY_PROP_ONLINE:
-               val->intval = test_power_ac_online;
+               val->intval = ac_online;
                break;
        default:
                return -EINVAL;
@@ -47,22 +57,30 @@ static int test_power_get_battery_property(struct power_supply *psy,
                val->strval = UTS_RELEASE;
                break;
        case POWER_SUPPLY_PROP_STATUS:
-               val->intval = test_power_battery_status;
+               val->intval = battery_status;
                break;
        case POWER_SUPPLY_PROP_CHARGE_TYPE:
                val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
                break;
        case POWER_SUPPLY_PROP_HEALTH:
-               val->intval = POWER_SUPPLY_HEALTH_GOOD;
+               val->intval = battery_health;
+               break;
+       case POWER_SUPPLY_PROP_PRESENT:
+               val->intval = battery_present;
                break;
        case POWER_SUPPLY_PROP_TECHNOLOGY:
-               val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+               val->intval = battery_technology;
                break;
        case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
                val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
                break;
        case POWER_SUPPLY_PROP_CAPACITY:
-               val->intval = 50;
+       case POWER_SUPPLY_PROP_CHARGE_NOW:
+               val->intval = battery_capacity;
+               break;
+       case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
+       case POWER_SUPPLY_PROP_CHARGE_FULL:
+               val->intval = 100;
                break;
        case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
        case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
@@ -84,9 +102,11 @@ static enum power_supply_property test_power_battery_props[] = {
        POWER_SUPPLY_PROP_STATUS,
        POWER_SUPPLY_PROP_CHARGE_TYPE,
        POWER_SUPPLY_PROP_HEALTH,
+       POWER_SUPPLY_PROP_PRESENT,
        POWER_SUPPLY_PROP_TECHNOLOGY,
+       POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
        POWER_SUPPLY_PROP_CHARGE_FULL,
-       POWER_SUPPLY_PROP_CHARGE_EMPTY,
+       POWER_SUPPLY_PROP_CHARGE_NOW,
        POWER_SUPPLY_PROP_CAPACITY,
        POWER_SUPPLY_PROP_CAPACITY_LEVEL,
        POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
@@ -118,6 +138,7 @@ static struct power_supply test_power_supplies[] = {
        },
 };
 
+
 static int __init test_power_init(void)
 {
        int i;
@@ -145,8 +166,8 @@ static void __exit test_power_exit(void)
        int i;
 
        /* Let's see how we handle changes... */
-       test_power_ac_online = 0;
-       test_power_battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
+       ac_online = 0;
+       battery_status = POWER_SUPPLY_STATUS_DISCHARGING;
        for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++)
                power_supply_changed(&test_power_supplies[i]);
        pr_info("%s: 'changed' event sent, sleeping for 10 seconds...\n",
@@ -158,6 +179,241 @@ static void __exit test_power_exit(void)
 }
 module_exit(test_power_exit);
 
+
+
+#define MAX_KEYLENGTH 256
+struct battery_property_map {
+       int value;
+       char const *key;
+};
+
+static struct battery_property_map map_ac_online[] = {
+       { 0,  "off" },
+       { 1,  "on"  },
+       { -1, NULL  },
+};
+
+static struct battery_property_map map_status[] = {
+       { POWER_SUPPLY_STATUS_CHARGING,     "charging"     },
+       { POWER_SUPPLY_STATUS_DISCHARGING,  "discharging"  },
+       { POWER_SUPPLY_STATUS_NOT_CHARGING, "not-charging" },
+       { POWER_SUPPLY_STATUS_FULL,         "full"         },
+       { -1,                               NULL           },
+};
+
+static struct battery_property_map map_health[] = {
+       { POWER_SUPPLY_HEALTH_GOOD,           "good"        },
+       { POWER_SUPPLY_HEALTH_OVERHEAT,       "overheat"    },
+       { POWER_SUPPLY_HEALTH_DEAD,           "dead"        },
+       { POWER_SUPPLY_HEALTH_OVERVOLTAGE,    "overvoltage" },
+       { POWER_SUPPLY_HEALTH_UNSPEC_FAILURE, "failure"     },
+       { -1,                                 NULL          },
+};
+
+static struct battery_property_map map_present[] = {
+       { 0,  "false" },
+       { 1,  "true"  },
+       { -1, NULL    },
+};
+
+static struct battery_property_map map_technology[] = {
+       { POWER_SUPPLY_TECHNOLOGY_NiMH, "NiMH" },
+       { POWER_SUPPLY_TECHNOLOGY_LION, "LION" },
+       { POWER_SUPPLY_TECHNOLOGY_LIPO, "LIPO" },
+       { POWER_SUPPLY_TECHNOLOGY_LiFe, "LiFe" },
+       { POWER_SUPPLY_TECHNOLOGY_NiCd, "NiCd" },
+       { POWER_SUPPLY_TECHNOLOGY_LiMn, "LiMn" },
+       { -1,                           NULL   },
+};
+
+
+static int map_get_value(struct battery_property_map *map, const char *key,
+                               int def_val)
+{
+       char buf[MAX_KEYLENGTH];
+       int cr;
+
+       strncpy(buf, key, MAX_KEYLENGTH);
+       buf[MAX_KEYLENGTH-1] = '\0';
+
+       cr = strnlen(buf, MAX_KEYLENGTH) - 1;
+       if (buf[cr] == '\n')
+               buf[cr] = '\0';
+
+       while (map->key) {
+               if (strncasecmp(map->key, buf, MAX_KEYLENGTH) == 0)
+                       return map->value;
+               map++;
+       }
+
+       return def_val;
+}
+
+
+static const char *map_get_key(struct battery_property_map *map, int value,
+                               const char *def_key)
+{
+       while (map->key) {
+               if (map->value == value)
+                       return map->key;
+               map++;
+       }
+
+       return def_key;
+}
+
+static int param_set_ac_online(const char *key, const struct kernel_param *kp)
+{
+       ac_online = map_get_value(map_ac_online, key, ac_online);
+       power_supply_changed(&test_power_supplies[0]);
+       return 0;
+}
+
+static int param_get_ac_online(char *buffer, const struct kernel_param *kp)
+{
+       strcpy(buffer, map_get_key(map_ac_online, ac_online, "unknown"));
+       return strlen(buffer);
+}
+
+static int param_set_battery_status(const char *key,
+                                       const struct kernel_param *kp)
+{
+       battery_status = map_get_value(map_status, key, battery_status);
+       power_supply_changed(&test_power_supplies[1]);
+       return 0;
+}
+
+static int param_get_battery_status(char *buffer, const struct kernel_param *kp)
+{
+       strcpy(buffer, map_get_key(map_status, battery_status, "unknown"));
+       return strlen(buffer);
+}
+
+static int param_set_battery_health(const char *key,
+                                       const struct kernel_param *kp)
+{
+       battery_health = map_get_value(map_health, key, battery_health);
+       power_supply_changed(&test_power_supplies[1]);
+       return 0;
+}
+
+static int param_get_battery_health(char *buffer, const struct kernel_param *kp)
+{
+       strcpy(buffer, map_get_key(map_health, battery_health, "unknown"));
+       return strlen(buffer);
+}
+
+static int param_set_battery_present(const char *key,
+                                       const struct kernel_param *kp)
+{
+       battery_present = map_get_value(map_present, key, battery_present);
+       power_supply_changed(&test_power_supplies[0]);
+       return 0;
+}
+
+static int param_get_battery_present(char *buffer,
+                                       const struct kernel_param *kp)
+{
+       strcpy(buffer, map_get_key(map_present, battery_present, "unknown"));
+       return strlen(buffer);
+}
+
+static int param_set_battery_technology(const char *key,
+                                       const struct kernel_param *kp)
+{
+       battery_technology = map_get_value(map_technology, key,
+                                               battery_technology);
+       power_supply_changed(&test_power_supplies[1]);
+       return 0;
+}
+
+static int param_get_battery_technology(char *buffer,
+                                       const struct kernel_param *kp)
+{
+       strcpy(buffer,
+               map_get_key(map_technology, battery_technology, "unknown"));
+       return strlen(buffer);
+}
+
+static int param_set_battery_capacity(const char *key,
+                                       const struct kernel_param *kp)
+{
+       int tmp;
+
+       if (1 != sscanf(key, "%d", &tmp))
+               return -EINVAL;
+
+       battery_capacity = tmp;
+       power_supply_changed(&test_power_supplies[1]);
+       return 0;
+}
+
+#define param_get_battery_capacity param_get_int
+
+
+
+static struct kernel_param_ops param_ops_ac_online = {
+       .set = param_set_ac_online,
+       .get = param_get_ac_online,
+};
+
+static struct kernel_param_ops param_ops_battery_status = {
+       .set = param_set_battery_status,
+       .get = param_get_battery_status,
+};
+
+static struct kernel_param_ops param_ops_battery_present = {
+       .set = param_set_battery_present,
+       .get = param_get_battery_present,
+};
+
+static struct kernel_param_ops param_ops_battery_technology = {
+       .set = param_set_battery_technology,
+       .get = param_get_battery_technology,
+};
+
+static struct kernel_param_ops param_ops_battery_health = {
+       .set = param_set_battery_health,
+       .get = param_get_battery_health,
+};
+
+static struct kernel_param_ops param_ops_battery_capacity = {
+       .set = param_set_battery_capacity,
+       .get = param_get_battery_capacity,
+};
+
+
+#define param_check_ac_online(name, p) __param_check(name, p, void);
+#define param_check_battery_status(name, p) __param_check(name, p, void);
+#define param_check_battery_present(name, p) __param_check(name, p, void);
+#define param_check_battery_technology(name, p) __param_check(name, p, void);
+#define param_check_battery_health(name, p) __param_check(name, p, void);
+#define param_check_battery_capacity(name, p) __param_check(name, p, void);
+
+
+module_param(ac_online, ac_online, 0644);
+MODULE_PARM_DESC(ac_online, "AC charging state <on|off>");
+
+module_param(battery_status, battery_status, 0644);
+MODULE_PARM_DESC(battery_status,
+       "battery status <charging|discharging|not-charging|full>");
+
+module_param(battery_present, battery_present, 0644);
+MODULE_PARM_DESC(battery_present,
+       "battery presence state <good|overheat|dead|overvoltage|failure>");
+
+module_param(battery_technology, battery_technology, 0644);
+MODULE_PARM_DESC(battery_technology,
+       "battery technology <NiMH|LION|LIPO|LiFe|NiCd|LiMn>");
+
+module_param(battery_health, battery_health, 0644);
+MODULE_PARM_DESC(battery_health,
+       "battery health state <good|overheat|dead|overvoltage|failure>");
+
+module_param(battery_capacity, battery_capacity, 0644);
+MODULE_PARM_DESC(battery_capacity, "battery capacity (percentage)");
+
+
 MODULE_DESCRIPTION("Power supply driver for testing");
 MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>");
 MODULE_LICENSE("GPL");
index e5ced3a4c1ed9a8ad4b38467cfe199a15e472bc3..d119c38b3ff632ef158f66a971799c522202c6a3 100644 (file)
@@ -271,24 +271,33 @@ static int __devexit z2_batt_remove(struct i2c_client *client)
 }
 
 #ifdef CONFIG_PM
-static int z2_batt_suspend(struct i2c_client *client, pm_message_t state)
+static int z2_batt_suspend(struct device *dev)
 {
+       struct i2c_client *client = to_i2c_client(dev);
        struct z2_charger *charger = i2c_get_clientdata(client);
 
        flush_work_sync(&charger->bat_work);
        return 0;
 }
 
-static int z2_batt_resume(struct i2c_client *client)
+static int z2_batt_resume(struct device *dev)
 {
+       struct i2c_client *client = to_i2c_client(dev);
        struct z2_charger *charger = i2c_get_clientdata(client);
 
        schedule_work(&charger->bat_work);
        return 0;
 }
+
+static const struct dev_pm_ops z2_battery_pm_ops = {
+       .suspend        = z2_batt_suspend,
+       .resume         = z2_batt_resume,
+};
+
+#define        Z2_BATTERY_PM_OPS       (&z2_battery_pm_ops)
+
 #else
-#define z2_batt_suspend NULL
-#define z2_batt_resume NULL
+#define        Z2_BATTERY_PM_OPS       (NULL)
 #endif
 
 static const struct i2c_device_id z2_batt_id[] = {
@@ -301,11 +310,10 @@ static struct i2c_driver z2_batt_driver = {
        .driver = {
                .name   = "z2-battery",
                .owner  = THIS_MODULE,
+               .pm     = Z2_BATTERY_PM_OPS
        },
        .probe          = z2_batt_probe,
        .remove         = z2_batt_remove,
-       .suspend        = z2_batt_suspend,
-       .resume         = z2_batt_resume,
        .id_table       = z2_batt_id,
 };
 
index 859251250b55bb400d548924711e665ebb26b5f2..d63fddb0fbb0d5ead8cadb323bfbff49b2a3e00f 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
-#include <linux/mfd/core.h>
 #include <linux/mfd/88pm860x.h>
 
 struct pm8607_regulator_info {
@@ -399,36 +398,33 @@ static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
 {
        struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
        struct pm8607_regulator_info *info = NULL;
-       struct regulator_init_data *pdata;
-       struct mfd_cell *cell;
+       struct regulator_init_data *pdata = pdev->dev.platform_data;
+       struct resource *res;
        int i;
 
-       cell = pdev->dev.platform_data;
-       if (cell == NULL)
-               return -ENODEV;
-       pdata = cell->mfd_data;
-       if (pdata == NULL)
+       res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+       if (res == NULL) {
+               dev_err(&pdev->dev, "No I/O resource!\n");
                return -EINVAL;
-
+       }
        for (i = 0; i < ARRAY_SIZE(pm8607_regulator_info); i++) {
                info = &pm8607_regulator_info[i];
-               if (!strcmp(info->desc.name, pdata->constraints.name))
+               if (info->desc.id == res->start)
                        break;
        }
-       if (i > ARRAY_SIZE(pm8607_regulator_info)) {
-               dev_err(&pdev->dev, "Failed to find regulator %s\n",
-                       pdata->constraints.name);
+       if ((i < 0) || (i > PM8607_ID_RG_MAX)) {
+               dev_err(&pdev->dev, "Failed to find regulator %llu\n",
+                       (unsigned long long)res->start);
                return -EINVAL;
        }
-
        info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion;
        info->chip = chip;
 
        /* check DVC ramp slope double */
-       if (!strcmp(info->desc.name, "BUCK3"))
-               if (info->chip->buck3_double)
-                       info->slope_double = 1;
+       if ((i == PM8607_ID_BUCK3) && info->chip->buck3_double)
+               info->slope_double = 1;
 
+       /* replace driver_data with info */
        info->regulator = regulator_register(&info->desc, &pdev->dev,
                                             pdata, info);
        if (IS_ERR(info->regulator)) {
index f0b13a0d1851542ca4ca05157fff2981799fe482..d7ed20f293d7f4af578d8d8098fae6bc609fb8b7 100644 (file)
@@ -297,5 +297,11 @@ config REGULATOR_TPS6524X
          serial interface currently supported on the sequencer serial
          port controller.
 
+config REGULATOR_TPS65910
+       tristate "TI TPS65910 Power Regulator"
+       depends on MFD_TPS65910
+       help
+         This driver supports TPS65910 voltage regulator chips.
+
 endif
 
index 165ff5371e9e0ab78615763abbcf026001fd4e24..3932d2ec38f36f08ea209dfd50f0a32ba1b75121 100644 (file)
@@ -42,5 +42,6 @@ obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
 obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o
 obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o
 obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
+obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
 
 ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
index b1d77946e9c64dd69aed6f840fcaf2c685846e92..585e4946fe0a7157b6ccaf6ae59465f788505a38 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/platform_device.h>
 #include <linux/regulator/driver.h>
 #include <linux/mfd/abx500.h>
-#include <linux/mfd/core.h>
 
 /* LDO registers and some handy masking definitions for AB3100 */
 #define AB3100_LDO_A           0x40
@@ -582,7 +581,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
 
 static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
 {
-       struct ab3100_platform_data *plfdata = mfd_get_data(pdev);
+       struct ab3100_platform_data *plfdata = pdev->dev.platform_data;
        int err = 0;
        u8 data;
        int i;
index 0fae51c4845a3cf703f220c11d313be6a8ee099c..d3e38790906ed889de2b78454516ec1410eb6466 100644 (file)
@@ -158,6 +158,13 @@ static int regulator_check_consumers(struct regulator_dev *rdev,
        struct regulator *regulator;
 
        list_for_each_entry(regulator, &rdev->consumer_list, list) {
+               /*
+                * Assume consumers that didn't say anything are OK
+                * with anything in the constraint range.
+                */
+               if (!regulator->min_uV && !regulator->max_uV)
+                       continue;
+
                if (*max_uV > regulator->max_uV)
                        *max_uV = regulator->max_uV;
                if (*min_uV < regulator->min_uV)
@@ -197,9 +204,9 @@ static int regulator_check_current_limit(struct regulator_dev *rdev,
 }
 
 /* operating mode constraint check */
-static int regulator_check_mode(struct regulator_dev *rdev, int mode)
+static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode)
 {
-       switch (mode) {
+       switch (*mode) {
        case REGULATOR_MODE_FAST:
        case REGULATOR_MODE_NORMAL:
        case REGULATOR_MODE_IDLE:
@@ -217,11 +224,17 @@ static int regulator_check_mode(struct regulator_dev *rdev, int mode)
                rdev_err(rdev, "operation not allowed\n");
                return -EPERM;
        }
-       if (!(rdev->constraints->valid_modes_mask & mode)) {
-               rdev_err(rdev, "invalid mode %x\n", mode);
-               return -EINVAL;
+
+       /* The modes are bitmasks, the most power hungry modes having
+        * the lowest values. If the requested mode isn't supported
+        * try higher modes. */
+       while (*mode) {
+               if (rdev->constraints->valid_modes_mask & *mode)
+                       return 0;
+               *mode /= 2;
        }
-       return 0;
+
+       return -EINVAL;
 }
 
 /* dynamic regulator mode switching constraint check */
@@ -612,7 +625,7 @@ static void drms_uA_update(struct regulator_dev *rdev)
                                                  output_uV, current_uA);
 
        /* check the new mode is allowed */
-       err = regulator_check_mode(rdev, mode);
+       err = regulator_mode_constrain(rdev, &mode);
        if (err == 0)
                rdev->desc->ops->set_mode(rdev, mode);
 }
@@ -718,6 +731,10 @@ static void print_constraints(struct regulator_dev *rdev)
                        count += sprintf(buf + count, "at %d mV ", ret / 1000);
        }
 
+       if (constraints->uV_offset)
+               count += sprintf(buf, "%dmV offset ",
+                                constraints->uV_offset / 1000);
+
        if (constraints->min_uA && constraints->max_uA) {
                if (constraints->min_uA == constraints->max_uA)
                        count += sprintf(buf + count, "%d mA ",
@@ -1498,13 +1515,14 @@ static int _regulator_force_disable(struct regulator_dev *rdev,
  */
 int regulator_force_disable(struct regulator *regulator)
 {
+       struct regulator_dev *rdev = regulator->rdev;
        struct regulator_dev *supply_rdev = NULL;
        int ret;
 
-       mutex_lock(&regulator->rdev->mutex);
+       mutex_lock(&rdev->mutex);
        regulator->uA_load = 0;
-       ret = _regulator_force_disable(regulator->rdev, &supply_rdev);
-       mutex_unlock(&regulator->rdev->mutex);
+       ret = _regulator_force_disable(rdev, &supply_rdev);
+       mutex_unlock(&rdev->mutex);
 
        if (supply_rdev)
                regulator_disable(get_device_regulator(rdev_get_dev(supply_rdev)));
@@ -1634,6 +1652,9 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
 
        trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV);
 
+       min_uV += rdev->constraints->uV_offset;
+       max_uV += rdev->constraints->uV_offset;
+
        if (rdev->desc->ops->set_voltage) {
                ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV,
                                                   &selector);
@@ -1858,18 +1879,22 @@ EXPORT_SYMBOL_GPL(regulator_sync_voltage);
 
 static int _regulator_get_voltage(struct regulator_dev *rdev)
 {
-       int sel;
+       int sel, ret;
 
        if (rdev->desc->ops->get_voltage_sel) {
                sel = rdev->desc->ops->get_voltage_sel(rdev);
                if (sel < 0)
                        return sel;
-               return rdev->desc->ops->list_voltage(rdev, sel);
-       }
-       if (rdev->desc->ops->get_voltage)
-               return rdev->desc->ops->get_voltage(rdev);
-       else
+               ret = rdev->desc->ops->list_voltage(rdev, sel);
+       } else if (rdev->desc->ops->get_voltage) {
+               ret = rdev->desc->ops->get_voltage(rdev);
+       } else {
                return -EINVAL;
+       }
+
+       if (ret < 0)
+               return ret;
+       return ret - rdev->constraints->uV_offset;
 }
 
 /**
@@ -2005,7 +2030,7 @@ int regulator_set_mode(struct regulator *regulator, unsigned int mode)
        }
 
        /* constraints check */
-       ret = regulator_check_mode(rdev, mode);
+       ret = regulator_mode_constrain(rdev, &mode);
        if (ret < 0)
                goto out;
 
@@ -2081,16 +2106,26 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
 
        mutex_lock(&rdev->mutex);
 
+       /*
+        * first check to see if we can set modes at all, otherwise just
+        * tell the consumer everything is OK.
+        */
        regulator->uA_load = uA_load;
        ret = regulator_check_drms(rdev);
-       if (ret < 0)
+       if (ret < 0) {
+               ret = 0;
                goto out;
-       ret = -EINVAL;
+       }
 
-       /* sanity check */
        if (!rdev->desc->ops->get_optimum_mode)
                goto out;
 
+       /*
+        * we can actually do this so any errors are indicators of
+        * potential real failure.
+        */
+       ret = -EINVAL;
+
        /* get output voltage */
        output_uV = _regulator_get_voltage(rdev);
        if (output_uV <= 0) {
@@ -2116,7 +2151,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load)
        mode = rdev->desc->ops->get_optimum_mode(rdev,
                                                 input_uV, output_uV,
                                                 total_uA_load);
-       ret = regulator_check_mode(rdev, mode);
+       ret = regulator_mode_constrain(rdev, &mode);
        if (ret < 0) {
                rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n",
                         total_uA_load, input_uV, output_uV);
@@ -2589,14 +2624,6 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
        if (ret < 0)
                goto scrub;
 
-       /* set supply regulator if it exists */
-       if (init_data->supply_regulator && init_data->supply_regulator_dev) {
-               dev_err(dev,
-                       "Supply regulator specified by both name and dev\n");
-               ret = -EINVAL;
-               goto scrub;
-       }
-
        if (init_data->supply_regulator) {
                struct regulator_dev *r;
                int found = 0;
@@ -2621,14 +2648,6 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
                        goto scrub;
        }
 
-       if (init_data->supply_regulator_dev) {
-               dev_warn(dev, "Uses supply_regulator_dev instead of regulator_supply\n");
-               ret = set_supply(rdev,
-                       dev_get_drvdata(init_data->supply_regulator_dev));
-               if (ret < 0)
-                       goto scrub;
-       }
-
        /* add consumers devices */
        for (i = 0; i < init_data->num_consumer_supplies; i++) {
                ret = set_consumer_device_supply(rdev,
index 1089a961616e093179e676bccb14b139e0de5d0d..e5f7b8fe51f4b74fe4a71599043e4b396823ee19 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/err.h>
 #include <linux/spinlock.h>
 #include <linux/platform_device.h>
-#include <linux/mfd/core.h>
 #include <linux/mfd/db8500-prcmu.h>
 #include <linux/regulator/driver.h>
 #include <linux/regulator/machine.h>
@@ -471,7 +470,8 @@ static struct db8500_regulator_info
 
 static int __devinit db8500_regulator_probe(struct platform_device *pdev)
 {
-       struct regulator_init_data *db8500_init_data = mfd_get_data(pdev);
+       struct regulator_init_data *db8500_init_data =
+                                       dev_get_platdata(&pdev->dev);
        int i, err;
 
        /* register all regulators */
index 8ae147549c6aabb2c65b9fd60f8aec8bb6f67a26..e4dbd667c043577b2cfe250df38fbbdcf3869eb1 100644 (file)
 #define SD1_DVM_SHIFT          5               /* SDCTL1 bit5 */
 #define SD1_DVM_EN             6               /* SDV1 bit 6 */
 
+/* bit definitions in SD & LDO control registers */
+#define OUT_ENABLE             0x1f            /* Power U/D sequence as I2C */
+#define OUT_DISABLE            0x1e            /* Power U/D sequence as I2C */
+
 struct max8925_regulator_info {
        struct regulator_desc   desc;
        struct regulator_dev    *regulator;
@@ -93,8 +97,8 @@ static int max8925_enable(struct regulator_dev *rdev)
        struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
 
        return max8925_set_bits(info->i2c, info->enable_reg,
-                               1 << info->enable_bit,
-                               1 << info->enable_bit);
+                               OUT_ENABLE << info->enable_bit,
+                               OUT_ENABLE << info->enable_bit);
 }
 
 static int max8925_disable(struct regulator_dev *rdev)
@@ -102,7 +106,8 @@ static int max8925_disable(struct regulator_dev *rdev)
        struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
 
        return max8925_set_bits(info->i2c, info->enable_reg,
-                               1 << info->enable_bit, 0);
+                               OUT_ENABLE << info->enable_bit,
+                               OUT_DISABLE << info->enable_bit);
 }
 
 static int max8925_is_enabled(struct regulator_dev *rdev)
index 77e0cfb30b23fcc5ff274cf66ac96b94a532a62d..10d5a1d9768e3fd973ea4f8ea6ab388d9a24eaaa 100644 (file)
@@ -267,7 +267,6 @@ static int max8997_get_enable_register(struct regulator_dev *rdev,
        default:
                /* Not controllable or not exists */
                return -EINVAL;
-               break;
        }
 
        return 0;
@@ -1033,11 +1032,11 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
 
        /* For the safety, set max voltage before setting up */
        for (i = 0; i < 8; i++) {
-               max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS(i + 1),
+               max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS1 + i,
                                max_buck1, 0x3f);
-               max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS(i + 1),
+               max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS1 + i,
                                max_buck2, 0x3f);
-               max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS(i + 1),
+               max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS1 + i,
                                max_buck5, 0x3f);
        }
 
@@ -1114,13 +1113,13 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
 
        /* Initialize all the DVS related BUCK registers */
        for (i = 0; i < 8; i++) {
-               max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS(i + 1),
+               max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS1 + i,
                                max8997->buck1_vol[i],
                                0x3f);
-               max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS(i + 1),
+               max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS1 + i,
                                max8997->buck2_vol[i],
                                0x3f);
-               max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS(i + 1),
+               max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS1 + i,
                                max8997->buck5_vol[i],
                                0x3f);
        }
index f57e9c42fdb4e2957ee767d68897ca36e57ae219..41a1495eec2bc28c5d58a9b68b8c2ef2f7b558a3 100644 (file)
@@ -732,13 +732,15 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
                if (!pdata->buck1_set1) {
                        printk(KERN_ERR "MAX8998 SET1 GPIO defined as 0 !\n");
                        WARN_ON(!pdata->buck1_set1);
-                       return -EIO;
+                       ret = -EIO;
+                       goto err_free_mem;
                }
                /* Check if SET2 is not equal to 0 */
                if (!pdata->buck1_set2) {
                        printk(KERN_ERR "MAX8998 SET2 GPIO defined as 0 !\n");
                        WARN_ON(!pdata->buck1_set2);
-                       return -EIO;
+                       ret = -EIO;
+                       goto err_free_mem;
                }
 
                gpio_request(pdata->buck1_set1, "MAX8998 BUCK1_SET1");
@@ -758,7 +760,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
                max8998->buck1_vol[0] = i;
                ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i);
                if (ret)
-                       return ret;
+                       goto err_free_mem;
 
                /* Set predefined value for BUCK1 register 2 */
                i = 0;
@@ -770,7 +772,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
                max8998->buck1_vol[1] = i;
                ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i);
                if (ret)
-                       return ret;
+                       goto err_free_mem;
 
                /* Set predefined value for BUCK1 register 3 */
                i = 0;
@@ -782,7 +784,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
                max8998->buck1_vol[2] = i;
                ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE3, i);
                if (ret)
-                       return ret;
+                       goto err_free_mem;
 
                /* Set predefined value for BUCK1 register 4 */
                i = 0;
@@ -794,7 +796,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
                max8998->buck1_vol[3] = i;
                ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE4, i);
                if (ret)
-                       return ret;
+                       goto err_free_mem;
 
        }
 
@@ -803,7 +805,8 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
                if (!pdata->buck2_set3) {
                        printk(KERN_ERR "MAX8998 SET3 GPIO defined as 0 !\n");
                        WARN_ON(!pdata->buck2_set3);
-                       return -EIO;
+                       ret = -EIO;
+                       goto err_free_mem;
                }
                gpio_request(pdata->buck2_set3, "MAX8998 BUCK2_SET3");
                gpio_direction_output(pdata->buck2_set3,
@@ -818,7 +821,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
                max8998->buck2_vol[0] = i;
                ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i);
                if (ret)
-                       return ret;
+                       goto err_free_mem;
 
                /* BUCK2 register 2 */
                i = 0;
@@ -830,7 +833,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
                max8998->buck2_vol[1] = i;
                ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i);
                if (ret)
-                       return ret;
+                       goto err_free_mem;
        }
 
        for (i = 0; i < pdata->num_regulators; i++) {
@@ -860,6 +863,7 @@ err:
                if (rdev[i])
                        regulator_unregister(rdev[i]);
 
+err_free_mem:
        kfree(max8998->rdev);
        kfree(max8998);
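
Every failure after the allocations now unwinds through err_free_mem instead of returning directly, so rdev and max8998 are no longer leaked on the early-exit paths. A minimal sketch of the idiom with placeholder names (example_probe, struct example and do_setup() are illustrative, not part of this driver):

    static int example_probe(struct platform_device *pdev)
    {
            struct example *ex;
            int ret;

            ex = kzalloc(sizeof(*ex), GFP_KERNEL);
            if (!ex)
                    return -ENOMEM;

            ret = do_setup(ex);
            if (ret)
                    goto err_free_mem;      /* unwind instead of returning */

            return 0;

    err_free_mem:
            kfree(ex);
            return ret;
    }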
 
index b8a00c7fa4418719c26e8760d61fcd6b561e6def..730f43ad415b11fb83803f84577eb1e5d7c4c738 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/regulator/driver.h>
 #include <linux/platform_device.h>
 #include <linux/kernel.h>
-#include <linux/mfd/core.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/err.h>
@@ -337,7 +336,8 @@ static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
 {
        struct mc13xxx_regulator_priv *priv;
        struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent);
-       struct mc13783_regulator_platform_data *pdata = mfd_get_data(pdev);
+       struct mc13783_regulator_platform_data *pdata =
+               dev_get_platdata(&pdev->dev);
        struct mc13783_regulator_init_data *init_data;
        int i, ret;
 
@@ -381,7 +381,8 @@ err:
 static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
 {
        struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
-       struct mc13783_regulator_platform_data *pdata = mfd_get_data(pdev);
+       struct mc13783_regulator_platform_data *pdata =
+               dev_get_platdata(&pdev->dev);
        int i;
 
        platform_set_drvdata(pdev, NULL);
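
dev_get_platdata() can only replace mfd_get_data() here because the MFD parent attaches the regulator platform data to the child device when it registers the cell. A rough sketch of that parent-side wiring, assuming the mc13xxx core uses the mfd_cell .platform_data/.pdata_size fields added for this conversion (names and values are illustrative):

    static struct mfd_cell mc13783_regulator_cell = {
            .name          = "mc13783-regulator",
            .platform_data = &regulator_pdata,  /* struct mc13783_regulator_platform_data */
            .pdata_size    = sizeof(regulator_pdata),
    };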
index 6f15168e5ed494e04cbc4d38a9589bdc0847c97c..3285d41842f281d4433cbab2e5752d5b50c1632c 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/regulator/driver.h>
 #include <linux/platform_device.h>
 #include <linux/kernel.h>
-#include <linux/mfd/core.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/err.h>
@@ -432,7 +431,8 @@ static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev,
                int min_uV, int max_uV, unsigned *selector)
 {
        struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev);
-       int hi, value, val, mask, id = rdev_get_id(rdev);
+       int hi, value, mask, id = rdev_get_id(rdev);
+       u32 valread;
        int ret;
 
        dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
@@ -448,15 +448,16 @@ static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev,
 
        mc13xxx_lock(priv->mc13xxx);
        ret = mc13xxx_reg_read(priv->mc13xxx,
-               mc13892_regulators[id].vsel_reg, &val);
+               mc13892_regulators[id].vsel_reg, &valread);
        if (ret)
                goto err;
 
-       hi  = val & MC13892_SWITCHERS0_SWxHI;
-       if (value > 1375)
+       if (value > 1375000)
                hi = 1;
-       if (value < 1100)
+       else if (value < 1100000)
                hi = 0;
+       else
+               hi = valread & MC13892_SWITCHERS0_SWxHI;
 
        if (hi) {
                value = (value - 1100000) / 25000;
@@ -465,8 +466,10 @@ static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev,
                value = (value - 600000) / 25000;
 
        mask = mc13892_regulators[id].vsel_mask | MC13892_SWITCHERS0_SWxHI;
-       ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg,
-                       mask, value << mc13892_regulators[id].vsel_shift);
+       valread = (valread & ~mask) |
+                       (value << mc13892_regulators[id].vsel_shift);
+       ret = mc13xxx_reg_write(priv->mc13xxx, mc13892_regulators[id].vsel_reg,
+                       valread);
 err:
        mc13xxx_unlock(priv->mc13xxx);
 
@@ -521,7 +524,8 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
 {
        struct mc13xxx_regulator_priv *priv;
        struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent);
-       struct mc13xxx_regulator_platform_data *pdata = mfd_get_data(pdev);
+       struct mc13xxx_regulator_platform_data *pdata =
+               dev_get_platdata(&pdev->dev);
        struct mc13xxx_regulator_init_data *init_data;
        int i, ret;
        u32 val;
@@ -595,7 +599,8 @@ err_free:
 static int __devexit mc13892_regulator_remove(struct platform_device *pdev)
 {
        struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
-       struct mc13xxx_regulator_platform_data *pdata = mfd_get_data(pdev);
+       struct mc13xxx_regulator_platform_data *pdata =
+               dev_get_platdata(&pdev->dev);
        int i;
 
        platform_set_drvdata(pdev, NULL);
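
Two fixes land in mc13892_sw_regulator_set_voltage(): the requested value is in microvolts, so the SWxHI range thresholds become 1100000/1375000 instead of the old millivolt constants, and for requests inside the overlap region the SWxHI bit read back from the register is preserved explicitly. The selector math, using the constants visible above:

    /* a request of 1200000 uV lies between 1100000 and 1375000, so SWxHI is kept as read back */
    /*   with SWxHI set:   value = (1200000 - 1100000) / 25000 = 4  */
    /*   with SWxHI clear: value = (1200000 -  600000) / 25000 = 24 */

The selector and the SWxHI bit are then written together, (valread & ~mask) | (value << vsel_shift), in a single register write rather than a separate read-modify-write.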
index 2bb5de1f2421283013e512ab6e643f4b9f05f38a..bc27ab1363784db2788dc6e158a6c68f63b9362c 100644 (file)
@@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
 
        dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
 
-       BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages);
+       BUG_ON(val >= mc13xxx_regulators[id].desc.n_voltages);
 
        return mc13xxx_regulators[id].voltages[val];
 }
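
Valid selectors run from 0 to n_voltages - 1, so a value equal to n_voltages is already past the end of the voltages[] table; `>` let exactly that case through to the lookup, `>=` traps it:

    BUG_ON(val >= mc13xxx_regulators[id].desc.n_voltages);  /* e.g. n_voltages == 4: selectors 0..3 valid, val == 4 trips */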
index 1661499feda4ea59ae3965290faca158218731a1..1011873896dc9205b4844aed9531de8cd9051531 100644 (file)
@@ -137,7 +137,7 @@ static struct regulator_desc tps6105x_regulator_desc = {
  */
 static int __devinit tps6105x_regulator_probe(struct platform_device *pdev)
 {
-       struct tps6105x *tps6105x = mfd_get_data(pdev);
+       struct tps6105x *tps6105x = dev_get_platdata(&pdev->dev);
        struct tps6105x_platform_data *pdata = tps6105x->pdata;
        int ret;
 
@@ -158,13 +158,14 @@ static int __devinit tps6105x_regulator_probe(struct platform_device *pdev)
                        "failed to register regulator\n");
                return ret;
        }
+       platform_set_drvdata(pdev, tps6105x);
 
        return 0;
 }
 
 static int __devexit tps6105x_regulator_remove(struct platform_device *pdev)
 {
-       struct tps6105x *tps6105x = platform_get_drvdata(pdev);
+       struct tps6105x *tps6105x = dev_get_platdata(&pdev->dev);
        regulator_unregister(tps6105x->regulator);
        return 0;
 }
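
Storing the chip handle with platform_set_drvdata() in probe also makes it reachable from any later platform callback. A one-line sketch (the surrounding callback is hypothetical, not part of this driver):

    struct tps6105x *tps6105x = platform_get_drvdata(pdev);  /* handle stored by tps6105x_regulator_probe() */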
index 60a7ca5409e9766bfe829c0d581d4dca910acc98..fbddc15e1811fefdec139965a425f45a5c05d473 100644 (file)
@@ -466,7 +466,6 @@ static struct regulator_ops tps65023_ldo_ops = {
 static int __devinit tps_65023_probe(struct i2c_client *client,
                                     const struct i2c_device_id *id)
 {
-       static int desc_id;
        const struct tps_info *info = (void *)id->driver_data;
        struct regulator_init_data *init_data;
        struct regulator_dev *rdev;
@@ -499,7 +498,7 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
                tps->info[i] = info;
 
                tps->desc[i].name = info->name;
-               tps->desc[i].id = desc_id++;
+               tps->desc[i].id = i;
                tps->desc[i].n_voltages = num_voltages[i];
                tps->desc[i].ops = (i > TPS65023_DCDC_3 ?
                                        &tps65023_ldo_ops : &tps65023_dcdc_ops);
index 0647552905992348652bc6316e7bec23e3257892..bfffabc21edabdffe051466b9d11b9fd60890400 100644 (file)
@@ -553,7 +553,6 @@ static __devinit
 int tps6507x_pmic_probe(struct platform_device *pdev)
 {
        struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent);
-       static int desc_id;
        struct tps_info *info = &tps6507x_pmic_regs[0];
        struct regulator_init_data *init_data;
        struct regulator_dev *rdev;
@@ -598,7 +597,7 @@ int tps6507x_pmic_probe(struct platform_device *pdev)
                }
 
                tps->desc[i].name = info->name;
-               tps->desc[i].id = desc_id++;
+               tps->desc[i].id = i;
                tps->desc[i].n_voltages = num_voltages[i];
                tps->desc[i].ops = (i > TPS6507X_DCDC_3 ?
                &tps6507x_pmic_ldo_ops : &tps6507x_pmic_dcdc_ops);
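
In both tps65023 and tps6507x the descriptor id now comes from the loop index instead of a file-scope static counter. The static counter survives across probe calls, so a second device or an unbind/rebind cycle keeps incrementing it while the info[] and num_voltages[] arrays are still indexed from 0; tying desc[i].id to i keeps the id aligned with those tables on every probe.

    /* old code, second probe: desc[0].id == 5 while table lookups still use index 0 (counts illustrative) */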
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
new file mode 100644 (file)
index 0000000..55dd4e6
--- /dev/null
@@ -0,0 +1,993 @@
+/*
+ * tps65910.c  --  TI tps65910
+ *
+ * Copyright 2010 Texas Instruments Inc.
+ *
+ * Author: Graeme Gregory <gg@slimlogic.co.uk>
+ * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under  the terms of the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/mfd/tps65910.h>
+
+#define TPS65910_REG_VRTC              0
+#define TPS65910_REG_VIO               1
+#define TPS65910_REG_VDD1              2
+#define TPS65910_REG_VDD2              3
+#define TPS65910_REG_VDD3              4
+#define TPS65910_REG_VDIG1             5
+#define TPS65910_REG_VDIG2             6
+#define TPS65910_REG_VPLL              7
+#define TPS65910_REG_VDAC              8
+#define TPS65910_REG_VAUX1             9
+#define TPS65910_REG_VAUX2             10
+#define TPS65910_REG_VAUX33            11
+#define TPS65910_REG_VMMC              12
+
+#define TPS65911_REG_VDDCTRL           4
+#define TPS65911_REG_LDO1              5
+#define TPS65911_REG_LDO2              6
+#define TPS65911_REG_LDO3              7
+#define TPS65911_REG_LDO4              8
+#define TPS65911_REG_LDO5              9
+#define TPS65911_REG_LDO6              10
+#define TPS65911_REG_LDO7              11
+#define TPS65911_REG_LDO8              12
+
+#define TPS65910_NUM_REGULATOR         13
+#define TPS65910_SUPPLY_STATE_ENABLED  0x1
+
+/* supported VIO voltages in millivolts */

+static const u16 VIO_VSEL_table[] = {
+       1500, 1800, 2500, 3300,
+};
+
+/* VSEL tables for TPS65910-specific LDOs and DCDCs */
+
+/* supported VDD3 voltages in millivolts */
+static const u16 VDD3_VSEL_table[] = {
+       5000,
+};
+
+/* supported VDIG1 voltages in millivolts */
+static const u16 VDIG1_VSEL_table[] = {
+       1200, 1500, 1800, 2700,
+};
+
+/* supported VDIG2 voltages in millivolts */
+static const u16 VDIG2_VSEL_table[] = {
+       1000, 1100, 1200, 1800,
+};
+
+/* supported VPLL voltages in millivolts */
+static const u16 VPLL_VSEL_table[] = {
+       1000, 1100, 1800, 2500,
+};
+
+/* supported VDAC voltages in millivolts */
+static const u16 VDAC_VSEL_table[] = {
+       1800, 2600, 2800, 2850,
+};
+
+/* supported VAUX1 voltages in millivolts */
+static const u16 VAUX1_VSEL_table[] = {
+       1800, 2500, 2800, 2850,
+};
+
+/* supported VAUX2 voltages in millivolts */
+static const u16 VAUX2_VSEL_table[] = {
+       1800, 2800, 2900, 3300,
+};
+
+/* supported VAUX33 voltages in millivolts */
+static const u16 VAUX33_VSEL_table[] = {
+       1800, 2000, 2800, 3300,
+};
+
+/* supported VMMC voltages in millivolts */
+static const u16 VMMC_VSEL_table[] = {
+       1800, 2800, 3000, 3300,
+};
+
+struct tps_info {
+       const char *name;
+       unsigned min_uV;
+       unsigned max_uV;
+       u8 table_len;
+       const u16 *table;
+};
+
+static struct tps_info tps65910_regs[] = {
+       {
+               .name = "VRTC",
+       },
+       {
+               .name = "VIO",
+               .min_uV = 1500000,
+               .max_uV = 3300000,
+               .table_len = ARRAY_SIZE(VIO_VSEL_table),
+               .table = VIO_VSEL_table,
+       },
+       {
+               .name = "VDD1",
+               .min_uV = 600000,
+               .max_uV = 4500000,
+       },
+       {
+               .name = "VDD2",
+               .min_uV = 600000,
+               .max_uV = 4500000,
+       },
+       {
+               .name = "VDD3",
+               .min_uV = 5000000,
+               .max_uV = 5000000,
+               .table_len = ARRAY_SIZE(VDD3_VSEL_table),
+               .table = VDD3_VSEL_table,
+       },
+       {
+               .name = "VDIG1",
+               .min_uV = 1200000,
+               .max_uV = 2700000,
+               .table_len = ARRAY_SIZE(VDIG1_VSEL_table),
+               .table = VDIG1_VSEL_table,
+       },
+       {
+               .name = "VDIG2",
+               .min_uV = 1000000,
+               .max_uV = 1800000,
+               .table_len = ARRAY_SIZE(VDIG2_VSEL_table),
+               .table = VDIG2_VSEL_table,
+       },
+       {
+               .name = "VPLL",
+               .min_uV = 1000000,
+               .max_uV = 2500000,
+               .table_len = ARRAY_SIZE(VPLL_VSEL_table),
+               .table = VPLL_VSEL_table,
+       },
+       {
+               .name = "VDAC",
+               .min_uV = 1800000,
+               .max_uV = 2850000,
+               .table_len = ARRAY_SIZE(VDAC_VSEL_table),
+               .table = VDAC_VSEL_table,
+       },
+       {
+               .name = "VAUX1",
+               .min_uV = 1800000,
+               .max_uV = 2850000,
+               .table_len = ARRAY_SIZE(VAUX1_VSEL_table),
+               .table = VAUX1_VSEL_table,
+       },
+       {
+               .name = "VAUX2",
+               .min_uV = 1800000,
+               .max_uV = 3300000,
+               .table_len = ARRAY_SIZE(VAUX2_VSEL_table),
+               .table = VAUX2_VSEL_table,
+       },
+       {
+               .name = "VAUX33",
+               .min_uV = 1800000,
+               .max_uV = 3300000,
+               .table_len = ARRAY_SIZE(VAUX33_VSEL_table),
+               .table = VAUX33_VSEL_table,
+       },
+       {
+               .name = "VMMC",
+               .min_uV = 1800000,
+               .max_uV = 3300000,
+               .table_len = ARRAY_SIZE(VMMC_VSEL_table),
+               .table = VMMC_VSEL_table,
+       },
+};
+
+static struct tps_info tps65911_regs[] = {
+       {
+               .name = "VIO",
+               .min_uV = 1500000,
+               .max_uV = 3300000,
+               .table_len = ARRAY_SIZE(VIO_VSEL_table),
+               .table = VIO_VSEL_table,
+       },
+       {
+               .name = "VDD1",
+               .min_uV = 600000,
+               .max_uV = 4500000,
+       },
+       {
+               .name = "VDD2",
+               .min_uV = 600000,
+               .max_uV = 4500000,
+       },
+       {
+               .name = "VDDCTRL",
+               .min_uV = 600000,
+               .max_uV = 1400000,
+       },
+       {
+               .name = "LDO1",
+               .min_uV = 1000000,
+               .max_uV = 3300000,
+       },
+       {
+               .name = "LDO2",
+               .min_uV = 1000000,
+               .max_uV = 3300000,
+       },
+       {
+               .name = "LDO3",
+               .min_uV = 1000000,
+               .max_uV = 3300000,
+       },
+       {
+               .name = "LDO4",
+               .min_uV = 1000000,
+               .max_uV = 3300000,
+       },
+       {
+               .name = "LDO5",
+               .min_uV = 1000000,
+               .max_uV = 3300000,
+       },
+       {
+               .name = "LDO6",
+               .min_uV = 1000000,
+               .max_uV = 3300000,
+       },
+       {
+               .name = "LDO7",
+               .min_uV = 1000000,
+               .max_uV = 3300000,
+       },
+       {
+               .name = "LDO8",
+               .min_uV = 1000000,
+               .max_uV = 3300000,
+       },
+};
+
+struct tps65910_reg {
+       struct regulator_desc desc[TPS65910_NUM_REGULATOR];
+       struct tps65910 *mfd;
+       struct regulator_dev *rdev[TPS65910_NUM_REGULATOR];
+       struct tps_info *info[TPS65910_NUM_REGULATOR];
+       struct mutex mutex;
+       int mode;
+       int  (*get_ctrl_reg)(int);
+};
+
+static inline int tps65910_read(struct tps65910_reg *pmic, u8 reg)
+{
+       u8 val;
+       int err;
+
+       err = pmic->mfd->read(pmic->mfd, reg, 1, &val);
+       if (err)
+               return err;
+
+       return val;
+}
+
+static inline int tps65910_write(struct tps65910_reg *pmic, u8 reg, u8 val)
+{
+       return pmic->mfd->write(pmic->mfd, reg, 1, &val);
+}
+
+static int tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg,
+                                       u8 set_mask, u8 clear_mask)
+{
+       int err, data;
+
+       mutex_lock(&pmic->mutex);
+
+       data = tps65910_read(pmic, reg);
+       if (data < 0) {
+               dev_err(pmic->mfd->dev, "Read from reg 0x%x failed\n", reg);
+               err = data;
+               goto out;
+       }
+
+       data &= ~clear_mask;
+       data |= set_mask;
+       err = tps65910_write(pmic, reg, data);
+       if (err)
+               dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
+
+out:
+       mutex_unlock(&pmic->mutex);
+       return err;
+}
+
+static int tps65910_reg_read(struct tps65910_reg *pmic, u8 reg)
+{
+       int data;
+
+       mutex_lock(&pmic->mutex);
+
+       data = tps65910_read(pmic, reg);
+       if (data < 0)
+               dev_err(pmic->mfd->dev, "Read from reg 0x%x failed\n", reg);
+
+       mutex_unlock(&pmic->mutex);
+       return data;
+}
+
+static int tps65910_reg_write(struct tps65910_reg *pmic, u8 reg, u8 val)
+{
+       int err;
+
+       mutex_lock(&pmic->mutex);
+
+       err = tps65910_write(pmic, reg, val);
+       if (err < 0)
+               dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
+
+       mutex_unlock(&pmic->mutex);
+       return err;
+}
+
+static int tps65910_get_ctrl_register(int id)
+{
+       switch (id) {
+       case TPS65910_REG_VRTC:
+               return TPS65910_VRTC;
+       case TPS65910_REG_VIO:
+               return TPS65910_VIO;
+       case TPS65910_REG_VDD1:
+               return TPS65910_VDD1;
+       case TPS65910_REG_VDD2:
+               return TPS65910_VDD2;
+       case TPS65910_REG_VDD3:
+               return TPS65910_VDD3;
+       case TPS65910_REG_VDIG1:
+               return TPS65910_VDIG1;
+       case TPS65910_REG_VDIG2:
+               return TPS65910_VDIG2;
+       case TPS65910_REG_VPLL:
+               return TPS65910_VPLL;
+       case TPS65910_REG_VDAC:
+               return TPS65910_VDAC;
+       case TPS65910_REG_VAUX1:
+               return TPS65910_VAUX1;
+       case TPS65910_REG_VAUX2:
+               return TPS65910_VAUX2;
+       case TPS65910_REG_VAUX33:
+               return TPS65910_VAUX33;
+       case TPS65910_REG_VMMC:
+               return TPS65910_VMMC;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int tps65911_get_ctrl_register(int id)
+{
+       switch (id) {
+       case TPS65910_REG_VRTC:
+               return TPS65910_VRTC;
+       case TPS65910_REG_VIO:
+               return TPS65910_VIO;
+       case TPS65910_REG_VDD1:
+               return TPS65910_VDD1;
+       case TPS65910_REG_VDD2:
+               return TPS65910_VDD2;
+       case TPS65911_REG_VDDCTRL:
+               return TPS65911_VDDCTRL;
+       case TPS65911_REG_LDO1:
+               return TPS65911_LDO1;
+       case TPS65911_REG_LDO2:
+               return TPS65911_LDO2;
+       case TPS65911_REG_LDO3:
+               return TPS65911_LDO3;
+       case TPS65911_REG_LDO4:
+               return TPS65911_LDO4;
+       case TPS65911_REG_LDO5:
+               return TPS65911_LDO5;
+       case TPS65911_REG_LDO6:
+               return TPS65911_LDO6;
+       case TPS65911_REG_LDO7:
+               return TPS65911_LDO7;
+       case TPS65911_REG_LDO8:
+               return TPS65911_LDO8;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int tps65910_is_enabled(struct regulator_dev *dev)
+{
+       struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+       int reg, value, id = rdev_get_id(dev);
+
+       reg = pmic->get_ctrl_reg(id);
+       if (reg < 0)
+               return reg;
+
+       value = tps65910_reg_read(pmic, reg);
+       if (value < 0)
+               return value;
+
+       return value & TPS65910_SUPPLY_STATE_ENABLED;
+}
+
+static int tps65910_enable(struct regulator_dev *dev)
+{
+       struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+       struct tps65910 *mfd = pmic->mfd;
+       int reg, id = rdev_get_id(dev);
+
+       reg = pmic->get_ctrl_reg(id);
+       if (reg < 0)
+               return reg;
+
+       return tps65910_set_bits(mfd, reg, TPS65910_SUPPLY_STATE_ENABLED);
+}
+
+static int tps65910_disable(struct regulator_dev *dev)
+{
+       struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+       struct tps65910 *mfd = pmic->mfd;
+       int reg, id = rdev_get_id(dev);
+
+       reg = pmic->get_ctrl_reg(id);
+       if (reg < 0)
+               return reg;
+
+       return tps65910_clear_bits(mfd, reg, TPS65910_SUPPLY_STATE_ENABLED);
+}
+
+
+static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
+{
+       struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+       struct tps65910 *mfd = pmic->mfd;
+       int reg, value, id = rdev_get_id(dev);
+
+       reg = pmic->get_ctrl_reg(id);
+       if (reg < 0)
+               return reg;
+
+       switch (mode) {
+       case REGULATOR_MODE_NORMAL:
+               return tps65910_modify_bits(pmic, reg, LDO_ST_ON_BIT,
+                                                       LDO_ST_MODE_BIT);
+       case REGULATOR_MODE_IDLE:
+               value = LDO_ST_ON_BIT | LDO_ST_MODE_BIT;
+               return tps65910_set_bits(mfd, reg, value);
+       case REGULATOR_MODE_STANDBY:
+               return tps65910_clear_bits(mfd, reg, LDO_ST_ON_BIT);
+       }
+
+       return -EINVAL;
+}
+
+static unsigned int tps65910_get_mode(struct regulator_dev *dev)
+{
+       struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+       int reg, value, id = rdev_get_id(dev);
+
+       reg = pmic->get_ctrl_reg(id);
+       if (reg < 0)
+               return reg;
+
+       value = tps65910_reg_read(pmic, reg);
+       if (value < 0)
+               return value;
+
+       if (value & LDO_ST_ON_BIT)
+               return REGULATOR_MODE_STANDBY;
+       else if (value & LDO_ST_MODE_BIT)
+               return REGULATOR_MODE_IDLE;
+       else
+               return REGULATOR_MODE_NORMAL;
+}
+
+static int tps65910_get_voltage_dcdc(struct regulator_dev *dev)
+{
+       struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+       int id = rdev_get_id(dev), voltage = 0;
+       int opvsel = 0, srvsel = 0, vselmax = 0, mult = 0, sr = 0;
+
+       switch (id) {
+       case TPS65910_REG_VDD1:
+               opvsel = tps65910_reg_read(pmic, TPS65910_VDD1_OP);
+               mult = tps65910_reg_read(pmic, TPS65910_VDD1);
+               mult = (mult & VDD1_VGAIN_SEL_MASK) >> VDD1_VGAIN_SEL_SHIFT;
+               srvsel = tps65910_reg_read(pmic, TPS65910_VDD1_SR);
+               sr = opvsel & VDD1_OP_CMD_MASK;
+               opvsel &= VDD1_OP_SEL_MASK;
+               srvsel &= VDD1_SR_SEL_MASK;
+               vselmax = 75;
+               break;
+       case TPS65910_REG_VDD2:
+               opvsel = tps65910_reg_read(pmic, TPS65910_VDD2_OP);
+               mult = tps65910_reg_read(pmic, TPS65910_VDD2);
+               mult = (mult & VDD2_VGAIN_SEL_MASK) >> VDD2_VGAIN_SEL_SHIFT;
+               srvsel = tps65910_reg_read(pmic, TPS65910_VDD2_SR);
+               sr = opvsel & VDD2_OP_CMD_MASK;
+               opvsel &= VDD2_OP_SEL_MASK;
+               srvsel &= VDD2_SR_SEL_MASK;
+               vselmax = 75;
+               break;
+       case TPS65911_REG_VDDCTRL:
+               opvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_OP);
+               srvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_SR);
+               sr = opvsel & VDDCTRL_OP_CMD_MASK;
+               opvsel &= VDDCTRL_OP_SEL_MASK;
+               srvsel &= VDDCTRL_SR_SEL_MASK;
+               vselmax = 64;
+               break;
+       }
+
+       /* a gain-select value of 0 also means a multiplier of 1; 2 and 3 are used as-is */
+       if (!mult)
+               mult = 1;
+
+       if (sr) {
+               /* normalise to valid range */
+               if (srvsel < 3)
+                       srvsel = 3;
+               if (srvsel > vselmax)
+                       srvsel = vselmax;
+               srvsel -= 3;
+
+               voltage = (srvsel * VDD1_2_OFFSET + VDD1_2_MIN_VOLT) * 100;
+       } else {
+               /* normalise to valid range */
+               if (opvsel < 3)
+                       opvsel = 3;
+               if (opvsel > vselmax)
+                       opvsel = vselmax;
+               opvsel -= 3;
+
+               voltage = (opvsel * VDD1_2_OFFSET + VDD1_2_MIN_VOLT) * 100;
+       }
+
+       voltage *= mult;
+
+       return voltage;
+}
+
+static int tps65910_get_voltage(struct regulator_dev *dev)
+{
+       struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+       int reg, value, id = rdev_get_id(dev), voltage = 0;
+
+       reg = pmic->get_ctrl_reg(id);
+       if (reg < 0)
+               return reg;
+
+       value = tps65910_reg_read(pmic, reg);
+       if (value < 0)
+               return value;
+
+       switch (id) {
+       case TPS65910_REG_VIO:
+       case TPS65910_REG_VDIG1:
+       case TPS65910_REG_VDIG2:
+       case TPS65910_REG_VPLL:
+       case TPS65910_REG_VDAC:
+       case TPS65910_REG_VAUX1:
+       case TPS65910_REG_VAUX2:
+       case TPS65910_REG_VAUX33:
+       case TPS65910_REG_VMMC:
+               value &= LDO_SEL_MASK;
+               value >>= LDO_SEL_SHIFT;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       voltage = pmic->info[id]->table[value] * 1000;
+
+       return voltage;
+}
+
+static int tps65910_get_voltage_vdd3(struct regulator_dev *dev)
+{
+       return 5 * 1000 * 1000;
+}
+
+static int tps65911_get_voltage(struct regulator_dev *dev)
+{
+       struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+       int step_mv, id = rdev_get_id(dev);
+       u8 value, reg;
+
+       reg = pmic->get_ctrl_reg(id);
+
+       value = tps65910_reg_read(pmic, reg);
+
+       switch (id) {
+       case TPS65911_REG_LDO1:
+       case TPS65911_REG_LDO2:
+       case TPS65911_REG_LDO4:
+               value &= LDO1_SEL_MASK;
+               value >>= LDO_SEL_SHIFT;
+               /* The first 5 values of the selector correspond to 1V */
+               if (value < 5)
+                       value = 0;
+               else
+                       value -= 4;
+
+               step_mv = 50;
+               break;
+       case TPS65911_REG_LDO3:
+       case TPS65911_REG_LDO5:
+       case TPS65911_REG_LDO6:
+       case TPS65911_REG_LDO7:
+       case TPS65911_REG_LDO8:
+               value &= LDO3_SEL_MASK;
+               value >>= LDO_SEL_SHIFT;
+               /* The first 3 values of the selector correspond to 1V */
+               if (value < 3)
+                       value = 0;
+               else
+                       value -= 2;
+
+               step_mv = 100;
+               break;
+       case TPS65910_REG_VIO:
+               return pmic->info[id]->table[value] * 1000;
+       default:
+               return -EINVAL;
+       }
+
+       return (LDO_MIN_VOLT + value * step_mv) * 1000;
+}
+
+static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
+                               unsigned selector)
+{
+       struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+       int id = rdev_get_id(dev), vsel;
+       int dcdc_mult = 0;
+
+       switch (id) {
+       case TPS65910_REG_VDD1:
+               dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1;
+               if (dcdc_mult == 1)
+                       dcdc_mult--;
+               vsel = (selector % VDD1_2_NUM_VOLTS) + 3;
+
+               tps65910_modify_bits(pmic, TPS65910_VDD1,
+                               (dcdc_mult << VDD1_VGAIN_SEL_SHIFT),
+                                               VDD1_VGAIN_SEL_MASK);
+               tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel);
+               break;
+       case TPS65910_REG_VDD2:
+               dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1;
+               if (dcdc_mult == 1)
+                       dcdc_mult--;
+               vsel = (selector % VDD1_2_NUM_VOLTS) + 3;
+
+               tps65910_modify_bits(pmic, TPS65910_VDD2,
+                               (dcdc_mult << VDD2_VGAIN_SEL_SHIFT),
+                                               VDD1_VGAIN_SEL_MASK);
+               tps65910_reg_write(pmic, TPS65910_VDD2_OP, vsel);
+               break;
+       case TPS65911_REG_VDDCTRL:
+               vsel = selector;
+               tps65910_reg_write(pmic, TPS65911_VDDCTRL_OP, vsel);
+       }
+
+       return 0;
+}
+
+static int tps65910_set_voltage(struct regulator_dev *dev, unsigned selector)
+{
+       struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+       int reg, id = rdev_get_id(dev);
+
+       reg = pmic->get_ctrl_reg(id);
+       if (reg < 0)
+               return reg;
+
+       switch (id) {
+       case TPS65910_REG_VIO:
+       case TPS65910_REG_VDIG1:
+       case TPS65910_REG_VDIG2:
+       case TPS65910_REG_VPLL:
+       case TPS65910_REG_VDAC:
+       case TPS65910_REG_VAUX1:
+       case TPS65910_REG_VAUX2:
+       case TPS65910_REG_VAUX33:
+       case TPS65910_REG_VMMC:
+               return tps65910_modify_bits(pmic, reg,
+                               (selector << LDO_SEL_SHIFT), LDO_SEL_MASK);
+       }
+
+       return -EINVAL;
+}
+
+static int tps65911_set_voltage(struct regulator_dev *dev, unsigned selector)
+{
+       struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+       int reg, id = rdev_get_id(dev);
+
+       reg = pmic->get_ctrl_reg(id);
+       if (reg < 0)
+               return reg;
+
+       switch (id) {
+       case TPS65911_REG_LDO1:
+       case TPS65911_REG_LDO2:
+       case TPS65911_REG_LDO4:
+               return tps65910_modify_bits(pmic, reg,
+                               (selector << LDO_SEL_SHIFT), LDO1_SEL_MASK);
+       case TPS65911_REG_LDO3:
+       case TPS65911_REG_LDO5:
+       case TPS65911_REG_LDO6:
+       case TPS65911_REG_LDO7:
+       case TPS65911_REG_LDO8:
+       case TPS65910_REG_VIO:
+               return tps65910_modify_bits(pmic, reg,
+                               (selector << LDO_SEL_SHIFT), LDO3_SEL_MASK);
+       }
+
+       return -EINVAL;
+}
+
+
+static int tps65910_list_voltage_dcdc(struct regulator_dev *dev,
+                                       unsigned selector)
+{
+       int volt, mult = 1, id = rdev_get_id(dev);
+
+       switch (id) {
+       case TPS65910_REG_VDD1:
+       case TPS65910_REG_VDD2:
+               mult = (selector / VDD1_2_NUM_VOLTS) + 1;
+               volt = VDD1_2_MIN_VOLT +
+                               (selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET;
+               break;
+       case TPS65911_REG_VDDCTRL:
+               volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET);
+               break;
+       }
+
+       return  volt * 100 * mult;
+}
+
+static int tps65910_list_voltage(struct regulator_dev *dev,
+                                       unsigned selector)
+{
+       struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+       int id = rdev_get_id(dev), voltage;
+
+       if (id < TPS65910_REG_VIO || id > TPS65910_REG_VMMC)
+               return -EINVAL;
+
+       if (selector >= pmic->info[id]->table_len)
+               return -EINVAL;
+       else
+               voltage = pmic->info[id]->table[selector] * 1000;
+
+       return voltage;
+}
+
+static int tps65911_list_voltage(struct regulator_dev *dev, unsigned selector)
+{
+       struct tps65910_reg *pmic = rdev_get_drvdata(dev);
+       int step_mv = 0, id = rdev_get_id(dev);
+
+       switch(id) {
+       case TPS65911_REG_LDO1:
+       case TPS65911_REG_LDO2:
+       case TPS65911_REG_LDO4:
+               /* The first 5 values of the selector correspond to 1V */
+               if (selector < 5)
+                       selector = 0;
+               else
+                       selector -= 4;
+
+               step_mv = 50;
+               break;
+       case TPS65911_REG_LDO3:
+       case TPS65911_REG_LDO5:
+       case TPS65911_REG_LDO6:
+       case TPS65911_REG_LDO7:
+       case TPS65911_REG_LDO8:
+               /* The first 3 values of the selector correspond to 1V */
+               if (selector < 3)
+                       selector = 0;
+               else
+                       selector -= 2;
+
+               step_mv = 100;
+               break;
+       case TPS65910_REG_VIO:
+               return pmic->info[id]->table[selector] * 1000;
+       default:
+               return -EINVAL;
+       }
+
+       return (LDO_MIN_VOLT + selector * step_mv) * 1000;
+}
+
+/* Regulator ops (except VRTC) */
+static struct regulator_ops tps65910_ops_dcdc = {
+       .is_enabled             = tps65910_is_enabled,
+       .enable                 = tps65910_enable,
+       .disable                = tps65910_disable,
+       .set_mode               = tps65910_set_mode,
+       .get_mode               = tps65910_get_mode,
+       .get_voltage            = tps65910_get_voltage_dcdc,
+       .set_voltage_sel        = tps65910_set_voltage_dcdc,
+       .list_voltage           = tps65910_list_voltage_dcdc,
+};
+
+static struct regulator_ops tps65910_ops_vdd3 = {
+       .is_enabled             = tps65910_is_enabled,
+       .enable                 = tps65910_enable,
+       .disable                = tps65910_disable,
+       .set_mode               = tps65910_set_mode,
+       .get_mode               = tps65910_get_mode,
+       .get_voltage            = tps65910_get_voltage_vdd3,
+       .list_voltage           = tps65910_list_voltage,
+};
+
+static struct regulator_ops tps65910_ops = {
+       .is_enabled             = tps65910_is_enabled,
+       .enable                 = tps65910_enable,
+       .disable                = tps65910_disable,
+       .set_mode               = tps65910_set_mode,
+       .get_mode               = tps65910_get_mode,
+       .get_voltage            = tps65910_get_voltage,
+       .set_voltage_sel        = tps65910_set_voltage,
+       .list_voltage           = tps65910_list_voltage,
+};
+
+static struct regulator_ops tps65911_ops = {
+       .is_enabled             = tps65910_is_enabled,
+       .enable                 = tps65910_enable,
+       .disable                = tps65910_disable,
+       .set_mode               = tps65910_set_mode,
+       .get_mode               = tps65910_get_mode,
+       .get_voltage            = tps65911_get_voltage,
+       .set_voltage_sel        = tps65911_set_voltage,
+       .list_voltage           = tps65911_list_voltage,
+};
+
+static __devinit int tps65910_probe(struct platform_device *pdev)
+{
+       struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
+       struct tps_info *info;
+       struct regulator_init_data *reg_data;
+       struct regulator_dev *rdev;
+       struct tps65910_reg *pmic;
+       struct tps65910_board *pmic_plat_data;
+       int i, err;
+
+       pmic_plat_data = dev_get_platdata(tps65910->dev);
+       if (!pmic_plat_data)
+               return -EINVAL;
+
+       reg_data = pmic_plat_data->tps65910_pmic_init_data;
+
+       pmic = kzalloc(sizeof(*pmic), GFP_KERNEL);
+       if (!pmic)
+               return -ENOMEM;
+
+       mutex_init(&pmic->mutex);
+       pmic->mfd = tps65910;
+       platform_set_drvdata(pdev, pmic);
+
+       /* Give control of all register to control port */
+       tps65910_set_bits(pmic->mfd, TPS65910_DEVCTRL,
+                               DEVCTRL_SR_CTL_I2C_SEL_MASK);
+
+       switch (tps65910_chip_id(tps65910)) {
+       case TPS65910:
+               pmic->get_ctrl_reg = &tps65910_get_ctrl_register;
+               info = tps65910_regs;
+               break;
+       case TPS65911:
+               pmic->get_ctrl_reg = &tps65911_get_ctrl_register;
+               info = tps65911_regs;
+               break;
+       default:
+               pr_err("Invalid tps chip version\n");
+               kfree(pmic);
+               return -ENODEV;
+       }
+
+       for (i = 0; i < TPS65910_NUM_REGULATOR; i++, info++, reg_data++) {
+               /* Register the regulators */
+               pmic->info[i] = info;
+
+               pmic->desc[i].name = info->name;
+               pmic->desc[i].id = i;
+               pmic->desc[i].n_voltages = info->table_len;
+
+               if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) {
+                       pmic->desc[i].ops = &tps65910_ops_dcdc;
+               } else if (i == TPS65910_REG_VDD3) {
+                       if (tps65910_chip_id(tps65910) == TPS65910)
+                               pmic->desc[i].ops = &tps65910_ops_vdd3;
+                       else
+                               pmic->desc[i].ops = &tps65910_ops_dcdc;
+               } else {
+                       if (tps65910_chip_id(tps65910) == TPS65910)
+                               pmic->desc[i].ops = &tps65910_ops;
+                       else
+                               pmic->desc[i].ops = &tps65911_ops;
+               }
+
+               pmic->desc[i].type = REGULATOR_VOLTAGE;
+               pmic->desc[i].owner = THIS_MODULE;
+
+               rdev = regulator_register(&pmic->desc[i],
+                               tps65910->dev, reg_data, pmic);
+               if (IS_ERR(rdev)) {
+                       dev_err(tps65910->dev,
+                               "failed to register %s regulator\n",
+                               pdev->name);
+                       err = PTR_ERR(rdev);
+                       goto err;
+               }
+
+               /* Save regulator for cleanup */
+               pmic->rdev[i] = rdev;
+       }
+       return 0;
+
+err:
+       while (--i >= 0)
+               regulator_unregister(pmic->rdev[i]);
+
+       kfree(pmic);
+       return err;
+}
+
+static int __devexit tps65910_remove(struct platform_device *pdev)
+{
+       struct tps65910_reg *tps65910_reg = platform_get_drvdata(pdev);
+       int i;
+
+       for (i = 0; i < TPS65910_NUM_REGULATOR; i++)
+               regulator_unregister(tps65910_reg->rdev[i]);
+
+       kfree(tps65910_reg);
+       return 0;
+}
+
+static struct platform_driver tps65910_driver = {
+       .driver = {
+               .name = "tps65910-pmic",
+               .owner = THIS_MODULE,
+       },
+       .probe = tps65910_probe,
+       .remove = __devexit_p(tps65910_remove),
+};
+
+static int __init tps65910_init(void)
+{
+       return platform_driver_register(&tps65910_driver);
+}
+subsys_initcall(tps65910_init);
+
+static void __exit tps65910_cleanup(void)
+{
+       platform_driver_unregister(&tps65910_driver);
+}
+module_exit(tps65910_cleanup);
+
+MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
+MODULE_DESCRIPTION("TPS65910/TPS65911 voltage regulator driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:tps65910-pmic");
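
The probe above expects the MFD parent's platform data, struct tps65910_board, to carry one regulator_init_data per regulator and walks it in TPS65910_REG_* order. A minimal board-side sketch under that assumption (the struct layout is taken to match <linux/mfd/tps65910.h>; the 13-entry count mirrors TPS65910_NUM_REGULATOR above and the array contents are left to the board):

    static struct regulator_init_data board_reg_data[13] = {
            /* [0] = VRTC, [1] = VIO, [2] = VDD1, ... one entry per TPS65910_REG_* id */
    };

    static struct tps65910_board board_pmic_data = {
            .tps65910_pmic_init_data = board_reg_data,
    };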
index 6a292852a3583899a1d1c1a2c3236e76aea6d118..87fe0f75a56eed01375e0e2a393dbea6c1cd98b2 100644 (file)
@@ -51,8 +51,13 @@ struct twlreg_info {
        u16                     min_mV;
        u16                     max_mV;
 
+       u8                      flags;
+
        /* used by regulator core */
        struct regulator_desc   desc;
+
+       /* chip specific features */
+       unsigned long           features;
 };
 
 
@@ -70,12 +75,35 @@ struct twlreg_info {
 #define VREG_TRANS             1
 #define VREG_STATE             2
 #define VREG_VOLTAGE           3
+#define VREG_VOLTAGE_SMPS      4
 /* TWL6030 Misc register offsets */
 #define VREG_BC_ALL            1
 #define VREG_BC_REF            2
 #define VREG_BC_PROC           3
 #define VREG_BC_CLK_RST                4
 
+/* TWL6030 LDO register values for CFG_STATE */
+#define TWL6030_CFG_STATE_OFF  0x00
+#define TWL6030_CFG_STATE_ON   0x01
+#define TWL6030_CFG_STATE_OFF2 0x02
+#define TWL6030_CFG_STATE_SLEEP        0x03
+#define TWL6030_CFG_STATE_GRP_SHIFT    5
+#define TWL6030_CFG_STATE_APP_SHIFT    2
+#define TWL6030_CFG_STATE_APP_MASK     (0x03 << TWL6030_CFG_STATE_APP_SHIFT)
+#define TWL6030_CFG_STATE_APP(v)       (((v) & TWL6030_CFG_STATE_APP_MASK) >>\
+                                               TWL6030_CFG_STATE_APP_SHIFT)
+
+/* Flags for SMPS Voltage reading */
+#define SMPS_OFFSET_EN         BIT(0)
+#define SMPS_EXTENDED_EN       BIT(1)
+
+/* twl6025 SMPS EPROM values */
+#define TWL6030_SMPS_OFFSET            0xB0
+#define TWL6030_SMPS_MULT              0xB3
+#define SMPS_MULTOFFSET_SMPS4  BIT(0)
+#define SMPS_MULTOFFSET_VIO    BIT(1)
+#define SMPS_MULTOFFSET_SMPS3  BIT(6)
+
 static inline int
 twlreg_read(struct twlreg_info *info, unsigned slave_subgp, unsigned offset)
 {
@@ -118,21 +146,38 @@ static int twlreg_grp(struct regulator_dev *rdev)
 #define P2_GRP_6030    BIT(1)          /* "peripherals" */
 #define P1_GRP_6030    BIT(0)          /* CPU/Linux */
 
-static int twlreg_is_enabled(struct regulator_dev *rdev)
+static int twl4030reg_is_enabled(struct regulator_dev *rdev)
 {
        int     state = twlreg_grp(rdev);
 
        if (state < 0)
                return state;
 
-       if (twl_class_is_4030())
-               state &= P1_GRP_4030;
+       return state & P1_GRP_4030;
+}
+
+static int twl6030reg_is_enabled(struct regulator_dev *rdev)
+{
+       struct twlreg_info      *info = rdev_get_drvdata(rdev);
+       int                     grp = 0, val;
+
+       if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
+               grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
+       if (grp < 0)
+               return grp;
+
+       if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
+               grp &= P1_GRP_6030;
        else
-               state &= P1_GRP_6030;
-       return state;
+               grp = 1;
+
+       val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
+       val = TWL6030_CFG_STATE_APP(val);
+
+       return grp && (val == TWL6030_CFG_STATE_ON);
 }
 
-static int twlreg_enable(struct regulator_dev *rdev)
+static int twl4030reg_enable(struct regulator_dev *rdev)
 {
        struct twlreg_info      *info = rdev_get_drvdata(rdev);
        int                     grp;
@@ -142,10 +187,7 @@ static int twlreg_enable(struct regulator_dev *rdev)
        if (grp < 0)
                return grp;
 
-       if (twl_class_is_4030())
-               grp |= P1_GRP_4030;
-       else
-               grp |= P1_GRP_6030;
+       grp |= P1_GRP_4030;
 
        ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
 
@@ -154,29 +196,63 @@ static int twlreg_enable(struct regulator_dev *rdev)
        return ret;
 }
 
-static int twlreg_disable(struct regulator_dev *rdev)
+static int twl6030reg_enable(struct regulator_dev *rdev)
+{
+       struct twlreg_info      *info = rdev_get_drvdata(rdev);
+       int                     grp = 0;
+       int                     ret;
+
+       if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
+               grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
+       if (grp < 0)
+               return grp;
+
+       ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
+                       grp << TWL6030_CFG_STATE_GRP_SHIFT |
+                       TWL6030_CFG_STATE_ON);
+
+       udelay(info->delay);
+
+       return ret;
+}
+
+static int twl4030reg_disable(struct regulator_dev *rdev)
 {
        struct twlreg_info      *info = rdev_get_drvdata(rdev);
        int                     grp;
+       int                     ret;
 
        grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
        if (grp < 0)
                return grp;
 
-       if (twl_class_is_4030())
-               grp &= ~(P1_GRP_4030 | P2_GRP_4030 | P3_GRP_4030);
-       else
-               grp &= ~(P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030);
+       grp &= ~(P1_GRP_4030 | P2_GRP_4030 | P3_GRP_4030);
 
-       return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
+       ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);
+
+       return ret;
 }
 
-static int twlreg_get_status(struct regulator_dev *rdev)
+static int twl6030reg_disable(struct regulator_dev *rdev)
 {
-       int     state = twlreg_grp(rdev);
+       struct twlreg_info      *info = rdev_get_drvdata(rdev);
+       int                     grp = 0;
+       int                     ret;
+
+       if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
+               grp = P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030;
+
+       /* For 6030, set the off state for all grps enabled */
+       ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
+                       (grp) << TWL6030_CFG_STATE_GRP_SHIFT |
+                       TWL6030_CFG_STATE_OFF);
+
+       return ret;
+}
 
-       if (twl_class_is_6030())
-               return 0; /* FIXME return for 6030 regulator */
+static int twl4030reg_get_status(struct regulator_dev *rdev)
+{
+       int     state = twlreg_grp(rdev);
 
        if (state < 0)
                return state;
@@ -190,15 +266,39 @@ static int twlreg_get_status(struct regulator_dev *rdev)
                : REGULATOR_STATUS_STANDBY;
 }
 
-static int twlreg_set_mode(struct regulator_dev *rdev, unsigned mode)
+static int twl6030reg_get_status(struct regulator_dev *rdev)
+{
+       struct twlreg_info      *info = rdev_get_drvdata(rdev);
+       int                     val;
+
+       val = twlreg_grp(rdev);
+       if (val < 0)
+               return val;
+
+       val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
+
+       switch (TWL6030_CFG_STATE_APP(val)) {
+       case TWL6030_CFG_STATE_ON:
+               return REGULATOR_STATUS_NORMAL;
+
+       case TWL6030_CFG_STATE_SLEEP:
+               return REGULATOR_STATUS_STANDBY;
+
+       case TWL6030_CFG_STATE_OFF:
+       case TWL6030_CFG_STATE_OFF2:
+       default:
+               break;
+       }
+
+       return REGULATOR_STATUS_OFF;
+}
+
+static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
 {
        struct twlreg_info      *info = rdev_get_drvdata(rdev);
        unsigned                message;
        int                     status;
 
-       if (twl_class_is_6030())
-               return 0; /* FIXME return for 6030 regulator */
-
        /* We can only set the mode through state machine commands... */
        switch (mode) {
        case REGULATOR_MODE_NORMAL:
@@ -227,6 +327,36 @@ static int twlreg_set_mode(struct regulator_dev *rdev, unsigned mode)
                        message & 0xff, TWL4030_PM_MASTER_PB_WORD_LSB);
 }
 
+static int twl6030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
+{
+       struct twlreg_info      *info = rdev_get_drvdata(rdev);
+       int grp = 0;
+       int val;
+
+       if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS)))
+               grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP);
+
+       if (grp < 0)
+               return grp;
+
+       /* Compose the state register settings */
+       val = grp << TWL6030_CFG_STATE_GRP_SHIFT;
+       /* We can only set the mode through state machine commands... */
+       switch (mode) {
+       case REGULATOR_MODE_NORMAL:
+               val |= TWL6030_CFG_STATE_ON;
+               break;
+       case REGULATOR_MODE_STANDBY:
+               val |= TWL6030_CFG_STATE_SLEEP;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE, val);
+}
+
 /*----------------------------------------------------------------------*/
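
The TWL6030 paths drive each regulator through its CFG_STATE register: the group bits go into the upper field and the requested state into the low bits. Working the defines above through for a regulator owned by the application-processor group P1:

    /* enable:  (P1_GRP_6030 << TWL6030_CFG_STATE_GRP_SHIFT) | TWL6030_CFG_STATE_ON    = (1 << 5) | 0x01 = 0x21 */
    /* standby: (P1_GRP_6030 << TWL6030_CFG_STATE_GRP_SHIFT) | TWL6030_CFG_STATE_SLEEP = (1 << 5) | 0x03 = 0x23 */

is_enabled() and get_status() read the same register back through TWL6030_CFG_STATE_APP(), i.e. bits 3:2, which this code treats as the state the chip actually applied for the application group (an inference from the macro, not spelled out in the patch).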
 
 /*
@@ -375,13 +505,13 @@ static struct regulator_ops twl4030ldo_ops = {
        .set_voltage    = twl4030ldo_set_voltage,
        .get_voltage    = twl4030ldo_get_voltage,
 
-       .enable         = twlreg_enable,
-       .disable        = twlreg_disable,
-       .is_enabled     = twlreg_is_enabled,
+       .enable         = twl4030reg_enable,
+       .disable        = twl4030reg_disable,
+       .is_enabled     = twl4030reg_is_enabled,
 
-       .set_mode       = twlreg_set_mode,
+       .set_mode       = twl4030reg_set_mode,
 
-       .get_status     = twlreg_get_status,
+       .get_status     = twl4030reg_get_status,
 };
 
 static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
@@ -433,13 +563,13 @@ static struct regulator_ops twl6030ldo_ops = {
        .set_voltage    = twl6030ldo_set_voltage,
        .get_voltage    = twl6030ldo_get_voltage,
 
-       .enable         = twlreg_enable,
-       .disable        = twlreg_disable,
-       .is_enabled     = twlreg_is_enabled,
+       .enable         = twl6030reg_enable,
+       .disable        = twl6030reg_disable,
+       .is_enabled     = twl6030reg_is_enabled,
 
-       .set_mode       = twlreg_set_mode,
+       .set_mode       = twl6030reg_set_mode,
 
-       .get_status     = twlreg_get_status,
+       .get_status     = twl6030reg_get_status,
 };
 
 /*----------------------------------------------------------------------*/
@@ -461,25 +591,242 @@ static int twlfixed_get_voltage(struct regulator_dev *rdev)
        return info->min_mV * 1000;
 }
 
-static struct regulator_ops twlfixed_ops = {
+static struct regulator_ops twl4030fixed_ops = {
+       .list_voltage   = twlfixed_list_voltage,
+
+       .get_voltage    = twlfixed_get_voltage,
+
+       .enable         = twl4030reg_enable,
+       .disable        = twl4030reg_disable,
+       .is_enabled     = twl4030reg_is_enabled,
+
+       .set_mode       = twl4030reg_set_mode,
+
+       .get_status     = twl4030reg_get_status,
+};
+
+static struct regulator_ops twl6030fixed_ops = {
        .list_voltage   = twlfixed_list_voltage,
 
        .get_voltage    = twlfixed_get_voltage,
 
-       .enable         = twlreg_enable,
-       .disable        = twlreg_disable,
-       .is_enabled     = twlreg_is_enabled,
+       .enable         = twl6030reg_enable,
+       .disable        = twl6030reg_disable,
+       .is_enabled     = twl6030reg_is_enabled,
 
-       .set_mode       = twlreg_set_mode,
+       .set_mode       = twl6030reg_set_mode,
 
-       .get_status     = twlreg_get_status,
+       .get_status     = twl6030reg_get_status,
 };
 
 static struct regulator_ops twl6030_fixed_resource = {
-       .enable         = twlreg_enable,
-       .disable        = twlreg_disable,
-       .is_enabled     = twlreg_is_enabled,
-       .get_status     = twlreg_get_status,
+       .enable         = twl6030reg_enable,
+       .disable        = twl6030reg_disable,
+       .is_enabled     = twl6030reg_is_enabled,
+       .get_status     = twl6030reg_get_status,
+};
+
+/*
+ * SMPS status and control
+ */
+
+static int twl6030smps_list_voltage(struct regulator_dev *rdev, unsigned index)
+{
+       struct twlreg_info      *info = rdev_get_drvdata(rdev);
+
+       int voltage = 0;
+
+       switch (info->flags) {
+       case SMPS_OFFSET_EN:
+               voltage = 100000;
+               /* fall through */
+       case 0:
+               switch (index) {
+               case 0:
+                       voltage = 0;
+                       break;
+               case 58:
+                       voltage = 1350 * 1000;
+                       break;
+               case 59:
+                       voltage = 1500 * 1000;
+                       break;
+               case 60:
+                       voltage = 1800 * 1000;
+                       break;
+               case 61:
+                       voltage = 1900 * 1000;
+                       break;
+               case 62:
+                       voltage = 2100 * 1000;
+                       break;
+               default:
+                       voltage += (600000 + (12500 * (index - 1)));
+               }
+               break;
+       case SMPS_EXTENDED_EN:
+               switch (index) {
+               case 0:
+                       voltage = 0;
+                       break;
+               case 58:
+                       voltage = 2084 * 1000;
+                       break;
+               case 59:
+                       voltage = 2315 * 1000;
+                       break;
+               case 60:
+                       voltage = 2778 * 1000;
+                       break;
+               case 61:
+                       voltage = 2932 * 1000;
+                       break;
+               case 62:
+                       voltage = 3241 * 1000;
+                       break;
+               default:
+                       voltage = (1852000 + (38600 * (index - 1)));
+               }
+               break;
+       case SMPS_OFFSET_EN | SMPS_EXTENDED_EN:
+               switch (index) {
+               case 0:
+                       voltage = 0;
+                       break;
+               case 58:
+                       voltage = 4167 * 1000;
+                       break;
+               case 59:
+                       voltage = 2315 * 1000;
+                       break;
+               case 60:
+                       voltage = 2778 * 1000;
+                       break;
+               case 61:
+                       voltage = 2932 * 1000;
+                       break;
+               case 62:
+                       voltage = 3241 * 1000;
+                       break;
+               default:
+                       voltage = (2161000 + (38600 * (index - 1)));
+               }
+               break;
+       }
+
+       return voltage;
+}
+
+static int
+twl6030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
+                       unsigned int *selector)
+{
+       struct twlreg_info      *info = rdev_get_drvdata(rdev);
+       int     vsel = 0;
+
+       switch (info->flags) {
+       case 0:
+               if (min_uV == 0)
+                       vsel = 0;
+               else if ((min_uV >= 600000) && (max_uV <= 1300000)) {
+                       vsel = (min_uV - 600000) / 125;
+                       if (vsel % 100)
+                               vsel += 100;
+                       vsel /= 100;
+                       vsel++;
+               }
+               /* Values 1..57 for vsel are linear and can be calculated;
+                * values 58..62 are non-linear.
+                */
+               else if ((min_uV > 1900000) && (max_uV >= 2100000))
+                       vsel = 62;
+               else if ((min_uV > 1800000) && (max_uV >= 1900000))
+                       vsel = 61;
+               else if ((min_uV > 1500000) && (max_uV >= 1800000))
+                       vsel = 60;
+               else if ((min_uV > 1350000) && (max_uV >= 1500000))
+                       vsel = 59;
+               else if ((min_uV > 1300000) && (max_uV >= 1350000))
+                       vsel = 58;
+               else
+                       return -EINVAL;
+               break;
+       case SMPS_OFFSET_EN:
+               if (min_uV == 0)
+                       vsel = 0;
+               else if ((min_uV >= 700000) && (max_uV <= 1420000)) {
+                       vsel = (min_uV - 700000) / 125;
+                       if (vsel % 100)
+                               vsel += 100;
+                       vsel /= 100;
+                       vsel++;
+               }
+               /* Values 1..57 for vsel are linear and can be calculated;
+                * values 58..62 are non-linear.
+                */
+               else if ((min_uV > 1900000) && (max_uV >= 2100000))
+                       vsel = 62;
+               else if ((min_uV > 1800000) && (max_uV >= 1900000))
+                       vsel = 61;
+               else if ((min_uV > 1350000) && (max_uV >= 1800000))
+                       vsel = 60;
+               else if ((min_uV > 1350000) && (max_uV >= 1500000))
+                       vsel = 59;
+               else if ((min_uV > 1300000) && (max_uV >= 1350000))
+                       vsel = 58;
+               else
+                       return -EINVAL;
+               break;
+       case SMPS_EXTENDED_EN:
+               if (min_uV == 0)
+                       vsel = 0;
+               else if ((min_uV >= 1852000) && (max_uV <= 4013600)) {
+                       vsel = (min_uV - 1852000) / 386;
+                       if (vsel % 100)
+                               vsel += 100;
+                       vsel /= 100;
+                       vsel++;
+               }
+               break;
+       case SMPS_OFFSET_EN|SMPS_EXTENDED_EN:
+               if (min_uV == 0)
+                       vsel = 0;
+               else if ((min_uV >= 2161000) && (max_uV <= 4321000)) {
+                       vsel = (min_uV - 1852000) / 386;
+                       if (vsel % 100)
+                               vsel += 100;
+                       vsel /= 100;
+                       vsel++;
+               }
+               break;
+       }
+
+       *selector = vsel;
+
+       return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS,
+                                                       vsel);
+}
+
+static int twl6030smps_get_voltage_sel(struct regulator_dev *rdev)
+{
+       struct twlreg_info      *info = rdev_get_drvdata(rdev);
+
+       return twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS);
+}
+
+static struct regulator_ops twlsmps_ops = {
+       .list_voltage           = twl6030smps_list_voltage,
+
+       .set_voltage            = twl6030smps_set_voltage,
+       .get_voltage_sel        = twl6030smps_get_voltage_sel,
+
+       .enable                 = twl6030reg_enable,
+       .disable                = twl6030reg_disable,
+       .is_enabled             = twl6030reg_is_enabled,
+
+       .set_mode               = twl6030reg_set_mode,
+
+       .get_status             = twl6030reg_get_status,
 };
 
 /*----------------------------------------------------------------------*/
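
For illustration only, not part of this commit: the /125, %100, /100 sequence in twl6030smps_set_voltage() above rounds a requested voltage up to the next 12.5 mV step of the linear 1..57 selector range (600 mV base, or 700 mV with SMPS_OFFSET_EN). The helper and sample value below are hypothetical, standalone stand-ins for that arithmetic.

#include <stdio.h>

/* Round min_uV up to the next 12.5 mV step above base_uV, mirroring the
 * driver's /125, %100, /100 sequence (125 uV units, then 12.5 mV steps).
 * Selector 1 corresponds to base_uV itself.
 */
static int linear_vsel(int min_uV, int base_uV)
{
	int vsel = (min_uV - base_uV) / 125;

	if (vsel % 100)
		vsel += 100;
	return vsel / 100 + 1;
}

int main(void)
{
	/* 1.2125 V against the 600 mV base selects code 50 (600 mV + 49 * 12.5 mV) */
	printf("vsel = %d\n", linear_vsel(1212500, 600000));
	return 0;
}
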
@@ -487,11 +834,10 @@ static struct regulator_ops twl6030_fixed_resource = {
 #define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
                        remap_conf) \
                TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
-                       remap_conf, TWL4030)
-#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
-                       remap_conf) \
+                       remap_conf, TWL4030, twl4030fixed_ops)
+#define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay) \
                TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
-                       remap_conf, TWL6030)
+                       0x0, TWL6030, twl6030fixed_ops)
 
 #define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \
        .base = offset, \
@@ -510,13 +856,11 @@ static struct regulator_ops twl6030_fixed_resource = {
                }, \
        }
 
-#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num, \
-               remap_conf) { \
+#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \
        .base = offset, \
        .id = num, \
        .min_mV = min_mVolts, \
        .max_mV = max_mVolts, \
-       .remap = remap_conf, \
        .desc = { \
                .name = #label, \
                .id = TWL6030_REG_##label, \
@@ -527,9 +871,23 @@ static struct regulator_ops twl6030_fixed_resource = {
                }, \
        }
 
+#define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \
+       .base = offset, \
+       .id = num, \
+       .min_mV = min_mVolts, \
+       .max_mV = max_mVolts, \
+       .desc = { \
+               .name = #label, \
+               .id = TWL6025_REG_##label, \
+               .n_voltages = ((max_mVolts - min_mVolts)/100) + 1, \
+               .ops = &twl6030ldo_ops, \
+               .type = REGULATOR_VOLTAGE, \
+               .owner = THIS_MODULE, \
+               }, \
+       }
 
 #define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \
-               family) { \
+               family, operations) { \
        .base = offset, \
        .id = num, \
        .min_mV = mVolts, \
@@ -539,17 +897,16 @@ static struct regulator_ops twl6030_fixed_resource = {
                .name = #label, \
                .id = family##_REG_##label, \
                .n_voltages = 1, \
-               .ops = &twlfixed_ops, \
+               .ops = &operations, \
                .type = REGULATOR_VOLTAGE, \
                .owner = THIS_MODULE, \
                }, \
        }
 
-#define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay, remap_conf) { \
+#define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay) { \
        .base = offset, \
        .id = num, \
        .delay = turnon_delay, \
-       .remap = remap_conf, \
        .desc = { \
                .name = #label, \
                .id = TWL6030_REG_##label, \
@@ -559,6 +916,21 @@ static struct regulator_ops twl6030_fixed_resource = {
                }, \
        }
 
+#define TWL6025_ADJUSTABLE_SMPS(label, offset, num) { \
+       .base = offset, \
+       .id = num, \
+       .min_mV = 600, \
+       .max_mV = 2100, \
+       .desc = { \
+               .name = #label, \
+               .id = TWL6025_REG_##label, \
+               .n_voltages = 63, \
+               .ops = &twlsmps_ops, \
+               .type = REGULATOR_VOLTAGE, \
+               .owner = THIS_MODULE, \
+               }, \
+       }
+
 /*
  * We list regulators here if systems need some level of
  * software control over them after boot.
@@ -589,19 +961,52 @@ static struct twlreg_info twl_regs[] = {
        /* 6030 REG with base as PMC Slave Misc : 0x0030 */
        /* Turnon-delay and remap configuration values for 6030 are not
           verified since the specification is not public */
-       TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1, 0x21),
-       TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2, 0x21),
-       TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3, 0x21),
-       TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4, 0x21),
-       TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5, 0x21),
-       TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7, 0x21),
-       TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x21),
-       TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x21),
-       TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x21),
-       TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x21),
-       TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0, 0x21),
+       TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1),
+       TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2),
+       TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3),
+       TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4),
+       TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5),
+       TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7),
+       TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0),
+       TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0),
+       TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0),
+       TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0),
+       TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0),
+
+       /* 6025 regulators are renamed compared to the 6030 versions */
+       TWL6025_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300, 1),
+       TWL6025_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300, 2),
+       TWL6025_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300, 3),
+       TWL6025_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300, 4),
+       TWL6025_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300, 5),
+       TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300, 7),
+       TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300, 16),
+       TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300, 17),
+       TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300, 18),
+
+       TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34, 1),
+       TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10, 2),
+       TWL6025_ADJUSTABLE_SMPS(VIO, 0x16, 3),
 };
 
+static u8 twl_get_smps_offset(void)
+{
+       u8 value;
+
+       twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value,
+                       TWL6030_SMPS_OFFSET);
+       return value;
+}
+
+static u8 twl_get_smps_mult(void)
+{
+       u8 value;
+
+       twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value,
+                       TWL6030_SMPS_MULT);
+       return value;
+}
+
 static int __devinit twlreg_probe(struct platform_device *pdev)
 {
        int                             i;
@@ -623,6 +1028,9 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
        if (!initdata)
                return -EINVAL;
 
+       /* copy the features into regulator data */
+       info->features = (unsigned long)initdata->driver_data;
+
        /* Constrain board-specific capabilities according to what
         * this driver and the chip itself can actually do.
         */
@@ -645,6 +1053,27 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
                break;
        }
 
+       switch (pdev->id) {
+       case TWL6025_REG_SMPS3:
+               if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS3)
+                       info->flags |= SMPS_EXTENDED_EN;
+               if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS3)
+                       info->flags |= SMPS_OFFSET_EN;
+               break;
+       case TWL6025_REG_SMPS4:
+               if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS4)
+                       info->flags |= SMPS_EXTENDED_EN;
+               if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS4)
+                       info->flags |= SMPS_OFFSET_EN;
+               break;
+       case TWL6025_REG_VIO:
+               if (twl_get_smps_mult() & SMPS_MULTOFFSET_VIO)
+                       info->flags |= SMPS_EXTENDED_EN;
+               if (twl_get_smps_offset() & SMPS_MULTOFFSET_VIO)
+                       info->flags |= SMPS_OFFSET_EN;
+               break;
+       }
+
        rdev = regulator_register(&info->desc, &pdev->dev, initdata, info);
        if (IS_ERR(rdev)) {
                dev_err(&pdev->dev, "can't register %s, %ld\n",
@@ -653,7 +1082,8 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
        }
        platform_set_drvdata(pdev, rdev);
 
-       twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP,
+       if (twl_class_is_4030())
+               twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP,
                                                info->remap);
 
        /* NOTE:  many regulators support short-circuit IRQs (presentable
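
For illustration only, not part of this commit: the SMPS_OFFSET_EN / SMPS_EXTENDED_EN flags set in twlreg_probe() above pick one of four linear windows for selectors 1..57, with the constants taken from twl6030smps_list_voltage(). The macro values and helper below are stand-ins for the sketch, not the driver's definitions.

#include <stdio.h>

#define SMPS_OFFSET_EN		(1 << 0)	/* placeholder bit values for the sketch */
#define SMPS_EXTENDED_EN	(1 << 1)

/* Linear part (selectors 1..57) of the four ranges:
 *	flags				base (uV)	step (uV)
 *	0				  600000	12500
 *	SMPS_OFFSET_EN			  700000	12500
 *	SMPS_EXTENDED_EN		 1852000	38600
 *	OFFSET | EXTENDED		 2161000	38600
 */
static int smps_linear_uV(unsigned long flags, int sel)
{
	int base = (flags & SMPS_EXTENDED_EN) ? 1852000 : 600000;
	int step = (flags & SMPS_EXTENDED_EN) ? 38600 : 12500;

	if (flags & SMPS_OFFSET_EN)
		base += (flags & SMPS_EXTENDED_EN) ? 309000 : 100000;

	return base + step * (sel - 1);
}

int main(void)
{
	printf("%d uV\n", smps_linear_uV(SMPS_OFFSET_EN, 1));	/* 700000 */
	return 0;
}
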
index e93453b1b9788020646ca8e1a81964567d99710d..a0982e80985198004ae990218c38718d8616eec6 100644 (file)
@@ -600,7 +600,6 @@ err:
 static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
 {
        struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
-       struct wm831x *wm831x = dcdc->wm831x;
 
        platform_set_drvdata(pdev, NULL);
 
@@ -776,7 +775,6 @@ err:
 static __devexit int wm831x_buckp_remove(struct platform_device *pdev)
 {
        struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
-       struct wm831x *wm831x = dcdc->wm831x;
 
        platform_set_drvdata(pdev, NULL);
 
index b42d01cef35a62b87bfcbb5c5cd12d2b65d659cb..0f12c70bebc9cd49ec51dc5f6e64997e0366aaff 100644 (file)
@@ -55,7 +55,7 @@ static int wm8400_ldo_list_voltage(struct regulator_dev *dev,
                return 1600000 + ((selector - 14) * 100000);
 }
 
-static int wm8400_ldo_get_voltage(struct regulator_dev *dev)
+static int wm8400_ldo_get_voltage_sel(struct regulator_dev *dev)
 {
        struct wm8400 *wm8400 = rdev_get_drvdata(dev);
        u16 val;
@@ -63,7 +63,7 @@ static int wm8400_ldo_get_voltage(struct regulator_dev *dev)
        val = wm8400_reg_read(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev));
        val &= WM8400_LDO1_VSEL_MASK;
 
-       return wm8400_ldo_list_voltage(dev, val);
+       return val;
 }
 
 static int wm8400_ldo_set_voltage(struct regulator_dev *dev,
@@ -104,7 +104,7 @@ static struct regulator_ops wm8400_ldo_ops = {
        .enable = wm8400_ldo_enable,
        .disable = wm8400_ldo_disable,
        .list_voltage = wm8400_ldo_list_voltage,
-       .get_voltage = wm8400_ldo_get_voltage,
+       .get_voltage_sel = wm8400_ldo_get_voltage_sel,
        .set_voltage = wm8400_ldo_set_voltage,
 };
 
@@ -145,7 +145,7 @@ static int wm8400_dcdc_list_voltage(struct regulator_dev *dev,
        return 850000 + (selector * 25000);
 }
 
-static int wm8400_dcdc_get_voltage(struct regulator_dev *dev)
+static int wm8400_dcdc_get_voltage_sel(struct regulator_dev *dev)
 {
        struct wm8400 *wm8400 = rdev_get_drvdata(dev);
        u16 val;
@@ -154,7 +154,7 @@ static int wm8400_dcdc_get_voltage(struct regulator_dev *dev)
        val = wm8400_reg_read(wm8400, WM8400_DCDC1_CONTROL_1 + offset);
        val &= WM8400_DC1_VSEL_MASK;
 
-       return 850000 + (25000 * val);
+       return val;
 }
 
 static int wm8400_dcdc_set_voltage(struct regulator_dev *dev,
@@ -261,7 +261,7 @@ static struct regulator_ops wm8400_dcdc_ops = {
        .enable = wm8400_dcdc_enable,
        .disable = wm8400_dcdc_disable,
        .list_voltage = wm8400_dcdc_list_voltage,
-       .get_voltage = wm8400_dcdc_get_voltage,
+       .get_voltage_sel = wm8400_dcdc_get_voltage_sel,
        .set_voltage = wm8400_dcdc_set_voltage,
        .get_mode = wm8400_dcdc_get_mode,
        .set_mode = wm8400_dcdc_set_mode,
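
For illustration only, not part of this commit: after the switch to .get_voltage_sel the WM8400 callbacks return the raw VSEL field, the selector-to-voltage mapping lives only in .list_voltage, and the regulator core is expected to combine the two. A standalone stand-in for the DCDC mapping:

#include <stdio.h>

/* Same mapping as wm8400_dcdc_list_voltage() in the hunk above. */
static int wm8400_dcdc_sel_to_uV(int sel)
{
	return 850000 + sel * 25000;
}

int main(void)
{
	printf("%d uV\n", wm8400_dcdc_sel_to_uV(6));	/* selector 6 -> 1.0 V */
	return 0;
}
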
index b8f4e9e66cd516c3987ba1983da791bc3d059d70..f822e13dc04b43ca0902e273c6060f1f838fbfdb 100644 (file)
@@ -125,6 +125,16 @@ comment "I2C RTC drivers"
 
 if I2C
 
+config RTC_DRV_88PM860X
+       tristate "Marvell 88PM860x"
+       depends on RTC_CLASS && I2C && MFD_88PM860X
+       help
+         If you say yes here you get support for the RTC function in
+         Marvell 88PM860x chips.
+
+         This driver can also be built as a module. If so, the module
+         will be called rtc-88pm860x.
+
 config RTC_DRV_DS1307
        tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025"
        help
@@ -351,12 +361,39 @@ config RTC_DRV_RX8025
          This driver can also be built as a module. If so, the module
          will be called rtc-rx8025.
 
+config RTC_DRV_EM3027
+       tristate "EM Microelectronic EM3027"
+       help
+         If you say yes here you get support for the EM
+         Microelectronic EM3027 RTC chips.
+
+         This driver can also be built as a module. If so, the module
+         will be called rtc-em3027.
+
+config RTC_DRV_RV3029C2
+       tristate "Micro Crystal RTC"
+       help
+         If you say yes here you get support for the Micro Crystal
+         RV3029-C2 RTC chips.
+
+         This driver can also be built as a module. If so, the module
+         will be called rtc-rv3029c2.
+
 endif # I2C
 
 comment "SPI RTC drivers"
 
 if SPI_MASTER
 
+config RTC_DRV_M41T93
+       tristate "ST M41T93"
+       help
+         If you say yes here you will get support for the
+         ST M41T93 SPI RTC chip.
+
+         This driver can also be built as a module. If so, the module
+         will be called rtc-m41t93.
+
 config RTC_DRV_M41T94
        tristate "ST M41T94"
        help
@@ -645,6 +682,14 @@ config RTC_DRV_WM8350
          This driver can also be built as a module. If so, the module
          will be called "rtc-wm8350".
 
+config RTC_DRV_SPEAR
+       tristate "SPEAR ST RTC"
+       depends on PLAT_SPEAR
+       default y
+       help
+         If you say Y here you will get support for the RTC found on
+         SPEAr platforms.
+
 config RTC_DRV_PCF50633
        depends on MFD_PCF50633
        tristate "NXP PCF50633 RTC"
@@ -874,6 +919,13 @@ config RTC_DRV_PXA
          This RTC driver uses PXA RTC registers available since pxa27x
          series (RDxR, RYxR) instead of legacy RCNR, RTAR.
 
+config RTC_DRV_VT8500
+       tristate "VIA/WonderMedia 85xx SoC RTC"
+       depends on ARCH_VT8500
+       help
+         If you say Y here you will get access to the real time clock
+         built into your VIA VT8500 SoC or its relatives.
+
 
 config RTC_DRV_SUN4V
        bool "SUN4V Hypervisor RTC"
index 9574748d1c7312ba50af8485a9f37f0e1c31afd6..213d725f16d464cc0cc7527a2041f2d468d899f8 100644 (file)
@@ -15,6 +15,7 @@ rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o
 
 # Keep the list ordered.
 
+obj-$(CONFIG_RTC_DRV_88PM860X)  += rtc-88pm860x.o
 obj-$(CONFIG_RTC_DRV_AB3100)   += rtc-ab3100.o
 obj-$(CONFIG_RTC_DRV_AB8500)   += rtc-ab8500.o
 obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
@@ -43,6 +44,7 @@ obj-$(CONFIG_RTC_DRV_DS1742)  += rtc-ds1742.o
 obj-$(CONFIG_RTC_DRV_DS3232)   += rtc-ds3232.o
 obj-$(CONFIG_RTC_DRV_DS3234)   += rtc-ds3234.o
 obj-$(CONFIG_RTC_DRV_EFI)      += rtc-efi.o
+obj-$(CONFIG_RTC_DRV_EM3027)   += rtc-em3027.o
 obj-$(CONFIG_RTC_DRV_EP93XX)   += rtc-ep93xx.o
 obj-$(CONFIG_RTC_DRV_FM3130)   += rtc-fm3130.o
 obj-$(CONFIG_RTC_DRV_GENERIC)  += rtc-generic.o
@@ -52,6 +54,7 @@ obj-$(CONFIG_RTC_DRV_ISL12022)        += rtc-isl12022.o
 obj-$(CONFIG_RTC_DRV_JZ4740)   += rtc-jz4740.o
 obj-$(CONFIG_RTC_DRV_LPC32XX)  += rtc-lpc32xx.o
 obj-$(CONFIG_RTC_DRV_M41T80)   += rtc-m41t80.o
+obj-$(CONFIG_RTC_DRV_M41T93)   += rtc-m41t93.o
 obj-$(CONFIG_RTC_DRV_M41T94)   += rtc-m41t94.o
 obj-$(CONFIG_RTC_DRV_M48T35)   += rtc-m48t35.o
 obj-$(CONFIG_RTC_DRV_M48T59)   += rtc-m48t59.o
@@ -81,12 +84,14 @@ obj-$(CONFIG_RTC_DRV_RP5C01)        += rtc-rp5c01.o
 obj-$(CONFIG_RTC_DRV_RS5C313)  += rtc-rs5c313.o
 obj-$(CONFIG_RTC_DRV_RS5C348)  += rtc-rs5c348.o
 obj-$(CONFIG_RTC_DRV_RS5C372)  += rtc-rs5c372.o
+obj-$(CONFIG_RTC_DRV_RV3029C2) += rtc-rv3029c2.o
 obj-$(CONFIG_RTC_DRV_RX8025)   += rtc-rx8025.o
 obj-$(CONFIG_RTC_DRV_RX8581)   += rtc-rx8581.o
 obj-$(CONFIG_RTC_DRV_S35390A)  += rtc-s35390a.o
 obj-$(CONFIG_RTC_DRV_S3C)      += rtc-s3c.o
 obj-$(CONFIG_RTC_DRV_SA1100)   += rtc-sa1100.o
 obj-$(CONFIG_RTC_DRV_SH)       += rtc-sh.o
+obj-$(CONFIG_RTC_DRV_SPEAR)    += rtc-spear.o
 obj-$(CONFIG_RTC_DRV_STARFIRE) += rtc-starfire.o
 obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o
 obj-$(CONFIG_RTC_DRV_STMP)     += rtc-stmp3xxx.o
@@ -98,6 +103,7 @@ obj-$(CONFIG_RTC_DRV_TWL4030)        += rtc-twl.o
 obj-$(CONFIG_RTC_DRV_TX4939)   += rtc-tx4939.o
 obj-$(CONFIG_RTC_DRV_V3020)    += rtc-v3020.o
 obj-$(CONFIG_RTC_DRV_VR41XX)   += rtc-vr41xx.o
+obj-$(CONFIG_RTC_DRV_VT8500)   += rtc-vt8500.o
 obj-$(CONFIG_RTC_DRV_WM831X)   += rtc-wm831x.o
 obj-$(CONFIG_RTC_DRV_WM8350)   += rtc-wm8350.o
 obj-$(CONFIG_RTC_DRV_X1205)    += rtc-x1205.o
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
new file mode 100644 (file)
index 0000000..64b847b
--- /dev/null
@@ -0,0 +1,427 @@
+/*
+ * Real Time Clock driver for Marvell 88PM860x PMIC
+ *
+ * Copyright (c) 2010 Marvell International Ltd.
+ * Author:     Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/rtc.h>
+#include <linux/delay.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/88pm860x.h>
+
+#define VRTC_CALIBRATION
+
+struct pm860x_rtc_info {
+       struct pm860x_chip      *chip;
+       struct i2c_client       *i2c;
+       struct rtc_device       *rtc_dev;
+       struct device           *dev;
+       struct delayed_work     calib_work;
+
+       int                     irq;
+       int                     vrtc;
+       int                     (*sync)(unsigned int ticks);
+};
+
+#define REG_VRTC_MEAS1         0x7D
+
+#define REG0_ADDR              0xB0
+#define REG1_ADDR              0xB2
+#define REG2_ADDR              0xB4
+#define REG3_ADDR              0xB6
+
+#define REG0_DATA              0xB1
+#define REG1_DATA              0xB3
+#define REG2_DATA              0xB5
+#define REG3_DATA              0xB7
+
+/* bit definitions of Measurement Enable Register 2 (0x51) */
+#define MEAS2_VRTC             (1 << 0)
+
+/* bit definitions of RTC Register 1 (0xA0) */
+#define ALARM_EN               (1 << 3)
+#define ALARM_WAKEUP           (1 << 4)
+#define ALARM                  (1 << 5)
+#define RTC1_USE_XO            (1 << 7)
+
+#define VRTC_CALIB_INTERVAL    (HZ * 60 * 10)          /* 10 minutes */
+
+static irqreturn_t rtc_update_handler(int irq, void *data)
+{
+       struct pm860x_rtc_info *info = (struct pm860x_rtc_info *)data;
+       int mask;
+
+       mask = ALARM | ALARM_WAKEUP;
+       pm860x_set_bits(info->i2c, PM8607_RTC1, mask | ALARM_EN, mask);
+       rtc_update_irq(info->rtc_dev, 1, RTC_AF);
+       return IRQ_HANDLED;
+}
+
+static int pm860x_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+       struct pm860x_rtc_info *info = dev_get_drvdata(dev);
+
+       if (enabled)
+               pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM, ALARM);
+       else
+               pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM, 0);
+       return 0;
+}
+
+/*
+ * Calculate the next alarm time given the requested alarm time mask
+ * and the current time.
+ */
+static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now,
+                               struct rtc_time *alrm)
+{
+       unsigned long next_time;
+       unsigned long now_time;
+
+       next->tm_year = now->tm_year;
+       next->tm_mon = now->tm_mon;
+       next->tm_mday = now->tm_mday;
+       next->tm_hour = alrm->tm_hour;
+       next->tm_min = alrm->tm_min;
+       next->tm_sec = alrm->tm_sec;
+
+       rtc_tm_to_time(now, &now_time);
+       rtc_tm_to_time(next, &next_time);
+
+       if (next_time < now_time) {
+               /* Advance one day */
+               next_time += 60 * 60 * 24;
+               rtc_time_to_tm(next_time, next);
+       }
+}
+
+static int pm860x_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+       struct pm860x_rtc_info *info = dev_get_drvdata(dev);
+       unsigned char buf[8];
+       unsigned long ticks, base, data;
+
+       pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf);
+       dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1],
+               buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
+       base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7];
+
+       /* load 32-bit read-only counter */
+       pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
+       data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+       ticks = base + data;
+       dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
+               base, data, ticks);
+
+       rtc_time_to_tm(ticks, tm);
+
+       return 0;
+}
+
+static int pm860x_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+       struct pm860x_rtc_info *info = dev_get_drvdata(dev);
+       unsigned char buf[4];
+       unsigned long ticks, base, data;
+
+       if ((tm->tm_year < 70) || (tm->tm_year > 138)) {
+               dev_dbg(info->dev, "Set time %d out of range. "
+                       "Please set time between 1970 and 2038.\n",
+                       1900 + tm->tm_year);
+               return -EINVAL;
+       }
+       rtc_tm_to_time(tm, &ticks);
+
+       /* load 32-bit read-only counter */
+       pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
+       data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+       base = ticks - data;
+       dev_dbg(info->dev, "set base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
+               base, data, ticks);
+
+       pm860x_page_reg_write(info->i2c, REG0_DATA, (base >> 24) & 0xFF);
+       pm860x_page_reg_write(info->i2c, REG1_DATA, (base >> 16) & 0xFF);
+       pm860x_page_reg_write(info->i2c, REG2_DATA, (base >> 8) & 0xFF);
+       pm860x_page_reg_write(info->i2c, REG3_DATA, base & 0xFF);
+
+       if (info->sync)
+               info->sync(ticks);
+       return 0;
+}
+
+static int pm860x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+       struct pm860x_rtc_info *info = dev_get_drvdata(dev);
+       unsigned char buf[8];
+       unsigned long ticks, base, data;
+       int ret;
+
+       pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf);
+       dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1],
+               buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
+       base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7];
+
+       pm860x_bulk_read(info->i2c, PM8607_RTC_EXPIRE1, 4, buf);
+       data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+       ticks = base + data;
+       dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
+               base, data, ticks);
+
+       rtc_time_to_tm(ticks, &alrm->time);
+       ret = pm860x_reg_read(info->i2c, PM8607_RTC1);
+       alrm->enabled = (ret & ALARM_EN) ? 1 : 0;
+       alrm->pending = (ret & (ALARM | ALARM_WAKEUP)) ? 1 : 0;
+       return 0;
+}
+
+static int pm860x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+       struct pm860x_rtc_info *info = dev_get_drvdata(dev);
+       struct rtc_time now_tm, alarm_tm;
+       unsigned long ticks, base, data;
+       unsigned char buf[8];
+       int mask;
+
+       pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM_EN, 0);
+
+       pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf);
+       dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1],
+               buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
+       base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7];
+
+       /* load 32-bit read-only counter */
+       pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
+       data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
+       ticks = base + data;
+       dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
+               base, data, ticks);
+
+       rtc_time_to_tm(ticks, &now_tm);
+       rtc_next_alarm_time(&alarm_tm, &now_tm, &alrm->time);
+       /* get new ticks for alarm in 24 hours */
+       rtc_tm_to_time(&alarm_tm, &ticks);
+       data = ticks - base;
+
+       buf[0] = data & 0xff;
+       buf[1] = (data >> 8) & 0xff;
+       buf[2] = (data >> 16) & 0xff;
+       buf[3] = (data >> 24) & 0xff;
+       pm860x_bulk_write(info->i2c, PM8607_RTC_EXPIRE1, 4, buf);
+       if (alrm->enabled) {
+               mask = ALARM | ALARM_WAKEUP | ALARM_EN;
+               pm860x_set_bits(info->i2c, PM8607_RTC1, mask, mask);
+       } else {
+               mask = ALARM | ALARM_WAKEUP | ALARM_EN;
+               pm860x_set_bits(info->i2c, PM8607_RTC1, mask,
+                               ALARM | ALARM_WAKEUP);
+       }
+       return 0;
+}
+
+static const struct rtc_class_ops pm860x_rtc_ops = {
+       .read_time      = pm860x_rtc_read_time,
+       .set_time       = pm860x_rtc_set_time,
+       .read_alarm     = pm860x_rtc_read_alarm,
+       .set_alarm      = pm860x_rtc_set_alarm,
+       .alarm_irq_enable = pm860x_rtc_alarm_irq_enable,
+};
+
+#ifdef VRTC_CALIBRATION
+static void calibrate_vrtc_work(struct work_struct *work)
+{
+       struct pm860x_rtc_info *info = container_of(work,
+               struct pm860x_rtc_info, calib_work.work);
+       unsigned char buf[2];
+       unsigned int sum, data, mean, vrtc_set;
+       int i;
+
+       for (i = 0, sum = 0; i < 16; i++) {
+               msleep(100);
+               pm860x_bulk_read(info->i2c, REG_VRTC_MEAS1, 2, buf);
+               data = (buf[0] << 4) | buf[1];
+               data = (data * 5400) >> 12;     /* convert to mV */
+               sum += data;
+       }
+       mean = sum >> 4;
+       vrtc_set = 2700 + (info->vrtc & 0x3) * 200;
+       dev_dbg(info->dev, "mean:%d, vrtc_set:%d\n", mean, vrtc_set);
+
+       sum = pm860x_reg_read(info->i2c, PM8607_RTC_MISC1);
+       data = sum & 0x3;
+       if ((mean + 200) < vrtc_set) {
+               /* try higher voltage */
+               if (++data == 4)
+                       goto out;
+               data = (sum & 0xf8) | (data & 0x3);
+               pm860x_reg_write(info->i2c, PM8607_RTC_MISC1, data);
+       } else if ((mean - 200) > vrtc_set) {
+               /* try lower voltage */
+               if (data-- == 0)
+                       goto out;
+               data = (sum & 0xf8) | (data & 0x3);
+               pm860x_reg_write(info->i2c, PM8607_RTC_MISC1, data);
+       } else
+               goto out;
+       dev_dbg(info->dev, "set 0x%x to RTC_MISC1\n", data);
+       /* trigger next calibration since VRTC is updated */
+       schedule_delayed_work(&info->calib_work, VRTC_CALIB_INTERVAL);
+       return;
+out:
+       /* disable measurement */
+       pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0);
+       dev_dbg(info->dev, "finish VRTC calibration\n");
+       return;
+}
+#endif
+
+static int __devinit pm860x_rtc_probe(struct platform_device *pdev)
+{
+       struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
+       struct pm860x_rtc_pdata *pdata = NULL;
+       struct pm860x_rtc_info *info;
+       struct rtc_time tm;
+       unsigned long ticks = 0;
+       int ret;
+
+       pdata = pdev->dev.platform_data;
+       if (pdata == NULL)
+               dev_warn(&pdev->dev, "No platform data!\n");
+
+       info = kzalloc(sizeof(struct pm860x_rtc_info), GFP_KERNEL);
+       if (!info)
+               return -ENOMEM;
+       info->irq = platform_get_irq(pdev, 0);
+       if (info->irq < 0) {
+               dev_err(&pdev->dev, "No IRQ resource!\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       info->chip = chip;
+       info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion;
+       info->dev = &pdev->dev;
+       dev_set_drvdata(&pdev->dev, info);
+
+       ret = request_threaded_irq(info->irq, NULL, rtc_update_handler,
+                                  IRQF_ONESHOT, "rtc", info);
+       if (ret < 0) {
+               dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
+                       info->irq, ret);
+               goto out;
+       }
+
+       /* set addresses of 32-bit base value for RTC time */
+       pm860x_page_reg_write(info->i2c, REG0_ADDR, REG0_DATA);
+       pm860x_page_reg_write(info->i2c, REG1_ADDR, REG1_DATA);
+       pm860x_page_reg_write(info->i2c, REG2_ADDR, REG2_DATA);
+       pm860x_page_reg_write(info->i2c, REG3_ADDR, REG3_DATA);
+
+       ret = pm860x_rtc_read_time(&pdev->dev, &tm);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to read initial time.\n");
+               goto out_rtc;
+       }
+       if ((tm.tm_year < 70) || (tm.tm_year > 138)) {
+               tm.tm_year = 70;
+               tm.tm_mon = 0;
+               tm.tm_mday = 1;
+               tm.tm_hour = 0;
+               tm.tm_min = 0;
+               tm.tm_sec = 0;
+               ret = pm860x_rtc_set_time(&pdev->dev, &tm);
+               if (ret < 0) {
+                       dev_err(&pdev->dev, "Failed to set initial time.\n");
+                       goto out_rtc;
+               }
+       }
+       rtc_tm_to_time(&tm, &ticks);
+       if (pdata && pdata->sync) {
+               pdata->sync(ticks);
+               info->sync = pdata->sync;
+       }
+
+       info->rtc_dev = rtc_device_register("88pm860x-rtc", &pdev->dev,
+                                           &pm860x_rtc_ops, THIS_MODULE);
+       ret = PTR_ERR(info->rtc_dev);
+       if (IS_ERR(info->rtc_dev)) {
+               dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
+               goto out_rtc;
+       }
+
+       /*
+        * Enable the internal XO instead of the internal 3.25MHz clock,
+        * since it keeps running in the PMIC power-down state.
+        */
+       pm860x_set_bits(info->i2c, PM8607_RTC1, RTC1_USE_XO, RTC1_USE_XO);
+
+#ifdef VRTC_CALIBRATION
+       /* <00> -- 2.7V, <01> -- 2.9V, <10> -- 3.1V, <11> -- 3.3V */
+       if (pdata && pdata->vrtc)
+               info->vrtc = pdata->vrtc & 0x3;
+       else
+               info->vrtc = 1;
+       pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, MEAS2_VRTC);
+
+       /* calibrate VRTC */
+       INIT_DELAYED_WORK(&info->calib_work, calibrate_vrtc_work);
+       schedule_delayed_work(&info->calib_work, VRTC_CALIB_INTERVAL);
+#endif /* VRTC_CALIBRATION */
+       return 0;
+out_rtc:
+       free_irq(info->irq, info);
+out:
+       kfree(info);
+       return ret;
+}
+
+static int __devexit pm860x_rtc_remove(struct platform_device *pdev)
+{
+       struct pm860x_rtc_info *info = platform_get_drvdata(pdev);
+
+#ifdef VRTC_CALIBRATION
+       flush_scheduled_work();
+       /* disable measurement */
+       pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0);
+#endif /* VRTC_CALIBRATION */
+
+       platform_set_drvdata(pdev, NULL);
+       rtc_device_unregister(info->rtc_dev);
+       free_irq(info->irq, info);
+       kfree(info);
+       return 0;
+}
+
+static struct platform_driver pm860x_rtc_driver = {
+       .driver         = {
+               .name   = "88pm860x-rtc",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = pm860x_rtc_probe,
+       .remove         = __devexit_p(pm860x_rtc_remove),
+};
+
+static int __init pm860x_rtc_init(void)
+{
+       return platform_driver_register(&pm860x_rtc_driver);
+}
+module_init(pm860x_rtc_init);
+
+static void __exit pm860x_rtc_exit(void)
+{
+       platform_driver_unregister(&pm860x_rtc_driver);
+}
+module_exit(pm860x_rtc_exit);
+
+MODULE_DESCRIPTION("Marvell 88PM860x RTC driver");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
+MODULE_LICENSE("GPL");
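
For illustration only, not part of this commit: the 88PM860x time counter is read-only and free-running, so the driver above keeps wall-clock time as base + counter, storing the base in the four scratch data registers. The helpers and sample numbers below are stand-ins for that bookkeeping.

#include <stdio.h>

static unsigned long rtc_read(unsigned long base, unsigned long counter)
{
	return base + counter;		/* what pm860x_rtc_read_time() reports */
}

static unsigned long rtc_new_base(unsigned long want, unsigned long counter)
{
	return want - counter;		/* what pm860x_rtc_set_time() stores */
}

int main(void)
{
	unsigned long counter = 123456;	/* pretend hardware count */
	unsigned long base = rtc_new_base(1306540800UL, counter);

	printf("%lu\n", rtc_read(base, counter));	/* prints 1306540800 again */
	return 0;
}
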
diff --git a/drivers/rtc/rtc-em3027.c b/drivers/rtc/rtc-em3027.c
new file mode 100644 (file)
index 0000000..d8e1c25
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * An rtc/i2c driver for the EM Microelectronic EM3027
+ * Copyright 2011 CompuLab, Ltd.
+ *
+ * Author: Mike Rapoport <mike@compulab.co.il>
+ *
+ * Based on rtc-ds1672.c by Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+
+/* Registers */
+#define EM3027_REG_ON_OFF_CTRL 0x00
+#define EM3027_REG_IRQ_CTRL    0x01
+#define EM3027_REG_IRQ_FLAGS   0x02
+#define EM3027_REG_STATUS      0x03
+#define EM3027_REG_RST_CTRL    0x04
+
+#define EM3027_REG_WATCH_SEC   0x08
+#define EM3027_REG_WATCH_MIN   0x09
+#define EM3027_REG_WATCH_HOUR  0x0a
+#define EM3027_REG_WATCH_DATE  0x0b
+#define EM3027_REG_WATCH_DAY   0x0c
+#define EM3027_REG_WATCH_MON   0x0d
+#define EM3027_REG_WATCH_YEAR  0x0e
+
+#define EM3027_REG_ALARM_SEC   0x10
+#define EM3027_REG_ALARM_MIN   0x11
+#define EM3027_REG_ALARM_HOUR  0x12
+#define EM3027_REG_ALARM_DATE  0x13
+#define EM3027_REG_ALARM_DAY   0x14
+#define EM3027_REG_ALARM_MON   0x15
+#define EM3027_REG_ALARM_YEAR  0x16
+
+static struct i2c_driver em3027_driver;
+
+static int em3027_get_time(struct device *dev, struct rtc_time *tm)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+
+       unsigned char addr = EM3027_REG_WATCH_SEC;
+       unsigned char buf[7];
+
+       struct i2c_msg msgs[] = {
+               {client->addr, 0, 1, &addr},            /* setup read addr */
+               {client->addr, I2C_M_RD, 7, buf},       /* read time/date */
+       };
+
+       /* read time/date registers */
+       if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
+               dev_err(&client->dev, "%s: read error\n", __func__);
+               return -EIO;
+       }
+
+       tm->tm_sec      = bcd2bin(buf[0]);
+       tm->tm_min      = bcd2bin(buf[1]);
+       tm->tm_hour     = bcd2bin(buf[2]);
+       tm->tm_mday     = bcd2bin(buf[3]);
+       tm->tm_wday     = bcd2bin(buf[4]);
+       tm->tm_mon      = bcd2bin(buf[5]);
+       tm->tm_year     = bcd2bin(buf[6]) + 100;
+
+       return 0;
+}
+
+static int em3027_set_time(struct device *dev, struct rtc_time *tm)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       unsigned char buf[8];
+
+       struct i2c_msg msg = {
+               client->addr, 0, 8, buf,        /* write time/date */
+       };
+
+       buf[0] = EM3027_REG_WATCH_SEC;
+       buf[1] = bin2bcd(tm->tm_sec);
+       buf[2] = bin2bcd(tm->tm_min);
+       buf[3] = bin2bcd(tm->tm_hour);
+       buf[4] = bin2bcd(tm->tm_mday);
+       buf[5] = bin2bcd(tm->tm_wday);
+       buf[6] = bin2bcd(tm->tm_mon);
+       buf[7] = bin2bcd(tm->tm_year % 100);
+
+       /* write time/date registers */
+       if ((i2c_transfer(client->adapter, &msg, 1)) != 1) {
+               dev_err(&client->dev, "%s: write error\n", __func__);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static const struct rtc_class_ops em3027_rtc_ops = {
+       .read_time = em3027_get_time,
+       .set_time = em3027_set_time,
+};
+
+static int em3027_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id)
+{
+       struct rtc_device *rtc;
+
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+               return -ENODEV;
+
+       rtc = rtc_device_register(em3027_driver.driver.name, &client->dev,
+                                 &em3027_rtc_ops, THIS_MODULE);
+       if (IS_ERR(rtc))
+               return PTR_ERR(rtc);
+
+       i2c_set_clientdata(client, rtc);
+
+       return 0;
+}
+
+static int em3027_remove(struct i2c_client *client)
+{
+       struct rtc_device *rtc = i2c_get_clientdata(client);
+
+       if (rtc)
+               rtc_device_unregister(rtc);
+
+       return 0;
+}
+
+static struct i2c_device_id em3027_id[] = {
+       { "em3027", 0 },
+       { }
+};
+
+static struct i2c_driver em3027_driver = {
+       .driver = {
+                  .name = "rtc-em3027",
+       },
+       .probe = &em3027_probe,
+       .remove = &em3027_remove,
+       .id_table = em3027_id,
+};
+
+static int __init em3027_init(void)
+{
+       return i2c_add_driver(&em3027_driver);
+}
+
+static void __exit em3027_exit(void)
+{
+       i2c_del_driver(&em3027_driver);
+}
+
+MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
+MODULE_DESCRIPTION("EM Microelectronic EM3027 RTC driver");
+MODULE_LICENSE("GPL");
+
+module_init(em3027_init);
+module_exit(em3027_exit);
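
For illustration only, not part of this commit: the EM3027 stores each date/time field as packed BCD, which is why every register goes through bcd2bin()/bin2bcd() above. The conversions below are standalone stand-ins for those kernel helpers.

#include <assert.h>

static unsigned bcd_to_bin(unsigned char v)
{
	return (v >> 4) * 10 + (v & 0x0f);
}

static unsigned char bin_to_bcd(unsigned v)
{
	return (unsigned char)(((v / 10) << 4) | (v % 10));
}

int main(void)
{
	assert(bcd_to_bin(0x27) == 27);		/* e.g. seconds register 0x27 -> 27 s */
	assert(bin_to_bcd(59) == 0x59);
	return 0;
}
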
diff --git a/drivers/rtc/rtc-m41t93.c b/drivers/rtc/rtc-m41t93.c
new file mode 100644 (file)
index 0000000..1a84b3e
--- /dev/null
@@ -0,0 +1,225 @@
+/*
+ *
+ * Driver for ST M41T93 SPI RTC
+ *
+ * (c) 2010 Nikolaus Voss, Weinmann Medical GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/bcd.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/spi/spi.h>
+
+#define M41T93_REG_SSEC                        0
+#define M41T93_REG_ST_SEC              1
+#define M41T93_REG_MIN                 2
+#define M41T93_REG_CENT_HOUR           3
+#define M41T93_REG_WDAY                        4
+#define M41T93_REG_DAY                 5
+#define M41T93_REG_MON                 6
+#define M41T93_REG_YEAR                        7
+
+
+#define M41T93_REG_ALM_HOUR_HT         0xc
+#define M41T93_REG_FLAGS               0xf
+
+#define M41T93_FLAG_ST                 (1 << 7)
+#define M41T93_FLAG_OF                 (1 << 2)
+#define M41T93_FLAG_BL                 (1 << 4)
+#define M41T93_FLAG_HT                 (1 << 6)
+
+static inline int m41t93_set_reg(struct spi_device *spi, u8 addr, u8 data)
+{
+       u8 buf[2];
+
+       /* MSB must be '1' to write */
+       buf[0] = addr | 0x80;
+       buf[1] = data;
+
+       return spi_write(spi, buf, sizeof(buf));
+}
+
+static int m41t93_set_time(struct device *dev, struct rtc_time *tm)
+{
+       struct spi_device *spi = to_spi_device(dev);
+       u8 buf[9] = {0x80};        /* write cmd + 8 data bytes */
+       u8 * const data = &buf[1]; /* ptr to first data byte */
+
+       dev_dbg(dev, "%s secs=%d, mins=%d, "
+               "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
+               "write", tm->tm_sec, tm->tm_min,
+               tm->tm_hour, tm->tm_mday,
+               tm->tm_mon, tm->tm_year, tm->tm_wday);
+
+       if (tm->tm_year < 100) {
+               dev_warn(&spi->dev, "unsupported date (before 2000-01-01).\n");
+               return -EINVAL;
+       }
+
+       data[M41T93_REG_SSEC]           = 0;
+       data[M41T93_REG_ST_SEC]         = bin2bcd(tm->tm_sec);
+       data[M41T93_REG_MIN]            = bin2bcd(tm->tm_min);
+       data[M41T93_REG_CENT_HOUR]      = bin2bcd(tm->tm_hour) |
+                                               ((tm->tm_year/100-1) << 6);
+       data[M41T93_REG_DAY]            = bin2bcd(tm->tm_mday);
+       data[M41T93_REG_WDAY]           = bin2bcd(tm->tm_wday + 1);
+       data[M41T93_REG_MON]            = bin2bcd(tm->tm_mon + 1);
+       data[M41T93_REG_YEAR]           = bin2bcd(tm->tm_year % 100);
+
+       return spi_write(spi, buf, sizeof(buf));
+}
+
+
+static int m41t93_get_time(struct device *dev, struct rtc_time *tm)
+{
+       struct spi_device *spi = to_spi_device(dev);
+       const u8 start_addr = 0;
+       u8 buf[8];
+       int century_after_1900;
+       int tmp;
+       int ret = 0;
+
+       /* Check status of clock. Two states must be considered:
+          1. halt bit (HT) is set: the clock is running but update of readout
+             registers has been disabled due to power failure. This is the
+             normal case after power-on. Time is valid after resetting the HT bit.
+          2. oscillator fail bit (OF) is set. The oscillator has been stopped
+             and the time is invalid:
+             a) OF can be immediately reset.
+             b) OF cannot be immediately reset: oscillator has to be restarted.
+       */
+       tmp = spi_w8r8(spi, M41T93_REG_ALM_HOUR_HT);
+       if (tmp < 0)
+               return tmp;
+
+       if (tmp & M41T93_FLAG_HT) {
+               dev_dbg(&spi->dev, "HT bit is set, reenable clock update.\n");
+               m41t93_set_reg(spi, M41T93_REG_ALM_HOUR_HT,
+                              tmp & ~M41T93_FLAG_HT);
+       }
+
+       tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
+       if (tmp < 0)
+               return tmp;
+
+       if (tmp & M41T93_FLAG_OF) {
+               ret = -EINVAL;
+               dev_warn(&spi->dev, "OF bit is set, resetting.\n");
+               m41t93_set_reg(spi, M41T93_REG_FLAGS, tmp & ~M41T93_FLAG_OF);
+
+               tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
+               if (tmp < 0)
+                       return tmp;
+               else if (tmp & M41T93_FLAG_OF) {
+                       u8 reset_osc = buf[M41T93_REG_ST_SEC] | M41T93_FLAG_ST;
+
+                       dev_warn(&spi->dev,
+                                "OF bit is still set, kickstarting clock.\n");
+                       m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
+                       reset_osc &= ~M41T93_FLAG_ST;
+                       m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
+               }
+       }
+
+       if (tmp & M41T93_FLAG_BL)
+               dev_warn(&spi->dev, "BL bit is set, replace battery.\n");
+
+       /* read actual time/date */
+       tmp = spi_write_then_read(spi, &start_addr, 1, buf, sizeof(buf));
+       if (tmp < 0)
+               return tmp;
+
+       tm->tm_sec      = bcd2bin(buf[M41T93_REG_ST_SEC]);
+       tm->tm_min      = bcd2bin(buf[M41T93_REG_MIN]);
+       tm->tm_hour     = bcd2bin(buf[M41T93_REG_CENT_HOUR] & 0x3f);
+       tm->tm_mday     = bcd2bin(buf[M41T93_REG_DAY]);
+       tm->tm_mon      = bcd2bin(buf[M41T93_REG_MON]) - 1;
+       tm->tm_wday     = bcd2bin(buf[M41T93_REG_WDAY] & 0x0f) - 1;
+
+       century_after_1900 = (buf[M41T93_REG_CENT_HOUR] >> 6) + 1;
+       tm->tm_year = bcd2bin(buf[M41T93_REG_YEAR]) + century_after_1900 * 100;
+
+       dev_dbg(dev, "%s secs=%d, mins=%d, "
+               "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n",
+               "read", tm->tm_sec, tm->tm_min,
+               tm->tm_hour, tm->tm_mday,
+               tm->tm_mon, tm->tm_year, tm->tm_wday);
+
+       return ret < 0 ? ret : rtc_valid_tm(tm);
+}
+
+
+static const struct rtc_class_ops m41t93_rtc_ops = {
+       .read_time      = m41t93_get_time,
+       .set_time       = m41t93_set_time,
+};
+
+static struct spi_driver m41t93_driver;
+
+static int __devinit m41t93_probe(struct spi_device *spi)
+{
+       struct rtc_device *rtc;
+       int res;
+
+       spi->bits_per_word = 8;
+       spi_setup(spi);
+
+       res = spi_w8r8(spi, M41T93_REG_WDAY);
+       if (res < 0 || (res & 0xf8) != 0) {
+               dev_err(&spi->dev, "not found 0x%x.\n", res);
+               return -ENODEV;
+       }
+
+       rtc = rtc_device_register(m41t93_driver.driver.name,
+               &spi->dev, &m41t93_rtc_ops, THIS_MODULE);
+       if (IS_ERR(rtc))
+               return PTR_ERR(rtc);
+
+       dev_set_drvdata(&spi->dev, rtc);
+
+       return 0;
+}
+
+
+static int __devexit m41t93_remove(struct spi_device *spi)
+{
+       struct rtc_device *rtc = platform_get_drvdata(spi);
+
+       if (rtc)
+               rtc_device_unregister(rtc);
+
+       return 0;
+}
+
+static struct spi_driver m41t93_driver = {
+       .driver = {
+               .name   = "rtc-m41t93",
+               .bus    = &spi_bus_type,
+               .owner  = THIS_MODULE,
+       },
+       .probe  = m41t93_probe,
+       .remove = __devexit_p(m41t93_remove),
+};
+
+static __init int m41t93_init(void)
+{
+       return spi_register_driver(&m41t93_driver);
+}
+module_init(m41t93_init);
+
+static __exit void m41t93_exit(void)
+{
+       spi_unregister_driver(&m41t93_driver);
+}
+module_exit(m41t93_exit);
+
+MODULE_AUTHOR("Nikolaus Voss <n.voss@weinmann.de>");
+MODULE_DESCRIPTION("Driver for ST M41T93 SPI RTC");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:rtc-m41t93");
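
For illustration only, not part of this commit: m41t93_set_time() packs the century into the top bits of M41T93_REG_CENT_HOUR as tm_year/100 - 1, and m41t93_get_time() adds one back before rebuilding tm_year. A standalone round-trip of that encoding (names are stand-ins):

#include <assert.h>

static int cent_field(int tm_year)			/* stored by set_time */
{
	return tm_year / 100 - 1;
}

static int tm_year_from(int cent, int year_in_century)	/* rebuilt by get_time */
{
	return (cent + 1) * 100 + year_in_century;
}

int main(void)
{
	assert(tm_year_from(cent_field(111), 11) == 111);	/* 2011 round-trips */
	return 0;
}
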
index b2f096871a97cc5157dbbe75ee80738a941134b1..0cec5650d56a3d03e7ed1c50fb3a3f46a90043bf 100644 (file)
@@ -380,7 +380,7 @@ cleanup1:
 cleanup0:
        dev_set_drvdata(dev, NULL);
        mrst_rtc.dev = NULL;
-       release_region(iomem->start, iomem->end + 1 - iomem->start);
+       release_mem_region(iomem->start, resource_size(iomem));
        dev_err(dev, "rtc-mrst: unable to initialise\n");
        return retval;
 }
@@ -406,7 +406,7 @@ static void __devexit rtc_mrst_do_remove(struct device *dev)
        mrst->rtc = NULL;
 
        iomem = mrst->iomem;
-       release_region(iomem->start, iomem->end + 1 - iomem->start);
+       release_mem_region(iomem->start, resource_size(iomem));
        mrst->iomem = NULL;
 
        mrst->dev = NULL;
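
For illustration only, not part of this commit: release_mem_region() pairs the release with a memory-region request rather than an I/O-port one, and resource_size() computes the same inclusive-range length the removed lines open-coded. A standalone stand-in for that length calculation (struct and names are not the kernel's):

#include <stdio.h>

struct res {
	unsigned long start;
	unsigned long end;	/* inclusive, as in struct resource */
};

static unsigned long res_size(const struct res *r)
{
	return r->end - r->start + 1;
}

int main(void)
{
	struct res iomem = { 0x1000, 0x10ff };

	printf("%lu bytes\n", res_size(&iomem));	/* 256 */
	return 0;
}
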
index d814417bee8c08b68224fafcbeb39e7ca405fd5d..39e41fbdf08ba6de6259347feb0cc5dee76803d7 100644 (file)
@@ -55,12 +55,6 @@ static const u32 PIE_BIT_DEF[MAX_PIE_NUM][2] = {
        { MAX_PIE_FREQ, RTC_SAM7_BIT },
 };
 
-/* Those are the bits from a classic RTC we want to mimic */
-#define RTC_IRQF       0x80    /* any of the following 3 is active */
-#define RTC_PF         0x40    /* Periodic interrupt */
-#define RTC_AF         0x20    /* Alarm interrupt */
-#define RTC_UF         0x10    /* Update interrupt for 1Hz RTC */
-
 #define MXC_RTC_TIME   0
 #define MXC_RTC_ALARM  1
 
index f90c574f9d055a8cffb8398c03b51c83327c0d50..0c423892923c99f7ed04a4d9a2d0e387de7e7b57 100644 (file)
@@ -58,7 +58,6 @@ struct pcf50633_time {
 
 struct pcf50633_rtc {
        int alarm_enabled;
-       int second_enabled;
        int alarm_pending;
 
        struct pcf50633 *pcf;
@@ -143,7 +142,7 @@ static int pcf50633_rtc_set_time(struct device *dev, struct rtc_time *tm)
 {
        struct pcf50633_rtc *rtc;
        struct pcf50633_time pcf_tm;
-       int second_masked, alarm_masked, ret = 0;
+       int alarm_masked, ret = 0;
 
        rtc = dev_get_drvdata(dev);
 
@@ -162,11 +161,8 @@ static int pcf50633_rtc_set_time(struct device *dev, struct rtc_time *tm)
                pcf_tm.time[PCF50633_TI_SEC]);
 
 
-       second_masked = pcf50633_irq_mask_get(rtc->pcf, PCF50633_IRQ_SECOND);
        alarm_masked = pcf50633_irq_mask_get(rtc->pcf, PCF50633_IRQ_ALARM);
 
-       if (!second_masked)
-               pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_SECOND);
        if (!alarm_masked)
                pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_ALARM);
 
@@ -175,8 +171,6 @@ static int pcf50633_rtc_set_time(struct device *dev, struct rtc_time *tm)
                                             PCF50633_TI_EXTENT,
                                             &pcf_tm.time[0]);
 
-       if (!second_masked)
-               pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_SECOND);
        if (!alarm_masked)
                pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM);
 
@@ -250,15 +244,8 @@ static void pcf50633_rtc_irq(int irq, void *data)
 {
        struct pcf50633_rtc *rtc = data;
 
-       switch (irq) {
-       case PCF50633_IRQ_ALARM:
-               rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
-               rtc->alarm_pending = 1;
-               break;
-       case PCF50633_IRQ_SECOND:
-               rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF);
-               break;
-       }
+       rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);
+       rtc->alarm_pending = 1;
 }
 
 static int __devinit pcf50633_rtc_probe(struct platform_device *pdev)
@@ -282,9 +269,6 @@ static int __devinit pcf50633_rtc_probe(struct platform_device *pdev)
 
        pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_ALARM,
                                        pcf50633_rtc_irq, rtc);
-       pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_SECOND,
-                                       pcf50633_rtc_irq, rtc);
-
        return 0;
 }
 
@@ -295,7 +279,6 @@ static int __devexit pcf50633_rtc_remove(struct platform_device *pdev)
        rtc = platform_get_drvdata(pdev);
 
        pcf50633_free_irq(rtc->pcf, PCF50633_IRQ_ALARM);
-       pcf50633_free_irq(rtc->pcf, PCF50633_IRQ_SECOND);
 
        rtc_device_unregister(rtc->rtc_dev);
        kfree(rtc);
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
new file mode 100644 (file)
index 0000000..ea09ff2
--- /dev/null
@@ -0,0 +1,454 @@
+/*
+ * Micro Crystal RV-3029C2 rtc class driver
+ *
+ * Author: Gregory Hermant <gregory.hermant@calao-systems.com>
+ *
+ * based on previously existing rtc class drivers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * NOTE: Currently this driver only supports the bare minimum for reading
+ * and writing the RTC and alarms. The extra features provided by this chip
+ * (trickle charger, eeprom, T° compensation) are unavailable.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/bcd.h>
+#include <linux/rtc.h>
+
+/* Register map */
+/* control section */
+#define RV3029C2_ONOFF_CTRL            0x00
+#define RV3029C2_IRQ_CTRL              0x01
+#define RV3029C2_IRQ_CTRL_AIE          (1 << 0)
+#define RV3029C2_IRQ_FLAGS             0x02
+#define RV3029C2_IRQ_FLAGS_AF          (1 << 0)
+#define RV3029C2_STATUS                        0x03
+#define RV3029C2_STATUS_VLOW1          (1 << 2)
+#define RV3029C2_STATUS_VLOW2          (1 << 3)
+#define RV3029C2_STATUS_SR             (1 << 4)
+#define RV3029C2_STATUS_PON            (1 << 5)
+#define RV3029C2_STATUS_EEBUSY         (1 << 7)
+#define RV3029C2_RST_CTRL              0x04
+#define RV3029C2_CONTROL_SECTION_LEN   0x05
+
+/* watch section */
+#define RV3029C2_W_SEC                 0x08
+#define RV3029C2_W_MINUTES             0x09
+#define RV3029C2_W_HOURS               0x0A
+#define RV3029C2_REG_HR_12_24          (1<<6)  /* 24h/12h mode */
+#define RV3029C2_REG_HR_PM             (1<<5)  /* PM/AM bit in 12h mode */
+#define RV3029C2_W_DATE                        0x0B
+#define RV3029C2_W_DAYS                        0x0C
+#define RV3029C2_W_MONTHS              0x0D
+#define RV3029C2_W_YEARS               0x0E
+#define RV3029C2_WATCH_SECTION_LEN     0x07
+
+/* alarm section */
+#define RV3029C2_A_SC                  0x10
+#define RV3029C2_A_MN                  0x11
+#define RV3029C2_A_HR                  0x12
+#define RV3029C2_A_DT                  0x13
+#define RV3029C2_A_DW                  0x14
+#define RV3029C2_A_MO                  0x15
+#define RV3029C2_A_YR                  0x16
+#define RV3029C2_ALARM_SECTION_LEN     0x07
+
+/* timer section */
+#define RV3029C2_TIMER_LOW             0x18
+#define RV3029C2_TIMER_HIGH            0x19
+
+/* temperature section */
+#define RV3029C2_TEMP_PAGE             0x20
+
+/* eeprom data section */
+#define RV3029C2_E2P_EEDATA1           0x28
+#define RV3029C2_E2P_EEDATA2           0x29
+
+/* eeprom control section */
+#define RV3029C2_CONTROL_E2P_EECTRL    0x30
+#define RV3029C2_TRICKLE_1K            (1<<0)  /*  1K resistance */
+#define RV3029C2_TRICKLE_5K            (1<<1)  /*  5K resistance */
+#define RV3029C2_TRICKLE_20K           (1<<2)  /* 20K resistance */
+#define RV3029C2_TRICKLE_80K           (1<<3)  /* 80K resistance */
+#define RV3029C2_CONTROL_E2P_XTALOFFSET        0x31
+#define RV3029C2_CONTROL_E2P_QCOEF     0x32
+#define RV3029C2_CONTROL_E2P_TURNOVER  0x33
+
+/* user ram section */
+#define RV3029C2_USR1_RAM_PAGE         0x38
+#define RV3029C2_USR1_SECTION_LEN      0x04
+#define RV3029C2_USR2_RAM_PAGE         0x3C
+#define RV3029C2_USR2_SECTION_LEN      0x04
+
+static int
+rv3029c2_i2c_read_regs(struct i2c_client *client, u8 reg, u8 *buf,
+       unsigned len)
+{
+       int ret;
+
+       if ((reg > RV3029C2_USR1_RAM_PAGE + 7) ||
+               (reg + len > RV3029C2_USR1_RAM_PAGE + 8))
+               return -EINVAL;
+
+       ret = i2c_smbus_read_i2c_block_data(client, reg, len, buf);
+       if (ret < 0)
+               return ret;
+       if (ret < len)
+               return -EIO;
+       return 0;
+}
+
+static int
+rv3029c2_i2c_write_regs(struct i2c_client *client, u8 reg, u8 const buf[],
+                       unsigned len)
+{
+       if ((reg > RV3029C2_USR1_RAM_PAGE + 7) ||
+               (reg + len > RV3029C2_USR1_RAM_PAGE + 8))
+               return -EINVAL;
+
+       return i2c_smbus_write_i2c_block_data(client, reg, len, buf);
+}
+
+static int
+rv3029c2_i2c_get_sr(struct i2c_client *client, u8 *buf)
+{
+       int ret = rv3029c2_i2c_read_regs(client, RV3029C2_STATUS, buf, 1);
+
+       if (ret < 0)
+               return -EIO;
+       dev_dbg(&client->dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]);
+       return 0;
+}
+
+static int
+rv3029c2_i2c_set_sr(struct i2c_client *client, u8 val)
+{
+       u8 buf[1];
+       int sr;
+
+       buf[0] = val;
+       sr = rv3029c2_i2c_write_regs(client, RV3029C2_STATUS, buf, 1);
+       dev_dbg(&client->dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]);
+       if (sr < 0)
+               return -EIO;
+       return 0;
+}
+
+static int
+rv3029c2_i2c_read_time(struct i2c_client *client, struct rtc_time *tm)
+{
+       u8 buf[1];
+       int ret;
+       u8 regs[RV3029C2_WATCH_SECTION_LEN] = { 0, };
+
+       ret = rv3029c2_i2c_get_sr(client, buf);
+       if (ret < 0) {
+               dev_err(&client->dev, "%s: reading SR failed\n", __func__);
+               return -EIO;
+       }
+
+       ret = rv3029c2_i2c_read_regs(client, RV3029C2_W_SEC, regs,
+                                       RV3029C2_WATCH_SECTION_LEN);
+       if (ret < 0) {
+               dev_err(&client->dev, "%s: reading RTC section failed\n",
+                       __func__);
+               return ret;
+       }
+
+       tm->tm_sec = bcd2bin(regs[RV3029C2_W_SEC-RV3029C2_W_SEC]);
+       tm->tm_min = bcd2bin(regs[RV3029C2_W_MINUTES-RV3029C2_W_SEC]);
+
+       /* HR field has a more complex interpretation */
+       {
+               const u8 _hr = regs[RV3029C2_W_HOURS-RV3029C2_W_SEC];
+               if (_hr & RV3029C2_REG_HR_12_24) {
+                       /* 12h format */
+                       tm->tm_hour = bcd2bin(_hr & 0x1f);
+                       if (_hr & RV3029C2_REG_HR_PM)   /* PM flag set */
+                               tm->tm_hour += 12;
+               } else /* 24h format */
+                       tm->tm_hour = bcd2bin(_hr & 0x3f);
+       }
+
+       tm->tm_mday = bcd2bin(regs[RV3029C2_W_DATE-RV3029C2_W_SEC]);
+       tm->tm_mon = bcd2bin(regs[RV3029C2_W_MONTHS-RV3029C2_W_SEC]) - 1;
+       tm->tm_year = bcd2bin(regs[RV3029C2_W_YEARS-RV3029C2_W_SEC]) + 100;
+       tm->tm_wday = bcd2bin(regs[RV3029C2_W_DAYS-RV3029C2_W_SEC]) - 1;
+
+       return 0;
+}
+
+static int rv3029c2_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+       return rv3029c2_i2c_read_time(to_i2c_client(dev), tm);
+}
+
+static int
+rv3029c2_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
+{
+       struct rtc_time *const tm = &alarm->time;
+       int ret;
+       u8 regs[8];
+
+       ret = rv3029c2_i2c_get_sr(client, regs);
+       if (ret < 0) {
+               dev_err(&client->dev, "%s: reading SR failed\n", __func__);
+               return -EIO;
+       }
+
+       ret = rv3029c2_i2c_read_regs(client, RV3029C2_A_SC, regs,
+                                       RV3029C2_ALARM_SECTION_LEN);
+
+       if (ret < 0) {
+               dev_err(&client->dev, "%s: reading alarm section failed\n",
+                       __func__);
+               return ret;
+       }
+
+       tm->tm_sec = bcd2bin(regs[RV3029C2_A_SC-RV3029C2_A_SC] & 0x7f);
+       tm->tm_min = bcd2bin(regs[RV3029C2_A_MN-RV3029C2_A_SC] & 0x7f);
+       tm->tm_hour = bcd2bin(regs[RV3029C2_A_HR-RV3029C2_A_SC] & 0x3f);
+       tm->tm_mday = bcd2bin(regs[RV3029C2_A_DT-RV3029C2_A_SC] & 0x3f);
+       tm->tm_mon = bcd2bin(regs[RV3029C2_A_MO-RV3029C2_A_SC] & 0x1f) - 1;
+       tm->tm_year = bcd2bin(regs[RV3029C2_A_YR-RV3029C2_A_SC] & 0x7f) + 100;
+       tm->tm_wday = bcd2bin(regs[RV3029C2_A_DW-RV3029C2_A_SC] & 0x07) - 1;
+
+       return 0;
+}
+
+static int
+rv3029c2_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+       return rv3029c2_i2c_read_alarm(to_i2c_client(dev), alarm);
+}
+
+static int rv3029c2_rtc_i2c_alarm_set_irq(struct i2c_client *client,
+                                       int enable)
+{
+       int ret;
+       u8 buf[1];
+
+       /* read the current interrupt control register */
+       ret = rv3029c2_i2c_read_regs(client, RV3029C2_IRQ_CTRL, buf, 1);
+       if (ret < 0) {
+               dev_err(&client->dev, "can't read INT reg\n");
+               return ret;
+       }
+       if (enable)
+               buf[0] |= RV3029C2_IRQ_CTRL_AIE;
+       else
+               buf[0] &= ~RV3029C2_IRQ_CTRL_AIE;
+
+       ret = rv3029c2_i2c_write_regs(client, RV3029C2_IRQ_CTRL, buf, 1);
+       if (ret < 0) {
+               dev_err(&client->dev, "can't set INT reg\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static int rv3029c2_rtc_i2c_set_alarm(struct i2c_client *client,
+                                       struct rtc_wkalrm *alarm)
+{
+       struct rtc_time *const tm = &alarm->time;
+       int ret;
+       u8 regs[8];
+
+       /*
+        * The clock has an 8 bit wide bcd-coded register (they never learn)
+        * for the year. tm_year is an offset from 1900 and we are interested
+        * in the 2000-2099 range, so any value less than 100 is invalid.
+       */
+       if (tm->tm_year < 100)
+               return -EINVAL;
+
+       ret = rv3029c2_i2c_get_sr(client, regs);
+       if (ret < 0) {
+               dev_err(&client->dev, "%s: reading SR failed\n", __func__);
+               return -EIO;
+       }
+       regs[RV3029C2_A_SC-RV3029C2_A_SC] = bin2bcd(tm->tm_sec & 0x7f);
+       regs[RV3029C2_A_MN-RV3029C2_A_SC] = bin2bcd(tm->tm_min & 0x7f);
+       regs[RV3029C2_A_HR-RV3029C2_A_SC] = bin2bcd(tm->tm_hour & 0x3f);
+       regs[RV3029C2_A_DT-RV3029C2_A_SC] = bin2bcd(tm->tm_mday & 0x3f);
+       regs[RV3029C2_A_MO-RV3029C2_A_SC] = bin2bcd(tm->tm_mon + 1);
+       regs[RV3029C2_A_DW-RV3029C2_A_SC] = bin2bcd((tm->tm_wday & 7) + 1);
+       regs[RV3029C2_A_YR-RV3029C2_A_SC] = bin2bcd(tm->tm_year - 100);
+
+       ret = rv3029c2_i2c_write_regs(client, RV3029C2_A_SC, regs,
+                                       RV3029C2_ALARM_SECTION_LEN);
+       if (ret < 0)
+               return ret;
+
+       if (alarm->enabled) {
+               u8 buf[1];
+
+               /* clear AF flag */
+               ret = rv3029c2_i2c_read_regs(client, RV3029C2_IRQ_FLAGS,
+                                               buf, 1);
+               if (ret < 0) {
+                       dev_err(&client->dev, "can't read alarm flag\n");
+                       return ret;
+               }
+               buf[0] &= ~RV3029C2_IRQ_FLAGS_AF;
+               ret = rv3029c2_i2c_write_regs(client, RV3029C2_IRQ_FLAGS,
+                                               buf, 1);
+               if (ret < 0) {
+                       dev_err(&client->dev, "can't set alarm flag\n");
+                       return ret;
+               }
+               /* enable AIE irq */
+               ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 1);
+               if (ret)
+                       return ret;
+
+               dev_dbg(&client->dev, "alarm IRQ armed\n");
+       } else {
+               /* disable AIE irq */
+               ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 0);
+               if (ret)
+                       return ret;
+
+               dev_dbg(&client->dev, "alarm IRQ disabled\n");
+       }
+
+       return 0;
+}
+
+static int rv3029c2_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+{
+       return rv3029c2_rtc_i2c_set_alarm(to_i2c_client(dev), alarm);
+}
+
+static int
+rv3029c2_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm)
+{
+       u8 regs[8];
+       int ret;
+
+       /*
+        * The clock has an 8 bit wide bcd-coded register (they never learn)
+        * for the year. tm_year is an offset from 1900 and we are interested
+        * in the 2000-2099 range, so any value less than 100 is invalid.
+       */
+       if (tm->tm_year < 100)
+               return -EINVAL;
+
+       regs[RV3029C2_W_SEC-RV3029C2_W_SEC] = bin2bcd(tm->tm_sec);
+       regs[RV3029C2_W_MINUTES-RV3029C2_W_SEC] = bin2bcd(tm->tm_min);
+       regs[RV3029C2_W_HOURS-RV3029C2_W_SEC] = bin2bcd(tm->tm_hour);
+       regs[RV3029C2_W_DATE-RV3029C2_W_SEC] = bin2bcd(tm->tm_mday);
+       regs[RV3029C2_W_MONTHS-RV3029C2_W_SEC] = bin2bcd(tm->tm_mon+1);
+       regs[RV3029C2_W_DAYS-RV3029C2_W_SEC] = bin2bcd((tm->tm_wday & 7)+1);
+       regs[RV3029C2_W_YEARS-RV3029C2_W_SEC] = bin2bcd(tm->tm_year - 100);
+
+       ret = rv3029c2_i2c_write_regs(client, RV3029C2_W_SEC, regs,
+                                       RV3029C2_WATCH_SECTION_LEN);
+       if (ret < 0)
+               return ret;
+
+       ret = rv3029c2_i2c_get_sr(client, regs);
+       if (ret < 0) {
+               dev_err(&client->dev, "%s: reading SR failed\n", __func__);
+               return ret;
+       }
+       /* clear PON bit */
+       ret = rv3029c2_i2c_set_sr(client, (regs[0] & ~RV3029C2_STATUS_PON));
+       if (ret < 0) {
+               dev_err(&client->dev, "%s: setting SR failed\n", __func__);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int rv3029c2_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+       return rv3029c2_i2c_set_time(to_i2c_client(dev), tm);
+}
+
+static const struct rtc_class_ops rv3029c2_rtc_ops = {
+       .read_time      = rv3029c2_rtc_read_time,
+       .set_time       = rv3029c2_rtc_set_time,
+       .read_alarm     = rv3029c2_rtc_read_alarm,
+       .set_alarm      = rv3029c2_rtc_set_alarm,
+};
+
+static struct i2c_device_id rv3029c2_id[] = {
+       { "rv3029c2", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, rv3029c2_id);
+
+static int __devinit
+rv3029c2_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+       struct rtc_device *rtc;
+       int rc = 0;
+       u8 buf[1];
+
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_EMUL))
+               return -ENODEV;
+
+       rtc = rtc_device_register(client->name,
+                               &client->dev, &rv3029c2_rtc_ops,
+                               THIS_MODULE);
+
+       if (IS_ERR(rtc))
+               return PTR_ERR(rtc);
+
+       i2c_set_clientdata(client, rtc);
+
+       rc = rv3029c2_i2c_get_sr(client, buf);
+       if (rc < 0) {
+               dev_err(&client->dev, "reading status failed\n");
+               goto exit_unregister;
+       }
+
+       return 0;
+
+exit_unregister:
+       rtc_device_unregister(rtc);
+
+       return rc;
+}
+
+static int __devexit rv3029c2_remove(struct i2c_client *client)
+{
+       struct rtc_device *rtc = i2c_get_clientdata(client);
+
+       rtc_device_unregister(rtc);
+
+       return 0;
+}
+
+static struct i2c_driver rv3029c2_driver = {
+       .driver = {
+               .name = "rtc-rv3029c2",
+       },
+       .probe = rv3029c2_probe,
+       .remove = __devexit_p(rv3029c2_remove),
+       .id_table = rv3029c2_id,
+};
+
+static int __init rv3029c2_init(void)
+{
+       return i2c_add_driver(&rv3029c2_driver);
+}
+
+static void __exit rv3029c2_exit(void)
+{
+       i2c_del_driver(&rv3029c2_driver);
+}
+
+module_init(rv3029c2_init);
+module_exit(rv3029c2_exit);
+
+MODULE_AUTHOR("Gregory Hermant <gregory.hermant@calao-systems.com>");
+MODULE_DESCRIPTION("Micro Crystal RV3029C2 RTC driver");
+MODULE_LICENSE("GPL");
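For context, an RTC class device registered through rtc_device_register(), as above, is
normally driven from userspace through the rtc character device. A minimal sketch
(assuming the chip shows up as /dev/rtc1; the actual node depends on the system and is
not guaranteed by this patch):

    /* Read the current time via the standard RTC ioctl interface. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/rtc.h>
    #include <unistd.h>

    int main(void)
    {
            struct rtc_time tm;
            int fd = open("/dev/rtc1", O_RDONLY);

            if (fd < 0 || ioctl(fd, RTC_RD_TIME, &tm) < 0) {
                    perror("rtc");
                    return 1;
            }
            printf("%04d-%02d-%02d %02d:%02d:%02d\n",
                   tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
                   tm.tm_hour, tm.tm_min, tm.tm_sec);
            close(fd);
            return 0;
    }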
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
new file mode 100644 (file)
index 0000000..893bac2
--- /dev/null
@@ -0,0 +1,534 @@
+/*
+ * drivers/rtc/rtc-spear.c
+ *
+ * Copyright (C) 2010 ST Microelectronics
+ * Rajeev Kumar<rajeev-dlh.kumar@st.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/bcd.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/* RTC registers */
+#define TIME_REG               0x00
+#define DATE_REG               0x04
+#define ALARM_TIME_REG         0x08
+#define ALARM_DATE_REG         0x0C
+#define CTRL_REG               0x10
+#define STATUS_REG             0x14
+
+/* TIME_REG & ALARM_TIME_REG */
+#define SECONDS_UNITS          (0xf<<0)        /* seconds units position */
+#define SECONDS_TENS           (0x7<<4)        /* seconds tens position */
+#define MINUTES_UNITS          (0xf<<8)        /* minutes units position */
+#define MINUTES_TENS           (0x7<<12)       /* minutes tens position */
+#define HOURS_UNITS            (0xf<<16)       /* hours units position */
+#define HOURS_TENS             (0x3<<20)       /* hours tens position */
+
+/* DATE_REG & ALARM_DATE_REG */
+#define DAYS_UNITS             (0xf<<0)        /* days units position */
+#define DAYS_TENS              (0x3<<4)        /* days tens position */
+#define MONTHS_UNITS           (0xf<<8)        /* months units position */
+#define MONTHS_TENS            (0x1<<12)       /* months tens position */
+#define YEARS_UNITS            (0xf<<16)       /* years units position */
+#define YEARS_TENS             (0xf<<20)       /* years tens position */
+#define YEARS_HUNDREDS         (0xf<<24)       /* years hundreds position */
+#define YEARS_MILLENIUMS       (0xf<<28)       /* years millennium position */
+
+/* MASK SHIFT TIME_REG & ALARM_TIME_REG*/
+#define SECOND_SHIFT           0x00            /* seconds units */
+#define MINUTE_SHIFT           0x08            /* minutes units position */
+#define HOUR_SHIFT             0x10            /* hours units position */
+#define MDAY_SHIFT             0x00            /* Month day shift */
+#define MONTH_SHIFT            0x08            /* Month shift */
+#define YEAR_SHIFT             0x10            /* Year shift */
+
+#define SECOND_MASK            0x7F
+#define MIN_MASK               0x7F
+#define HOUR_MASK              0x3F
+#define DAY_MASK               0x3F
+#define MONTH_MASK             0x7F
+#define YEAR_MASK              0xFFFF
+
+/* date reg equal to time reg, for debug only */
+#define TIME_BYP               (1<<9)
+#define INT_ENABLE             (1<<31)         /* interrupt enable */
+
+/* STATUS_REG */
+#define CLK_UNCONNECTED                (1<<0)
+#define PEND_WR_TIME           (1<<2)
+#define PEND_WR_DATE           (1<<3)
+#define LOST_WR_TIME           (1<<4)
+#define LOST_WR_DATE           (1<<5)
+#define RTC_INT_MASK           (1<<31)
+#define STATUS_BUSY            (PEND_WR_TIME | PEND_WR_DATE)
+#define STATUS_FAIL            (LOST_WR_TIME | LOST_WR_DATE)
+
+struct spear_rtc_config {
+       struct clk *clk;
+       spinlock_t lock;
+       void __iomem *ioaddr;
+};
+
+static inline void spear_rtc_clear_interrupt(struct spear_rtc_config *config)
+{
+       unsigned int val;
+       unsigned long flags;
+
+       spin_lock_irqsave(&config->lock, flags);
+       val = readl(config->ioaddr + STATUS_REG);
+       val |= RTC_INT_MASK;
+       writel(val, config->ioaddr + STATUS_REG);
+       spin_unlock_irqrestore(&config->lock, flags);
+}
+
+static inline void spear_rtc_enable_interrupt(struct spear_rtc_config *config)
+{
+       unsigned int val;
+
+       val = readl(config->ioaddr + CTRL_REG);
+       if (!(val & INT_ENABLE)) {
+               spear_rtc_clear_interrupt(config);
+               val |= INT_ENABLE;
+               writel(val, config->ioaddr + CTRL_REG);
+       }
+}
+
+static inline void spear_rtc_disable_interrupt(struct spear_rtc_config *config)
+{
+       unsigned int val;
+
+       val = readl(config->ioaddr + CTRL_REG);
+       if (val & INT_ENABLE) {
+               val &= ~INT_ENABLE;
+               writel(val, config->ioaddr + CTRL_REG);
+       }
+}
+
+static inline int is_write_complete(struct spear_rtc_config *config)
+{
+       int ret = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&config->lock, flags);
+       if ((readl(config->ioaddr + STATUS_REG)) & STATUS_FAIL)
+               ret = -EIO;
+       spin_unlock_irqrestore(&config->lock, flags);
+
+       return ret;
+}
+
+static void rtc_wait_not_busy(struct spear_rtc_config *config)
+{
+       int status, count = 0;
+       unsigned long flags;
+
+       /* Assuming BUSY may stay active for up to 80 msec */
+       for (count = 0; count < 80; count++) {
+               spin_lock_irqsave(&config->lock, flags);
+               status = readl(config->ioaddr + STATUS_REG);
+               spin_unlock_irqrestore(&config->lock, flags);
+               if ((status & STATUS_BUSY) == 0)
+                       break;
+               /* check status busy, after each msec */
+               msleep(1);
+       }
+}
+
+static irqreturn_t spear_rtc_irq(int irq, void *dev_id)
+{
+       struct rtc_device *rtc = (struct rtc_device *)dev_id;
+       struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
+       unsigned long flags, events = 0;
+       unsigned int irq_data;
+
+       spin_lock_irqsave(&config->lock, flags);
+       irq_data = readl(config->ioaddr + STATUS_REG);
+       spin_unlock_irqrestore(&config->lock, flags);
+
+       if ((irq_data & RTC_INT_MASK)) {
+               spear_rtc_clear_interrupt(config);
+               events = RTC_IRQF | RTC_AF;
+               rtc_update_irq(rtc, 1, events);
+               return IRQ_HANDLED;
+       } else
+               return IRQ_NONE;
+
+}
+
+static int tm2bcd(struct rtc_time *tm)
+{
+       if (rtc_valid_tm(tm) != 0)
+               return -EINVAL;
+       tm->tm_sec = bin2bcd(tm->tm_sec);
+       tm->tm_min = bin2bcd(tm->tm_min);
+       tm->tm_hour = bin2bcd(tm->tm_hour);
+       tm->tm_mday = bin2bcd(tm->tm_mday);
+       tm->tm_mon = bin2bcd(tm->tm_mon + 1);
+       tm->tm_year = bin2bcd(tm->tm_year);
+
+       return 0;
+}
+
+static void bcd2tm(struct rtc_time *tm)
+{
+       tm->tm_sec = bcd2bin(tm->tm_sec);
+       tm->tm_min = bcd2bin(tm->tm_min);
+       tm->tm_hour = bcd2bin(tm->tm_hour);
+       tm->tm_mday = bcd2bin(tm->tm_mday);
+       tm->tm_mon = bcd2bin(tm->tm_mon) - 1;
+       /* epoch == 1900 */
+       tm->tm_year = bcd2bin(tm->tm_year);
+}
+
+/*
+ * spear_rtc_read_time - read the time
+ * @dev: rtc device in use
+ * @tm: holds date and time
+ *
+ * This function reads the time and date. It returns 0 on success,
+ * otherwise a negative error code.
+ */
+static int spear_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct rtc_device *rtc = platform_get_drvdata(pdev);
+       struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
+       unsigned int time, date;
+
+       /* we don't report wday/yday/isdst ... */
+       rtc_wait_not_busy(config);
+
+       time = readl(config->ioaddr + TIME_REG);
+       date = readl(config->ioaddr + DATE_REG);
+       tm->tm_sec = (time >> SECOND_SHIFT) & SECOND_MASK;
+       tm->tm_min = (time >> MINUTE_SHIFT) & MIN_MASK;
+       tm->tm_hour = (time >> HOUR_SHIFT) & HOUR_MASK;
+       tm->tm_mday = (date >> MDAY_SHIFT) & DAY_MASK;
+       tm->tm_mon = (date >> MONTH_SHIFT) & MONTH_MASK;
+       tm->tm_year = (date >> YEAR_SHIFT) & YEAR_MASK;
+
+       bcd2tm(tm);
+       return 0;
+}
+
+/*
+ * spear_rtc_set_time - set the time
+ * @dev: rtc device in use
+ * @tm: holds date and time
+ *
+ * This function sets the time and date. It returns 0 on success,
+ * otherwise a negative error code.
+ */
+static int spear_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct rtc_device *rtc = platform_get_drvdata(pdev);
+       struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
+       unsigned int time, date;
+       int err;
+
+       if (tm2bcd(tm) < 0)
+               return -EINVAL;
+
+       rtc_wait_not_busy(config);
+       time = (tm->tm_sec << SECOND_SHIFT) | (tm->tm_min << MINUTE_SHIFT) |
+               (tm->tm_hour << HOUR_SHIFT);
+       date = (tm->tm_mday << MDAY_SHIFT) | (tm->tm_mon << MONTH_SHIFT) |
+               (tm->tm_year << YEAR_SHIFT);
+       writel(time, config->ioaddr + TIME_REG);
+       writel(date, config->ioaddr + DATE_REG);
+       err = is_write_complete(config);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+/*
+ * spear_rtc_read_alarm - read the alarm time
+ * @dev: rtc device in use
+ * @alm: holds alarm date and time
+ *
+ * This function reads the alarm time and date. It returns 0 on success,
+ * otherwise a negative error code.
+ */
+static int spear_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct rtc_device *rtc = platform_get_drvdata(pdev);
+       struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
+       unsigned int time, date;
+
+       rtc_wait_not_busy(config);
+
+       time = readl(config->ioaddr + ALARM_TIME_REG);
+       date = readl(config->ioaddr + ALARM_DATE_REG);
+       alm->time.tm_sec = (time >> SECOND_SHIFT) & SECOND_MASK;
+       alm->time.tm_min = (time >> MINUTE_SHIFT) & MIN_MASK;
+       alm->time.tm_hour = (time >> HOUR_SHIFT) & HOUR_MASK;
+       alm->time.tm_mday = (date >> MDAY_SHIFT) & DAY_MASK;
+       alm->time.tm_mon = (date >> MONTH_SHIFT) & MONTH_MASK;
+       alm->time.tm_year = (date >> YEAR_SHIFT) & YEAR_MASK;
+
+       bcd2tm(&alm->time);
+       alm->enabled = readl(config->ioaddr + CTRL_REG) & INT_ENABLE;
+
+       return 0;
+}
+
+/*
+ * spear_rtc_set_alarm - set the alarm time
+ * @dev: rtc device in use
+ * @alm: holds alarm date and time
+ *
+ * This function sets the alarm time and date. It returns 0 on success,
+ * otherwise a negative error code.
+ */
+static int spear_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct rtc_device *rtc = platform_get_drvdata(pdev);
+       struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
+       unsigned int time, date;
+       int err;
+
+       if (tm2bcd(&alm->time) < 0)
+               return -EINVAL;
+
+       rtc_wait_not_busy(config);
+
+       time = (alm->time.tm_sec << SECOND_SHIFT) | (alm->time.tm_min <<
+                       MINUTE_SHIFT) | (alm->time.tm_hour << HOUR_SHIFT);
+       date = (alm->time.tm_mday << MDAY_SHIFT) | (alm->time.tm_mon <<
+                       MONTH_SHIFT) | (alm->time.tm_year << YEAR_SHIFT);
+
+       writel(time, config->ioaddr + ALARM_TIME_REG);
+       writel(date, config->ioaddr + ALARM_DATE_REG);
+       err = is_write_complete(config);
+       if (err < 0)
+               return err;
+
+       if (alm->enabled)
+               spear_rtc_enable_interrupt(config);
+       else
+               spear_rtc_disable_interrupt(config);
+
+       return 0;
+}
+
+static const struct rtc_class_ops spear_rtc_ops = {
+       .read_time = spear_rtc_read_time,
+       .set_time = spear_rtc_set_time,
+       .read_alarm = spear_rtc_read_alarm,
+       .set_alarm = spear_rtc_set_alarm,
+};
+
+static int __devinit spear_rtc_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       struct rtc_device *rtc;
+       struct spear_rtc_config *config;
+       int status = 0;
+       int irq;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "no resource defined\n");
+               return -EBUSY;
+       }
+       if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+               dev_err(&pdev->dev, "rtc region already claimed\n");
+               return -EBUSY;
+       }
+
+       config = kzalloc(sizeof(*config), GFP_KERNEL);
+       if (!config) {
+               dev_err(&pdev->dev, "out of memory\n");
+               status = -ENOMEM;
+               goto err_release_region;
+       }
+
+       config->clk = clk_get(&pdev->dev, NULL);
+       if (IS_ERR(config->clk)) {
+               status = PTR_ERR(config->clk);
+               goto err_kfree;
+       }
+
+       status = clk_enable(config->clk);
+       if (status < 0)
+               goto err_clk_put;
+
+       config->ioaddr = ioremap(res->start, resource_size(res));
+       if (!config->ioaddr) {
+               dev_err(&pdev->dev, "ioremap fail\n");
+               status = -ENOMEM;
+               goto err_disable_clock;
+       }
+
+       spin_lock_init(&config->lock);
+
+       rtc = rtc_device_register(pdev->name, &pdev->dev, &spear_rtc_ops,
+                       THIS_MODULE);
+       if (IS_ERR(rtc)) {
+               dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
+                               PTR_ERR(rtc));
+               status = PTR_ERR(rtc);
+               goto err_iounmap;
+       }
+
+       platform_set_drvdata(pdev, rtc);
+       dev_set_drvdata(&rtc->dev, config);
+
+       /* alarm irqs */
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(&pdev->dev, "no update irq?\n");
+               status = irq;
+               goto err_clear_platdata;
+       }
+
+       status = request_irq(irq, spear_rtc_irq, 0, pdev->name, rtc);
+       if (status) {
+               dev_err(&pdev->dev, "Alarm interrupt IRQ%d already claimed\n",
+                       irq);
+               goto err_clear_platdata;
+       }
+
+       if (!device_can_wakeup(&pdev->dev))
+               device_init_wakeup(&pdev->dev, 1);
+
+       return 0;
+
+err_clear_platdata:
+       platform_set_drvdata(pdev, NULL);
+       dev_set_drvdata(&rtc->dev, NULL);
+       rtc_device_unregister(rtc);
+err_iounmap:
+       iounmap(config->ioaddr);
+err_disable_clock:
+       clk_disable(config->clk);
+err_clk_put:
+       clk_put(config->clk);
+err_kfree:
+       kfree(config);
+err_release_region:
+       release_mem_region(res->start, resource_size(res));
+
+       return status;
+}
+
+static int __devexit spear_rtc_remove(struct platform_device *pdev)
+{
+       struct rtc_device *rtc = platform_get_drvdata(pdev);
+       struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
+       int irq;
+       struct resource *res;
+
+       /* leave rtc running, but disable irqs */
+       spear_rtc_disable_interrupt(config);
+       device_init_wakeup(&pdev->dev, 0);
+       irq = platform_get_irq(pdev, 0);
+       if (irq >= 0)
+               free_irq(irq, rtc);
+       clk_disable(config->clk);
+       clk_put(config->clk);
+       iounmap(config->ioaddr);
+       kfree(config);
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (res)
+               release_mem_region(res->start, resource_size(res));
+       platform_set_drvdata(pdev, NULL);
+       dev_set_drvdata(&rtc->dev, NULL);
+       rtc_device_unregister(rtc);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int spear_rtc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+       struct rtc_device *rtc = platform_get_drvdata(pdev);
+       struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
+       int irq;
+
+       irq = platform_get_irq(pdev, 0);
+       if (device_may_wakeup(&pdev->dev))
+               enable_irq_wake(irq);
+       else {
+               spear_rtc_disable_interrupt(config);
+               clk_disable(config->clk);
+       }
+
+       return 0;
+}
+
+static int spear_rtc_resume(struct platform_device *pdev)
+{
+       struct rtc_device *rtc = platform_get_drvdata(pdev);
+       struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
+       int irq;
+
+       irq = platform_get_irq(pdev, 0);
+
+       if (device_may_wakeup(&pdev->dev))
+               disable_irq_wake(irq);
+       else {
+               clk_enable(config->clk);
+               spear_rtc_enable_interrupt(config);
+       }
+
+       return 0;
+}
+
+#else
+#define spear_rtc_suspend      NULL
+#define spear_rtc_resume       NULL
+#endif
+
+static void spear_rtc_shutdown(struct platform_device *pdev)
+{
+       struct rtc_device *rtc = platform_get_drvdata(pdev);
+       struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev);
+
+       spear_rtc_disable_interrupt(config);
+       clk_disable(config->clk);
+}
+
+static struct platform_driver spear_rtc_driver = {
+       .probe = spear_rtc_probe,
+       .remove = __devexit_p(spear_rtc_remove),
+       .suspend = spear_rtc_suspend,
+       .resume = spear_rtc_resume,
+       .shutdown = spear_rtc_shutdown,
+       .driver = {
+               .name = "rtc-spear",
+       },
+};
+
+static int __init rtc_init(void)
+{
+       return platform_driver_register(&spear_rtc_driver);
+}
+module_init(rtc_init);
+
+static void __exit rtc_exit(void)
+{
+       platform_driver_unregister(&spear_rtc_driver);
+}
+module_exit(rtc_exit);
+
+MODULE_ALIAS("platform:rtc-spear");
+MODULE_AUTHOR("Rajeev Kumar <rajeev-dlh.kumar@st.com>");
+MODULE_DESCRIPTION("ST SPEAr Realtime Clock Driver (RTC)");
+MODULE_LICENSE("GPL");
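The TIME_REG/DATE_REG layout above packs plain BCD digits into fixed bit fields. As a
rough worked example (made-up wall-clock value, reusing the shift macros defined in this
driver), spear_rtc_set_time() would encode 12:34:56 as:

    /* bin2bcd(56) = 0x56, bin2bcd(34) = 0x34, bin2bcd(12) = 0x12 */
    unsigned int time = (bin2bcd(56) << SECOND_SHIFT) |
                        (bin2bcd(34) << MINUTE_SHIFT) |
                        (bin2bcd(12) << HOUR_SHIFT);
    /* time == 0x00123456: hours in bits 16-21, minutes in 8-14, seconds in 0-6 */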
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
new file mode 100644 (file)
index 0000000..b8bc862
--- /dev/null
@@ -0,0 +1,366 @@
+/*
+ * drivers/rtc/rtc-vt8500.c
+ *
+ *  Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
+ *
+ * Based on rtc-pxa.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/rtc.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/bcd.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/*
+ * Register definitions
+ */
+#define VT8500_RTC_TS          0x00    /* Time set */
+#define VT8500_RTC_DS          0x04    /* Date set */
+#define VT8500_RTC_AS          0x08    /* Alarm set */
+#define VT8500_RTC_CR          0x0c    /* Control */
+#define VT8500_RTC_TR          0x10    /* Time read */
+#define VT8500_RTC_DR          0x14    /* Date read */
+#define VT8500_RTC_WS          0x18    /* Write status */
+#define VT8500_RTC_CL          0x20    /* Calibration */
+#define VT8500_RTC_IS          0x24    /* Interrupt status */
+#define VT8500_RTC_ST          0x28    /* Status */
+
+#define INVALID_TIME_BIT       (1 << 31)
+
+#define DATE_CENTURY_S         19
+#define DATE_YEAR_S            11
+#define DATE_YEAR_MASK         (0xff << DATE_YEAR_S)
+#define DATE_MONTH_S           6
+#define DATE_MONTH_MASK                (0x1f << DATE_MONTH_S)
+#define DATE_DAY_MASK          0x3f
+
+#define TIME_DOW_S             20
+#define TIME_DOW_MASK          (0x07 << TIME_DOW_S)
+#define TIME_HOUR_S            14
+#define TIME_HOUR_MASK         (0x3f << TIME_HOUR_S)
+#define TIME_MIN_S             7
+#define TIME_MIN_MASK          (0x7f << TIME_MIN_S)
+#define TIME_SEC_MASK          0x7f
+
+#define ALARM_DAY_S            20
+#define ALARM_DAY_MASK         (0x3f << ALARM_DAY_S)
+
+#define ALARM_DAY_BIT          (1 << 29)
+#define ALARM_HOUR_BIT         (1 << 28)
+#define ALARM_MIN_BIT          (1 << 27)
+#define ALARM_SEC_BIT          (1 << 26)
+
+#define ALARM_ENABLE_MASK      (ALARM_DAY_BIT \
+                               | ALARM_HOUR_BIT \
+                               | ALARM_MIN_BIT \
+                               | ALARM_SEC_BIT)
+
+#define VT8500_RTC_CR_ENABLE   (1 << 0)        /* Enable RTC */
+#define VT8500_RTC_CR_24H      (1 << 1)        /* 24h time format */
+#define VT8500_RTC_CR_SM_ENABLE        (1 << 2)        /* Enable periodic irqs */
+#define VT8500_RTC_CR_SM_SEC   (1 << 3)        /* 0: 1Hz/60, 1: 1Hz */
+#define VT8500_RTC_CR_CALIB    (1 << 4)        /* Enable calibration */
+
+struct vt8500_rtc {
+       void __iomem            *regbase;
+       struct resource         *res;
+       int                     irq_alarm;
+       int                     irq_hz;
+       struct rtc_device       *rtc;
+       spinlock_t              lock;           /* Protects this structure */
+};
+
+static irqreturn_t vt8500_rtc_irq(int irq, void *dev_id)
+{
+       struct vt8500_rtc *vt8500_rtc = dev_id;
+       u32 isr;
+       unsigned long events = 0;
+
+       spin_lock(&vt8500_rtc->lock);
+
+       /* clear interrupt sources */
+       isr = readl(vt8500_rtc->regbase + VT8500_RTC_IS);
+       writel(isr, vt8500_rtc->regbase + VT8500_RTC_IS);
+
+       spin_unlock(&vt8500_rtc->lock);
+
+       if (isr & 1)
+               events |= RTC_AF | RTC_IRQF;
+
+       /* Only second/minute interrupts are supported */
+       if (isr & 2)
+               events |= RTC_UF | RTC_IRQF;
+
+       rtc_update_irq(vt8500_rtc->rtc, 1, events);
+
+       return IRQ_HANDLED;
+}
+
+static int vt8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+       struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
+       u32 date, time;
+
+       date = readl(vt8500_rtc->regbase + VT8500_RTC_DR);
+       time = readl(vt8500_rtc->regbase + VT8500_RTC_TR);
+
+       tm->tm_sec = bcd2bin(time & TIME_SEC_MASK);
+       tm->tm_min = bcd2bin((time & TIME_MIN_MASK) >> TIME_MIN_S);
+       tm->tm_hour = bcd2bin((time & TIME_HOUR_MASK) >> TIME_HOUR_S);
+       tm->tm_mday = bcd2bin(date & DATE_DAY_MASK);
+       tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S);
+       tm->tm_year = bcd2bin((date & DATE_YEAR_MASK) >> DATE_YEAR_S)
+                       + ((date >> DATE_CENTURY_S) & 1 ? 200 : 100);
+       tm->tm_wday = (time & TIME_DOW_MASK) >> TIME_DOW_S;
+
+       return 0;
+}
+
+static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+       struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
+
+       if (tm->tm_year < 100) {
+               dev_warn(dev, "Only years 2000-2199 are supported by the "
+                             "hardware!\n");
+               return -EINVAL;
+       }
+
+       writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S)
+               | (bin2bcd(tm->tm_mon) << DATE_MONTH_S)
+               | (bin2bcd(tm->tm_mday)),
+               vt8500_rtc->regbase + VT8500_RTC_DS);
+       writel((bin2bcd(tm->tm_wday) << TIME_DOW_S)
+               | (bin2bcd(tm->tm_hour) << TIME_HOUR_S)
+               | (bin2bcd(tm->tm_min) << TIME_MIN_S)
+               | (bin2bcd(tm->tm_sec)),
+               vt8500_rtc->regbase + VT8500_RTC_TS);
+
+       return 0;
+}
+
+static int vt8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+       struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
+       u32 isr, alarm;
+
+       alarm = readl(vt8500_rtc->regbase + VT8500_RTC_AS);
+       isr = readl(vt8500_rtc->regbase + VT8500_RTC_IS);
+
+       alrm->time.tm_mday = bcd2bin((alarm & ALARM_DAY_MASK) >> ALARM_DAY_S);
+       alrm->time.tm_hour = bcd2bin((alarm & TIME_HOUR_MASK) >> TIME_HOUR_S);
+       alrm->time.tm_min = bcd2bin((alarm & TIME_MIN_MASK) >> TIME_MIN_S);
+       alrm->time.tm_sec = bcd2bin((alarm & TIME_SEC_MASK));
+
+       alrm->enabled = (alarm & ALARM_ENABLE_MASK) ? 1 : 0;
+
+       alrm->pending = (isr & 1) ? 1 : 0;
+       return rtc_valid_tm(&alrm->time);
+}
+
+static int vt8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+       struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
+
+       writel((alrm->enabled ? ALARM_ENABLE_MASK : 0)
+               | (bin2bcd(alrm->time.tm_mday) << ALARM_DAY_S)
+               | (bin2bcd(alrm->time.tm_hour) << TIME_HOUR_S)
+               | (bin2bcd(alrm->time.tm_min) << TIME_MIN_S)
+               | (bin2bcd(alrm->time.tm_sec)),
+               vt8500_rtc->regbase + VT8500_RTC_AS);
+
+       return 0;
+}
+
+static int vt8500_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+       struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
+       unsigned long tmp = readl(vt8500_rtc->regbase + VT8500_RTC_AS);
+
+       if (enabled)
+               tmp |= ALARM_ENABLE_MASK;
+       else
+               tmp &= ~ALARM_ENABLE_MASK;
+
+       writel(tmp, vt8500_rtc->regbase + VT8500_RTC_AS);
+       return 0;
+}
+
+static int vt8500_update_irq_enable(struct device *dev, unsigned int enabled)
+{
+       struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev);
+       unsigned long tmp = readl(vt8500_rtc->regbase + VT8500_RTC_CR);
+
+       if (enabled)
+               tmp |= VT8500_RTC_CR_SM_SEC | VT8500_RTC_CR_SM_ENABLE;
+       else
+               tmp &= ~VT8500_RTC_CR_SM_ENABLE;
+
+       writel(tmp, vt8500_rtc->regbase + VT8500_RTC_CR);
+       return 0;
+}
+
+static const struct rtc_class_ops vt8500_rtc_ops = {
+       .read_time = vt8500_rtc_read_time,
+       .set_time = vt8500_rtc_set_time,
+       .read_alarm = vt8500_rtc_read_alarm,
+       .set_alarm = vt8500_rtc_set_alarm,
+       .alarm_irq_enable = vt8500_alarm_irq_enable,
+       .update_irq_enable = vt8500_update_irq_enable,
+};
+
+static int __devinit vt8500_rtc_probe(struct platform_device *pdev)
+{
+       struct vt8500_rtc *vt8500_rtc;
+       int ret;
+
+       vt8500_rtc = kzalloc(sizeof(struct vt8500_rtc), GFP_KERNEL);
+       if (!vt8500_rtc)
+               return -ENOMEM;
+
+       spin_lock_init(&vt8500_rtc->lock);
+       platform_set_drvdata(pdev, vt8500_rtc);
+
+       vt8500_rtc->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!vt8500_rtc->res) {
+               dev_err(&pdev->dev, "No I/O memory resource defined\n");
+               ret = -ENXIO;
+               goto err_free;
+       }
+
+       vt8500_rtc->irq_alarm = platform_get_irq(pdev, 0);
+       if (vt8500_rtc->irq_alarm < 0) {
+               dev_err(&pdev->dev, "No alarm IRQ resource defined\n");
+               ret = -ENXIO;
+               goto err_free;
+       }
+
+       vt8500_rtc->irq_hz = platform_get_irq(pdev, 1);
+       if (vt8500_rtc->irq_hz < 0) {
+               dev_err(&pdev->dev, "No 1Hz IRQ resource defined\n");
+               ret = -ENXIO;
+               goto err_free;
+       }
+
+       vt8500_rtc->res = request_mem_region(vt8500_rtc->res->start,
+                                            resource_size(vt8500_rtc->res),
+                                            "vt8500-rtc");
+       if (vt8500_rtc->res == NULL) {
+               dev_err(&pdev->dev, "failed to request I/O memory\n");
+               ret = -EBUSY;
+               goto err_free;
+       }
+
+       vt8500_rtc->regbase = ioremap(vt8500_rtc->res->start,
+                                     resource_size(vt8500_rtc->res));
+       if (!vt8500_rtc->regbase) {
+               dev_err(&pdev->dev, "Unable to map RTC I/O memory\n");
+               ret = -EBUSY;
+               goto err_release;
+       }
+
+       /* Enable the second/minute interrupt generation and enable RTC */
+       writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H
+               | VT8500_RTC_CR_SM_ENABLE | VT8500_RTC_CR_SM_SEC,
+              vt8500_rtc->regbase + VT8500_RTC_CR);
+
+       vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev,
+                                             &vt8500_rtc_ops, THIS_MODULE);
+       if (IS_ERR(vt8500_rtc->rtc)) {
+               ret = PTR_ERR(vt8500_rtc->rtc);
+               dev_err(&pdev->dev,
+                       "Failed to register RTC device -> %d\n", ret);
+               goto err_unmap;
+       }
+
+       ret = request_irq(vt8500_rtc->irq_hz, vt8500_rtc_irq, 0,
+                         "rtc 1Hz", vt8500_rtc);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "can't get irq %i, err %d\n",
+                       vt8500_rtc->irq_hz, ret);
+               goto err_unreg;
+       }
+
+       ret = request_irq(vt8500_rtc->irq_alarm, vt8500_rtc_irq, 0,
+                         "rtc alarm", vt8500_rtc);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "can't get irq %i, err %d\n",
+                       vt8500_rtc->irq_alarm, ret);
+               goto err_free_hz;
+       }
+
+       return 0;
+
+err_free_hz:
+       free_irq(vt8500_rtc->irq_hz, vt8500_rtc);
+err_unreg:
+       rtc_device_unregister(vt8500_rtc->rtc);
+err_unmap:
+       iounmap(vt8500_rtc->regbase);
+err_release:
+       release_mem_region(vt8500_rtc->res->start,
+                          resource_size(vt8500_rtc->res));
+err_free:
+       kfree(vt8500_rtc);
+       return ret;
+}
+
+static int __devexit vt8500_rtc_remove(struct platform_device *pdev)
+{
+       struct vt8500_rtc *vt8500_rtc = platform_get_drvdata(pdev);
+
+       free_irq(vt8500_rtc->irq_alarm, vt8500_rtc);
+       free_irq(vt8500_rtc->irq_hz, vt8500_rtc);
+
+       rtc_device_unregister(vt8500_rtc->rtc);
+
+       /* Disable alarm matching */
+       writel(0, vt8500_rtc->regbase + VT8500_RTC_IS);
+       iounmap(vt8500_rtc->regbase);
+       release_mem_region(vt8500_rtc->res->start,
+                          resource_size(vt8500_rtc->res));
+
+       kfree(vt8500_rtc);
+       platform_set_drvdata(pdev, NULL);
+
+       return 0;
+}
+
+static struct platform_driver vt8500_rtc_driver = {
+       .probe          = vt8500_rtc_probe,
+       .remove         = __devexit_p(vt8500_rtc_remove),
+       .driver         = {
+               .name   = "vt8500-rtc",
+               .owner  = THIS_MODULE,
+       },
+};
+
+static int __init vt8500_rtc_init(void)
+{
+       return platform_driver_register(&vt8500_rtc_driver);
+}
+module_init(vt8500_rtc_init);
+
+static void __exit vt8500_rtc_exit(void)
+{
+       platform_driver_unregister(&vt8500_rtc_driver);
+}
+module_exit(vt8500_rtc_exit);
+
+MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
+MODULE_DESCRIPTION("VIA VT8500 SoC Realtime Clock Driver (RTC)");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:vt8500-rtc");
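The year handling above keeps only two BCD digits plus a century-select bit, which is
how the read path covers 2000-2199. A short sketch with a purely hypothetical register
value illustrates the decode done in vt8500_rtc_read_time():

    /* Hypothetical DATE_REG contents: year digits 0x23, century bit clear */
    u32 date = 0x23 << DATE_YEAR_S;
    int year = bcd2bin((date & DATE_YEAR_MASK) >> DATE_YEAR_S)
                    + ((date >> DATE_CENTURY_S) & 1 ? 200 : 100);
    /* year == 123, i.e. tm_year for calendar year 2023 */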
index 85dddb1e4126be512d843ceef08123fd86cc1b0d..46784b83c5c4e1a4fcac2184dcebd6e37d014da3 100644 (file)
@@ -24,7 +24,7 @@
 #include <asm/debug.h>
 #include <asm/ebcdic.h>
 #include <asm/io.h>
-#include <asm/s390_ext.h>
+#include <asm/irq.h>
 #include <asm/vtoc.h>
 #include <asm/diag.h>
 
@@ -642,7 +642,7 @@ dasd_diag_init(void)
        }
        ASCEBC(dasd_diag_discipline.ebcname, 4);
 
-       ctl_set_bit(0, 9);
+       service_subclass_irq_register();
        register_external_interrupt(0x2603, dasd_ext_handler);
        dasd_diag_discipline_pointer = &dasd_diag_discipline;
        return 0;
@@ -652,7 +652,7 @@ static void __exit
 dasd_diag_cleanup(void)
 {
        unregister_external_interrupt(0x2603, dasd_ext_handler);
-       ctl_clear_bit(0, 9);
+       service_subclass_irq_unregister();
        dasd_diag_discipline_pointer = NULL;
 }
 
index b76c61f824857d4ed05d3a38e9657f078098ef90..eaa7e78186f969f0176a4c9f067688e1bf981bab 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/suspend.h>
 #include <linux/completion.h>
 #include <linux/platform_device.h>
-#include <asm/s390_ext.h>
 #include <asm/types.h>
 #include <asm/irq.h>
 
@@ -885,12 +884,12 @@ sclp_check_interface(void)
                spin_unlock_irqrestore(&sclp_lock, flags);
                /* Enable service-signal interruption - needs to happen
                 * with IRQs enabled. */
-               ctl_set_bit(0, 9);
+               service_subclass_irq_register();
                /* Wait for signal from interrupt or timeout */
                sclp_sync_wait();
                /* Disable service-signal interruption - needs to happen
                 * with IRQs enabled. */
-               ctl_clear_bit(0,9);
+               service_subclass_irq_unregister();
                spin_lock_irqsave(&sclp_lock, flags);
                del_timer(&sclp_request_timer);
                if (sclp_init_req.status == SCLP_REQ_DONE &&
@@ -1070,7 +1069,7 @@ sclp_init(void)
        spin_unlock_irqrestore(&sclp_lock, flags);
        /* Enable service-signal external interruption - needs to happen with
         * IRQs enabled. */
-       ctl_set_bit(0, 9);
+       service_subclass_irq_register();
        sclp_init_mask(1);
        return 0;
 
index 607998f0b7d8580c05c4fec47cf672bfad9d3c14..aec60d55b10dc238e5b3529507fe9afc43db173e 100644 (file)
@@ -25,7 +25,6 @@
 #include <asm/kvm_para.h>
 #include <asm/kvm_virtio.h>
 #include <asm/setup.h>
-#include <asm/s390_ext.h>
 #include <asm/irq.h>
 
 #define VIRTIO_SUBCODE_64 0x0D00
@@ -441,7 +440,7 @@ static int __init kvm_devices_init(void)
 
        INIT_WORK(&hotplug_work, hotplug_devices);
 
-       ctl_set_bit(0, 9);
+       service_subclass_irq_register();
        register_external_interrupt(0x2603, kvm_extint_handler);
 
        scan_devices();
index 4ff26521d75fe7982797b995319438a7eacd297f..3382475dc22dcf1ac4167aed67da6056539b2131 100644 (file)
@@ -59,7 +59,6 @@
 #ifndef AAC_DRIVER_BRANCH
 #define AAC_DRIVER_BRANCH              ""
 #endif
-#define AAC_DRIVER_BUILD_DATE          __DATE__ " " __TIME__
 #define AAC_DRIVERNAME                 "aacraid"
 
 #ifdef AAC_DRIVER_BUILD
@@ -67,7 +66,7 @@
 #define str(x) _str(x)
 #define AAC_DRIVER_FULL_VERSION        AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH
 #else
-#define AAC_DRIVER_FULL_VERSION        AAC_DRIVER_VERSION AAC_DRIVER_BRANCH " " AAC_DRIVER_BUILD_DATE
+#define AAC_DRIVER_FULL_VERSION        AAC_DRIVER_VERSION AAC_DRIVER_BRANCH
 #endif
 
 MODULE_AUTHOR("Red Hat Inc and Adaptec");
index 92109b126391ded6206c5d80dabd457ec03891e3..112f1bec7756d4ac2da9ed16790a746d8104a110 100644 (file)
@@ -2227,7 +2227,7 @@ static int in2000_proc_info(struct Scsi_Host *instance, char *buf, char **start,
        bp = buf;
        *bp = '\0';
        if (hd->proc & PR_VERSION) {
-               sprintf(tbuf, "\nVersion %s - %s. Compiled %s %s", IN2000_VERSION, IN2000_DATE, __DATE__, __TIME__);
+               sprintf(tbuf, "\nVersion %s - %s.", IN2000_VERSION, IN2000_DATE);
                strcat(bp, tbuf);
        }
        if (hd->proc & PR_INFO) {
index 7f636b118287a7bb0f9e3ac7d1cdecb27ed0598d..fca6a895307093a4630530b83e03e66b7fd447cd 100644 (file)
@@ -4252,8 +4252,8 @@ static ssize_t pmcraid_show_drv_version(
        char *buf
 )
 {
-       return snprintf(buf, PAGE_SIZE, "version: %s, build date: %s\n",
-                       PMCRAID_DRIVER_VERSION, PMCRAID_DRIVER_DATE);
+       return snprintf(buf, PAGE_SIZE, "version: %s\n",
+                       PMCRAID_DRIVER_VERSION);
 }
 
 static struct device_attribute pmcraid_driver_version_attr = {
@@ -6096,9 +6096,8 @@ static int __init pmcraid_init(void)
        dev_t dev;
        int error;
 
-       pmcraid_info("%s Device Driver version: %s %s\n",
-                        PMCRAID_DRIVER_NAME,
-                        PMCRAID_DRIVER_VERSION, PMCRAID_DRIVER_DATE);
+       pmcraid_info("%s Device Driver version: %s\n",
+                        PMCRAID_DRIVER_NAME, PMCRAID_DRIVER_VERSION);
 
        error = alloc_chrdev_region(&dev, 0,
                                    PMCRAID_MAX_ADAPTERS,
index 34e4c915002ef37332afc6ea949e94d90a8b22b3..f920baf3ff24638f6c28a1ecf36bc2d70916f9c4 100644 (file)
@@ -43,7 +43,6 @@
 #define PMCRAID_DRIVER_NAME            "PMC MaxRAID"
 #define PMCRAID_DEVFILE                        "pmcsas"
 #define PMCRAID_DRIVER_VERSION         "1.0.3"
-#define PMCRAID_DRIVER_DATE            __DATE__
 
 #define PMCRAID_FW_VERSION_1           0x002
 
index 97ae716134d05fb1ae76732925e480e736592081..c0ee4ea28a19707896c3a74021c4e231bd5d4880 100644 (file)
@@ -2051,8 +2051,7 @@ wd33c93_init(struct Scsi_Host *instance, const wd33c93_regs regs,
        for (i = 0; i < MAX_SETUP_ARGS; i++)
                printk("%s,", setup_args[i]);
        printk("\n");
-       printk("           Version %s - %s, Compiled %s at %s\n",
-              WD33C93_VERSION, WD33C93_DATE, __DATE__, __TIME__);
+       printk("           Version %s - %s\n", WD33C93_VERSION, WD33C93_DATE);
 }
 
 int
@@ -2132,8 +2131,8 @@ wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off
        bp = buf;
        *bp = '\0';
        if (hd->proc & PR_VERSION) {
-               sprintf(tbuf, "\nVersion %s - %s. Compiled %s %s",
-                       WD33C93_VERSION, WD33C93_DATE, __DATE__, __TIME__);
+               sprintf(tbuf, "\nVersion %s - %s.",
+                       WD33C93_VERSION, WD33C93_DATE);
                strcat(bp, tbuf);
        }
        if (hd->proc & PR_INFO) {
index fc14b8dea0d71daf1a72f0f17dd5228b07c7ba33..fbd96b29530d524cd6c9fb761b948ce278e1554b 100644 (file)
@@ -271,8 +271,8 @@ config SPI_ORION
          This enables using the SPI master controller on the Orion chips.
 
 config SPI_PL022
-       tristate "ARM AMBA PL022 SSP controller (EXPERIMENTAL)"
-       depends on ARM_AMBA && EXPERIMENTAL
+       tristate "ARM AMBA PL022 SSP controller"
+       depends on ARM_AMBA
        default y if MACH_U300
        default y if ARCH_REALVIEW
        default y if INTEGRATOR_IMPD1
index 08de58e7f59f7f6521ffb5122078103473c66117..6a9e58dd36c7e17cd5d4a7bbd58ed10ab4476f32 100644 (file)
  * GNU General Public License for more details.
  */
 
-/*
- * TODO:
- * - add timeout on polled transfers
- */
-
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/device.h>
 
 #define CLEAR_ALL_INTERRUPTS  0x3
 
+#define SPI_POLLING_TIMEOUT 1000
+
 
 /*
  * The type of reading going on on this chip
@@ -1063,7 +1060,7 @@ static int __init pl022_dma_probe(struct pl022 *pl022)
                                            pl022->master_info->dma_filter,
                                            pl022->master_info->dma_rx_param);
        if (!pl022->dma_rx_channel) {
-               dev_err(&pl022->adev->dev, "no RX DMA channel!\n");
+               dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n");
                goto err_no_rxchan;
        }
 
@@ -1071,13 +1068,13 @@ static int __init pl022_dma_probe(struct pl022 *pl022)
                                            pl022->master_info->dma_filter,
                                            pl022->master_info->dma_tx_param);
        if (!pl022->dma_tx_channel) {
-               dev_err(&pl022->adev->dev, "no TX DMA channel!\n");
+               dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n");
                goto err_no_txchan;
        }
 
        pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!pl022->dummypage) {
-               dev_err(&pl022->adev->dev, "no DMA dummypage!\n");
+               dev_dbg(&pl022->adev->dev, "no DMA dummypage!\n");
                goto err_no_dummypage;
        }
 
@@ -1093,6 +1090,8 @@ err_no_txchan:
        dma_release_channel(pl022->dma_rx_channel);
        pl022->dma_rx_channel = NULL;
 err_no_rxchan:
+       dev_err(&pl022->adev->dev,
+               "Failed to set up DMA, continuing without DMA\n");
        return -ENODEV;
 }
 
@@ -1378,6 +1377,7 @@ static void do_polling_transfer(struct pl022 *pl022)
        struct spi_transfer *transfer = NULL;
        struct spi_transfer *previous = NULL;
        struct chip_data *chip;
+       unsigned long time, timeout;
 
        chip = pl022->cur_chip;
        message = pl022->cur_msg;
@@ -1415,9 +1415,19 @@ static void do_polling_transfer(struct pl022 *pl022)
                       SSP_CR1(pl022->virtbase));
 
                dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");
-               /* FIXME: insert a timeout so we don't hang here indefinitely */
-               while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end)
+
+               timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT);
+               while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) {
+                       time = jiffies;
                        readwriter(pl022);
+                       if (time_after(time, timeout)) {
+                               dev_warn(&pl022->adev->dev,
+                                        "%s: timeout!\n", __func__);
+                               message->state = STATE_ERROR;
+                               goto out;
+                       }
+                       cpu_relax();
+               }
 
                /* Update total byte transferred */
                message->actual_length += pl022->cur_transfer->len;
@@ -1426,7 +1436,7 @@ static void do_polling_transfer(struct pl022 *pl022)
                /* Move to next transfer */
                message->state = next_transfer(pl022);
        }
-
+out:
        /* Handle end of message */
        if (message->state == STATE_DONE)
                message->status = 0;
@@ -2107,7 +2117,7 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
        if (platform_info->enable_dma) {
                status = pl022_dma_probe(pl022);
                if (status != 0)
-                       goto err_no_dma;
+                       platform_info->enable_dma = 0;
        }
 
        /* Initialize and start queue */
@@ -2143,7 +2153,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
  err_init_queue:
        destroy_queue(pl022);
        pl022_dma_remove(pl022);
- err_no_dma:
        free_irq(adev->irq[0], pl022);
  err_no_irq:
        clk_put(pl022->clk);
index 871e337c917fe7313621e651be0da62891545b05..919fa9d9e16bab34fef8af03a5a6286ac9fad394 100644 (file)
@@ -58,8 +58,6 @@ struct chip_data {
        u8 bits_per_word;
        u16 clk_div;            /* baud rate divider */
        u32 speed_hz;           /* baud rate */
-       int (*write)(struct dw_spi *dws);
-       int (*read)(struct dw_spi *dws);
        void (*cs_control)(u32 command);
 };
 
@@ -162,107 +160,70 @@ static inline void mrst_spi_debugfs_remove(struct dw_spi *dws)
 }
 #endif /* CONFIG_DEBUG_FS */
 
-static void wait_till_not_busy(struct dw_spi *dws)
+/* Return the max entries we can fill into tx fifo */
+static inline u32 tx_max(struct dw_spi *dws)
 {
-       unsigned long end = jiffies + 1 + usecs_to_jiffies(5000);
+       u32 tx_left, tx_room, rxtx_gap;
 
-       while (time_before(jiffies, end)) {
-               if (!(dw_readw(dws, sr) & SR_BUSY))
-                       return;
-               cpu_relax();
-       }
-       dev_err(&dws->master->dev,
-               "DW SPI: Status keeps busy for 5000us after a read/write!\n");
-}
-
-static void flush(struct dw_spi *dws)
-{
-       while (dw_readw(dws, sr) & SR_RF_NOT_EMPT) {
-               dw_readw(dws, dr);
-               cpu_relax();
-       }
-
-       wait_till_not_busy(dws);
-}
-
-static int null_writer(struct dw_spi *dws)
-{
-       u8 n_bytes = dws->n_bytes;
+       tx_left = (dws->tx_end - dws->tx) / dws->n_bytes;
+       tx_room = dws->fifo_len - dw_readw(dws, txflr);
 
-       if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
-               || (dws->tx == dws->tx_end))
-               return 0;
-       dw_writew(dws, dr, 0);
-       dws->tx += n_bytes;
+       /*
+        * Another concern is the tx/rx mismatch: we once thought of
+        * using (dws->fifo_len - rxflr - txflr) as the upper bound for
+        * tx, but that does not cover data that has already left the
+        * tx/rx FIFOs and sits in the shift registers, so the limit is
+        * enforced from the software side instead.
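+        *
+        * Worked example (illustration only): with a 16-entry FIFO, if
+        * tx has run 10 entries ahead of rx, rxtx_gap is 10 and at most
+        * fifo_len - rxtx_gap = 6 further entries may be queued, so no
+        * more than fifo_len entries are ever in flight at once.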
+        */
+       rxtx_gap =  ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx))
+                       / dws->n_bytes;
 
-       wait_till_not_busy(dws);
-       return 1;
+       return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap));
 }
 
-static int null_reader(struct dw_spi *dws)
+/* Return the max entries we should read out of rx fifo */
+static inline u32 rx_max(struct dw_spi *dws)
 {
-       u8 n_bytes = dws->n_bytes;
+       u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes;
 
-       while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
-               && (dws->rx < dws->rx_end)) {
-               dw_readw(dws, dr);
-               dws->rx += n_bytes;
-       }
-       wait_till_not_busy(dws);
-       return dws->rx == dws->rx_end;
+       return min(rx_left, (u32)dw_readw(dws, rxflr));
 }
 
-static int u8_writer(struct dw_spi *dws)
+static void dw_writer(struct dw_spi *dws)
 {
-       if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
-               || (dws->tx == dws->tx_end))
-               return 0;
+       u32 max = tx_max(dws);
+       u16 txw = 0;
 
-       dw_writew(dws, dr, *(u8 *)(dws->tx));
-       ++dws->tx;
-
-       wait_till_not_busy(dws);
-       return 1;
-}
-
-static int u8_reader(struct dw_spi *dws)
-{
-       while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
-               && (dws->rx < dws->rx_end)) {
-               *(u8 *)(dws->rx) = dw_readw(dws, dr);
-               ++dws->rx;
+       while (max--) {
+               /* Set the tx word if the transfer's original "tx" is not null */
+               if (dws->tx_end - dws->len) {
+                       if (dws->n_bytes == 1)
+                               txw = *(u8 *)(dws->tx);
+                       else
+                               txw = *(u16 *)(dws->tx);
+               }
+               dw_writew(dws, dr, txw);
+               dws->tx += dws->n_bytes;
        }
-
-       wait_till_not_busy(dws);
-       return dws->rx == dws->rx_end;
 }
 
-static int u16_writer(struct dw_spi *dws)
+static void dw_reader(struct dw_spi *dws)
 {
-       if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL)
-               || (dws->tx == dws->tx_end))
-               return 0;
+       u32 max = rx_max(dws);
+       u16 rxw;
 
-       dw_writew(dws, dr, *(u16 *)(dws->tx));
-       dws->tx += 2;
-
-       wait_till_not_busy(dws);
-       return 1;
-}
-
-static int u16_reader(struct dw_spi *dws)
-{
-       u16 temp;
-
-       while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT)
-               && (dws->rx < dws->rx_end)) {
-               temp = dw_readw(dws, dr);
-               *(u16 *)(dws->rx) = temp;
-               dws->rx += 2;
+       while (max--) {
+               rxw = dw_readw(dws, dr);
+               /* Only store rx data if the transfer's original "rx" is not null */
+               if (dws->rx_end - dws->len) {
+                       if (dws->n_bytes == 1)
+                               *(u8 *)(dws->rx) = rxw;
+                       else
+                               *(u16 *)(dws->rx) = rxw;
+               }
+               dws->rx += dws->n_bytes;
        }
-
-       wait_till_not_busy(dws);
-       return dws->rx == dws->rx_end;
 }
 
 static void *next_transfer(struct dw_spi *dws)
@@ -334,8 +295,7 @@ static void giveback(struct dw_spi *dws)
 
 static void int_error_stop(struct dw_spi *dws, const char *msg)
 {
-       /* Stop and reset hw */
-       flush(dws);
+       /* Stop the hw */
        spi_enable_chip(dws, 0);
 
        dev_err(&dws->master->dev, "%s\n", msg);
@@ -362,35 +322,28 @@ EXPORT_SYMBOL_GPL(dw_spi_xfer_done);
 
 static irqreturn_t interrupt_transfer(struct dw_spi *dws)
 {
-       u16 irq_status, irq_mask = 0x3f;
-       u32 int_level = dws->fifo_len / 2;
-       u32 left;
+       u16 irq_status = dw_readw(dws, isr);
 
-       irq_status = dw_readw(dws, isr) & irq_mask;
        /* Error handling */
        if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
                dw_readw(dws, txoicr);
                dw_readw(dws, rxoicr);
                dw_readw(dws, rxuicr);
-               int_error_stop(dws, "interrupt_transfer: fifo overrun");
+               int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
                return IRQ_HANDLED;
        }
 
+       dw_reader(dws);
+       if (dws->rx_end == dws->rx) {
+               spi_mask_intr(dws, SPI_INT_TXEI);
+               dw_spi_xfer_done(dws);
+               return IRQ_HANDLED;
+       }
        if (irq_status & SPI_INT_TXEI) {
                spi_mask_intr(dws, SPI_INT_TXEI);
-
-               left = (dws->tx_end - dws->tx) / dws->n_bytes;
-               left = (left > int_level) ? int_level : left;
-
-               while (left--)
-                       dws->write(dws);
-               dws->read(dws);
-
-               /* Re-enable the IRQ if there is still data left to tx */
-               if (dws->tx_end > dws->tx)
-                       spi_umask_intr(dws, SPI_INT_TXEI);
-               else
-                       dw_spi_xfer_done(dws);
+               dw_writer(dws);
+               /* Always enable the TX irq; it will be disabled once RX is finished */
+               spi_umask_intr(dws, SPI_INT_TXEI);
        }
 
        return IRQ_HANDLED;
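A compressed sketch of the reworked interrupt flow above, with hypothetical callbacks standing in for the driver's helpers: every TX-empty interrupt first drains the RX FIFO, completes the transfer once all RX data has arrived, and otherwise refills the TX FIFO and re-arms the same interrupt.

/* Hypothetical callbacks standing in for dw_reader(), dw_writer(), etc. */
struct pump {
	int rx_done;			     /* set by drain_rx() when rx == rx_end */
	void (*drain_rx)(struct pump *p);    /* dw_reader() */
	void (*fill_tx)(struct pump *p);     /* dw_writer() */
	void (*mask_txei)(struct pump *p);   /* spi_mask_intr(dws, SPI_INT_TXEI) */
	void (*unmask_txei)(struct pump *p); /* spi_umask_intr(dws, SPI_INT_TXEI) */
	void (*complete)(struct pump *p);    /* dw_spi_xfer_done() */
};

static void txei_irq(struct pump *p)
{
	p->drain_rx(p);
	if (p->rx_done) {
		p->mask_txei(p);	/* all data received: stop the pump */
		p->complete(p);
		return;
	}
	p->mask_txei(p);		/* quiesce while the FIFO is refilled */
	p->fill_tx(p);
	p->unmask_txei(p);		/* the next TX-empty IRQ continues the loop */
}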
@@ -399,15 +352,13 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws)
 static irqreturn_t dw_spi_irq(int irq, void *dev_id)
 {
        struct dw_spi *dws = dev_id;
-       u16 irq_status, irq_mask = 0x3f;
+       u16 irq_status = dw_readw(dws, isr) & 0x3f;
 
-       irq_status = dw_readw(dws, isr) & irq_mask;
        if (!irq_status)
                return IRQ_NONE;
 
        if (!dws->cur_msg) {
                spi_mask_intr(dws, SPI_INT_TXEI);
-               /* Never fail */
                return IRQ_HANDLED;
        }
 
@@ -417,13 +368,11 @@ static irqreturn_t dw_spi_irq(int irq, void *dev_id)
 /* Must be called inside pump_transfers() */
 static void poll_transfer(struct dw_spi *dws)
 {
-       while (dws->write(dws))
-               dws->read(dws);
-       /*
-        * There is a possibility that the last word of a transaction
-        * will be lost if data is not ready. Re-read to solve this issue.
-        */
-       dws->read(dws);
+       do {
+               dw_writer(dws);
+               dw_reader(dws);
+               cpu_relax();
+       } while (dws->rx_end > dws->rx);
 
        dw_spi_xfer_done(dws);
 }
@@ -483,8 +432,6 @@ static void pump_transfers(unsigned long data)
        dws->tx_end = dws->tx + transfer->len;
        dws->rx = transfer->rx_buf;
        dws->rx_end = dws->rx + transfer->len;
-       dws->write = dws->tx ? chip->write : null_writer;
-       dws->read = dws->rx ? chip->read : null_reader;
        dws->cs_change = transfer->cs_change;
        dws->len = dws->cur_transfer->len;
        if (chip != dws->prev_chip)
@@ -518,20 +465,8 @@ static void pump_transfers(unsigned long data)
 
                switch (bits) {
                case 8:
-                       dws->n_bytes = 1;
-                       dws->dma_width = 1;
-                       dws->read = (dws->read != null_reader) ?
-                                       u8_reader : null_reader;
-                       dws->write = (dws->write != null_writer) ?
-                                       u8_writer : null_writer;
-                       break;
                case 16:
-                       dws->n_bytes = 2;
-                       dws->dma_width = 2;
-                       dws->read = (dws->read != null_reader) ?
-                                       u16_reader : null_reader;
-                       dws->write = (dws->write != null_writer) ?
-                                       u16_writer : null_writer;
+                       dws->n_bytes = dws->dma_width = bits >> 3;
                        break;
                default:
                        printk(KERN_ERR "MRST SPI0: unsupported bits:"
@@ -575,7 +510,7 @@ static void pump_transfers(unsigned long data)
                txint_level = dws->fifo_len / 2;
                txint_level = (templen > txint_level) ? txint_level : templen;
 
-               imask |= SPI_INT_TXEI;
+               imask |= SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI;
                dws->transfer_handler = interrupt_transfer;
        }
 
@@ -733,13 +668,9 @@ static int dw_spi_setup(struct spi_device *spi)
        if (spi->bits_per_word <= 8) {
                chip->n_bytes = 1;
                chip->dma_width = 1;
-               chip->read = u8_reader;
-               chip->write = u8_writer;
        } else if (spi->bits_per_word <= 16) {
                chip->n_bytes = 2;
                chip->dma_width = 2;
-               chip->read = u16_reader;
-               chip->write = u16_writer;
        } else {
                /* Never take >16b case for MRST SPIC */
                dev_err(&spi->dev, "invalid wordsize\n");
@@ -851,7 +782,6 @@ static void spi_hw_init(struct dw_spi *dws)
        spi_enable_chip(dws, 0);
        spi_mask_intr(dws, 0xff);
        spi_enable_chip(dws, 1);
-       flush(dws);
 
        /*
         * Try to detect the FIFO depth if not set by interface driver,
index b23e452adaf7d94cb09f255e5c745fdd0627500c..7a5e78d2a5cb98c1de9c04fbde849885e643dd4e 100644 (file)
@@ -137,8 +137,6 @@ struct dw_spi {
        u8                      max_bits_per_word;      /* maxim is 16b */
        u32                     dma_width;
        int                     cs_change;
-       int                     (*write)(struct dw_spi *dws);
-       int                     (*read)(struct dw_spi *dws);
        irqreturn_t             (*transfer_handler)(struct dw_spi *dws);
        void                    (*cs_control)(u32 command);
 
index 82b9a428c323eab0a0ea2fa4d8f033ba67025ee5..2e13a14bba3fdd62a3ded1711de7dc75fe785dfc 100644 (file)
@@ -1047,8 +1047,8 @@ static u8 *buf;
  * spi_{async,sync}() calls with dma-safe buffers.
  */
 int spi_write_then_read(struct spi_device *spi,
-               const u8 *txbuf, unsigned n_tx,
-               u8 *rxbuf, unsigned n_rx)
+               const void *txbuf, unsigned n_tx,
+               void *rxbuf, unsigned n_rx)
 {
        static DEFINE_MUTEX(lock);
 
index d5be18b3078c4e3440c45d1bd3fed1b66fc44491..3cd15f690f1667fcb8e274f74375ab7d28676599 100644 (file)
@@ -463,7 +463,7 @@ static int __devexit nuc900_spi_remove(struct platform_device *dev)
 
        platform_set_drvdata(dev, NULL);
 
-       spi_unregister_master(hw->master);
+       spi_bitbang_stop(&hw->bitbang);
 
        clk_disable(hw->clk);
        clk_put(hw->clk);
index 151a95e40653e8908e362968212abd182aeaa5ae..1a5fcabfd56502ad47786c41f219240d165e2589 100644 (file)
@@ -668,7 +668,7 @@ static int __exit s3c24xx_spi_remove(struct platform_device *dev)
 
        platform_set_drvdata(dev, NULL);
 
-       spi_unregister_master(hw->master);
+       spi_bitbang_stop(&hw->bitbang);
 
        clk_disable(hw->clk);
        clk_put(hw->clk);
index 869a07d375d6171e175e6eff5de1996acd29357a..9eedd71ad898c172f3bf34c57909530f55b91e19 100644 (file)
@@ -427,10 +427,10 @@ static int __devexit spi_sh_remove(struct platform_device *pdev)
 {
        struct spi_sh_data *ss = dev_get_drvdata(&pdev->dev);
 
+       spi_unregister_master(ss->master);
        destroy_workqueue(ss->workqueue);
        free_irq(ss->irq, ss);
        iounmap(ss->addr);
-       spi_master_put(ss->master);
 
        return 0;
 }
index 891e5909038c4e4dd20bd956b33dca8e317630ac..6c3aa6ecaade10fbf04fa7be2d29478977774cde 100644 (file)
@@ -578,6 +578,7 @@ static int __devexit spi_tegra_remove(struct platform_device *pdev)
        master = dev_get_drvdata(&pdev->dev);
        tspi = spi_master_get_devdata(master);
 
+       spi_unregister_master(master);
        tegra_dma_free_channel(tspi->rx_dma);
 
        dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN,
@@ -586,7 +587,6 @@ static int __devexit spi_tegra_remove(struct platform_device *pdev)
        clk_put(tspi->clk);
        iounmap(tspi->base);
 
-       spi_master_put(master);
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(r->start, (r->end - r->start) + 1);
 
index c69c6f2c2c5ceeadae69afa2efc60c636517a0e3..4d2c75df886c88621e43a453db625e59e7876ccf 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/interrupt.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
-#include <linux/mfd/core.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/spi_bitbang.h>
 #include <linux/spi/xilinx_spi.h>
@@ -471,7 +470,7 @@ static int __devinit xilinx_spi_probe(struct platform_device *dev)
        struct spi_master *master;
        u8 i;
 
-       pdata = mfd_get_data(dev);
+       pdata = dev->dev.platform_data;
        if (pdata) {
                num_cs = pdata->num_chipselect;
                little_endian = pdata->little_endian;
index 24a282bb89d4bd06fa2d0c82dd52333aad112c19..fb62b383f1de32079ef934986398c91052a0b88c 100644 (file)
@@ -381,7 +381,7 @@ struct rioVersion *RIOVersid(void)
 {
     strlcpy(stVersion.version, "RIO driver for linux V1.0",
            sizeof(stVersion.version));
-    strlcpy(stVersion.buildDate, __DATE__,
+    strlcpy(stVersion.buildDate, "Aug 15 2010",
            sizeof(stVersion.buildDate));
 
     return &stVersion;
index bfa05e8018237bede822973a2029c40caf57c014..c0e8f2eeb88697b390787d1f210411ae6521c1cd 100644 (file)
@@ -4096,8 +4096,7 @@ static int __init cy_init(void)
        if (!cy_serial_driver)
                goto err;
 
-       printk(KERN_INFO "Cyclades driver " CY_VERSION " (built %s %s)\n",
-                       __DATE__, __TIME__);
+       printk(KERN_INFO "Cyclades driver " CY_VERSION "\n");
 
        /* Initialize the tty_driver structure */
 
index b1aecc7bb32a263fbdc75035f65f156e1f4f2549..fd347ff34d071dc14972faaa2d3e89ef9260475c 100644 (file)
@@ -61,8 +61,7 @@
 #include <linux/delay.h>
 
 
-#define VERSION_STRING DRIVER_DESC " 2.1d (build date: " \
-                                       __DATE__ " " __TIME__ ")"
+#define VERSION_STRING DRIVER_DESC " 2.1d"
 
 /*    Macros definitions */
 
index bea5c215460c710901dcf18a00304d6d9be76f11..84db7321cce85e5d2be2a9cb3f7794c8f23ef0e3 100644 (file)
@@ -907,9 +907,10 @@ static int m32r_sio_request_port(struct uart_port *port)
        return ret;
 }
 
-static void m32r_sio_config_port(struct uart_port *port, int flags)
+static void m32r_sio_config_port(struct uart_port *port, int unused)
 {
        struct uart_sio_port *up = (struct uart_sio_port *)port;
+       unsigned long flags;
 
        spin_lock_irqsave(&up->port.lock, flags);
 
index 3f2e07011a48e533c00c9a23c0303e6ec6030490..cfb5aa72b1962916d87c6c873521d05112bda930 100644 (file)
@@ -100,6 +100,7 @@ struct twl6030_usb {
        u8                      linkstat;
        u8                      asleep;
        bool                    irq_enabled;
+       unsigned long           features;
 };
 
 #define xceiv_to_twl(x)                container_of((x), struct twl6030_usb, otg)
@@ -204,6 +205,12 @@ static int twl6030_start_srp(struct otg_transceiver *x)
 
 static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
 {
+       char *regulator_name;
+
+       if (twl->features & TWL6025_SUBCLASS)
+               regulator_name = "ldousb";
+       else
+               regulator_name = "vusb";
 
        /* Set to OTG_REV 1.3 and turn on the ID_WAKEUP_COMP */
        twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x1, TWL6030_BACKUP_REG);
@@ -214,7 +221,7 @@ static int twl6030_usb_ldo_init(struct twl6030_usb *twl)
        /* Program MISC2 register and set bit VUSB_IN_VBAT */
        twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x10, TWL6030_MISC2);
 
-       twl->usb3v3 = regulator_get(twl->dev, "vusb");
+       twl->usb3v3 = regulator_get(twl->dev, regulator_name);
        if (IS_ERR(twl->usb3v3))
                return -ENODEV;
 
@@ -409,6 +416,7 @@ static int __devinit twl6030_usb_probe(struct platform_device *pdev)
        twl->dev                = &pdev->dev;
        twl->irq1               = platform_get_irq(pdev, 0);
        twl->irq2               = platform_get_irq(pdev, 1);
+       twl->features           = pdata->features;
        twl->otg.dev            = twl->dev;
        twl->otg.label          = "twl6030";
        twl->otg.set_host       = twl6030_set_host;
index c8b520e9a11ae3ec3005311eaae9550d1660cac9..c04b94da81f7544ff776507fd809ae09a6a98365 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/fb.h>
 #include <linux/i2c.h>
 #include <linux/backlight.h>
-#include <linux/mfd/core.h>
 #include <linux/mfd/88pm860x.h>
 
 #define MAX_BRIGHTNESS         (0xFF)
@@ -168,7 +167,6 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
        struct pm860x_backlight_pdata *pdata = NULL;
        struct pm860x_backlight_data *data;
        struct backlight_device *bl;
-       struct mfd_cell *cell;
        struct resource *res;
        struct backlight_properties props;
        unsigned char value;
@@ -181,10 +179,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       cell = pdev->dev.platform_data;
-       if (cell == NULL)
-               return -ENODEV;
-       pdata = cell->mfd_data;
+       pdata = pdev->dev.platform_data;
        if (pdata == NULL) {
                dev_err(&pdev->dev, "platform data isn't assigned to "
                        "backlight\n");
index ea39336addfb791a2f806d4854e70675461a1158..f70bd63b01871223d74a2930bdd4823c27a120d7 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <linux/fb.h>
 #include <linux/delay.h>
+#include <linux/uaccess.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
index 49226a1b909ebfb57c488a2132cea7b55fab0d95..25db55696e142961dc814bdc4fe98ca40ea2d371 100644 (file)
@@ -30,7 +30,6 @@ objs-y$(CONFIG_MACH_OMAP_APOLLON) += lcd_apollon.o
 objs-y$(CONFIG_MACH_OMAP_2430SDP) += lcd_2430sdp.o
 objs-y$(CONFIG_MACH_OMAP_3430SDP) += lcd_2430sdp.o
 objs-y$(CONFIG_MACH_OMAP_LDP) += lcd_ldp.o
-objs-y$(CONFIG_MACH_OMAP2EVM) += lcd_omap2evm.o
 objs-y$(CONFIG_MACH_OMAP3EVM) += lcd_omap3evm.o
 objs-y$(CONFIG_MACH_OMAP3_BEAGLE) += lcd_omap3beagle.o
 objs-y$(CONFIG_FB_OMAP_LCD_MIPID) += lcd_mipid.o
diff --git a/drivers/video/omap/lcd_omap2evm.c b/drivers/video/omap/lcd_omap2evm.c
deleted file mode 100644 (file)
index 7e7a65c..0000000
+++ /dev/null
@@ -1,192 +0,0 @@
-/*
- * LCD panel support for the MISTRAL OMAP2EVM board
- *
- * Author: Arun C <arunedarath@mistralsolutions.com>
- *
- * Derived from drivers/video/omap/lcd_omap3evm.c
- * Derived from drivers/video/omap/lcd-apollon.c
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/i2c/twl.h>
-
-#include <plat/mux.h>
-#include <asm/mach-types.h>
-
-#include "omapfb.h"
-
-#define LCD_PANEL_ENABLE_GPIO  154
-#define LCD_PANEL_LR           128
-#define LCD_PANEL_UD           129
-#define LCD_PANEL_INI          152
-#define LCD_PANEL_QVGA         148
-#define LCD_PANEL_RESB         153
-
-#define TWL_LED_LEDEN          0x00
-#define TWL_PWMA_PWMAON                0x00
-#define TWL_PWMA_PWMAOFF       0x01
-
-static unsigned int bklight_level;
-
-static int omap2evm_panel_init(struct lcd_panel *panel,
-                               struct omapfb_device *fbdev)
-{
-       gpio_request(LCD_PANEL_ENABLE_GPIO, "LCD enable");
-       gpio_request(LCD_PANEL_LR, "LCD lr");
-       gpio_request(LCD_PANEL_UD, "LCD ud");
-       gpio_request(LCD_PANEL_INI, "LCD ini");
-       gpio_request(LCD_PANEL_QVGA, "LCD qvga");
-       gpio_request(LCD_PANEL_RESB, "LCD resb");
-
-       gpio_direction_output(LCD_PANEL_ENABLE_GPIO, 1);
-       gpio_direction_output(LCD_PANEL_RESB, 1);
-       gpio_direction_output(LCD_PANEL_INI, 1);
-       gpio_direction_output(LCD_PANEL_QVGA, 0);
-       gpio_direction_output(LCD_PANEL_LR, 1);
-       gpio_direction_output(LCD_PANEL_UD, 1);
-
-       twl_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN);
-       twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON);
-       twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF);
-       bklight_level = 100;
-
-       return 0;
-}
-
-static void omap2evm_panel_cleanup(struct lcd_panel *panel)
-{
-       gpio_free(LCD_PANEL_RESB);
-       gpio_free(LCD_PANEL_QVGA);
-       gpio_free(LCD_PANEL_INI);
-       gpio_free(LCD_PANEL_UD);
-       gpio_free(LCD_PANEL_LR);
-       gpio_free(LCD_PANEL_ENABLE_GPIO);
-}
-
-static int omap2evm_panel_enable(struct lcd_panel *panel)
-{
-       gpio_set_value(LCD_PANEL_ENABLE_GPIO, 0);
-       return 0;
-}
-
-static void omap2evm_panel_disable(struct lcd_panel *panel)
-{
-       gpio_set_value(LCD_PANEL_ENABLE_GPIO, 1);
-}
-
-static unsigned long omap2evm_panel_get_caps(struct lcd_panel *panel)
-{
-       return 0;
-}
-
-static int omap2evm_bklight_setlevel(struct lcd_panel *panel,
-                                               unsigned int level)
-{
-       u8 c;
-       if ((level >= 0) && (level <= 100)) {
-               c = (125 * (100 - level)) / 100 + 2;
-               twl_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF);
-               bklight_level = level;
-       }
-       return 0;
-}
-
-static unsigned int omap2evm_bklight_getlevel(struct lcd_panel *panel)
-{
-       return bklight_level;
-}
-
-static unsigned int omap2evm_bklight_getmaxlevel(struct lcd_panel *panel)
-{
-       return 100;
-}
-
-struct lcd_panel omap2evm_panel = {
-       .name           = "omap2evm",
-       .config         = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC |
-                         OMAP_LCDC_INV_HSYNC,
-
-       .bpp            = 16,
-       .data_lines     = 18,
-       .x_res          = 480,
-       .y_res          = 640,
-       .hsw            = 3,
-       .hfp            = 0,
-       .hbp            = 28,
-       .vsw            = 2,
-       .vfp            = 1,
-       .vbp            = 0,
-
-       .pixel_clock    = 20000,
-
-       .init           = omap2evm_panel_init,
-       .cleanup        = omap2evm_panel_cleanup,
-       .enable         = omap2evm_panel_enable,
-       .disable        = omap2evm_panel_disable,
-       .get_caps       = omap2evm_panel_get_caps,
-       .set_bklight_level      = omap2evm_bklight_setlevel,
-       .get_bklight_level      = omap2evm_bklight_getlevel,
-       .get_bklight_max        = omap2evm_bklight_getmaxlevel,
-};
-
-static int omap2evm_panel_probe(struct platform_device *pdev)
-{
-       omapfb_register_panel(&omap2evm_panel);
-       return 0;
-}
-
-static int omap2evm_panel_remove(struct platform_device *pdev)
-{
-       return 0;
-}
-
-static int omap2evm_panel_suspend(struct platform_device *pdev,
-                                  pm_message_t mesg)
-{
-       return 0;
-}
-
-static int omap2evm_panel_resume(struct platform_device *pdev)
-{
-       return 0;
-}
-
-struct platform_driver omap2evm_panel_driver = {
-       .probe          = omap2evm_panel_probe,
-       .remove         = omap2evm_panel_remove,
-       .suspend        = omap2evm_panel_suspend,
-       .resume         = omap2evm_panel_resume,
-       .driver         = {
-               .name   = "omap2evm_lcd",
-               .owner  = THIS_MODULE,
-       },
-};
-
-static int __init omap2evm_panel_drv_init(void)
-{
-       return platform_driver_register(&omap2evm_panel_driver);
-}
-
-static void __exit omap2evm_panel_drv_exit(void)
-{
-       platform_driver_unregister(&omap2evm_panel_driver);
-}
-
-module_init(omap2evm_panel_drv_init);
-module_exit(omap2evm_panel_drv_exit);
index 0c341d739604b8857c91e6a99db4a01cdac09ca5..cd1c4dcef8fdc0f0d79b1e33f7a84593416854fe 100644 (file)
@@ -250,7 +250,7 @@ static irqreturn_t tmiofb_irq(int irq, void *__info)
  */
 static int tmiofb_hw_stop(struct platform_device *dev)
 {
-       struct tmio_fb_data *data = mfd_get_data(dev);
+       struct tmio_fb_data *data = dev->dev.platform_data;
        struct fb_info *info = platform_get_drvdata(dev);
        struct tmiofb_par *par = info->par;
 
@@ -311,7 +311,7 @@ static int tmiofb_hw_init(struct platform_device *dev)
  */
 static void tmiofb_hw_mode(struct platform_device *dev)
 {
-       struct tmio_fb_data *data = mfd_get_data(dev);
+       struct tmio_fb_data *data = dev->dev.platform_data;
        struct fb_info *info = platform_get_drvdata(dev);
        struct fb_videomode *mode = info->mode;
        struct tmiofb_par *par = info->par;
@@ -557,8 +557,7 @@ static int tmiofb_ioctl(struct fb_info *fbi,
 static struct fb_videomode *
 tmiofb_find_mode(struct fb_info *info, struct fb_var_screeninfo *var)
 {
-       struct tmio_fb_data *data =
-                       mfd_get_data(to_platform_device(info->device));
+       struct tmio_fb_data *data = info->device->platform_data;
        struct fb_videomode *best = NULL;
        int i;
 
@@ -578,8 +577,7 @@ static int tmiofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 {
 
        struct fb_videomode *mode;
-       struct tmio_fb_data *data =
-                       mfd_get_data(to_platform_device(info->device));
+       struct tmio_fb_data *data = info->device->platform_data;
 
        mode = tmiofb_find_mode(info, var);
        if (!mode || var->bits_per_pixel > 16)
@@ -680,7 +678,7 @@ static struct fb_ops tmiofb_ops = {
 static int __devinit tmiofb_probe(struct platform_device *dev)
 {
        const struct mfd_cell *cell = mfd_get_cell(dev);
-       struct tmio_fb_data *data = mfd_get_data(dev);
+       struct tmio_fb_data *data = dev->dev.platform_data;
        struct resource *ccr = platform_get_resource(dev, IORESOURCE_MEM, 1);
        struct resource *lcr = platform_get_resource(dev, IORESOURCE_MEM, 0);
        struct resource *vram = platform_get_resource(dev, IORESOURCE_MEM, 2);
index c2a0a1cfd3b3fe6c9f2da44e8574fe9a20591be5..ab5341814c741af49674ca407f6dea0080776602 100644 (file)
@@ -145,7 +145,7 @@ static int via_gpio_get(struct gpio_chip *chip, unsigned int nr)
 }
 
 
-static struct viafb_gpio_cfg gpio_config = {
+static struct viafb_gpio_cfg viafb_gpio_config = {
        .gpio_chip = {
                .label = "VIAFB onboard GPIO",
                .owner = THIS_MODULE,
@@ -183,8 +183,8 @@ static int viafb_gpio_resume(void *private)
 {
        int i;
 
-       for (i = 0; i < gpio_config.gpio_chip.ngpio; i += 2)
-               viafb_gpio_enable(gpio_config.active_gpios[i]);
+       for (i = 0; i < viafb_gpio_config.gpio_chip.ngpio; i += 2)
+               viafb_gpio_enable(viafb_gpio_config.active_gpios[i]);
        return 0;
 }
 
@@ -201,9 +201,9 @@ int viafb_gpio_lookup(const char *name)
 {
        int i;
 
-       for (i = 0; i < gpio_config.gpio_chip.ngpio; i++)
-               if (!strcmp(name, gpio_config.active_gpios[i]->vg_name))
-                       return gpio_config.gpio_chip.base + i;
+       for (i = 0; i < viafb_gpio_config.gpio_chip.ngpio; i++)
+               if (!strcmp(name, viafb_gpio_config.active_gpios[i]->vg_name))
+                       return viafb_gpio_config.gpio_chip.base + i;
        return -1;
 }
 EXPORT_SYMBOL_GPL(viafb_gpio_lookup);
@@ -229,14 +229,15 @@ static __devinit int viafb_gpio_probe(struct platform_device *platdev)
                for (gpio = viafb_all_gpios;
                     gpio < viafb_all_gpios + VIAFB_NUM_GPIOS; gpio++)
                        if (gpio->vg_port_index == port_cfg[i].ioport_index) {
-                               gpio_config.active_gpios[ngpio] = gpio;
-                               gpio_config.gpio_names[ngpio] = gpio->vg_name;
+                               viafb_gpio_config.active_gpios[ngpio] = gpio;
+                               viafb_gpio_config.gpio_names[ngpio] =
+                                       gpio->vg_name;
                                ngpio++;
                        }
        }
-       gpio_config.gpio_chip.ngpio = ngpio;
-       gpio_config.gpio_chip.names = gpio_config.gpio_names;
-       gpio_config.vdev = vdev;
+       viafb_gpio_config.gpio_chip.ngpio = ngpio;
+       viafb_gpio_config.gpio_chip.names = viafb_gpio_config.gpio_names;
+       viafb_gpio_config.vdev = vdev;
        if (ngpio == 0) {
                printk(KERN_INFO "viafb: no GPIOs configured\n");
                return 0;
@@ -245,18 +246,18 @@ static __devinit int viafb_gpio_probe(struct platform_device *platdev)
         * Enable the ports.  They come in pairs, with a single
         * enable bit for both.
         */
-       spin_lock_irqsave(&gpio_config.vdev->reg_lock, flags);
+       spin_lock_irqsave(&viafb_gpio_config.vdev->reg_lock, flags);
        for (i = 0; i < ngpio; i += 2)
-               viafb_gpio_enable(gpio_config.active_gpios[i]);
-       spin_unlock_irqrestore(&gpio_config.vdev->reg_lock, flags);
+               viafb_gpio_enable(viafb_gpio_config.active_gpios[i]);
+       spin_unlock_irqrestore(&viafb_gpio_config.vdev->reg_lock, flags);
        /*
         * Get registered.
         */
-       gpio_config.gpio_chip.base = -1;  /* Dynamic */
-       ret = gpiochip_add(&gpio_config.gpio_chip);
+       viafb_gpio_config.gpio_chip.base = -1;  /* Dynamic */
+       ret = gpiochip_add(&viafb_gpio_config.gpio_chip);
        if (ret) {
                printk(KERN_ERR "viafb: failed to add gpios (%d)\n", ret);
-               gpio_config.gpio_chip.ngpio = 0;
+               viafb_gpio_config.gpio_chip.ngpio = 0;
        }
 #ifdef CONFIG_PM
        viafb_pm_register(&viafb_gpio_pm_hooks);
@@ -277,8 +278,8 @@ static int viafb_gpio_remove(struct platform_device *platdev)
        /*
         * Get unregistered.
         */
-       if (gpio_config.gpio_chip.ngpio > 0) {
-               ret = gpiochip_remove(&gpio_config.gpio_chip);
+       if (viafb_gpio_config.gpio_chip.ngpio > 0) {
+               ret = gpiochip_remove(&viafb_gpio_config.gpio_chip);
                if (ret) { /* Somebody still using it? */
                        printk(KERN_ERR "Viafb: GPIO remove failed\n");
                        return ret;
@@ -287,11 +288,11 @@ static int viafb_gpio_remove(struct platform_device *platdev)
        /*
         * Disable the ports.
         */
-       spin_lock_irqsave(&gpio_config.vdev->reg_lock, flags);
-       for (i = 0; i < gpio_config.gpio_chip.ngpio; i += 2)
-               viafb_gpio_disable(gpio_config.active_gpios[i]);
-       gpio_config.gpio_chip.ngpio = 0;
-       spin_unlock_irqrestore(&gpio_config.vdev->reg_lock, flags);
+       spin_lock_irqsave(&viafb_gpio_config.vdev->reg_lock, flags);
+       for (i = 0; i < viafb_gpio_config.gpio_chip.ngpio; i += 2)
+               viafb_gpio_disable(viafb_gpio_config.active_gpios[i]);
+       viafb_gpio_config.gpio_chip.ngpio = 0;
+       spin_unlock_irqrestore(&viafb_gpio_config.vdev->reg_lock, flags);
        return ret;
 }
 
index 7c608c5ccf84f856cf476d28c7b01f87c3f1c5f9..00d615d7aa216d52387e8d67952851a42802a326 100644 (file)
@@ -42,7 +42,7 @@ config W1_MASTER_MXC
 
 config W1_MASTER_DS1WM
        tristate "Maxim DS1WM 1-wire busmaster"
-       depends on W1 && ARM && HAVE_CLK
+       depends on W1
        help
          Say Y here to enable the DS1WM 1-wire driver, such as that
          in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like
index 2f4fa02744a5680673625f48192a7111008053a3..ad57593d224a1f3200a0ef818b36bcb22f6fdf75 100644 (file)
@@ -33,6 +33,7 @@
 #define DS1WM_INT      0x02    /* R/W interrupt status */
 #define DS1WM_INT_EN   0x03    /* R/W interrupt enable */
 #define DS1WM_CLKDIV   0x04    /* R/W 5 bits of divisor and pre-scale */
+#define DS1WM_CNTRL    0x05    /* R/W master control register (not used yet) */
 
 #define DS1WM_CMD_1W_RESET  (1 << 0)   /* force reset on 1-wire bus */
 #define DS1WM_CMD_SRA      (1 << 1)    /* enable Search ROM accelerator mode */
@@ -56,6 +57,7 @@
 #define DS1WM_INTEN_ERSRF   (1 << 5)   /* enable rx shift register full int */
 #define DS1WM_INTEN_DQO            (1 << 6)    /* enable direct bus driving ops */
 
+#define DS1WM_INTEN_NOT_IAS (~DS1WM_INTEN_IAS) /* all but INTR active state */
 
 #define DS1WM_TIMEOUT (HZ * 5)
 
@@ -63,41 +65,50 @@ static struct {
        unsigned long freq;
        unsigned long divisor;
 } freq[] = {
-       { 4000000, 0x8 },
-       { 5000000, 0x2 },
-       { 6000000, 0x5 },
-       { 7000000, 0x3 },
-       { 8000000, 0xc },
-       { 10000000, 0x6 },
-       { 12000000, 0x9 },
-       { 14000000, 0x7 },
-       { 16000000, 0x10 },
-       { 20000000, 0xa },
-       { 24000000, 0xd },
-       { 28000000, 0xb },
-       { 32000000, 0x14 },
-       { 40000000, 0xe },
-       { 48000000, 0x11 },
-       { 56000000, 0xf },
-       { 64000000, 0x18 },
-       { 80000000, 0x12 },
-       { 96000000, 0x15 },
-       { 112000000, 0x13 },
-       { 128000000, 0x1c },
+       {   1000000, 0x80 },
+       {   2000000, 0x84 },
+       {   3000000, 0x81 },
+       {   4000000, 0x88 },
+       {   5000000, 0x82 },
+       {   6000000, 0x85 },
+       {   7000000, 0x83 },
+       {   8000000, 0x8c },
+       {  10000000, 0x86 },
+       {  12000000, 0x89 },
+       {  14000000, 0x87 },
+       {  16000000, 0x90 },
+       {  20000000, 0x8a },
+       {  24000000, 0x8d },
+       {  28000000, 0x8b },
+       {  32000000, 0x94 },
+       {  40000000, 0x8e },
+       {  48000000, 0x91 },
+       {  56000000, 0x8f },
+       {  64000000, 0x98 },
+       {  80000000, 0x92 },
+       {  96000000, 0x95 },
+       { 112000000, 0x93 },
+       { 128000000, 0x9c },
+/* you can continue this table, consult the OPERATION - CLOCK DIVISOR
+   section of the ds1wm spec sheet. */
 };
 
 struct ds1wm_data {
-       void            __iomem *map;
-       int             bus_shift; /* # of shifts to calc register offsets */
+       void     __iomem *map;
+       int      bus_shift; /* # of shifts to calc register offsets */
        struct platform_device *pdev;
-       const struct mfd_cell *cell;
-       int             irq;
-       int             active_high;
-       int             slave_present;
-       void            *reset_complete;
-       void            *read_complete;
-       void            *write_complete;
-       u8              read_byte; /* last byte received */
+       const struct mfd_cell   *cell;
+       int      irq;
+       int      slave_present;
+       void     *reset_complete;
+       void     *read_complete;
+       void     *write_complete;
+       int      read_error;
+       /* last byte received */
+       u8       read_byte;
+       /* byte to write that leaves all interrupts disabled, */
+       /* taking the interrupt active state (IAS) into account (optimization) */
+       u8       int_en_reg_none;
 };
 
 static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg,
@@ -115,23 +126,39 @@ static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg)
 static irqreturn_t ds1wm_isr(int isr, void *data)
 {
        struct ds1wm_data *ds1wm_data = data;
-       u8 intr = ds1wm_read_register(ds1wm_data, DS1WM_INT);
+       u8 intr;
+       u8 inten = ds1wm_read_register(ds1wm_data, DS1WM_INT_EN);
+       /* if no bits are set in the interrupt enable register (except the IAS),
+          then go no further; reading the regs below has side effects */
+       if (!(inten & DS1WM_INTEN_NOT_IAS))
+               return IRQ_NONE;
 
-       ds1wm_data->slave_present = (intr & DS1WM_INT_PDR) ? 0 : 1;
+       ds1wm_write_register(ds1wm_data,
+               DS1WM_INT_EN, ds1wm_data->int_en_reg_none);
 
-       if ((intr & DS1WM_INT_PD) && ds1wm_data->reset_complete)
-               complete(ds1wm_data->reset_complete);
+       /* this read action clears the INTR and certain flags in ds1wm */
+       intr = ds1wm_read_register(ds1wm_data, DS1WM_INT);
 
-       if ((intr & DS1WM_INT_TSRE) && ds1wm_data->write_complete)
-               complete(ds1wm_data->write_complete);
+       ds1wm_data->slave_present = (intr & DS1WM_INT_PDR) ? 0 : 1;
 
+       if ((intr & DS1WM_INT_TSRE) && ds1wm_data->write_complete) {
+               inten &= ~DS1WM_INTEN_ETMT;
+               complete(ds1wm_data->write_complete);
+       }
        if (intr & DS1WM_INT_RBF) {
+               /* this read clears the RBF flag */
                ds1wm_data->read_byte = ds1wm_read_register(ds1wm_data,
-                                                           DS1WM_DATA);
+               DS1WM_DATA);
+               inten &= ~DS1WM_INTEN_ERBF;
                if (ds1wm_data->read_complete)
                        complete(ds1wm_data->read_complete);
        }
+       if ((intr & DS1WM_INT_PD) && ds1wm_data->reset_complete) {
+               inten &= ~DS1WM_INTEN_EPD;
+               complete(ds1wm_data->reset_complete);
+       }
 
+       ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, inten);
        return IRQ_HANDLED;
 }
 
@@ -142,33 +169,19 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data)
 
        ds1wm_data->reset_complete = &reset_done;
 
+       /* enable Presence detect only */
        ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, DS1WM_INTEN_EPD |
-               (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0));
+       ds1wm_data->int_en_reg_none);
 
        ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_1W_RESET);
 
        timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT);
        ds1wm_data->reset_complete = NULL;
        if (!timeleft) {
-               dev_err(&ds1wm_data->pdev->dev, "reset failed\n");
+               dev_err(&ds1wm_data->pdev->dev, "reset failed, timed out\n");
                return 1;
        }
 
-       /* Wait for the end of the reset. According to the specs, the time
-        * from when the interrupt is asserted to the end of the reset is:
-        *     tRSTH  - tPDH  - tPDL - tPDI
-        *     625 us - 60 us - 240 us - 100 ns = 324.9 us
-        *
-        * We'll wait a bit longer just to be sure.
-        * Was udelay(500), but if it is going to busywait the cpu that long,
-        * might as well come back later.
-        */
-       msleep(1);
-
-       ds1wm_write_register(ds1wm_data, DS1WM_INT_EN,
-               DS1WM_INTEN_ERBF | DS1WM_INTEN_ETMT | DS1WM_INTEN_EPD |
-               (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0));
-
        if (!ds1wm_data->slave_present) {
                dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n");
                return 1;
@@ -179,26 +192,47 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data)
 
 static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data)
 {
+       unsigned long timeleft;
        DECLARE_COMPLETION_ONSTACK(write_done);
        ds1wm_data->write_complete = &write_done;
 
+       ds1wm_write_register(ds1wm_data, DS1WM_INT_EN,
+       ds1wm_data->int_en_reg_none | DS1WM_INTEN_ETMT);
+
        ds1wm_write_register(ds1wm_data, DS1WM_DATA, data);
 
-       wait_for_completion_timeout(&write_done, DS1WM_TIMEOUT);
+       timeleft = wait_for_completion_timeout(&write_done, DS1WM_TIMEOUT);
+
        ds1wm_data->write_complete = NULL;
+       if (!timeleft) {
+               dev_err(&ds1wm_data->pdev->dev, "write failed, timed out\n");
+               return -ETIMEDOUT;
+       }
 
        return 0;
 }
 
-static int ds1wm_read(struct ds1wm_data *ds1wm_data, unsigned char write_data)
+static u8 ds1wm_read(struct ds1wm_data *ds1wm_data, unsigned char write_data)
 {
+       unsigned long timeleft;
+       u8 intEnable = DS1WM_INTEN_ERBF | ds1wm_data->int_en_reg_none;
        DECLARE_COMPLETION_ONSTACK(read_done);
+
+       ds1wm_read_register(ds1wm_data, DS1WM_DATA);
+
        ds1wm_data->read_complete = &read_done;
+       ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, intEnable);
 
-       ds1wm_write(ds1wm_data, write_data);
-       wait_for_completion_timeout(&read_done, DS1WM_TIMEOUT);
-       ds1wm_data->read_complete = NULL;
+       ds1wm_write_register(ds1wm_data, DS1WM_DATA, write_data);
+       timeleft = wait_for_completion_timeout(&read_done, DS1WM_TIMEOUT);
 
+       ds1wm_data->read_complete = NULL;
+       if (!timeleft) {
+               dev_err(&ds1wm_data->pdev->dev, "read failed, timed out\n");
+               ds1wm_data->read_error = -ETIMEDOUT;
+               return 0xFF;
+       }
+       ds1wm_data->read_error = 0;
        return ds1wm_data->read_byte;
 }
 
@@ -206,8 +240,8 @@ static int ds1wm_find_divisor(int gclk)
 {
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(freq); i++)
-               if (gclk <= freq[i].freq)
+       for (i = ARRAY_SIZE(freq)-1; i >= 0; --i)
+               if (gclk >= freq[i].freq)
                        return freq[i].divisor;
 
        return 0;
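A sketch of the reversed divisor lookup above: the table is ordered by ascending frequency, and scanning it backwards now selects the highest table frequency that does not exceed the bus clock (for example a 33 MHz clock picks the 32 MHz entry), where the old forward scan rounded up to the next faster entry. The excerpt below uses three entries from the table purely for illustration.

static const struct {
	unsigned long freq;
	unsigned long divisor;
} tbl[] = {
	{ 28000000, 0x8b },
	{ 32000000, 0x94 },
	{ 40000000, 0x8e },
};

static unsigned long find_divisor(unsigned long gclk)
{
	int i;

	/* Walk from the fastest entry down to the slowest. */
	for (i = (int)(sizeof(tbl) / sizeof(tbl[0])) - 1; i >= 0; --i)
		if (gclk >= tbl[i].freq)
			return tbl[i].divisor;

	return 0;	/* clock slower than any table entry */
}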
@@ -216,12 +250,14 @@ static int ds1wm_find_divisor(int gclk)
 static void ds1wm_up(struct ds1wm_data *ds1wm_data)
 {
        int divisor;
-       struct ds1wm_driver_data *plat = mfd_get_data(ds1wm_data->pdev);
+       struct ds1wm_driver_data *plat = ds1wm_data->pdev->dev.platform_data;
 
        if (ds1wm_data->cell->enable)
                ds1wm_data->cell->enable(ds1wm_data->pdev);
 
        divisor = ds1wm_find_divisor(plat->clock_rate);
+       dev_dbg(&ds1wm_data->pdev->dev,
+               "found divisor 0x%x for clock %d\n", divisor, plat->clock_rate);
        if (divisor == 0) {
                dev_err(&ds1wm_data->pdev->dev,
                        "no suitable divisor for %dHz clock\n",
@@ -242,7 +278,7 @@ static void ds1wm_down(struct ds1wm_data *ds1wm_data)
 
        /* Disable interrupts. */
        ds1wm_write_register(ds1wm_data, DS1WM_INT_EN,
-                            ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0);
+               ds1wm_data->int_en_reg_none);
 
        if (ds1wm_data->cell->disable)
                ds1wm_data->cell->disable(ds1wm_data->pdev);
@@ -279,41 +315,121 @@ static void ds1wm_search(void *data, struct w1_master *master_dev,
 {
        struct ds1wm_data *ds1wm_data = data;
        int i;
-       unsigned long long rom_id;
-
-       /* XXX We need to iterate for multiple devices per the DS1WM docs.
-        * See http://www.maxim-ic.com/appnotes.cfm/appnote_number/120. */
-       if (ds1wm_reset(ds1wm_data))
-               return;
-
-       ds1wm_write(ds1wm_data, search_type);
-       ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_SRA);
-
-       for (rom_id = 0, i = 0; i < 16; i++) {
-
-               unsigned char resp, r, d;
-
-               resp = ds1wm_read(ds1wm_data, 0x00);
-
-               r = ((resp & 0x02) >> 1) |
-                   ((resp & 0x08) >> 2) |
-                   ((resp & 0x20) >> 3) |
-                   ((resp & 0x80) >> 4);
-
-               d = ((resp & 0x01) >> 0) |
-                   ((resp & 0x04) >> 1) |
-                   ((resp & 0x10) >> 2) |
-                   ((resp & 0x40) >> 3);
-
-               rom_id |= (unsigned long long) r << (i * 4);
-
-       }
-       dev_dbg(&ds1wm_data->pdev->dev, "found 0x%08llX\n", rom_id);
-
-       ds1wm_write_register(ds1wm_data, DS1WM_CMD, ~DS1WM_CMD_SRA);
-       ds1wm_reset(ds1wm_data);
-
-       slave_found(master_dev, rom_id);
+       int ms_discrep_bit = -1;
+       u64 r = 0; /* holds the progress of the search */
+       u64 r_prime, d;
+       unsigned slaves_found = 0;
+       unsigned int pass = 0;
+
+       dev_dbg(&ds1wm_data->pdev->dev, "search begin\n");
+       while (true) {
+               ++pass;
+               if (pass > 100) {
+                       dev_dbg(&ds1wm_data->pdev->dev,
+                               "too many attempts (100), search aborted\n");
+                       return;
+               }
+
+               if (ds1wm_reset(ds1wm_data)) {
+                       dev_dbg(&ds1wm_data->pdev->dev,
+                               "pass: %d reset error (or no slaves)\n", pass);
+                       break;
+               }
+
+               dev_dbg(&ds1wm_data->pdev->dev,
+                       "pass: %d r : %0#18llx writing SEARCH_ROM\n", pass, r);
+               ds1wm_write(ds1wm_data, search_type);
+               dev_dbg(&ds1wm_data->pdev->dev,
+                       "pass: %d entering ASM\n", pass);
+               ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_SRA);
+               dev_dbg(&ds1wm_data->pdev->dev,
+                       "pass: %d begining nibble loop\n", pass);
+
+               r_prime = 0;
+               d = 0;
+               /* we work one nibble at a time */
+               /* each nibble is interleaved to form a byte */
+               for (i = 0; i < 16; i++) {
+
+                       unsigned char resp, _r, _r_prime, _d;
+
+                       _r = (r >> (4*i)) & 0xf;
+                       _r = ((_r & 0x1) << 1) |
+                       ((_r & 0x2) << 2) |
+                       ((_r & 0x4) << 3) |
+                       ((_r & 0x8) << 4);
+
+                       /* writes _r, then reads back: */
+                       resp = ds1wm_read(ds1wm_data, _r);
+
+                       if (ds1wm_data->read_error) {
+                               dev_err(&ds1wm_data->pdev->dev,
+                               "pass: %d nibble: %d read error\n", pass, i);
+                               break;
+                       }
+
+                       _r_prime = ((resp & 0x02) >> 1) |
+                       ((resp & 0x08) >> 2) |
+                       ((resp & 0x20) >> 3) |
+                       ((resp & 0x80) >> 4);
+
+                       _d = ((resp & 0x01) >> 0) |
+                       ((resp & 0x04) >> 1) |
+                       ((resp & 0x10) >> 2) |
+                       ((resp & 0x40) >> 3);
+
+                       r_prime |= (unsigned long long) _r_prime << (i * 4);
+                       d |= (unsigned long long) _d << (i * 4);
+
+               }
+               if (ds1wm_data->read_error) {
+                       dev_err(&ds1wm_data->pdev->dev,
+                               "pass: %d read error, retrying\n", pass);
+                       break;
+               }
+               dev_dbg(&ds1wm_data->pdev->dev,
+                       "pass: %d r\': %0#18llx d:%0#18llx\n",
+                       pass, r_prime, d);
+               dev_dbg(&ds1wm_data->pdev->dev,
+                       "pass: %d nibble loop complete, exiting ASM\n", pass);
+               ds1wm_write_register(ds1wm_data, DS1WM_CMD, ~DS1WM_CMD_SRA);
+               dev_dbg(&ds1wm_data->pdev->dev,
+                       "pass: %d resetting bus\n", pass);
+               ds1wm_reset(ds1wm_data);
+               if ((r_prime & ((u64)1 << 63)) && (d & ((u64)1 << 63))) {
+                       dev_err(&ds1wm_data->pdev->dev,
+                               "pass: %d bus error, retrying\n", pass);
+                       continue; /* start over */
+               }
+
+
+               dev_dbg(&ds1wm_data->pdev->dev,
+                       "pass: %d found %0#18llx\n", pass, r_prime);
+               slave_found(master_dev, r_prime);
+               ++slaves_found;
+               dev_dbg(&ds1wm_data->pdev->dev,
+                       "pass: %d complete, preparing next pass\n", pass);
+
+               /* any discrepancy for which we already chose the
+                  '1' branch is now irrelevant; we reveal the
+                  next branch with this: */
+               d &= ~r;
+               /* find last bit set, i.e. the most signif. bit set */
+               ms_discrep_bit = fls64(d) - 1;
+               dev_dbg(&ds1wm_data->pdev->dev,
+                       "pass: %d new d:%0#18llx MS discrep bit:%d\n",
+                       pass, d, ms_discrep_bit);
+
+               /* prev_ms_discrep_bit = ms_discrep_bit; */
+               /* prepare for the next ROM search: */
+               if (ms_discrep_bit == -1)
+                       break;
+
+               r = (r &  ~(~0ull << (ms_discrep_bit))) | 1 << ms_discrep_bit;
+       } /* end while true */
+       dev_dbg(&ds1wm_data->pdev->dev,
+               "pass: %d total: %d search done ms d bit pos: %d\n", pass,
+               slaves_found, ms_discrep_bit);
 }
 
 /* --------------------------------------------------------------------- */
@@ -351,13 +467,21 @@ static int ds1wm_probe(struct platform_device *pdev)
                ret = -ENOMEM;
                goto err0;
        }
-       plat = mfd_get_data(pdev);
 
        /* calculate bus shift from mem resource */
        ds1wm_data->bus_shift = resource_size(res) >> 3;
 
        ds1wm_data->pdev = pdev;
        ds1wm_data->cell = mfd_get_cell(pdev);
+       if (!ds1wm_data->cell) {
+               ret = -ENODEV;
+               goto err1;
+       }
+       plat = pdev->dev.platform_data;
+       if (!plat) {
+               ret = -ENODEV;
+               goto err1;
+       }
 
        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!res) {
@@ -365,15 +489,15 @@ static int ds1wm_probe(struct platform_device *pdev)
                goto err1;
        }
        ds1wm_data->irq = res->start;
-       ds1wm_data->active_high = plat->active_high;
+       ds1wm_data->int_en_reg_none = (plat->active_high ? DS1WM_INTEN_IAS : 0);
 
        if (res->flags & IORESOURCE_IRQ_HIGHEDGE)
                irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING);
        if (res->flags & IORESOURCE_IRQ_LOWEDGE)
                irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING);
 
-       ret = request_irq(ds1wm_data->irq, ds1wm_isr, IRQF_DISABLED,
-                         "ds1wm", ds1wm_data);
+       ret = request_irq(ds1wm_data->irq, ds1wm_isr,
+                       IRQF_DISABLED | IRQF_SHARED, "ds1wm", ds1wm_data);
        if (ret)
                goto err1;
 
@@ -460,5 +584,6 @@ module_exit(ds1wm_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>, "
-             "Matt Reimer <mreimer@vpop.net>");
+       "Matt Reimer <mreimer@vpop.net>,"
+       "Jean-Francois Dagenais <dagenaisj@sonatest.com>");
 MODULE_DESCRIPTION("DS1WM w1 busmaster driver");
index f0c909625bd1246991af2b0cda78de80b1d504da..d0cb01b42012ff8adff7e60ede81779100172aa3 100644 (file)
@@ -16,6 +16,13 @@ config W1_SLAVE_SMEM
          Say Y here if you want to connect 1-wire
          simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire.
 
+config W1_SLAVE_DS2408
+        tristate "8-Channel Addressable Switch (IO Expander) 0x29 family support (DS2408)"
+        help
+          Say Y here if you want to use the 1-wire
+          DS2408 8-Channel Addressable Switch (IO Expander) device.
+
 config W1_SLAVE_DS2423
        tristate "Counter 1-wire device (DS2423)"
        select CRC16
@@ -61,6 +68,19 @@ config W1_SLAVE_DS2760
 
          If you are unsure, say N.
 
+config W1_SLAVE_DS2780
+       tristate "Dallas 2780 battery monitor chip"
+       depends on W1
+       help
+         If you enable this you will have the DS2780 battery monitor
+         chip support.
+
+         The battery monitor chip is used in many batteries/devices
+         as the part responsible for charging/discharging/monitoring
+         Li+ batteries.
+
+         If you are unsure, say N.
+
 config W1_SLAVE_BQ27000
        tristate "BQ27000 slave support"
        depends on W1
index 3c76350a24f7656097d6fde70673d50be4c4ca9c..1f31e9fb0b25bed05a08bbe7e9997bbabe74e817 100644 (file)
@@ -4,8 +4,10 @@
 
 obj-$(CONFIG_W1_SLAVE_THERM)   += w1_therm.o
 obj-$(CONFIG_W1_SLAVE_SMEM)    += w1_smem.o
+obj-$(CONFIG_W1_SLAVE_DS2408)   += w1_ds2408.o
 obj-$(CONFIG_W1_SLAVE_DS2423)  += w1_ds2423.o
 obj-$(CONFIG_W1_SLAVE_DS2431)  += w1_ds2431.o
 obj-$(CONFIG_W1_SLAVE_DS2433)  += w1_ds2433.o
 obj-$(CONFIG_W1_SLAVE_DS2760)  += w1_ds2760.o
+obj-$(CONFIG_W1_SLAVE_DS2780)  += w1_ds2780.o
 obj-$(CONFIG_W1_SLAVE_BQ27000) += w1_bq27000.o
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c
new file mode 100644 (file)
index 0000000..c377818
--- /dev/null
@@ -0,0 +1,402 @@
+/*
+ *     w1_ds2408.c - w1 family 29 (DS2408) driver
+ *
+ * Copyright (c) 2010 Jean-Francois Dagenais <dagenaisj@sonatest.com>
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "../w1.h"
+#include "../w1_int.h"
+#include "../w1_family.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jean-Francois Dagenais <dagenaisj@sonatest.com>");
+MODULE_DESCRIPTION("w1 family 29 driver for DS2408 8 Pin IO");
+
+
+#define W1_F29_RETRIES         3
+
+#define W1_F29_REG_LOGIG_STATE             0x88 /* R */
+#define W1_F29_REG_OUTPUT_LATCH_STATE      0x89 /* R */
+#define W1_F29_REG_ACTIVITY_LATCH_STATE    0x8A /* R */
+#define W1_F29_REG_COND_SEARCH_SELECT_MASK 0x8B /* RW */
+#define W1_F29_REG_COND_SEARCH_POL_SELECT  0x8C /* RW */
+#define W1_F29_REG_CONTROL_AND_STATUS      0x8D /* RW */
+
+#define W1_F29_FUNC_READ_PIO_REGS          0xF0
+#define W1_F29_FUNC_CHANN_ACCESS_READ      0xF5
+#define W1_F29_FUNC_CHANN_ACCESS_WRITE     0x5A
+/* also used to write the control/status reg (0x8D): */
+#define W1_F29_FUNC_WRITE_COND_SEARCH_REG  0xCC
+#define W1_F29_FUNC_RESET_ACTIVITY_LATCHES 0xC3
+
+#define W1_F29_SUCCESS_CONFIRM_BYTE        0xAA
+
+static int _read_reg(struct w1_slave *sl, u8 address, unsigned char* buf)
+{
+       u8 wrbuf[3];
+       dev_dbg(&sl->dev,
+                       "Reading with slave: %p, reg addr: %0#4x, buff addr: %p",
+                       sl, (unsigned int)address, buf);
+
+       if (!buf)
+               return -EINVAL;
+
+       mutex_lock(&sl->master->mutex);
+       dev_dbg(&sl->dev, "mutex locked");
+
+       if (w1_reset_select_slave(sl)) {
+               mutex_unlock(&sl->master->mutex);
+               return -EIO;
+       }
+
+       wrbuf[0] = W1_F29_FUNC_READ_PIO_REGS;
+       wrbuf[1] = address;
+       wrbuf[2] = 0;
+       w1_write_block(sl->master, wrbuf, 3);
+       *buf = w1_read_8(sl->master);
+
+       mutex_unlock(&sl->master->mutex);
+       dev_dbg(&sl->dev, "mutex unlocked");
+       return 1;
+}
+
+static ssize_t w1_f29_read_state(
+       struct file *filp, struct kobject *kobj,
+       struct bin_attribute *bin_attr,
+       char *buf, loff_t off, size_t count)
+{
+       dev_dbg(&kobj_to_w1_slave(kobj)->dev,
+               "Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
+               bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
+       if (count != 1 || off != 0)
+               return -EFAULT;
+       return _read_reg(kobj_to_w1_slave(kobj), W1_F29_REG_LOGIG_STATE, buf);
+}
+
+static ssize_t w1_f29_read_output(
+       struct file *filp, struct kobject *kobj,
+       struct bin_attribute *bin_attr,
+       char *buf, loff_t off, size_t count)
+{
+       dev_dbg(&kobj_to_w1_slave(kobj)->dev,
+               "Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
+               bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
+       if (count != 1 || off != 0)
+               return -EFAULT;
+       return _read_reg(kobj_to_w1_slave(kobj),
+                                        W1_F29_REG_OUTPUT_LATCH_STATE, buf);
+}
+
+static ssize_t w1_f29_read_activity(
+       struct file *filp, struct kobject *kobj,
+       struct bin_attribute *bin_attr,
+       char *buf, loff_t off, size_t count)
+{
+       dev_dbg(&kobj_to_w1_slave(kobj)->dev,
+               "Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
+               bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
+       if (count != 1 || off != 0)
+               return -EFAULT;
+       return _read_reg(kobj_to_w1_slave(kobj),
+                                        W1_F29_REG_ACTIVITY_LATCH_STATE, buf);
+}
+
+static ssize_t w1_f29_read_cond_search_mask(
+       struct file *filp, struct kobject *kobj,
+       struct bin_attribute *bin_attr,
+       char *buf, loff_t off, size_t count)
+{
+       dev_dbg(&kobj_to_w1_slave(kobj)->dev,
+               "Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
+               bin_attr->attr.name, kobj, (unsigned int)off, count, buf);
+       if (count != 1 || off != 0)
+               return -EFAULT;
+       return _read_reg(kobj_to_w1_slave(kobj),
+               W1_F29_REG_COND_SEARCH_SELECT_MASK, buf);
+}
+
+static ssize_t w1_f29_read_cond_search_polarity(
+       struct file *filp, struct kobject *kobj,
+       struct bin_attribute *bin_attr,
+       char *buf, loff_t off, size_t count)
+{
+       if (count != 1 || off != 0)
+               return -EFAULT;
+       return _read_reg(kobj_to_w1_slave(kobj),
+               W1_F29_REG_COND_SEARCH_POL_SELECT, buf);
+}
+
+static ssize_t w1_f29_read_status_control(
+       struct file *filp, struct kobject *kobj,
+       struct bin_attribute *bin_attr,
+       char *buf, loff_t off, size_t count)
+{
+       if (count != 1 || off != 0)
+               return -EFAULT;
+       return _read_reg(kobj_to_w1_slave(kobj),
+               W1_F29_REG_CONTROL_AND_STATUS, buf);
+}
+
+
+
+
+static ssize_t w1_f29_write_output(
+       struct file *filp, struct kobject *kobj,
+       struct bin_attribute *bin_attr,
+       char *buf, loff_t off, size_t count)
+{
+       struct w1_slave *sl = kobj_to_w1_slave(kobj);
+       u8 w1_buf[3];
+       u8 readBack;
+       unsigned int retries = W1_F29_RETRIES;
+
+       if (count != 1 || off != 0)
+               return -EFAULT;
+
+       dev_dbg(&sl->dev, "locking mutex for write_output");
+       mutex_lock(&sl->master->mutex);
+       dev_dbg(&sl->dev, "mutex locked");
+
+       if (w1_reset_select_slave(sl))
+               goto error;
+
+       while (retries--) {
+               w1_buf[0] = W1_F29_FUNC_CHANN_ACCESS_WRITE;
+               w1_buf[1] = *buf;
+               w1_buf[2] = ~(*buf);
+               w1_write_block(sl->master, w1_buf, 3);
+
+               readBack = w1_read_8(sl->master);
+               /* here the master could read another byte which
+                  would be the PIO reg (the actual pin logic state)
+                  since in this driver we don't know which pins are
+                  in and outs, there's no value to read the state and
+                  compare. with (*buf) so end this command abruptly: */
+               if (w1_reset_resume_command(sl->master))
+                       goto error;
+
+               if (readBack != 0xAA) {
+                       /* try again, the slave is ready for a command */
+                       continue;
+               }
+
+               /* go read back the output latches */
+               /* (the direct effect of the write above) */
+               w1_buf[0] = W1_F29_FUNC_READ_PIO_REGS;
+               w1_buf[1] = W1_F29_REG_OUTPUT_LATCH_STATE;
+               w1_buf[2] = 0;
+               w1_write_block(sl->master, w1_buf, 3);
+               /* read the result of the READ_PIO_REGS command */
+               if (w1_read_8(sl->master) == *buf) {
+                       /* success! */
+                       mutex_unlock(&sl->master->mutex);
+                       dev_dbg(&sl->dev,
+                               "mutex unlocked, retries:%d", retries);
+                       return 1;
+               }
+       }
+error:
+       mutex_unlock(&sl->master->mutex);
+       dev_dbg(&sl->dev, "mutex unlocked in error, retries:%d", retries);
+
+       return -EIO;
+}
+
+
+/**
+ * Writing to the activity file resets the activity latches.
+ */
+static ssize_t w1_f29_write_activity(
+       struct file *filp, struct kobject *kobj,
+       struct bin_attribute *bin_attr,
+       char *buf, loff_t off, size_t count)
+{
+       struct w1_slave *sl = kobj_to_w1_slave(kobj);
+       unsigned int retries = W1_F29_RETRIES;
+
+       if (count != 1 || off != 0)
+               return -EFAULT;
+
+       mutex_lock(&sl->master->mutex);
+
+       if (w1_reset_select_slave(sl))
+               goto error;
+
+       while (retries--) {
+               w1_write_8(sl->master, W1_F29_FUNC_RESET_ACTIVITY_LATCHES);
+               if (w1_read_8(sl->master) == W1_F29_SUCCESS_CONFIRM_BYTE) {
+                       mutex_unlock(&sl->master->mutex);
+                       return 1;
+               }
+               if (w1_reset_resume_command(sl->master))
+                       goto error;
+       }
+
+error:
+       mutex_unlock(&sl->master->mutex);
+       return -EIO;
+}
+
+static ssize_t w1_f29_write_status_control(
+       struct file *filp,
+       struct kobject *kobj,
+       struct bin_attribute *bin_attr,
+       char *buf,
+       loff_t off,
+       size_t count)
+{
+       struct w1_slave *sl = kobj_to_w1_slave(kobj);
+       u8 w1_buf[4];
+       unsigned int retries = W1_F29_RETRIES;
+
+       if (count != 1 || off != 0)
+               return -EFAULT;
+
+       mutex_lock(&sl->master->mutex);
+
+       if (w1_reset_select_slave(sl))
+               goto error;
+
+       while (retries--) {
+               w1_buf[0] = W1_F29_FUNC_WRITE_COND_SEARCH_REG;
+               w1_buf[1] = W1_F29_REG_CONTROL_AND_STATUS;
+               w1_buf[2] = 0;
+               w1_buf[3] = *buf;
+
+               w1_write_block(sl->master, w1_buf, 4);
+               if (w1_reset_resume_command(sl->master))
+                       goto error;
+
+               w1_buf[0] = W1_F29_FUNC_READ_PIO_REGS;
+               w1_buf[1] = W1_F29_REG_CONTROL_AND_STATUS;
+               w1_buf[2] = 0;
+
+               w1_write_block(sl->master, w1_buf, 3);
+               if (w1_read_8(sl->master) == *buf) {
+                       /* success! */
+                       mutex_unlock(&sl->master->mutex);
+                       return 1;
+               }
+       }
+error:
+       mutex_unlock(&sl->master->mutex);
+
+       return -EIO;
+}
+
+
+
+#define NB_SYSFS_BIN_FILES 6
+static struct bin_attribute w1_f29_sysfs_bin_files[NB_SYSFS_BIN_FILES] = {
+       {
+               .attr = {
+                       .name = "state",
+                       .mode = S_IRUGO,
+               },
+               .size = 1,
+               .read = w1_f29_read_state,
+       },
+       {
+               .attr = {
+                       .name = "output",
+                       .mode = S_IRUGO | S_IWUSR | S_IWGRP,
+               },
+               .size = 1,
+               .read = w1_f29_read_output,
+               .write = w1_f29_write_output,
+       },
+       {
+               .attr = {
+                       .name = "activity",
+                       .mode = S_IRUGO,
+               },
+               .size = 1,
+               .read = w1_f29_read_activity,
+               .write = w1_f29_write_activity,
+       },
+       {
+               .attr = {
+                       .name = "cond_search_mask",
+                       .mode = S_IRUGO,
+               },
+               .size = 1,
+               .read = w1_f29_read_cond_search_mask,
+               .write = 0,
+       },
+       {
+               .attr = {
+                       .name = "cond_search_polarity",
+                       .mode = S_IRUGO,
+               },
+               .size = 1,
+               .read = w1_f29_read_cond_search_polarity,
+               .write = 0,
+       },
+       {
+               .attr = {
+                       .name = "status_control",
+                       .mode = S_IRUGO | S_IWUSR | S_IWGRP,
+               },
+               .size = 1,
+               .read = w1_f29_read_status_control,
+               .write = w1_f29_write_status_control,
+       }
+};
+
+static int w1_f29_add_slave(struct w1_slave *sl)
+{
+       int err = 0;
+       int i;
+
+       for (i = 0; i < NB_SYSFS_BIN_FILES && !err; ++i)
+               err = sysfs_create_bin_file(
+                       &sl->dev.kobj,
+                       &(w1_f29_sysfs_bin_files[i]));
+       if (err)
+               while (--i >= 0)
+                       sysfs_remove_bin_file(&sl->dev.kobj,
+                               &(w1_f29_sysfs_bin_files[i]));
+       return err;
+}
+
+static void w1_f29_remove_slave(struct w1_slave *sl)
+{
+       int i;
+       for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i)
+               sysfs_remove_bin_file(&sl->dev.kobj,
+                       &(w1_f29_sysfs_bin_files[i]));
+}
+
+static struct w1_family_ops w1_f29_fops = {
+       .add_slave      = w1_f29_add_slave,
+       .remove_slave   = w1_f29_remove_slave,
+};
+
+static struct w1_family w1_family_29 = {
+       .fid = W1_FAMILY_DS2408,
+       .fops = &w1_f29_fops,
+};
+
+static int __init w1_f29_init(void)
+{
+       return w1_register_family(&w1_family_29);
+}
+
+static void __exit w1_f29_exit(void)
+{
+       w1_unregister_family(&w1_family_29);
+}
+
+module_init(w1_f29_init);
+module_exit(w1_f29_exit);
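
The six binary attributes declared above show up under the DS2408 slave's directory on the w1 bus. The following userspace sketch (not part of the patch) drives the output latches and reads back the pin state; the slave directory name "29-0000001abcde" is a made-up example, so substitute the id of a real device found under /sys/bus/w1/devices/. Per the DS2408 datasheet the outputs are open drain, so a 1 bit releases a pin and a 0 bit pulls it low, and the driver above returns 1 for a successful single-byte transfer, which is why exactly one byte is written and read.

/* build: cc -o ds2408_demo ds2408_demo.c */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* example paths; the actual id depends on the attached slave */
	const char *out = "/sys/bus/w1/devices/29-0000001abcde/output";
	const char *state = "/sys/bus/w1/devices/29-0000001abcde/state";
	unsigned char val = 0xF0;	/* release P7..P4, pull P3..P0 low */
	unsigned char cur;
	int fd;

	fd = open(out, O_WRONLY);
	if (fd < 0 || write(fd, &val, 1) != 1) {
		perror("write output");
		return 1;
	}
	close(fd);

	fd = open(state, O_RDONLY);
	if (fd < 0 || read(fd, &cur, 1) != 1) {
		perror("read state");
		return 1;
	}
	close(fd);

	printf("PIO state: 0x%02x\n", cur);
	return 0;
}
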
diff --git a/drivers/w1/slaves/w1_ds2780.c b/drivers/w1/slaves/w1_ds2780.c
new file mode 100644 (file)
index 0000000..274c8f3
--- /dev/null
@@ -0,0 +1,217 @@
+/*
+ * 1-Wire implementation for the ds2780 chip
+ *
+ * Copyright (C) 2010 Indesign, LLC
+ *
+ * Author: Clifton Barnes <cabarnes@indesign-llc.com>
+ *
+ * Based on w1-ds2760 driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/idr.h>
+
+#include "../w1.h"
+#include "../w1_int.h"
+#include "../w1_family.h"
+#include "w1_ds2780.h"
+
+int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
+                       int io)
+{
+       struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+
+       if (!dev)
+               return -ENODEV;
+
+       mutex_lock(&sl->master->mutex);
+
+       if (addr > DS2780_DATA_SIZE || addr < 0) {
+               count = 0;
+               goto out;
+       }
+       count = min_t(int, count, DS2780_DATA_SIZE - addr);
+
+       if (w1_reset_select_slave(sl) == 0) {
+               if (io) {
+                       w1_write_8(sl->master, W1_DS2780_WRITE_DATA);
+                       w1_write_8(sl->master, addr);
+                       w1_write_block(sl->master, buf, count);
+                       /* XXX w1_write_block returns void, not n_written */
+               } else {
+                       w1_write_8(sl->master, W1_DS2780_READ_DATA);
+                       w1_write_8(sl->master, addr);
+                       count = w1_read_block(sl->master, buf, count);
+               }
+       }
+
+out:
+       mutex_unlock(&sl->master->mutex);
+
+       return count;
+}
+EXPORT_SYMBOL(w1_ds2780_io);
+
+int w1_ds2780_eeprom_cmd(struct device *dev, int addr, int cmd)
+{
+       struct w1_slave *sl = container_of(dev, struct w1_slave, dev);
+
+       if (!dev)
+               return -EINVAL;
+
+       mutex_lock(&sl->master->mutex);
+
+       if (w1_reset_select_slave(sl) == 0) {
+               w1_write_8(sl->master, cmd);
+               w1_write_8(sl->master, addr);
+       }
+
+       mutex_unlock(&sl->master->mutex);
+       return 0;
+}
+EXPORT_SYMBOL(w1_ds2780_eeprom_cmd);
+
+static ssize_t w1_ds2780_read_bin(struct file *filp,
+                                 struct kobject *kobj,
+                                 struct bin_attribute *bin_attr,
+                                 char *buf, loff_t off, size_t count)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       return w1_ds2780_io(dev, buf, off, count, 0);
+}
+
+static struct bin_attribute w1_ds2780_bin_attr = {
+       .attr = {
+               .name = "w1_slave",
+               .mode = S_IRUGO,
+       },
+       .size = DS2780_DATA_SIZE,
+       .read = w1_ds2780_read_bin,
+};
+
+static DEFINE_IDR(bat_idr);
+static DEFINE_MUTEX(bat_idr_lock);
+
+static int new_bat_id(void)
+{
+       int ret;
+
+       while (1) {
+               int id;
+
+               ret = idr_pre_get(&bat_idr, GFP_KERNEL);
+               if (ret == 0)
+                       return -ENOMEM;
+
+               mutex_lock(&bat_idr_lock);
+               ret = idr_get_new(&bat_idr, NULL, &id);
+               mutex_unlock(&bat_idr_lock);
+
+               if (ret == 0) {
+                       ret = id & MAX_ID_MASK;
+                       break;
+               } else if (ret == -EAGAIN) {
+                       continue;
+               } else {
+                       break;
+               }
+       }
+
+       return ret;
+}
+
+static void release_bat_id(int id)
+{
+       mutex_lock(&bat_idr_lock);
+       idr_remove(&bat_idr, id);
+       mutex_unlock(&bat_idr_lock);
+}
+
+static int w1_ds2780_add_slave(struct w1_slave *sl)
+{
+       int ret;
+       int id;
+       struct platform_device *pdev;
+
+       id = new_bat_id();
+       if (id < 0) {
+               ret = id;
+               goto noid;
+       }
+
+       pdev = platform_device_alloc("ds2780-battery", id);
+       if (!pdev) {
+               ret = -ENOMEM;
+               goto pdev_alloc_failed;
+       }
+       pdev->dev.parent = &sl->dev;
+
+       ret = platform_device_add(pdev);
+       if (ret)
+               goto pdev_add_failed;
+
+       ret = sysfs_create_bin_file(&sl->dev.kobj, &w1_ds2780_bin_attr);
+       if (ret)
+               goto bin_attr_failed;
+
+       dev_set_drvdata(&sl->dev, pdev);
+
+       return 0;
+
+bin_attr_failed:
+pdev_add_failed:
+       platform_device_unregister(pdev);
+pdev_alloc_failed:
+       release_bat_id(id);
+noid:
+       return ret;
+}
+
+static void w1_ds2780_remove_slave(struct w1_slave *sl)
+{
+       struct platform_device *pdev = dev_get_drvdata(&sl->dev);
+       int id = pdev->id;
+
+       platform_device_unregister(pdev);
+       release_bat_id(id);
+       sysfs_remove_bin_file(&sl->dev.kobj, &w1_ds2780_bin_attr);
+}
+
+static struct w1_family_ops w1_ds2780_fops = {
+       .add_slave    = w1_ds2780_add_slave,
+       .remove_slave = w1_ds2780_remove_slave,
+};
+
+static struct w1_family w1_ds2780_family = {
+       .fid = W1_FAMILY_DS2780,
+       .fops = &w1_ds2780_fops,
+};
+
+static int __init w1_ds2780_init(void)
+{
+       idr_init(&bat_idr);
+       return w1_register_family(&w1_ds2780_family);
+}
+
+static void __exit w1_ds2780_exit(void)
+{
+       w1_unregister_family(&w1_ds2780_family);
+       idr_destroy(&bat_idr);
+}
+
+module_init(w1_ds2780_init);
+module_exit(w1_ds2780_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>");
+MODULE_DESCRIPTION("1-wire Driver for Maxim/Dallas DS2780 Stand-Alone Fuel Gauge IC");
diff --git a/drivers/w1/slaves/w1_ds2780.h b/drivers/w1/slaves/w1_ds2780.h
new file mode 100644 (file)
index 0000000..a1fba79
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ * 1-Wire implementation for the ds2780 chip
+ *
+ * Copyright (C) 2010 Indesign, LLC
+ *
+ * Author: Clifton Barnes <cabarnes@indesign-llc.com>
+ *
+ * Based on w1-ds2760 driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _W1_DS2780_H
+#define _W1_DS2780_H
+
+/* Function commands */
+#define W1_DS2780_READ_DATA            0x69
+#define W1_DS2780_WRITE_DATA           0x6C
+#define W1_DS2780_COPY_DATA            0x48
+#define W1_DS2780_RECALL_DATA          0xB8
+#define W1_DS2780_LOCK                 0x6A
+
+/* Register map */
+/* Register 0x00 Reserved */
+#define DS2780_STATUS_REG              0x01
+#define DS2780_RAAC_MSB_REG            0x02
+#define DS2780_RAAC_LSB_REG            0x03
+#define DS2780_RSAC_MSB_REG            0x04
+#define DS2780_RSAC_LSB_REG            0x05
+#define DS2780_RARC_REG                        0x06
+#define DS2780_RSRC_REG                        0x07
+#define DS2780_IAVG_MSB_REG            0x08
+#define DS2780_IAVG_LSB_REG            0x09
+#define DS2780_TEMP_MSB_REG            0x0A
+#define DS2780_TEMP_LSB_REG            0x0B
+#define DS2780_VOLT_MSB_REG            0x0C
+#define DS2780_VOLT_LSB_REG            0x0D
+#define DS2780_CURRENT_MSB_REG         0x0E
+#define DS2780_CURRENT_LSB_REG         0x0F
+#define DS2780_ACR_MSB_REG             0x10
+#define DS2780_ACR_LSB_REG             0x11
+#define DS2780_ACRL_MSB_REG            0x12
+#define DS2780_ACRL_LSB_REG            0x13
+#define DS2780_AS_REG                  0x14
+#define DS2780_SFR_REG                 0x15
+#define DS2780_FULL_MSB_REG            0x16
+#define DS2780_FULL_LSB_REG            0x17
+#define DS2780_AE_MSB_REG              0x18
+#define DS2780_AE_LSB_REG              0x19
+#define DS2780_SE_MSB_REG              0x1A
+#define DS2780_SE_LSB_REG              0x1B
+/* Register 0x1C - 0x1E Reserved */
+#define DS2780_EEPROM_REG              0x1F
+#define DS2780_EEPROM_BLOCK0_START     0x20
+/* Register 0x20 - 0x2F User EEPROM */
+#define DS2780_EEPROM_BLOCK0_END       0x2F
+/* Register 0x30 - 0x5F Reserved */
+#define DS2780_EEPROM_BLOCK1_START     0x60
+#define DS2780_CONTROL_REG             0x60
+#define DS2780_AB_REG                  0x61
+#define DS2780_AC_MSB_REG              0x62
+#define DS2780_AC_LSB_REG              0x63
+#define DS2780_VCHG_REG                        0x64
+#define DS2780_IMIN_REG                        0x65
+#define DS2780_VAE_REG                 0x66
+#define DS2780_IAE_REG                 0x67
+#define DS2780_AE_40_REG               0x68
+#define DS2780_RSNSP_REG               0x69
+#define DS2780_FULL_40_MSB_REG         0x6A
+#define DS2780_FULL_40_LSB_REG         0x6B
+#define DS2780_FULL_3040_SLOPE_REG     0x6C
+#define DS2780_FULL_2030_SLOPE_REG     0x6D
+#define DS2780_FULL_1020_SLOPE_REG     0x6E
+#define DS2780_FULL_0010_SLOPE_REG     0x6F
+#define DS2780_AE_3040_SLOPE_REG       0x70
+#define DS2780_AE_2030_SLOPE_REG       0x71
+#define DS2780_AE_1020_SLOPE_REG       0x72
+#define DS2780_AE_0010_SLOPE_REG       0x73
+#define DS2780_SE_3040_SLOPE_REG       0x74
+#define DS2780_SE_2030_SLOPE_REG       0x75
+#define DS2780_SE_1020_SLOPE_REG       0x76
+#define DS2780_SE_0010_SLOPE_REG       0x77
+#define DS2780_RSGAIN_MSB_REG          0x78
+#define DS2780_RSGAIN_LSB_REG          0x79
+#define DS2780_RSTC_REG                        0x7A
+#define DS2780_FRSGAIN_MSB_REG         0x7B
+#define DS2780_FRSGAIN_LSB_REG         0x7C
+#define DS2780_EEPROM_BLOCK1_END       0x7C
+/* Register 0x7D - 0xFF Reserved */
+
+/* Number of valid register addresses */
+#define DS2780_DATA_SIZE               0x80
+
+/* Status register bits */
+#define DS2780_STATUS_REG_CHGTF                (1 << 7)
+#define DS2780_STATUS_REG_AEF          (1 << 6)
+#define DS2780_STATUS_REG_SEF          (1 << 5)
+#define DS2780_STATUS_REG_LEARNF       (1 << 4)
+/* Bit 3 Reserved */
+#define DS2780_STATUS_REG_UVF          (1 << 2)
+#define DS2780_STATUS_REG_PORF         (1 << 1)
+/* Bit 0 Reserved */
+
+/* Control register bits */
+/* Bit 7 Reserved */
+#define DS2780_CONTROL_REG_UVEN                (1 << 6)
+#define DS2780_CONTROL_REG_PMOD                (1 << 5)
+#define DS2780_CONTROL_REG_RNAOP       (1 << 4)
+/* Bit 0 - 3 Reserved */
+
+/* Special feature register bits */
+/* Bit 1 - 7 Reserved */
+#define DS2780_SFR_REG_PIOSC           (1 << 0)
+
+/* EEPROM register bits */
+#define DS2780_EEPROM_REG_EEC          (1 << 7)
+#define DS2780_EEPROM_REG_LOCK         (1 << 6)
+/* Bit 2 - 6 Reserved */
+#define DS2780_EEPROM_REG_BL1          (1 << 1)
+#define DS2780_EEPROM_REG_BL0          (1 << 0)
+
+extern int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count,
+                       int io);
+extern int w1_ds2780_eeprom_cmd(struct device *dev, int addr, int cmd);
+
+#endif /* !_W1_DS2780_H */
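
The register map above is consumed through the two exported helpers, w1_ds2780_io() and w1_ds2780_eeprom_cmd(). As an illustrative sketch only, assuming the caller already holds the w1 slave's struct device (for example the parent of the "ds2780-battery" platform device registered earlier), reading the raw voltage register pair could look like the function below; converting the raw value to microvolts follows the DS2780 datasheet and is not shown. The helper name is hypothetical and not part of this patch.

/* Hypothetical helper; assumes <linux/types.h>, <linux/errno.h> and "w1_ds2780.h". */
static int ds2780_read_raw_voltage(struct device *dev, u16 *raw)
{
	char buf[2];
	int ret;

	/* read VOLT MSB (0x0C) and LSB (0x0D) in one transfer */
	ret = w1_ds2780_io(dev, buf, DS2780_VOLT_MSB_REG, sizeof(buf), 0);
	if (ret < 0)
		return ret;
	if (ret < (int)sizeof(buf))
		return -EIO;

	*raw = ((u8)buf[0] << 8) | (u8)buf[1];
	return 0;
}
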
index b7b5014ff7140262cbbcbe32d9530cb2936105e3..10606c822756a3c7ab9ba6de5923a263dab67150 100644 (file)
@@ -827,7 +827,7 @@ void w1_reconnect_slaves(struct w1_family *f, int attach)
        mutex_unlock(&w1_mlock);
 }
 
-static void w1_slave_found(struct w1_master *dev, u64 rn)
+void w1_slave_found(struct w1_master *dev, u64 rn)
 {
        struct w1_slave *sl;
        struct w1_reg_num *tmp;
@@ -933,14 +933,15 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb
        }
 }
 
-void w1_search_process(struct w1_master *dev, u8 search_type)
+void w1_search_process_cb(struct w1_master *dev, u8 search_type,
+       w1_slave_found_callback cb)
 {
        struct w1_slave *sl, *sln;
 
        list_for_each_entry(sl, &dev->slist, w1_slave_entry)
                clear_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags);
 
-       w1_search_devices(dev, search_type, w1_slave_found);
+       w1_search_devices(dev, search_type, cb);
 
        list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) {
                if (!test_bit(W1_SLAVE_ACTIVE, (unsigned long *)&sl->flags) && !--sl->ttl)
@@ -953,6 +954,11 @@ void w1_search_process(struct w1_master *dev, u8 search_type)
                dev->search_count--;
 }
 
+static void w1_search_process(struct w1_master *dev, u8 search_type)
+{
+       w1_search_process_cb(dev, search_type, w1_slave_found);
+}
+
 int w1_process(void *data)
 {
        struct w1_master *dev = (struct w1_master *) data;
index d8a9709f3449159a2f8ada1c2ca328316414ed79..1ce23fc6186c36def2f0ed2608fb0b010ab32501 100644 (file)
@@ -55,6 +55,7 @@ struct w1_reg_num
 #define W1_READ_ROM            0x33
 #define W1_READ_PSUPPLY                0xB4
 #define W1_MATCH_ROM           0x55
+#define W1_RESUME_CMD          0xA5
 
 #define W1_SLAVE_ACTIVE                0
 
@@ -193,7 +194,9 @@ void w1_destroy_master_attributes(struct w1_master *master);
 void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb);
 void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb);
 struct w1_slave *w1_search_slave(struct w1_reg_num *id);
-void w1_search_process(struct w1_master *dev, u8 search_type);
+void w1_slave_found(struct w1_master *dev, u64 rn);
+void w1_search_process_cb(struct w1_master *dev, u8 search_type,
+       w1_slave_found_callback cb);
 struct w1_master *w1_search_master_id(u32 id);
 
 /* Disconnect and reconnect devices in the given family.  Used for finding
@@ -213,6 +216,7 @@ void w1_write_block(struct w1_master *, const u8 *, int);
 void w1_touch_block(struct w1_master *, u8 *, int);
 u8 w1_read_block(struct w1_master *, u8 *, int);
 int w1_reset_select_slave(struct w1_slave *sl);
+int w1_reset_resume_command(struct w1_master *);
 void w1_next_pullup(struct w1_master *, int);
 
 static inline struct w1_slave* dev_to_w1_slave(struct device *dev)
index f3b636d7cafe76360bf41e6be048eddb9da3c256..97479ae70b9cbb23da15189f4a2c69941a46d94b 100644 (file)
 #define W1_THERM_DS1822        0x22
 #define W1_EEPROM_DS2433       0x23
 #define W1_THERM_DS18B20       0x28
+#define W1_FAMILY_DS2408       0x29
 #define W1_EEPROM_DS2431       0x2D
 #define W1_FAMILY_DS2760       0x30
+#define W1_FAMILY_DS2780       0x32
 
 #define MAXNAMELEN             32
 
index 3ebe9726a9e55471ae9383fe262fc61de2727200..8e8b64cfafb69a417bded8e6f3f745ca3a7e44a1 100644 (file)
@@ -389,6 +389,32 @@ int w1_reset_select_slave(struct w1_slave *sl)
 }
 EXPORT_SYMBOL_GPL(w1_reset_select_slave);
 
+/**
+ * When the workflow with a slave amongst many requires several
+ * successive commands, with a reset between each, this function is
+ * similar to doing a reset followed by a match ROM for the last matched
+ * ROM.  The advantage is that the match ROM step is skipped in favor of
+ * the resume command.  The slave must support the command, of course.
+ *
+ * If the bus has only one slave, the match ROM is traditionally skipped
+ * and a "SKIP ROM" is issued instead for efficiency.  On multi-slave
+ * buses this doesn't work, but the resume command is the next best thing.
+ *
+ * The w1 master lock must be held.
+ *
+ * @dev: the master device
+ */
+int w1_reset_resume_command(struct w1_master *dev)
+{
+       if (w1_reset_bus(dev))
+               return -1;
+
+       /* This will make only the last matched slave perform a skip ROM. */
+       w1_write_8(dev, W1_RESUME_CMD);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(w1_reset_resume_command);
+
 /**
  * Put out a strong pull-up of the specified duration after the next write
  * operation.  Not all hardware supports strong pullups.  Hardware that
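
To make the w1_reset_resume_command() helper added in this hunk concrete, the pattern it enables looks roughly like the sketch below, modeled on the DS2408 driver added earlier in this commit. The function name and the EXAMPLE_CMD_* command bytes are placeholders, not a real slave protocol; the master mutex must be held as in that driver.

/* Sketch only; assumes the usual "../w1.h" / "../w1_int.h" includes. */
#define EXAMPLE_CMD_FIRST	0xF0	/* placeholder function command */
#define EXAMPLE_CMD_SECOND	0xF5	/* placeholder function command */

static int example_two_step_transaction(struct w1_slave *sl, u8 *val)
{
	int ret = -EIO;

	mutex_lock(&sl->master->mutex);
	if (w1_reset_select_slave(sl))		/* reset + match ROM, once */
		goto out;

	w1_write_8(sl->master, EXAMPLE_CMD_FIRST);
	*val = w1_read_8(sl->master);

	/* cheap reset + RESUME instead of another full match ROM */
	if (w1_reset_resume_command(sl->master))
		goto out;

	w1_write_8(sl->master, EXAMPLE_CMD_SECOND);
	*val = w1_read_8(sl->master);
	ret = 0;
out:
	mutex_unlock(&sl->master->mutex);
	return ret;
}
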
index 7e667bc77ef2d02649fbe32cd69b4ab94e06e9f6..55aabd927c60557e82c01a3eb11d92cd9bcd18c1 100644 (file)
@@ -55,6 +55,9 @@ static void w1_send_slave(struct w1_master *dev, u64 rn)
        struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1);
        int avail;
 
+       /* update kernel slave list */
+       w1_slave_found(dev, rn);
+
        avail = dev->priv_size - cmd->len;
 
        if (avail > 8) {
@@ -85,7 +88,7 @@ static int w1_process_search_command(struct w1_master *dev, struct cn_msg *msg,
        dev->priv = msg;
        dev->priv_size = avail;
 
-       w1_search_devices(dev, search_type, w1_send_slave);
+       w1_search_process_cb(dev, search_type, w1_send_slave);
 
        msg->ack = 0;
        cn_netlink_send(msg, 0, GFP_KERNEL);
index d8e725082fdc1c103d66f91eab6694e65af18c06..428f8a1583e8598ae69414f8171e21a3bbbf2ba8 100644 (file)
@@ -37,7 +37,6 @@
 #include <linux/io.h>
 #include <linux/uaccess.h>
 #include <linux/mfd/rdc321x.h>
-#include <linux/mfd/core.h>
 
 #define RDC_WDT_MASK   0x80000000 /* Mask */
 #define RDC_WDT_EN     0x00800000 /* Enable bit */
@@ -232,7 +231,7 @@ static int __devinit rdc321x_wdt_probe(struct platform_device *pdev)
        struct resource *r;
        struct rdc321x_wdt_pdata *pdata;
 
-       pdata = mfd_get_data(pdev);
+       pdata = pdev->dev.platform_data;
        if (!pdata) {
                dev_err(&pdev->dev, "no platform data supplied\n");
                return -ENODEV;
index 4781f806701d93c1e1a81917cdb2130d79d491b4..bbc18258ecc5419ec2210b83684afc528fecd0da 100644 (file)
@@ -1,5 +1,6 @@
 obj-y  += grant-table.o features.o events.o manage.o balloon.o
 obj-y  += xenbus/
+obj-y  += tmem.o
 
 nostackp := $(call cc-option, -fno-stack-protector)
 CFLAGS_features.o                      := $(nostackp)
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
new file mode 100644 (file)
index 0000000..816a449
--- /dev/null
@@ -0,0 +1,264 @@
+/*
+ * Xen implementation for transcendent memory (tmem)
+ *
+ * Copyright (C) 2009-2010 Oracle Corp.  All rights reserved.
+ * Author: Dan Magenheimer
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/cleancache.h>
+
+#include <xen/xen.h>
+#include <xen/interface/xen.h>
+#include <asm/xen/hypercall.h>
+#include <asm/xen/page.h>
+#include <asm/xen/hypervisor.h>
+
+#define TMEM_CONTROL               0
+#define TMEM_NEW_POOL              1
+#define TMEM_DESTROY_POOL          2
+#define TMEM_NEW_PAGE              3
+#define TMEM_PUT_PAGE              4
+#define TMEM_GET_PAGE              5
+#define TMEM_FLUSH_PAGE            6
+#define TMEM_FLUSH_OBJECT          7
+#define TMEM_READ                  8
+#define TMEM_WRITE                 9
+#define TMEM_XCHG                 10
+
+/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
+#define TMEM_POOL_PERSIST          1
+#define TMEM_POOL_SHARED           2
+#define TMEM_POOL_PAGESIZE_SHIFT   4
+#define TMEM_VERSION_SHIFT        24
+
+
+struct tmem_pool_uuid {
+       u64 uuid_lo;
+       u64 uuid_hi;
+};
+
+struct tmem_oid {
+       u64 oid[3];
+};
+
+#define TMEM_POOL_PRIVATE_UUID { 0, 0 }
+
+/* flags for tmem_ops.new_pool */
+#define TMEM_POOL_PERSIST          1
+#define TMEM_POOL_SHARED           2
+
+/* xen tmem foundation ops/hypercalls */
+
+static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
+       u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len)
+{
+       struct tmem_op op;
+       int rc = 0;
+
+       op.cmd = tmem_cmd;
+       op.pool_id = tmem_pool;
+       op.u.gen.oid[0] = oid.oid[0];
+       op.u.gen.oid[1] = oid.oid[1];
+       op.u.gen.oid[2] = oid.oid[2];
+       op.u.gen.index = index;
+       op.u.gen.tmem_offset = tmem_offset;
+       op.u.gen.pfn_offset = pfn_offset;
+       op.u.gen.len = len;
+       set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
+       rc = HYPERVISOR_tmem_op(&op);
+       return rc;
+}
+
+static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
+                               u32 flags, unsigned long pagesize)
+{
+       struct tmem_op op;
+       int rc = 0, pageshift;
+
+       for (pageshift = 0; pagesize != 1; pageshift++)
+               pagesize >>= 1;
+       flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
+       flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
+       op.cmd = TMEM_NEW_POOL;
+       op.u.new.uuid[0] = uuid.uuid_lo;
+       op.u.new.uuid[1] = uuid.uuid_hi;
+       op.u.new.flags = flags;
+       rc = HYPERVISOR_tmem_op(&op);
+       return rc;
+}
+
+/* xen generic tmem ops */
+
+static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
+                            u32 index, unsigned long pfn)
+{
+       unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
+
+       return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
+               gmfn, 0, 0, 0);
+}
+
+static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
+                            u32 index, unsigned long pfn)
+{
+       unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
+
+       return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
+               gmfn, 0, 0, 0);
+}
+
+static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
+{
+       return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
+               0, 0, 0, 0);
+}
+
+static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
+{
+       return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
+}
+
+static int xen_tmem_destroy_pool(u32 pool_id)
+{
+       struct tmem_oid oid = { { 0 } };
+
+       return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
+}
+
+int tmem_enabled;
+
+static int __init enable_tmem(char *s)
+{
+       tmem_enabled = 1;
+       return 1;
+}
+
+__setup("tmem", enable_tmem);
+
+/* cleancache ops */
+
+static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
+                                    pgoff_t index, struct page *page)
+{
+       u32 ind = (u32) index;
+       struct tmem_oid oid = *(struct tmem_oid *)&key;
+       unsigned long pfn = page_to_pfn(page);
+
+       if (pool < 0)
+               return;
+       if (ind != index)
+               return;
+       mb(); /* ensure page is quiescent; tmem may address it with an alias */
+       (void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
+}
+
+static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
+                                   pgoff_t index, struct page *page)
+{
+       u32 ind = (u32) index;
+       struct tmem_oid oid = *(struct tmem_oid *)&key;
+       unsigned long pfn = page_to_pfn(page);
+       int ret;
+
+       /* translate return values to linux semantics */
+       if (pool < 0)
+               return -1;
+       if (ind != index)
+               return -1;
+       ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
+       if (ret == 1)
+               return 0;
+       else
+               return -1;
+}
+
+static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
+                                      pgoff_t index)
+{
+       u32 ind = (u32) index;
+       struct tmem_oid oid = *(struct tmem_oid *)&key;
+
+       if (pool < 0)
+               return;
+       if (ind != index)
+               return;
+       (void)xen_tmem_flush_page((u32)pool, oid, ind);
+}
+
+static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
+{
+       struct tmem_oid oid = *(struct tmem_oid *)&key;
+
+       if (pool < 0)
+               return;
+       (void)xen_tmem_flush_object((u32)pool, oid);
+}
+
+static void tmem_cleancache_flush_fs(int pool)
+{
+       if (pool < 0)
+               return;
+       (void)xen_tmem_destroy_pool((u32)pool);
+}
+
+static int tmem_cleancache_init_fs(size_t pagesize)
+{
+       struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;
+
+       return xen_tmem_new_pool(uuid_private, 0, pagesize);
+}
+
+static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
+{
+       struct tmem_pool_uuid shared_uuid;
+
+       shared_uuid.uuid_lo = *(u64 *)uuid;
+       shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
+       return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
+}
+
+static int use_cleancache = 1;
+
+static int __init no_cleancache(char *s)
+{
+       use_cleancache = 0;
+       return 1;
+}
+
+__setup("nocleancache", no_cleancache);
+
+static struct cleancache_ops tmem_cleancache_ops = {
+       .put_page = tmem_cleancache_put_page,
+       .get_page = tmem_cleancache_get_page,
+       .flush_page = tmem_cleancache_flush_page,
+       .flush_inode = tmem_cleancache_flush_inode,
+       .flush_fs = tmem_cleancache_flush_fs,
+       .init_shared_fs = tmem_cleancache_init_shared_fs,
+       .init_fs = tmem_cleancache_init_fs
+};
+
+static int __init xen_tmem_init(void)
+{
+       struct cleancache_ops old_ops;
+
+       if (!xen_domain())
+               return 0;
+#ifdef CONFIG_CLEANCACHE
+       BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
+       if (tmem_enabled && use_cleancache) {
+               char *s = "";
+               old_ops = cleancache_register_ops(&tmem_cleancache_ops);
+               if (old_ops.init_fs != NULL)
+                       s = " (WARNING: cleancache_ops overridden)";
+               printk(KERN_INFO "cleancache enabled, RAM provided by "
+                                "Xen Transcendent Memory%s\n", s);
+       }
+#endif
+       return 0;
+}
+
+module_init(xen_tmem_init)
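
One detail of xen_tmem_new_pool() worth spelling out is how the pool flags word is packed: the page-size shift, relative to 4 KiB (hence the "- 12"), lands in bits 4 and up, and the tmem spec version in bits 24 and up. The standalone worked example below shows the result for a 4 KiB page size; it assumes TMEM_SPEC_VERSION is 1, whereas the real value comes from the Xen interface headers.

/* Standalone worked example of the flag packing in xen_tmem_new_pool(). */
#include <stdio.h>

#define TMEM_POOL_PAGESIZE_SHIFT   4
#define TMEM_VERSION_SHIFT        24
/* assumption for illustration; the real value lives in the Xen headers */
#define ASSUMED_TMEM_SPEC_VERSION  1

int main(void)
{
	unsigned long pagesize = 4096;
	unsigned int flags = 0;
	int pageshift;

	/* same loop as the driver: compute log2(pagesize) */
	for (pageshift = 0; pagesize != 1; pageshift++)
		pagesize >>= 1;

	flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;	/* 0 for 4 KiB */
	flags |= ASSUMED_TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;

	/* prints: pageshift=12 flags=0x01000000 */
	printf("pageshift=%d flags=0x%08x\n", pageshift, flags);
	return 0;
}
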
index 7f6c6770319538c3094f7b26eb40fc18a398475d..8d7f3e69ae29072c28a0760222fb9b7cebcbcd2b 100644 (file)
@@ -814,6 +814,7 @@ int v9fs_vfs_unlink(struct inode *i, struct dentry *d)
 
 int v9fs_vfs_rmdir(struct inode *i, struct dentry *d)
 {
+       dentry_unhash(d);
        return v9fs_remove(i, d, 1);
 }
 
@@ -839,6 +840,9 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct p9_fid *newdirfid;
        struct p9_wstat wstat;
 
+       if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
+               dentry_unhash(new_dentry);
+
        P9_DPRINTK(P9_DEBUG_VFS, "\n");
        retval = 0;
        old_inode = old_dentry->d_inode;
index f6edba2e069f3a4a13f3c06a4786cfa9ed2532c3..19891aab9c6ed7255388f175eabfcd7e4f5bb253 100644 (file)
@@ -47,7 +47,7 @@ config FS_POSIX_ACL
        def_bool n
 
 config EXPORTFS
-       bool
+       tristate
 
 config FILE_LOCKING
        bool "Enable POSIX file locking API" if EXPERT
index e3e9efc1fdd8276b19eaccb02747afeb27172671..03330e2e390c130ef055cf45547c36fa3f4917e9 100644 (file)
@@ -320,6 +320,8 @@ affs_rmdir(struct inode *dir, struct dentry *dentry)
                 dentry->d_inode->i_ino,
                 (int)dentry->d_name.len, dentry->d_name.name);
 
+       dentry_unhash(dentry);
+
        return affs_remove_header(dentry);
 }
 
@@ -417,6 +419,9 @@ affs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct buffer_head *bh = NULL;
        int retval;
 
+       if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
+               dentry_unhash(new_dentry);
+
        pr_debug("AFFS: rename(old=%u,\"%*s\" to new=%u,\"%*s\")\n",
                 (u32)old_dir->i_ino, (int)old_dentry->d_name.len, old_dentry->d_name.name,
                 (u32)new_dir->i_ino, (int)new_dentry->d_name.len, new_dentry->d_name.name);
index 20c106f2492740f7de1615ee7712207b74a33828..2c4e051600420381e647f0d35810961e355d99f4 100644 (file)
@@ -845,6 +845,8 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
        _enter("{%x:%u},{%s}",
               dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name);
 
+       dentry_unhash(dentry);
+
        ret = -ENAMETOOLONG;
        if (dentry->d_name.len >= AFSNAMEMAX)
                goto error;
@@ -1146,6 +1148,9 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct key *key;
        int ret;
 
+       if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
+               dentry_unhash(new_dentry);
+
        vnode = AFS_FS_I(old_dentry->d_inode);
        orig_dvnode = AFS_FS_I(old_dir);
        new_dvnode = AFS_FS_I(new_dir);
index f55ae23b137e2a21001948e1c4b6b17795abc1b0..87d95a8cddbc4223c77e9aa642292b784912b807 100644 (file)
@@ -583,6 +583,8 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
        if (!autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN))
                return -EACCES;
 
+       dentry_unhash(dentry);
+
        if (atomic_dec_and_test(&ino->count)) {
                p_ino = autofs4_dentry_ino(dentry->d_parent);
                if (p_ino && dentry->d_parent != dentry)
index b14cebfd90477ead4b15f0c1583da6393c247de5..c7d1d06b0483e3713f491cdad1d4cc3ce91500fe 100644 (file)
@@ -224,6 +224,9 @@ static int bfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct bfs_sb_info *info;
        int error = -ENOENT;
 
+       if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
+               dentry_unhash(new_dentry);
+
        old_bh = new_bh = NULL;
        old_inode = old_dentry->d_inode;
        if (S_ISDIR(old_inode->i_mode))
index 31610ea73aec2bff3410ec7c65b3b52e2c7a14ba..9b72dcf1cd258bd2e7733079b399ddd6413e2826 100644 (file)
@@ -7,4 +7,4 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
           extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
           extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
           export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \
-          compression.o delayed-ref.o relocation.o
+          compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o
index 44ea5b92e1ba891181235a24ba53785ff7acbfa0..f66fc99597331383890ac4847329faf6a494ddb0 100644 (file)
@@ -288,7 +288,7 @@ int btrfs_acl_chmod(struct inode *inode)
                return 0;
 
        acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS);
-       if (IS_ERR(acl) || !acl)
+       if (IS_ERR_OR_NULL(acl))
                return PTR_ERR(acl);
 
        clone = posix_acl_clone(acl, GFP_KERNEL);
index 57c3bb2884ceabd2ea4be939557a6c273c9e8626..93b1aa9320143e921a31dde1d0586529ce75ea8e 100644 (file)
@@ -22,6 +22,7 @@
 #include "extent_map.h"
 #include "extent_io.h"
 #include "ordered-data.h"
+#include "delayed-inode.h"
 
 /* in memory btrfs inode */
 struct btrfs_inode {
@@ -152,20 +153,34 @@ struct btrfs_inode {
        unsigned ordered_data_close:1;
        unsigned orphan_meta_reserved:1;
        unsigned dummy_inode:1;
+       unsigned in_defrag:1;
 
        /*
         * always compress this one file
         */
        unsigned force_compress:4;
 
+       struct btrfs_delayed_node *delayed_node;
+
        struct inode vfs_inode;
 };
 
+extern unsigned char btrfs_filetype_table[];
+
 static inline struct btrfs_inode *BTRFS_I(struct inode *inode)
 {
        return container_of(inode, struct btrfs_inode, vfs_inode);
 }
 
+static inline u64 btrfs_ino(struct inode *inode)
+{
+       u64 ino = BTRFS_I(inode)->location.objectid;
+
+       if (ino <= BTRFS_FIRST_FREE_OBJECTID)
+               ino = inode->i_ino;
+       return ino;
+}
+
 static inline void btrfs_i_size_write(struct inode *inode, u64 size)
 {
        i_size_write(inode, size);
index 41d1d7c70e29d2aed3a347e5b9d74d2a810d7b46..bfe42b03eaf9b3cc0bdf14b3975300e9a30b8584 100644 (file)
@@ -125,9 +125,10 @@ static int check_compressed_csum(struct inode *inode,
                kunmap_atomic(kaddr, KM_USER0);
 
                if (csum != *cb_sum) {
-                       printk(KERN_INFO "btrfs csum failed ino %lu "
+                       printk(KERN_INFO "btrfs csum failed ino %llu "
                               "extent %llu csum %u "
-                              "wanted %u mirror %d\n", inode->i_ino,
+                              "wanted %u mirror %d\n",
+                              (unsigned long long)btrfs_ino(inode),
                               (unsigned long long)disk_start,
                               csum, *cb_sum, cb->mirror_num);
                        ret = -EIO;
@@ -332,7 +333,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
        struct compressed_bio *cb;
        unsigned long bytes_left;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
-       int page_index = 0;
+       int pg_index = 0;
        struct page *page;
        u64 first_byte = disk_start;
        struct block_device *bdev;
@@ -366,8 +367,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 
        /* create and submit bios for the compressed pages */
        bytes_left = compressed_len;
-       for (page_index = 0; page_index < cb->nr_pages; page_index++) {
-               page = compressed_pages[page_index];
+       for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
+               page = compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
                if (bio->bi_size)
                        ret = io_tree->ops->merge_bio_hook(page, 0,
@@ -432,7 +433,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
                                     struct compressed_bio *cb)
 {
        unsigned long end_index;
-       unsigned long page_index;
+       unsigned long pg_index;
        u64 last_offset;
        u64 isize = i_size_read(inode);
        int ret;
@@ -456,13 +457,13 @@ static noinline int add_ra_bio_pages(struct inode *inode,
        end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;
 
        while (last_offset < compressed_end) {
-               page_index = last_offset >> PAGE_CACHE_SHIFT;
+               pg_index = last_offset >> PAGE_CACHE_SHIFT;
 
-               if (page_index > end_index)
+               if (pg_index > end_index)
                        break;
 
                rcu_read_lock();
-               page = radix_tree_lookup(&mapping->page_tree, page_index);
+               page = radix_tree_lookup(&mapping->page_tree, pg_index);
                rcu_read_unlock();
                if (page) {
                        misses++;
@@ -476,7 +477,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
                if (!page)
                        break;
 
-               if (add_to_page_cache_lru(page, mapping, page_index,
+               if (add_to_page_cache_lru(page, mapping, pg_index,
                                                                GFP_NOFS)) {
                        page_cache_release(page);
                        goto next;
@@ -560,7 +561,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
        unsigned long compressed_len;
        unsigned long nr_pages;
-       unsigned long page_index;
+       unsigned long pg_index;
        struct page *page;
        struct block_device *bdev;
        struct bio *comp_bio;
@@ -613,10 +614,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 
        bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
 
-       for (page_index = 0; page_index < nr_pages; page_index++) {
-               cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
+       for (pg_index = 0; pg_index < nr_pages; pg_index++) {
+               cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
                                                              __GFP_HIGHMEM);
-               if (!cb->compressed_pages[page_index])
+               if (!cb->compressed_pages[pg_index])
                        goto fail2;
        }
        cb->nr_pages = nr_pages;
@@ -634,8 +635,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        comp_bio->bi_end_io = end_compressed_bio_read;
        atomic_inc(&cb->pending_bios);
 
-       for (page_index = 0; page_index < nr_pages; page_index++) {
-               page = cb->compressed_pages[page_index];
+       for (pg_index = 0; pg_index < nr_pages; pg_index++) {
+               page = cb->compressed_pages[pg_index];
                page->mapping = inode->i_mapping;
                page->index = em_start >> PAGE_CACHE_SHIFT;
 
@@ -702,8 +703,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
        return 0;
 
 fail2:
-       for (page_index = 0; page_index < nr_pages; page_index++)
-               free_page((unsigned long)cb->compressed_pages[page_index]);
+       for (pg_index = 0; pg_index < nr_pages; pg_index++)
+               free_page((unsigned long)cb->compressed_pages[pg_index]);
 
        kfree(cb->compressed_pages);
 fail1:
@@ -945,7 +946,7 @@ void btrfs_exit_compress(void)
 int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
                              unsigned long total_out, u64 disk_start,
                              struct bio_vec *bvec, int vcnt,
-                             unsigned long *page_index,
+                             unsigned long *pg_index,
                              unsigned long *pg_offset)
 {
        unsigned long buf_offset;
@@ -954,7 +955,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
        unsigned long working_bytes = total_out - buf_start;
        unsigned long bytes;
        char *kaddr;
-       struct page *page_out = bvec[*page_index].bv_page;
+       struct page *page_out = bvec[*pg_index].bv_page;
 
        /*
         * start byte is the first byte of the page we're currently
@@ -995,11 +996,11 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 
                /* check if we need to pick another page */
                if (*pg_offset == PAGE_CACHE_SIZE) {
-                       (*page_index)++;
-                       if (*page_index >= vcnt)
+                       (*pg_index)++;
+                       if (*pg_index >= vcnt)
                                return 0;
 
-                       page_out = bvec[*page_index].bv_page;
+                       page_out = bvec[*pg_index].bv_page;
                        *pg_offset = 0;
                        start_byte = page_offset(page_out) - disk_start;
 
index 51000174b9d7ba687f3fda58ab44e6479be24e82..a12059f4f0fd3c70fd6302abe4ab3e83fa5797fb 100644 (file)
@@ -37,7 +37,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
 int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
                              unsigned long total_out, u64 disk_start,
                              struct bio_vec *bvec, int vcnt,
-                             unsigned long *page_index,
+                             unsigned long *pg_index,
                              unsigned long *pg_offset);
 
 int btrfs_submit_compressed_write(struct inode *inode, u64 start,
index 84d7ca1fe0bac42a58c9115f2dccaa68efb6b3b6..b0e18d986e0ac37f432dbad1141aea714d34f6a0 100644 (file)
@@ -38,11 +38,6 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
                              struct extent_buffer *src_buf);
 static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                   struct btrfs_path *path, int level, int slot);
-static int setup_items_for_insert(struct btrfs_trans_handle *trans,
-                       struct btrfs_root *root, struct btrfs_path *path,
-                       struct btrfs_key *cpu_key, u32 *data_size,
-                       u32 total_data, u32 total_size, int nr);
-
 
 struct btrfs_path *btrfs_alloc_path(void)
 {
@@ -107,7 +102,7 @@ void btrfs_free_path(struct btrfs_path *p)
 {
        if (!p)
                return;
-       btrfs_release_path(NULL, p);
+       btrfs_release_path(p);
        kmem_cache_free(btrfs_path_cachep, p);
 }
 
@@ -117,7 +112,7 @@ void btrfs_free_path(struct btrfs_path *p)
  *
  * It is safe to call this on paths that no locks or extent buffers held.
  */
-noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p)
+noinline void btrfs_release_path(struct btrfs_path *p)
 {
        int i;
 
@@ -1328,7 +1323,7 @@ static noinline int reada_for_balance(struct btrfs_root *root,
                ret = -EAGAIN;
 
                /* release the whole path */
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
 
                /* read the blocks */
                if (block1)
@@ -1475,7 +1470,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
                                return 0;
                        }
                        free_extent_buffer(tmp);
-                       btrfs_release_path(NULL, p);
+                       btrfs_release_path(p);
                        return -EIO;
                }
        }
@@ -1494,7 +1489,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
        if (p->reada)
                reada_for_search(root, p, level, slot, key->objectid);
 
-       btrfs_release_path(NULL, p);
+       btrfs_release_path(p);
 
        ret = -EAGAIN;
        tmp = read_tree_block(root, blocknr, blocksize, 0);
@@ -1563,7 +1558,7 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans,
                }
                b = p->nodes[level];
                if (!b) {
-                       btrfs_release_path(NULL, p);
+                       btrfs_release_path(p);
                        goto again;
                }
                BUG_ON(btrfs_header_nritems(b) == 1);
@@ -1753,7 +1748,7 @@ done:
        if (!p->leave_spinning)
                btrfs_set_path_blocking(p);
        if (ret < 0)
-               btrfs_release_path(root, p);
+               btrfs_release_path(p);
        return ret;
 }
 
@@ -3026,7 +3021,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
                                    struct btrfs_file_extent_item);
                extent_len = btrfs_file_extent_num_bytes(leaf, fi);
        }
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 
        path->keep_locks = 1;
        path->search_for_split = 1;
@@ -3216,7 +3211,6 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans,
                        struct btrfs_path *path,
                        u32 new_size, int from_end)
 {
-       int ret = 0;
        int slot;
        struct extent_buffer *leaf;
        struct btrfs_item *item;
@@ -3314,12 +3308,11 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans,
        btrfs_set_item_size(leaf, item, new_size);
        btrfs_mark_buffer_dirty(leaf);
 
-       ret = 0;
        if (btrfs_leaf_free_space(root, leaf) < 0) {
                btrfs_print_leaf(root, leaf);
                BUG();
        }
-       return ret;
+       return 0;
 }
 
 /*
@@ -3329,7 +3322,6 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root, struct btrfs_path *path,
                      u32 data_size)
 {
-       int ret = 0;
        int slot;
        struct extent_buffer *leaf;
        struct btrfs_item *item;
@@ -3394,12 +3386,11 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans,
        btrfs_set_item_size(leaf, item, old_size + data_size);
        btrfs_mark_buffer_dirty(leaf);
 
-       ret = 0;
        if (btrfs_leaf_free_space(root, leaf) < 0) {
                btrfs_print_leaf(root, leaf);
                BUG();
        }
-       return ret;
+       return 0;
 }
 
 /*
@@ -3559,11 +3550,10 @@ out:
  * to save stack depth by doing the bulk of the work in a function
  * that doesn't call btrfs_search_slot
  */
-static noinline_for_stack int
-setup_items_for_insert(struct btrfs_trans_handle *trans,
-                     struct btrfs_root *root, struct btrfs_path *path,
-                     struct btrfs_key *cpu_key, u32 *data_size,
-                     u32 total_data, u32 total_size, int nr)
+int setup_items_for_insert(struct btrfs_trans_handle *trans,
+                          struct btrfs_root *root, struct btrfs_path *path,
+                          struct btrfs_key *cpu_key, u32 *data_size,
+                          u32 total_data, u32 total_size, int nr)
 {
        struct btrfs_item *item;
        int i;
@@ -3647,7 +3637,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
 
        ret = 0;
        if (slot == 0) {
-               struct btrfs_disk_key disk_key;
                btrfs_cpu_key_to_disk(&disk_key, cpu_key);
                ret = fixup_low_keys(trans, root, path, &disk_key, 1);
        }
@@ -3949,7 +3938,7 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
        else
                return 1;
 
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                return ret;
@@ -4073,7 +4062,7 @@ find_next_key:
                        sret = btrfs_find_next_key(root, path, min_key, level,
                                                  cache_only, min_trans);
                        if (sret == 0) {
-                               btrfs_release_path(root, path);
+                               btrfs_release_path(path);
                                goto again;
                        } else {
                                goto out;
@@ -4152,7 +4141,7 @@ next:
                                btrfs_node_key_to_cpu(c, &cur_key, slot);
 
                        orig_lowest = path->lowest_level;
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        path->lowest_level = level;
                        ret = btrfs_search_slot(NULL, root, &cur_key, path,
                                                0, 0);
@@ -4229,7 +4218,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
 again:
        level = 1;
        next = NULL;
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 
        path->keep_locks = 1;
 
@@ -4285,7 +4274,7 @@ again:
                        goto again;
 
                if (ret < 0) {
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        goto done;
                }
 
@@ -4324,7 +4313,7 @@ again:
                        goto again;
 
                if (ret < 0) {
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        goto done;
                }
 
index 8f4b81de3ae2a0ffb21b57080de5649d9a3785ff..332323e19dd121d4b2338d5d3371e0df7cc05879 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>
 #include <linux/fs.h>
+#include <linux/rwsem.h>
 #include <linux/completion.h>
 #include <linux/backing-dev.h>
 #include <linux/wait.h>
@@ -33,6 +34,7 @@
 #include "extent_io.h"
 #include "extent_map.h"
 #include "async-thread.h"
+#include "ioctl.h"
 
 struct btrfs_trans_handle;
 struct btrfs_transaction;
@@ -105,6 +107,12 @@ struct btrfs_ordered_sum;
 /* For storing free space cache */
 #define BTRFS_FREE_SPACE_OBJECTID -11ULL
 
+/*
+ * The inode number assigned to the special inode for storing
+ * the free ino cache
+ */
+#define BTRFS_FREE_INO_OBJECTID -12ULL
+
 /* dummy objectid represents multiple objectids */
 #define BTRFS_MULTIPLE_OBJECTIDS -255ULL
 
@@ -187,7 +195,6 @@ struct btrfs_mapping_tree {
        struct extent_map_tree map_tree;
 };
 
-#define BTRFS_UUID_SIZE 16
 struct btrfs_dev_item {
        /* the internal btrfs device id */
        __le64 devid;
@@ -294,7 +301,6 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes)
                sizeof(struct btrfs_stripe) * (num_stripes - 1);
 }
 
-#define BTRFS_FSID_SIZE 16
 #define BTRFS_HEADER_FLAG_WRITTEN      (1ULL << 0)
 #define BTRFS_HEADER_FLAG_RELOC                (1ULL << 1)
 
@@ -510,6 +516,12 @@ struct btrfs_extent_item_v0 {
 /* use full backrefs for extent pointers in the block */
 #define BTRFS_BLOCK_FLAG_FULL_BACKREF  (1ULL << 8)
 
+/*
+ * this flag is only used internally by scrub and may be changed at any time
+ * it is only declared here to avoid collisions
+ */
+#define BTRFS_EXTENT_FLAG_SUPER                (1ULL << 48)
+
 struct btrfs_tree_block_info {
        struct btrfs_disk_key key;
        u8 level;
@@ -740,12 +752,12 @@ struct btrfs_space_info {
         */
        unsigned long reservation_progress;
 
-       int full:1;             /* indicates that we cannot allocate any more
+       unsigned int full:1;    /* indicates that we cannot allocate any more
                                   chunks for this space */
-       int chunk_alloc:1;      /* set if we are allocating a chunk */
+       unsigned int chunk_alloc:1;     /* set if we are allocating a chunk */
 
-       int force_alloc;        /* set if we need to force a chunk alloc for
-                                  this space */
+       unsigned int force_alloc;       /* set if we need to force a chunk
+                                          alloc for this space */
 
        struct list_head list;
 
@@ -830,9 +842,6 @@ struct btrfs_block_group_cache {
        u64 bytes_super;
        u64 flags;
        u64 sectorsize;
-       int extents_thresh;
-       int free_extents;
-       int total_bitmaps;
        unsigned int ro:1;
        unsigned int dirty:1;
        unsigned int iref:1;
@@ -847,9 +856,7 @@ struct btrfs_block_group_cache {
        struct btrfs_space_info *space_info;
 
        /* free space cache stuff */
-       spinlock_t tree_lock;
-       struct rb_root free_space_offset;
-       u64 free_space;
+       struct btrfs_free_space_ctl *free_space_ctl;
 
        /* block group cache stuff */
        struct rb_node cache_node;
@@ -869,6 +876,7 @@ struct btrfs_block_group_cache {
 struct reloc_control;
 struct btrfs_device;
 struct btrfs_fs_devices;
+struct btrfs_delayed_root;
 struct btrfs_fs_info {
        u8 fsid[BTRFS_FSID_SIZE];
        u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
@@ -895,7 +903,10 @@ struct btrfs_fs_info {
        /* logical->physical extent mapping */
        struct btrfs_mapping_tree mapping_tree;
 
-       /* block reservation for extent, checksum and root tree */
+       /*
+        * block reservation for extent, checksum, root tree and
+        * delayed dir index item
+        */
        struct btrfs_block_rsv global_block_rsv;
        /* block reservation for delay allocation */
        struct btrfs_block_rsv delalloc_block_rsv;
@@ -1022,6 +1033,7 @@ struct btrfs_fs_info {
         * for the sys_munmap function call path
         */
        struct btrfs_workers fixup_workers;
+       struct btrfs_workers delayed_workers;
        struct task_struct *transaction_kthread;
        struct task_struct *cleaner_kthread;
        int thread_pool_size;
@@ -1062,6 +1074,11 @@ struct btrfs_fs_info {
        /* all metadata allocations go through this cluster */
        struct btrfs_free_cluster meta_alloc_cluster;
 
+       /* auto defrag inodes go here */
+       spinlock_t defrag_inodes_lock;
+       struct rb_root defrag_inodes;
+       atomic_t defrag_running;
+
        spinlock_t ref_cache_lock;
        u64 total_ref_cache_size;
 
@@ -1077,8 +1094,21 @@ struct btrfs_fs_info {
 
        void *bdev_holder;
 
+       /* private scrub information */
+       struct mutex scrub_lock;
+       atomic_t scrubs_running;
+       atomic_t scrub_pause_req;
+       atomic_t scrubs_paused;
+       atomic_t scrub_cancel_req;
+       wait_queue_head_t scrub_pause_wait;
+       struct rw_semaphore scrub_super_lock;
+       int scrub_workers_refcnt;
+       struct btrfs_workers scrub_workers;
+
        /* filesystem state */
        u64 fs_state;
+
+       struct btrfs_delayed_root *delayed_root;
 };
 
 /*
@@ -1088,9 +1118,6 @@ struct btrfs_fs_info {
 struct btrfs_root {
        struct extent_buffer *node;
 
-       /* the node lock is held while changing the node pointer */
-       spinlock_t node_lock;
-
        struct extent_buffer *commit_root;
        struct btrfs_root *log_root;
        struct btrfs_root *reloc_root;
@@ -1107,6 +1134,16 @@ struct btrfs_root {
        spinlock_t accounting_lock;
        struct btrfs_block_rsv *block_rsv;
 
+       /* free ino cache stuff */
+       struct mutex fs_commit_mutex;
+       struct btrfs_free_space_ctl *free_ino_ctl;
+       enum btrfs_caching_type cached;
+       spinlock_t cache_lock;
+       wait_queue_head_t cache_wait;
+       struct btrfs_free_space_ctl *free_ino_pinned;
+       u64 cache_progress;
+       struct inode *cache_inode;
+
        struct mutex log_mutex;
        wait_queue_head_t log_writer_wait;
        wait_queue_head_t log_commit_wait[2];
@@ -1161,6 +1198,11 @@ struct btrfs_root {
        /* red-black tree that keeps track of in-memory inodes */
        struct rb_root inode_tree;
 
+       /*
+        * radix tree that keeps track of delayed nodes of every inode,
+        * protected by inode_lock
+        */
+       struct radix_tree_root delayed_nodes_tree;
        /*
         * right now this just gets used so that a root has its own devid
         * for stat.  It may be used for more later
@@ -1168,6 +1210,38 @@ struct btrfs_root {
        struct super_block anon_super;
 };
 
+struct btrfs_ioctl_defrag_range_args {
+       /* start of the defrag operation */
+       __u64 start;
+
+       /* number of bytes to defrag, use (u64)-1 to say all */
+       __u64 len;
+
+       /*
+        * flags for the operation, which can include turning
+        * on compression for this one defrag
+        */
+       __u64 flags;
+
+       /*
+        * any extent bigger than this will be considered
+        * already defragged.  Use 0 to take the kernel default
+        * Use 1 to say every single extent must be rewritten
+        */
+       __u32 extent_thresh;
+
+       /*
+        * which compression method to use if turning on compression
+        * for this defrag operation.  If unspecified, zlib will
+        * be used
+        */
+       __u32 compress_type;
+
+       /* spare for later */
+       __u32 unused[4];
+};
+
+
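The structure above is the argument block handed to the range-defragmentation ioctl. A minimal user-space sketch of filling it is shown below; it assumes the BTRFS_IOC_DEFRAG_RANGE ioctl number and the BTRFS_DEFRAG_RANGE_COMPRESS / BTRFS_COMPRESS_ZLIB constants from the btrfs ioctl header (e.g. btrfs-progs' ioctl.h), and the file path is illustrative only:

/*
 * Minimal user-space sketch (not part of this patch).  Assumes the
 * BTRFS_IOC_DEFRAG_RANGE ioctl and the BTRFS_DEFRAG_RANGE_COMPRESS and
 * BTRFS_COMPRESS_ZLIB constants from the btrfs ioctl header; the path
 * below is illustrative.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>
#include "ioctl.h"	/* btrfs ioctl definitions, e.g. from btrfs-progs */

int main(void)
{
	struct btrfs_ioctl_defrag_range_args range;
	int fd = open("/mnt/btrfs/file", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&range, 0, sizeof(range));
	range.start = 0;
	range.len = (__u64)-1;			/* defragment the whole file */
	range.extent_thresh = 0;		/* take the kernel default */
	range.flags = BTRFS_DEFRAG_RANGE_COMPRESS;
	range.compress_type = BTRFS_COMPRESS_ZLIB;

	if (ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range) < 0)
		perror("BTRFS_IOC_DEFRAG_RANGE");

	close(fd);
	return 0;
}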
 /*
  * inode items have the data typically returned from stat and store other
  * info about object characteristics.  There is one for every file and dir in
@@ -1265,6 +1339,7 @@ struct btrfs_root {
 #define BTRFS_MOUNT_CLEAR_CACHE                (1 << 13)
 #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
 #define BTRFS_MOUNT_ENOSPC_DEBUG        (1 << 15)
+#define BTRFS_MOUNT_AUTO_DEFRAG                (1 << 16)
 
 #define btrfs_clear_opt(o, opt)                ((o) &= ~BTRFS_MOUNT_##opt)
 #define btrfs_set_opt(o, opt)          ((o) |= BTRFS_MOUNT_##opt)
@@ -1440,26 +1515,12 @@ static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb,
        return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr));
 }
 
-static inline void btrfs_set_stripe_offset_nr(struct extent_buffer *eb,
-                                            struct btrfs_chunk *c, int nr,
-                                            u64 val)
-{
-       btrfs_set_stripe_offset(eb, btrfs_stripe_nr(c, nr), val);
-}
-
 static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb,
                                         struct btrfs_chunk *c, int nr)
 {
        return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr));
 }
 
-static inline void btrfs_set_stripe_devid_nr(struct extent_buffer *eb,
-                                            struct btrfs_chunk *c, int nr,
-                                            u64 val)
-{
-       btrfs_set_stripe_devid(eb, btrfs_stripe_nr(c, nr), val);
-}
-
 /* struct btrfs_block_group_item */
 BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item,
                         used, 64);
@@ -1517,14 +1578,6 @@ btrfs_inode_ctime(struct btrfs_inode_item *inode_item)
        return (struct btrfs_timespec *)ptr;
 }
 
-static inline struct btrfs_timespec *
-btrfs_inode_otime(struct btrfs_inode_item *inode_item)
-{
-       unsigned long ptr = (unsigned long)inode_item;
-       ptr += offsetof(struct btrfs_inode_item, otime);
-       return (struct btrfs_timespec *)ptr;
-}
-
 BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64);
 BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32);
 
@@ -1875,33 +1928,6 @@ static inline u8 *btrfs_header_chunk_tree_uuid(struct extent_buffer *eb)
        return (u8 *)ptr;
 }
 
-static inline u8 *btrfs_super_fsid(struct extent_buffer *eb)
-{
-       unsigned long ptr = offsetof(struct btrfs_super_block, fsid);
-       return (u8 *)ptr;
-}
-
-static inline u8 *btrfs_header_csum(struct extent_buffer *eb)
-{
-       unsigned long ptr = offsetof(struct btrfs_header, csum);
-       return (u8 *)ptr;
-}
-
-static inline struct btrfs_node *btrfs_buffer_node(struct extent_buffer *eb)
-{
-       return NULL;
-}
-
-static inline struct btrfs_leaf *btrfs_buffer_leaf(struct extent_buffer *eb)
-{
-       return NULL;
-}
-
-static inline struct btrfs_header *btrfs_buffer_header(struct extent_buffer *eb)
-{
-       return NULL;
-}
-
 static inline int btrfs_is_leaf(struct extent_buffer *eb)
 {
        return btrfs_header_level(eb) == 0;
@@ -2055,22 +2081,6 @@ static inline struct btrfs_root *btrfs_sb(struct super_block *sb)
        return sb->s_fs_info;
 }
 
-static inline int btrfs_set_root_name(struct btrfs_root *root,
-                                     const char *name, int len)
-{
-       /* if we already have a name just free it */
-       kfree(root->name);
-
-       root->name = kmalloc(len+1, GFP_KERNEL);
-       if (!root->name)
-               return -ENOMEM;
-
-       memcpy(root->name, name, len);
-       root->name[len] = '\0';
-
-       return 0;
-}
-
 static inline u32 btrfs_level_size(struct btrfs_root *root, int level)
 {
        if (level == 0)
@@ -2099,6 +2109,13 @@ static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info)
 }
 
 /* extent-tree.c */
+static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root,
+                                                int num_items)
+{
+       return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
+               3 * num_items;
+}
+
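As a worked example of the reservation above (assuming the common 4KB leaf and node size; BTRFS_MAX_LEVEL is 8): each item reserves (4096 + 4096 * 7) * 3 = 98304 bytes, i.e. enough metadata space to copy-on-write three worst-case tree paths, one leaf plus seven node levels each, per item.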
 void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, unsigned long count);
@@ -2108,12 +2125,9 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             u64 num_bytes, u64 *refs, u64 *flags);
 int btrfs_pin_extent(struct btrfs_root *root,
                     u64 bytenr, u64 num, int reserved);
-int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
-                       struct btrfs_root *root, struct extent_buffer *leaf);
 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root,
                          u64 objectid, u64 offset, u64 bytenr);
-int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy);
 struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr);
@@ -2290,10 +2304,12 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root, struct extent_buffer *parent,
                       int start_slot, int cache_only, u64 *last_ret,
                       struct btrfs_key *progress);
-void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p);
+void btrfs_release_path(struct btrfs_path *p);
 struct btrfs_path *btrfs_alloc_path(void);
 void btrfs_free_path(struct btrfs_path *p);
 void btrfs_set_path_blocking(struct btrfs_path *p);
+void btrfs_clear_path_blocking(struct btrfs_path *p,
+                              struct extent_buffer *held);
 void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
 
 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
@@ -2305,13 +2321,12 @@ static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
        return btrfs_del_items(trans, root, path, path->slots[0], 1);
 }
 
+int setup_items_for_insert(struct btrfs_trans_handle *trans,
+                          struct btrfs_root *root, struct btrfs_path *path,
+                          struct btrfs_key *cpu_key, u32 *data_size,
+                          u32 total_data, u32 total_size, int nr);
 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root, struct btrfs_key *key, void *data, u32 data_size);
-int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
-                           struct btrfs_root *root,
-                           struct btrfs_path *path,
-                           struct btrfs_key *cpu_key, u32 *data_size,
-                           int nr);
 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             struct btrfs_path *path,
@@ -2357,8 +2372,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
                      *item);
 int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct
                         btrfs_root_item *item, struct btrfs_key *key);
-int btrfs_search_root(struct btrfs_root *root, u64 search_start,
-                     u64 *found_objectid);
 int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid);
 int btrfs_find_orphan_roots(struct btrfs_root *tree_root);
 int btrfs_set_root_node(struct btrfs_root_item *item,
@@ -2368,7 +2381,7 @@ void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
 /* dir-item.c */
 int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, const char *name,
-                         int name_len, u64 dir,
+                         int name_len, struct inode *dir,
                          struct btrfs_key *location, u8 type, u64 index);
 struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
                                             struct btrfs_root *root,
@@ -2413,12 +2426,6 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, u64 offset);
 int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset);
 
-/* inode-map.c */
-int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
-                            struct btrfs_root *fs_root,
-                            u64 dirid, u64 *objectid);
-int btrfs_find_highest_inode(struct btrfs_root *fs_root, u64 *objectid);
-
 /* inode-item.c */
 int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
@@ -2463,8 +2470,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
                           struct btrfs_ordered_sum *sums);
 int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
                       struct bio *bio, u64 file_start, int contig);
-int btrfs_csum_file_bytes(struct btrfs_root *root, struct inode *inode,
-                         u64 start, unsigned long len);
 struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
@@ -2472,8 +2477,8 @@ struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans,
 int btrfs_csum_truncate(struct btrfs_trans_handle *trans,
                        struct btrfs_root *root, struct btrfs_path *path,
                        u64 isize);
-int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start,
-                            u64 end, struct list_head *list);
+int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
+                            struct list_head *list, int search_commit);
 /* inode.c */
 
 /* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */
@@ -2502,8 +2507,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
                               u32 min_type);
 
 int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput);
-int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput,
-                                  int sync);
 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
                              struct extent_state **cached_state);
 int btrfs_writepages(struct address_space *mapping,
@@ -2520,7 +2523,6 @@ unsigned long btrfs_force_ra(struct address_space *mapping,
 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_evict_inode(struct inode *inode);
-void btrfs_put_inode(struct inode *inode);
 int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
 void btrfs_dirty_inode(struct inode *inode);
 struct inode *btrfs_alloc_inode(struct super_block *sb);
@@ -2531,10 +2533,8 @@ void btrfs_destroy_cachep(void);
 long btrfs_ioctl_trans_end(struct file *file);
 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
                         struct btrfs_root *root, int *was_new);
-int btrfs_commit_write(struct file *file, struct page *page,
-                      unsigned from, unsigned to);
 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
-                                   size_t page_offset, u64 start, u64 end,
+                                   size_t pg_offset, u64 start, u64 end,
                                    int create);
 int btrfs_update_inode(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
@@ -2566,12 +2566,16 @@ extern const struct dentry_operations btrfs_dentry_operations;
 long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 void btrfs_update_iflags(struct inode *inode);
 void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
-
+int btrfs_defrag_file(struct inode *inode, struct file *file,
+                     struct btrfs_ioctl_defrag_range_args *range,
+                     u64 newer_than, unsigned long max_pages);
 /* file.c */
+int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
+                          struct inode *inode);
+int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
 int btrfs_sync_file(struct file *file, int datasync);
 int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
                            int skip_pinned);
-int btrfs_check_file(struct btrfs_root *root, struct inode *inode);
 extern const struct file_operations btrfs_file_operations;
 int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
                       u64 start, u64 end, u64 *hint_byte, int drop_cache);
@@ -2591,10 +2595,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
 /* sysfs.c */
 int btrfs_init_sysfs(void);
 void btrfs_exit_sysfs(void);
-int btrfs_sysfs_add_super(struct btrfs_fs_info *fs);
-int btrfs_sysfs_add_root(struct btrfs_root *root);
-void btrfs_sysfs_del_root(struct btrfs_root *root);
-void btrfs_sysfs_del_super(struct btrfs_fs_info *root);
 
 /* xattr.c */
 ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
@@ -2637,4 +2637,18 @@ void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans,
                              u64 *bytes_to_reserve);
 void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
                              struct btrfs_pending_snapshot *pending);
+
+/* scrub.c */
+int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
+                   struct btrfs_scrub_progress *progress, int readonly);
+int btrfs_scrub_pause(struct btrfs_root *root);
+int btrfs_scrub_pause_super(struct btrfs_root *root);
+int btrfs_scrub_continue(struct btrfs_root *root);
+int btrfs_scrub_continue_super(struct btrfs_root *root);
+int btrfs_scrub_cancel(struct btrfs_root *root);
+int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev);
+int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid);
+int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
+                        struct btrfs_scrub_progress *progress);
+
 #endif
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
new file mode 100644 (file)
index 0000000..01e2950
--- /dev/null
@@ -0,0 +1,1695 @@
+/*
+ * Copyright (C) 2011 Fujitsu.  All rights reserved.
+ * Written by Miao Xie <miaox@cn.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/slab.h>
+#include "delayed-inode.h"
+#include "disk-io.h"
+#include "transaction.h"
+
+#define BTRFS_DELAYED_WRITEBACK                400
+#define BTRFS_DELAYED_BACKGROUND       100
+
+static struct kmem_cache *delayed_node_cache;
+
+int __init btrfs_delayed_inode_init(void)
+{
+       delayed_node_cache = kmem_cache_create("delayed_node",
+                                       sizeof(struct btrfs_delayed_node),
+                                       0,
+                                       SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+                                       NULL);
+       if (!delayed_node_cache)
+               return -ENOMEM;
+       return 0;
+}
+
+void btrfs_delayed_inode_exit(void)
+{
+       if (delayed_node_cache)
+               kmem_cache_destroy(delayed_node_cache);
+}
+
+static inline void btrfs_init_delayed_node(
+                               struct btrfs_delayed_node *delayed_node,
+                               struct btrfs_root *root, u64 inode_id)
+{
+       delayed_node->root = root;
+       delayed_node->inode_id = inode_id;
+       atomic_set(&delayed_node->refs, 0);
+       delayed_node->count = 0;
+       delayed_node->in_list = 0;
+       delayed_node->inode_dirty = 0;
+       delayed_node->ins_root = RB_ROOT;
+       delayed_node->del_root = RB_ROOT;
+       mutex_init(&delayed_node->mutex);
+       delayed_node->index_cnt = 0;
+       INIT_LIST_HEAD(&delayed_node->n_list);
+       INIT_LIST_HEAD(&delayed_node->p_list);
+       delayed_node->bytes_reserved = 0;
+}
+
+static inline int btrfs_is_continuous_delayed_item(
+                                       struct btrfs_delayed_item *item1,
+                                       struct btrfs_delayed_item *item2)
+{
+       if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
+           item1->key.objectid == item2->key.objectid &&
+           item1->key.type == item2->key.type &&
+           item1->key.offset + 1 == item2->key.offset)
+               return 1;
+       return 0;
+}
+
+static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
+                                                       struct btrfs_root *root)
+{
+       return root->fs_info->delayed_root;
+}
+
+static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
+                                                       struct inode *inode)
+{
+       struct btrfs_delayed_node *node;
+       struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
+       struct btrfs_root *root = btrfs_inode->root;
+       u64 ino = btrfs_ino(inode);
+       int ret;
+
+again:
+       node = ACCESS_ONCE(btrfs_inode->delayed_node);
+       if (node) {
+               atomic_inc(&node->refs);        /* can be accessed */
+               return node;
+       }
+
+       spin_lock(&root->inode_lock);
+       node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
+       if (node) {
+               if (btrfs_inode->delayed_node) {
+                       spin_unlock(&root->inode_lock);
+                       goto again;
+               }
+               btrfs_inode->delayed_node = node;
+               atomic_inc(&node->refs);        /* can be accessed */
+               atomic_inc(&node->refs);        /* cached in the inode */
+               spin_unlock(&root->inode_lock);
+               return node;
+       }
+       spin_unlock(&root->inode_lock);
+
+       node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
+       if (!node)
+               return ERR_PTR(-ENOMEM);
+       btrfs_init_delayed_node(node, root, ino);
+
+       atomic_inc(&node->refs);        /* cached in the btrfs inode */
+       atomic_inc(&node->refs);        /* can be accessed */
+
+       ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
+       if (ret) {
+               kmem_cache_free(delayed_node_cache, node);
+               return ERR_PTR(ret);
+       }
+
+       spin_lock(&root->inode_lock);
+       ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
+       if (ret == -EEXIST) {
+               kmem_cache_free(delayed_node_cache, node);
+               spin_unlock(&root->inode_lock);
+               radix_tree_preload_end();
+               goto again;
+       }
+       btrfs_inode->delayed_node = node;
+       spin_unlock(&root->inode_lock);
+       radix_tree_preload_end();
+
+       return node;
+}
+
+/*
+ * Call it when holding delayed_node->mutex
+ *
+ * If mod = 1, add this node into the prepared list.
+ */
+static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
+                                    struct btrfs_delayed_node *node,
+                                    int mod)
+{
+       spin_lock(&root->lock);
+       if (node->in_list) {
+               if (!list_empty(&node->p_list))
+                       list_move_tail(&node->p_list, &root->prepare_list);
+               else if (mod)
+                       list_add_tail(&node->p_list, &root->prepare_list);
+       } else {
+               list_add_tail(&node->n_list, &root->node_list);
+               list_add_tail(&node->p_list, &root->prepare_list);
+               atomic_inc(&node->refs);        /* inserted into list */
+               root->nodes++;
+               node->in_list = 1;
+       }
+       spin_unlock(&root->lock);
+}
+
+/* Call it when holding delayed_node->mutex */
+static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
+                                      struct btrfs_delayed_node *node)
+{
+       spin_lock(&root->lock);
+       if (node->in_list) {
+               root->nodes--;
+               atomic_dec(&node->refs);        /* not in the list */
+               list_del_init(&node->n_list);
+               if (!list_empty(&node->p_list))
+                       list_del_init(&node->p_list);
+               node->in_list = 0;
+       }
+       spin_unlock(&root->lock);
+}
+
+struct btrfs_delayed_node *btrfs_first_delayed_node(
+                       struct btrfs_delayed_root *delayed_root)
+{
+       struct list_head *p;
+       struct btrfs_delayed_node *node = NULL;
+
+       spin_lock(&delayed_root->lock);
+       if (list_empty(&delayed_root->node_list))
+               goto out;
+
+       p = delayed_root->node_list.next;
+       node = list_entry(p, struct btrfs_delayed_node, n_list);
+       atomic_inc(&node->refs);
+out:
+       spin_unlock(&delayed_root->lock);
+
+       return node;
+}
+
+struct btrfs_delayed_node *btrfs_next_delayed_node(
+                                               struct btrfs_delayed_node *node)
+{
+       struct btrfs_delayed_root *delayed_root;
+       struct list_head *p;
+       struct btrfs_delayed_node *next = NULL;
+
+       delayed_root = node->root->fs_info->delayed_root;
+       spin_lock(&delayed_root->lock);
+       if (!node->in_list) {   /* not in the list */
+               if (list_empty(&delayed_root->node_list))
+                       goto out;
+               p = delayed_root->node_list.next;
+       } else if (list_is_last(&node->n_list, &delayed_root->node_list))
+               goto out;
+       else
+               p = node->n_list.next;
+
+       next = list_entry(p, struct btrfs_delayed_node, n_list);
+       atomic_inc(&next->refs);
+out:
+       spin_unlock(&delayed_root->lock);
+
+       return next;
+}
+
+static void __btrfs_release_delayed_node(
+                               struct btrfs_delayed_node *delayed_node,
+                               int mod)
+{
+       struct btrfs_delayed_root *delayed_root;
+
+       if (!delayed_node)
+               return;
+
+       delayed_root = delayed_node->root->fs_info->delayed_root;
+
+       mutex_lock(&delayed_node->mutex);
+       if (delayed_node->count)
+               btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
+       else
+               btrfs_dequeue_delayed_node(delayed_root, delayed_node);
+       mutex_unlock(&delayed_node->mutex);
+
+       if (atomic_dec_and_test(&delayed_node->refs)) {
+               struct btrfs_root *root = delayed_node->root;
+               spin_lock(&root->inode_lock);
+               if (atomic_read(&delayed_node->refs) == 0) {
+                       radix_tree_delete(&root->delayed_nodes_tree,
+                                         delayed_node->inode_id);
+                       kmem_cache_free(delayed_node_cache, delayed_node);
+               }
+               spin_unlock(&root->inode_lock);
+       }
+}
+
+static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
+{
+       __btrfs_release_delayed_node(node, 0);
+}
+
+struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
+                                       struct btrfs_delayed_root *delayed_root)
+{
+       struct list_head *p;
+       struct btrfs_delayed_node *node = NULL;
+
+       spin_lock(&delayed_root->lock);
+       if (list_empty(&delayed_root->prepare_list))
+               goto out;
+
+       p = delayed_root->prepare_list.next;
+       list_del_init(p);
+       node = list_entry(p, struct btrfs_delayed_node, p_list);
+       atomic_inc(&node->refs);
+out:
+       spin_unlock(&delayed_root->lock);
+
+       return node;
+}
+
+static inline void btrfs_release_prepared_delayed_node(
+                                       struct btrfs_delayed_node *node)
+{
+       __btrfs_release_delayed_node(node, 1);
+}
+
+struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
+{
+       struct btrfs_delayed_item *item;
+       item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
+       if (item) {
+               item->data_len = data_len;
+               item->ins_or_del = 0;
+               item->bytes_reserved = 0;
+               item->block_rsv = NULL;
+               item->delayed_node = NULL;
+               atomic_set(&item->refs, 1);
+       }
+       return item;
+}
+
+/*
+ * __btrfs_lookup_delayed_item - look up the delayed item by key
+ * @root:        the rb-root to search (a delayed node's ins_root or del_root)
+ * @key:         the key to look up
+ * @prev:        used to store the prev item if the right item isn't found
+ * @next:        used to store the next item if the right item isn't found
+ *
+ * Note: if we don't find the right item, we will return the prev item and
+ * the next item.
+ */
+static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
+                               struct rb_root *root,
+                               struct btrfs_key *key,
+                               struct btrfs_delayed_item **prev,
+                               struct btrfs_delayed_item **next)
+{
+       struct rb_node *node, *prev_node = NULL;
+       struct btrfs_delayed_item *delayed_item = NULL;
+       int ret = 0;
+
+       node = root->rb_node;
+
+       while (node) {
+               delayed_item = rb_entry(node, struct btrfs_delayed_item,
+                                       rb_node);
+               prev_node = node;
+               ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
+               if (ret < 0)
+                       node = node->rb_right;
+               else if (ret > 0)
+                       node = node->rb_left;
+               else
+                       return delayed_item;
+       }
+
+       if (prev) {
+               if (!prev_node)
+                       *prev = NULL;
+               else if (ret < 0)
+                       *prev = delayed_item;
+               else if ((node = rb_prev(prev_node)) != NULL) {
+                       *prev = rb_entry(node, struct btrfs_delayed_item,
+                                        rb_node);
+               } else
+                       *prev = NULL;
+       }
+
+       if (next) {
+               if (!prev_node)
+                       *next = NULL;
+               else if (ret > 0)
+                       *next = delayed_item;
+               else if ((node = rb_next(prev_node)) != NULL) {
+                       *next = rb_entry(node, struct btrfs_delayed_item,
+                                        rb_node);
+               } else
+                       *next = NULL;
+       }
+       return NULL;
+}
+
+struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
+                                       struct btrfs_delayed_node *delayed_node,
+                                       struct btrfs_key *key)
+{
+       struct btrfs_delayed_item *item;
+
+       item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
+                                          NULL, NULL);
+       return item;
+}
+
+struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
+                                       struct btrfs_delayed_node *delayed_node,
+                                       struct btrfs_key *key)
+{
+       struct btrfs_delayed_item *item;
+
+       item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
+                                          NULL, NULL);
+       return item;
+}
+
+struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
+                                       struct btrfs_delayed_node *delayed_node,
+                                       struct btrfs_key *key)
+{
+       struct btrfs_delayed_item *item, *next;
+
+       item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
+                                          NULL, &next);
+       if (!item)
+               item = next;
+
+       return item;
+}
+
+struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
+                                       struct btrfs_delayed_node *delayed_node,
+                                       struct btrfs_key *key)
+{
+       struct btrfs_delayed_item *item, *next;
+
+       item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
+                                          NULL, &next);
+       if (!item)
+               item = next;
+
+       return item;
+}
+
+static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
+                                   struct btrfs_delayed_item *ins,
+                                   int action)
+{
+       struct rb_node **p, *node;
+       struct rb_node *parent_node = NULL;
+       struct rb_root *root;
+       struct btrfs_delayed_item *item;
+       int cmp;
+
+       if (action == BTRFS_DELAYED_INSERTION_ITEM)
+               root = &delayed_node->ins_root;
+       else if (action == BTRFS_DELAYED_DELETION_ITEM)
+               root = &delayed_node->del_root;
+       else
+               BUG();
+       p = &root->rb_node;
+       node = &ins->rb_node;
+
+       while (*p) {
+               parent_node = *p;
+               item = rb_entry(parent_node, struct btrfs_delayed_item,
+                                rb_node);
+
+               cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
+               if (cmp < 0)
+                       p = &(*p)->rb_right;
+               else if (cmp > 0)
+                       p = &(*p)->rb_left;
+               else
+                       return -EEXIST;
+       }
+
+       rb_link_node(node, parent_node, p);
+       rb_insert_color(node, root);
+       ins->delayed_node = delayed_node;
+       ins->ins_or_del = action;
+
+       if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
+           action == BTRFS_DELAYED_INSERTION_ITEM &&
+           ins->key.offset >= delayed_node->index_cnt)
+                       delayed_node->index_cnt = ins->key.offset + 1;
+
+       delayed_node->count++;
+       atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
+       return 0;
+}
+
+static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
+                                             struct btrfs_delayed_item *item)
+{
+       return __btrfs_add_delayed_item(node, item,
+                                       BTRFS_DELAYED_INSERTION_ITEM);
+}
+
+static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
+                                            struct btrfs_delayed_item *item)
+{
+       return __btrfs_add_delayed_item(node, item,
+                                       BTRFS_DELAYED_DELETION_ITEM);
+}
+
+static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
+{
+       struct rb_root *root;
+       struct btrfs_delayed_root *delayed_root;
+
+       delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
+
+       BUG_ON(!delayed_root);
+       BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
+              delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
+
+       if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
+               root = &delayed_item->delayed_node->ins_root;
+       else
+               root = &delayed_item->delayed_node->del_root;
+
+       rb_erase(&delayed_item->rb_node, root);
+       delayed_item->delayed_node->count--;
+       atomic_dec(&delayed_root->items);
+       if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
+           waitqueue_active(&delayed_root->wait))
+               wake_up(&delayed_root->wait);
+}
+
+static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
+{
+       if (item) {
+               __btrfs_remove_delayed_item(item);
+               if (atomic_dec_and_test(&item->refs))
+                       kfree(item);
+       }
+}
+
+struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
+                                       struct btrfs_delayed_node *delayed_node)
+{
+       struct rb_node *p;
+       struct btrfs_delayed_item *item = NULL;
+
+       p = rb_first(&delayed_node->ins_root);
+       if (p)
+               item = rb_entry(p, struct btrfs_delayed_item, rb_node);
+
+       return item;
+}
+
+struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
+                                       struct btrfs_delayed_node *delayed_node)
+{
+       struct rb_node *p;
+       struct btrfs_delayed_item *item = NULL;
+
+       p = rb_first(&delayed_node->del_root);
+       if (p)
+               item = rb_entry(p, struct btrfs_delayed_item, rb_node);
+
+       return item;
+}
+
+struct btrfs_delayed_item *__btrfs_next_delayed_item(
+                                               struct btrfs_delayed_item *item)
+{
+       struct rb_node *p;
+       struct btrfs_delayed_item *next = NULL;
+
+       p = rb_next(&item->rb_node);
+       if (p)
+               next = rb_entry(p, struct btrfs_delayed_item, rb_node);
+
+       return next;
+}
+
+static inline struct btrfs_delayed_node *btrfs_get_delayed_node(
+                                                       struct inode *inode)
+{
+       struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
+       struct btrfs_delayed_node *delayed_node;
+
+       delayed_node = btrfs_inode->delayed_node;
+       if (delayed_node)
+               atomic_inc(&delayed_node->refs);
+
+       return delayed_node;
+}
+
+static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
+                                                  u64 root_id)
+{
+       struct btrfs_key root_key;
+
+       if (root->objectid == root_id)
+               return root;
+
+       root_key.objectid = root_id;
+       root_key.type = BTRFS_ROOT_ITEM_KEY;
+       root_key.offset = (u64)-1;
+       return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
+}
+
+static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
+                                              struct btrfs_root *root,
+                                              struct btrfs_delayed_item *item)
+{
+       struct btrfs_block_rsv *src_rsv;
+       struct btrfs_block_rsv *dst_rsv;
+       u64 num_bytes;
+       int ret;
+
+       if (!trans->bytes_reserved)
+               return 0;
+
+       src_rsv = trans->block_rsv;
+       dst_rsv = &root->fs_info->global_block_rsv;
+
+       num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+       ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
+       if (!ret) {
+               item->bytes_reserved = num_bytes;
+               item->block_rsv = dst_rsv;
+       }
+
+       return ret;
+}
+
+static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
+                                               struct btrfs_delayed_item *item)
+{
+       if (!item->bytes_reserved)
+               return;
+
+       btrfs_block_rsv_release(root, item->block_rsv,
+                               item->bytes_reserved);
+}
+
+static int btrfs_delayed_inode_reserve_metadata(
+                                       struct btrfs_trans_handle *trans,
+                                       struct btrfs_root *root,
+                                       struct btrfs_delayed_node *node)
+{
+       struct btrfs_block_rsv *src_rsv;
+       struct btrfs_block_rsv *dst_rsv;
+       u64 num_bytes;
+       int ret;
+
+       if (!trans->bytes_reserved)
+               return 0;
+
+       src_rsv = trans->block_rsv;
+       dst_rsv = &root->fs_info->global_block_rsv;
+
+       num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+       ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
+       if (!ret)
+               node->bytes_reserved = num_bytes;
+
+       return ret;
+}
+
+static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
+                                               struct btrfs_delayed_node *node)
+{
+       struct btrfs_block_rsv *rsv;
+
+       if (!node->bytes_reserved)
+               return;
+
+       rsv = &root->fs_info->global_block_rsv;
+       btrfs_block_rsv_release(root, rsv,
+                               node->bytes_reserved);
+       node->bytes_reserved = 0;
+}
+
+/*
+ * This helper will insert some continuous items into the same leaf according
+ * to the free space of the leaf.
+ */
+static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
+                               struct btrfs_root *root,
+                               struct btrfs_path *path,
+                               struct btrfs_delayed_item *item)
+{
+       struct btrfs_delayed_item *curr, *next;
+       int free_space;
+       int total_data_size = 0, total_size = 0;
+       struct extent_buffer *leaf;
+       char *data_ptr;
+       struct btrfs_key *keys;
+       u32 *data_size;
+       struct list_head head;
+       int slot;
+       int nitems;
+       int i;
+       int ret = 0;
+
+       BUG_ON(!path->nodes[0]);
+
+       leaf = path->nodes[0];
+       free_space = btrfs_leaf_free_space(root, leaf);
+       INIT_LIST_HEAD(&head);
+
+       next = item;
+       nitems = 0;
+
+       /*
+        * count the number of continuous items that we can insert in a batch
+        */
+       while (total_size + next->data_len + sizeof(struct btrfs_item) <=
+              free_space) {
+               total_data_size += next->data_len;
+               total_size += next->data_len + sizeof(struct btrfs_item);
+               list_add_tail(&next->tree_list, &head);
+               nitems++;
+
+               curr = next;
+               next = __btrfs_next_delayed_item(curr);
+               if (!next)
+                       break;
+
+               if (!btrfs_is_continuous_delayed_item(curr, next))
+                       break;
+       }
+
+       if (!nitems) {
+               ret = 0;
+               goto out;
+       }
+
+       /*
+        * we need to allocate some memory, and that might cause the task
+        * to sleep, so set all the locked nodes in the path to blocking
+        * locks first.
+        */
+       btrfs_set_path_blocking(path);
+
+       keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
+       if (!keys) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
+       if (!data_size) {
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       /* get keys of all the delayed items */
+       i = 0;
+       list_for_each_entry(next, &head, tree_list) {
+               keys[i] = next->key;
+               data_size[i] = next->data_len;
+               i++;
+       }
+
+       /* reset all the locked nodes in the path to spinning locks. */
+       btrfs_clear_path_blocking(path, NULL);
+
+       /* insert the keys of the items */
+       ret = setup_items_for_insert(trans, root, path, keys, data_size,
+                                    total_data_size, total_size, nitems);
+       if (ret)
+               goto error;
+
+       /* insert the dir index items */
+       slot = path->slots[0];
+       list_for_each_entry_safe(curr, next, &head, tree_list) {
+               data_ptr = btrfs_item_ptr(leaf, slot, char);
+               write_extent_buffer(leaf, &curr->data,
+                                   (unsigned long)data_ptr,
+                                   curr->data_len);
+               slot++;
+
+               btrfs_delayed_item_release_metadata(root, curr);
+
+               list_del(&curr->tree_list);
+               btrfs_release_delayed_item(curr);
+       }
+
+error:
+       kfree(data_size);
+       kfree(keys);
+out:
+       return ret;
+}
+
+/*
+ * This helper handles the simple insertions that don't need to extend an
+ * item with new data, such as directory name index insertion and inode
+ * item insertion.
+ */
+static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
+                                    struct btrfs_root *root,
+                                    struct btrfs_path *path,
+                                    struct btrfs_delayed_item *delayed_item)
+{
+       struct extent_buffer *leaf;
+       struct btrfs_item *item;
+       char *ptr;
+       int ret;
+
+       ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
+                                     delayed_item->data_len);
+       if (ret < 0 && ret != -EEXIST)
+               return ret;
+
+       leaf = path->nodes[0];
+
+       item = btrfs_item_nr(leaf, path->slots[0]);
+       ptr = btrfs_item_ptr(leaf, path->slots[0], char);
+
+       write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
+                           delayed_item->data_len);
+       btrfs_mark_buffer_dirty(leaf);
+
+       btrfs_delayed_item_release_metadata(root, delayed_item);
+       return 0;
+}
+
+/*
+ * we insert an item first, then if there are some continuous items, we try
+ * to insert those items into the same leaf.
+ */
+static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
+                                     struct btrfs_path *path,
+                                     struct btrfs_root *root,
+                                     struct btrfs_delayed_node *node)
+{
+       struct btrfs_delayed_item *curr, *prev;
+       int ret = 0;
+
+do_again:
+       mutex_lock(&node->mutex);
+       curr = __btrfs_first_delayed_insertion_item(node);
+       if (!curr)
+               goto insert_end;
+
+       ret = btrfs_insert_delayed_item(trans, root, path, curr);
+       if (ret < 0) {
+               btrfs_release_path(path);
+               goto insert_end;
+       }
+
+       prev = curr;
+       curr = __btrfs_next_delayed_item(prev);
+       if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
+               /* insert the continuous items into the same leaf */
+               path->slots[0]++;
+               btrfs_batch_insert_items(trans, root, path, curr);
+       }
+       btrfs_release_delayed_item(prev);
+       btrfs_mark_buffer_dirty(path->nodes[0]);
+
+       btrfs_release_path(path);
+       mutex_unlock(&node->mutex);
+       goto do_again;
+
+insert_end:
+       mutex_unlock(&node->mutex);
+       return ret;
+}
+
+static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
+                                   struct btrfs_root *root,
+                                   struct btrfs_path *path,
+                                   struct btrfs_delayed_item *item)
+{
+       struct btrfs_delayed_item *curr, *next;
+       struct extent_buffer *leaf;
+       struct btrfs_key key;
+       struct list_head head;
+       int nitems, i, last_item;
+       int ret = 0;
+
+       BUG_ON(!path->nodes[0]);
+
+       leaf = path->nodes[0];
+
+       i = path->slots[0];
+       last_item = btrfs_header_nritems(leaf) - 1;
+       if (i > last_item)
+               return -ENOENT; /* FIXME: Is errno suitable? */
+
+       next = item;
+       INIT_LIST_HEAD(&head);
+       btrfs_item_key_to_cpu(leaf, &key, i);
+       nitems = 0;
+       /*
+        * count the number of dir index items that we can delete in a batch
+        */
+       while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
+               list_add_tail(&next->tree_list, &head);
+               nitems++;
+
+               curr = next;
+               next = __btrfs_next_delayed_item(curr);
+               if (!next)
+                       break;
+
+               if (!btrfs_is_continuous_delayed_item(curr, next))
+                       break;
+
+               i++;
+               if (i > last_item)
+                       break;
+               btrfs_item_key_to_cpu(leaf, &key, i);
+       }
+
+       if (!nitems)
+               return 0;
+
+       ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
+       if (ret)
+               goto out;
+
+       list_for_each_entry_safe(curr, next, &head, tree_list) {
+               btrfs_delayed_item_release_metadata(root, curr);
+               list_del(&curr->tree_list);
+               btrfs_release_delayed_item(curr);
+       }
+
+out:
+       return ret;
+}
+
+static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
+                                     struct btrfs_path *path,
+                                     struct btrfs_root *root,
+                                     struct btrfs_delayed_node *node)
+{
+       struct btrfs_delayed_item *curr, *prev;
+       int ret = 0;
+
+do_again:
+       mutex_lock(&node->mutex);
+       curr = __btrfs_first_delayed_deletion_item(node);
+       if (!curr)
+               goto delete_fail;
+
+       ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
+       if (ret < 0)
+               goto delete_fail;
+       else if (ret > 0) {
+               /*
+                * can't find the item that this delayed item points to, so
+                * it is invalid, just drop it.
+                */
+               prev = curr;
+               curr = __btrfs_next_delayed_item(prev);
+               btrfs_release_delayed_item(prev);
+               ret = 0;
+               btrfs_release_path(path);
+               if (curr)
+                       goto do_again;
+               else
+                       goto delete_fail;
+       }
+
+       btrfs_batch_delete_items(trans, root, path, curr);
+       btrfs_release_path(path);
+       mutex_unlock(&node->mutex);
+       goto do_again;
+
+delete_fail:
+       btrfs_release_path(path);
+       mutex_unlock(&node->mutex);
+       return ret;
+}
+
+static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
+{
+       struct btrfs_delayed_root *delayed_root;
+
+       if (delayed_node && delayed_node->inode_dirty) {
+               BUG_ON(!delayed_node->root);
+               delayed_node->inode_dirty = 0;
+               delayed_node->count--;
+
+               delayed_root = delayed_node->root->fs_info->delayed_root;
+               atomic_dec(&delayed_root->items);
+               if (atomic_read(&delayed_root->items) <
+                   BTRFS_DELAYED_BACKGROUND &&
+                   waitqueue_active(&delayed_root->wait))
+                       wake_up(&delayed_root->wait);
+       }
+}
+
+static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
+                                     struct btrfs_root *root,
+                                     struct btrfs_path *path,
+                                     struct btrfs_delayed_node *node)
+{
+       struct btrfs_key key;
+       struct btrfs_inode_item *inode_item;
+       struct extent_buffer *leaf;
+       int ret;
+
+       mutex_lock(&node->mutex);
+       if (!node->inode_dirty) {
+               mutex_unlock(&node->mutex);
+               return 0;
+       }
+
+       key.objectid = node->inode_id;
+       btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
+       key.offset = 0;
+       ret = btrfs_lookup_inode(trans, root, path, &key, 1);
+       if (ret > 0) {
+               btrfs_release_path(path);
+               mutex_unlock(&node->mutex);
+               return -ENOENT;
+       } else if (ret < 0) {
+               mutex_unlock(&node->mutex);
+               return ret;
+       }
+
+       btrfs_unlock_up_safe(path, 1);
+       leaf = path->nodes[0];
+       inode_item = btrfs_item_ptr(leaf, path->slots[0],
+                                   struct btrfs_inode_item);
+       write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
+                           sizeof(struct btrfs_inode_item));
+       btrfs_mark_buffer_dirty(leaf);
+       btrfs_release_path(path);
+
+       btrfs_delayed_inode_release_metadata(root, node);
+       btrfs_release_delayed_inode(node);
+       mutex_unlock(&node->mutex);
+
+       return 0;
+}
+
+/* Called when committing the transaction. */
+int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
+                           struct btrfs_root *root)
+{
+       struct btrfs_delayed_root *delayed_root;
+       struct btrfs_delayed_node *curr_node, *prev_node;
+       struct btrfs_path *path;
+       int ret = 0;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+       path->leave_spinning = 1;
+
+       delayed_root = btrfs_get_delayed_root(root);
+
+       curr_node = btrfs_first_delayed_node(delayed_root);
+       while (curr_node) {
+               root = curr_node->root;
+               ret = btrfs_insert_delayed_items(trans, path, root,
+                                                curr_node);
+               if (!ret)
+                       ret = btrfs_delete_delayed_items(trans, path, root,
+                                                        curr_node);
+               if (!ret)
+                       ret = btrfs_update_delayed_inode(trans, root, path,
+                                                        curr_node);
+               if (ret) {
+                       btrfs_release_delayed_node(curr_node);
+                       break;
+               }
+
+               prev_node = curr_node;
+               curr_node = btrfs_next_delayed_node(curr_node);
+               btrfs_release_delayed_node(prev_node);
+       }
+
+       btrfs_free_path(path);
+       return ret;
+}
+
+static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+                                             struct btrfs_delayed_node *node)
+{
+       struct btrfs_path *path;
+       int ret;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+       path->leave_spinning = 1;
+
+       ret = btrfs_insert_delayed_items(trans, path, node->root, node);
+       if (!ret)
+               ret = btrfs_delete_delayed_items(trans, path, node->root, node);
+       if (!ret)
+               ret = btrfs_update_delayed_inode(trans, node->root, path, node);
+       btrfs_free_path(path);
+
+       return ret;
+}
+
+int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+                                    struct inode *inode)
+{
+       struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
+       int ret;
+
+       if (!delayed_node)
+               return 0;
+
+       mutex_lock(&delayed_node->mutex);
+       if (!delayed_node->count) {
+               mutex_unlock(&delayed_node->mutex);
+               btrfs_release_delayed_node(delayed_node);
+               return 0;
+       }
+       mutex_unlock(&delayed_node->mutex);
+
+       ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
+       btrfs_release_delayed_node(delayed_node);
+       return ret;
+}
+
+void btrfs_remove_delayed_node(struct inode *inode)
+{
+       struct btrfs_delayed_node *delayed_node;
+
+       delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
+       if (!delayed_node)
+               return;
+
+       BTRFS_I(inode)->delayed_node = NULL;
+       btrfs_release_delayed_node(delayed_node);
+}
+
+struct btrfs_async_delayed_node {
+       struct btrfs_root *root;
+       struct btrfs_delayed_node *delayed_node;
+       struct btrfs_work work;
+};
+
+static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
+{
+       struct btrfs_async_delayed_node *async_node;
+       struct btrfs_trans_handle *trans;
+       struct btrfs_path *path;
+       struct btrfs_delayed_node *delayed_node = NULL;
+       struct btrfs_root *root;
+       unsigned long nr = 0;
+       int need_requeue = 0;
+       int ret;
+
+       async_node = container_of(work, struct btrfs_async_delayed_node, work);
+
+       path = btrfs_alloc_path();
+       if (!path)
+               goto out;
+       path->leave_spinning = 1;
+
+       delayed_node = async_node->delayed_node;
+       root = delayed_node->root;
+
+       trans = btrfs_join_transaction(root, 0);
+       if (IS_ERR(trans))
+               goto free_path;
+
+       ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
+       if (!ret)
+               ret = btrfs_delete_delayed_items(trans, path, root,
+                                                delayed_node);
+
+       if (!ret)
+               btrfs_update_delayed_inode(trans, root, path, delayed_node);
+
+       /*
+        * Maybe new delayed items have been inserted, so we need to requeue
+        * the work. Besides that, we must dequeue the empty delayed nodes
+        * to avoid the race between delayed item balancing and the worker.
+        * The race goes like this:
+        *      Task1                           Worker thread
+        *                                      count == 0, needn't requeue
+        *                                        also needn't insert the
+        *                                        delayed node into prepare
+        *                                        list again.
+        *      add lots of delayed items
+        *      queue the delayed node
+        *        already in the list,
+        *        and not in the prepare
+        *        list, it means the delayed
+        *        node is being dealt with
+        *        by the worker.
+        *      do delayed items balance
+        *        the delayed node is being
+        *        dealt with by the worker
+        *        now, just wait.
+        *                                      the worker goto idle.
+        * Task1 will sleep until the transaction is committed.
+        */
+       mutex_lock(&delayed_node->mutex);
+       if (delayed_node->count)
+               need_requeue = 1;
+       else
+               btrfs_dequeue_delayed_node(root->fs_info->delayed_root,
+                                          delayed_node);
+       mutex_unlock(&delayed_node->mutex);
+
+       nr = trans->blocks_used;
+
+       btrfs_end_transaction_dmeta(trans, root);
+       __btrfs_btree_balance_dirty(root, nr);
+free_path:
+       btrfs_free_path(path);
+out:
+       if (need_requeue)
+               btrfs_requeue_work(&async_node->work);
+       else {
+               btrfs_release_prepared_delayed_node(delayed_node);
+               kfree(async_node);
+       }
+}
+
+static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
+                                    struct btrfs_root *root, int all)
+{
+       struct btrfs_async_delayed_node *async_node;
+       struct btrfs_delayed_node *curr;
+       int count = 0;
+
+again:
+       curr = btrfs_first_prepared_delayed_node(delayed_root);
+       if (!curr)
+               return 0;
+
+       async_node = kmalloc(sizeof(*async_node), GFP_NOFS);
+       if (!async_node) {
+               btrfs_release_prepared_delayed_node(curr);
+               return -ENOMEM;
+       }
+
+       async_node->root = root;
+       async_node->delayed_node = curr;
+
+       async_node->work.func = btrfs_async_run_delayed_node_done;
+       async_node->work.flags = 0;
+
+       btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work);
+       count++;
+
+       if (all || count < 4)
+               goto again;
+
+       return 0;
+}
+
+void btrfs_balance_delayed_items(struct btrfs_root *root)
+{
+       struct btrfs_delayed_root *delayed_root;
+
+       delayed_root = btrfs_get_delayed_root(root);
+
+       if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
+               return;
+
+       if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
+               int ret;
+               ret = btrfs_wq_run_delayed_node(delayed_root, root, 1);
+               if (ret)
+                       return;
+
+               wait_event_interruptible_timeout(
+                               delayed_root->wait,
+                               (atomic_read(&delayed_root->items) <
+                                BTRFS_DELAYED_BACKGROUND),
+                               HZ);
+               return;
+       }
+
+       btrfs_wq_run_delayed_node(delayed_root, root, 0);
+}
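
A rough sketch of the two-threshold throttling above, with placeholder constants
and printf standing in for the flush and wait calls: below the background
threshold nothing happens, between the two thresholds a few nodes are flushed
asynchronously, and at the writeback threshold the caller itself is throttled
(in the patch, for up to one second) until the item count drops.

#include <stdio.h>

#define DELAYED_BACKGROUND 100          /* start background flushing   */
#define DELAYED_WRITEBACK  200          /* throttle the caller as well */

static void balance(int items)
{
	if (items < DELAYED_BACKGROUND)
		return;                 /* cheap path: nothing to do */
	if (items >= DELAYED_WRITEBACK) {
		printf("%d items: flush all nodes, wait up to 1s\n", items);
		return;
	}
	printf("%d items: flush a few nodes asynchronously\n", items);
}

int main(void)
{
	balance(50);
	balance(150);
	balance(250);
	return 0;
}
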
+
+int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
+                                  struct btrfs_root *root, const char *name,
+                                  int name_len, struct inode *dir,
+                                  struct btrfs_disk_key *disk_key, u8 type,
+                                  u64 index)
+{
+       struct btrfs_delayed_node *delayed_node;
+       struct btrfs_delayed_item *delayed_item;
+       struct btrfs_dir_item *dir_item;
+       int ret;
+
+       delayed_node = btrfs_get_or_create_delayed_node(dir);
+       if (IS_ERR(delayed_node))
+               return PTR_ERR(delayed_node);
+
+       delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
+       if (!delayed_item) {
+               ret = -ENOMEM;
+               goto release_node;
+       }
+
+       ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
+       /*
+        * we reserved enough space when we started the transaction, so
+        * the metadata reservation here cannot fail
+        */
+       BUG_ON(ret);
+
+       delayed_item->key.objectid = btrfs_ino(dir);
+       btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
+       delayed_item->key.offset = index;
+
+       dir_item = (struct btrfs_dir_item *)delayed_item->data;
+       dir_item->location = *disk_key;
+       dir_item->transid = cpu_to_le64(trans->transid);
+       dir_item->data_len = 0;
+       dir_item->name_len = cpu_to_le16(name_len);
+       dir_item->type = type;
+       memcpy((char *)(dir_item + 1), name, name_len);
+
+       mutex_lock(&delayed_node->mutex);
+       ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
+       if (unlikely(ret)) {
+               printk(KERN_ERR "btrfs: failed to add delayed dir index item "
+                               "(name: %s) into the insertion tree of the "
+                               "delayed node (root id: %llu, inode id: %llu, "
+                               "errno: %d)\n",
+                               name,
+                               (unsigned long long)delayed_node->root->objectid,
+                               (unsigned long long)delayed_node->inode_id,
+                               ret);
+               BUG();
+       }
+       mutex_unlock(&delayed_node->mutex);
+
+release_node:
+       btrfs_release_delayed_node(delayed_node);
+       return ret;
+}
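
The directory entry above is packed into the delayed item's flexible data[]
payload: a fixed header followed immediately by the raw name, which is what the
memcpy to (dir_item + 1) does. A compact userspace sketch with simplified
stand-in types (dir_item_hdr, delayed_item), not the on-disk format:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dir_item_hdr {                   /* stand-in for struct btrfs_dir_item */
	uint64_t transid;
	uint16_t name_len;
	uint8_t  type;
};

struct delayed_item {                   /* stand-in for struct btrfs_delayed_item */
	uint32_t data_len;
	char data[];                    /* header + name live here */
};

int main(void)
{
	const char *name = "example";
	size_t need = sizeof(struct dir_item_hdr) + strlen(name);
	struct delayed_item *item = malloc(sizeof(*item) + need);
	struct dir_item_hdr *hdr;

	if (!item)
		return 1;
	item->data_len = (uint32_t)need;
	hdr = (struct dir_item_hdr *)item->data;
	hdr->transid = 1;
	hdr->type = 2;
	hdr->name_len = (uint16_t)strlen(name);
	memcpy(hdr + 1, name, hdr->name_len);   /* name follows the header */

	printf("payload is %u bytes\n", (unsigned)item->data_len);
	free(item);
	return 0;
}
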
+
+static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
+                                              struct btrfs_delayed_node *node,
+                                              struct btrfs_key *key)
+{
+       struct btrfs_delayed_item *item;
+
+       mutex_lock(&node->mutex);
+       item = __btrfs_lookup_delayed_insertion_item(node, key);
+       if (!item) {
+               mutex_unlock(&node->mutex);
+               return 1;
+       }
+
+       btrfs_delayed_item_release_metadata(root, item);
+       btrfs_release_delayed_item(item);
+       mutex_unlock(&node->mutex);
+       return 0;
+}
+
+int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
+                                  struct btrfs_root *root, struct inode *dir,
+                                  u64 index)
+{
+       struct btrfs_delayed_node *node;
+       struct btrfs_delayed_item *item;
+       struct btrfs_key item_key;
+       int ret;
+
+       node = btrfs_get_or_create_delayed_node(dir);
+       if (IS_ERR(node))
+               return PTR_ERR(node);
+
+       item_key.objectid = btrfs_ino(dir);
+       btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
+       item_key.offset = index;
+
+       ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
+       if (!ret)
+               goto end;
+
+       item = btrfs_alloc_delayed_item(0);
+       if (!item) {
+               ret = -ENOMEM;
+               goto end;
+       }
+
+       item->key = item_key;
+
+       ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
+       /*
+        * we reserved enough space when we started the transaction, so
+        * the metadata reservation here cannot fail.
+        */
+       BUG_ON(ret);
+
+       mutex_lock(&node->mutex);
+       ret = __btrfs_add_delayed_deletion_item(node, item);
+       if (unlikely(ret)) {
+               printk(KERN_ERR "btrfs: failed to add delayed dir index item "
+                               "(index: %llu) into the deletion tree of the "
+                               "delayed node (root id: %llu, inode id: %llu, "
+                               "errno: %d)\n",
+                               (unsigned long long)index,
+                               (unsigned long long)node->root->objectid,
+                               (unsigned long long)node->inode_id,
+                               ret);
+               BUG();
+       }
+       mutex_unlock(&node->mutex);
+end:
+       btrfs_release_delayed_node(node);
+       return ret;
+}
+
+int btrfs_inode_delayed_dir_index_count(struct inode *inode)
+{
+       struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node;
+       int ret = 0;
+
+       if (!delayed_node)
+               return -ENOENT;
+
+       /*
+        * Since we hold the i_mutex of this directory, no new directory
+        * index can be added to the delayed node and index_cnt cannot be
+        * updated now, so we don't need to lock the delayed node.
+        */
+       if (!delayed_node->index_cnt)
+               return -EINVAL;
+
+       BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
+       return ret;
+}
+
+void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
+                            struct list_head *del_list)
+{
+       struct btrfs_delayed_node *delayed_node;
+       struct btrfs_delayed_item *item;
+
+       delayed_node = btrfs_get_delayed_node(inode);
+       if (!delayed_node)
+               return;
+
+       mutex_lock(&delayed_node->mutex);
+       item = __btrfs_first_delayed_insertion_item(delayed_node);
+       while (item) {
+               atomic_inc(&item->refs);
+               list_add_tail(&item->readdir_list, ins_list);
+               item = __btrfs_next_delayed_item(item);
+       }
+
+       item = __btrfs_first_delayed_deletion_item(delayed_node);
+       while (item) {
+               atomic_inc(&item->refs);
+               list_add_tail(&item->readdir_list, del_list);
+               item = __btrfs_next_delayed_item(item);
+       }
+       mutex_unlock(&delayed_node->mutex);
+       /*
+        * This delayed node is still cached in the btrfs inode, so refs
+        * must be > 1 now, and we don't need to check whether it is about
+        * to be freed.
+        *
+        * Besides that, this function is used for readdir, and no delayed
+        * items are inserted or deleted during it, so we also don't need
+        * to requeue or dequeue this delayed node.
+        */
+       atomic_dec(&delayed_node->refs);
+}
+
+void btrfs_put_delayed_items(struct list_head *ins_list,
+                            struct list_head *del_list)
+{
+       struct btrfs_delayed_item *curr, *next;
+
+       list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
+               list_del(&curr->readdir_list);
+               if (atomic_dec_and_test(&curr->refs))
+                       kfree(curr);
+       }
+
+       list_for_each_entry_safe(curr, next, del_list, readdir_list) {
+               list_del(&curr->readdir_list);
+               if (atomic_dec_and_test(&curr->refs))
+                       kfree(curr);
+       }
+}
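
The get/put pairing above is the familiar reference-counting pattern: references
are taken while the delayed node's mutex is held, and whoever drops the last
reference frees the item. A minimal C11 userspace sketch (hypothetical item
type, not the kernel API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	atomic_int refs;
	int payload;
};

static struct item *item_get(struct item *it)
{
	atomic_fetch_add(&it->refs, 1);
	return it;
}

static void item_put(struct item *it)
{
	/* fetch_sub returns the previous value: 1 means we dropped the last ref */
	if (atomic_fetch_sub(&it->refs, 1) == 1) {
		printf("freeing item %d\n", it->payload);
		free(it);
	}
}

int main(void)
{
	struct item *it = malloc(sizeof(*it));

	if (!it)
		return 1;
	atomic_init(&it->refs, 1);      /* owner's reference */
	it->payload = 42;

	item_get(it);                   /* readdir list borrows a reference */
	item_put(it);                   /* readdir done */
	item_put(it);                   /* owner drops the last ref; item is freed */
	return 0;
}
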
+
+int btrfs_should_delete_dir_index(struct list_head *del_list,
+                                 u64 index)
+{
+       struct btrfs_delayed_item *curr, *next;
+       int ret;
+
+       if (list_empty(del_list))
+               return 0;
+
+       list_for_each_entry_safe(curr, next, del_list, readdir_list) {
+               if (curr->key.offset > index)
+                       break;
+
+               list_del(&curr->readdir_list);
+               ret = (curr->key.offset == index);
+
+               if (atomic_dec_and_test(&curr->refs))
+                       kfree(curr);
+
+               if (ret)
+                       return 1;
+       }
+       return 0;
+}
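
Both the deletion list and the readdir stream are processed in ascending index
order, so a single forward scan is enough and consumed entries never need to be
revisited. A small sketch of the same idea, with a sorted array standing in for
the kernel list:

#include <stdbool.h>
#include <stdio.h>

static bool should_delete(const unsigned long long *del, int *pos, int n,
			  unsigned long long index)
{
	while (*pos < n && del[*pos] < index)
		(*pos)++;               /* below the current index: never needed again */
	if (*pos < n && del[*pos] == index) {
		(*pos)++;               /* consume the matching entry */
		return true;
	}
	return false;
}

int main(void)
{
	unsigned long long del[] = { 3, 7, 9 };
	int pos = 0;

	for (unsigned long long idx = 1; idx <= 10; idx++)
		if (should_delete(del, &pos, 3, idx))
			printf("skip index %llu\n", idx);
	return 0;
}
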
+
+/*
+ * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
+ */
+int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
+                                   filldir_t filldir,
+                                   struct list_head *ins_list)
+{
+       struct btrfs_dir_item *di;
+       struct btrfs_delayed_item *curr, *next;
+       struct btrfs_key location;
+       char *name;
+       int name_len;
+       int over = 0;
+       unsigned char d_type;
+
+       if (list_empty(ins_list))
+               return 0;
+
+       /*
+        * The data of a delayed item cannot change, so we don't need to
+        * lock it. And since we hold the i_mutex of the directory, nobody
+        * can delete any directory index now.
+        */
+       list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
+               list_del(&curr->readdir_list);
+
+               if (curr->key.offset < filp->f_pos) {
+                       if (atomic_dec_and_test(&curr->refs))
+                               kfree(curr);
+                       continue;
+               }
+
+               filp->f_pos = curr->key.offset;
+
+               di = (struct btrfs_dir_item *)curr->data;
+               name = (char *)(di + 1);
+               name_len = le16_to_cpu(di->name_len);
+
+               d_type = btrfs_filetype_table[di->type];
+               btrfs_disk_key_to_cpu(&location, &di->location);
+
+               over = filldir(dirent, name, name_len, curr->key.offset,
+                              location.objectid, d_type);
+
+               if (atomic_dec_and_test(&curr->refs))
+                       kfree(curr);
+
+               if (over)
+                       return 1;
+       }
+       return 0;
+}
+
+BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
+                        generation, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
+                        sequence, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
+                        transid, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item,
+                        nbytes, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
+                        block_group, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);
+
+BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);
+
+static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
+                                 struct btrfs_inode_item *inode_item,
+                                 struct inode *inode)
+{
+       btrfs_set_stack_inode_uid(inode_item, inode->i_uid);
+       btrfs_set_stack_inode_gid(inode_item, inode->i_gid);
+       btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
+       btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
+       btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
+       btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
+       btrfs_set_stack_inode_generation(inode_item,
+                                        BTRFS_I(inode)->generation);
+       btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence);
+       btrfs_set_stack_inode_transid(inode_item, trans->transid);
+       btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
+       btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
+       btrfs_set_stack_inode_block_group(inode_item,
+                                         BTRFS_I(inode)->block_group);
+
+       btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
+                                    inode->i_atime.tv_sec);
+       btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
+                                     inode->i_atime.tv_nsec);
+
+       btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
+                                    inode->i_mtime.tv_sec);
+       btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
+                                     inode->i_mtime.tv_nsec);
+
+       btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
+                                    inode->i_ctime.tv_sec);
+       btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
+                                     inode->i_ctime.tv_nsec);
+}
+
+int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
+                              struct btrfs_root *root, struct inode *inode)
+{
+       struct btrfs_delayed_node *delayed_node;
+       int ret;
+
+       delayed_node = btrfs_get_or_create_delayed_node(inode);
+       if (IS_ERR(delayed_node))
+               return PTR_ERR(delayed_node);
+
+       mutex_lock(&delayed_node->mutex);
+       if (delayed_node->inode_dirty) {
+               fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
+               goto release_node;
+       }
+
+       ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
+       /*
+        * we reserved enough space when we started the transaction, so
+        * the metadata reservation here cannot fail
+        */
+       BUG_ON(ret);
+
+       fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
+       delayed_node->inode_dirty = 1;
+       delayed_node->count++;
+       atomic_inc(&root->fs_info->delayed_root->items);
+release_node:
+       mutex_unlock(&delayed_node->mutex);
+       btrfs_release_delayed_node(delayed_node);
+       return ret;
+}
+
+static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
+{
+       struct btrfs_root *root = delayed_node->root;
+       struct btrfs_delayed_item *curr_item, *prev_item;
+
+       mutex_lock(&delayed_node->mutex);
+       curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
+       while (curr_item) {
+               btrfs_delayed_item_release_metadata(root, curr_item);
+               prev_item = curr_item;
+               curr_item = __btrfs_next_delayed_item(prev_item);
+               btrfs_release_delayed_item(prev_item);
+       }
+
+       curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
+       while (curr_item) {
+               btrfs_delayed_item_release_metadata(root, curr_item);
+               prev_item = curr_item;
+               curr_item = __btrfs_next_delayed_item(prev_item);
+               btrfs_release_delayed_item(prev_item);
+       }
+
+       if (delayed_node->inode_dirty) {
+               btrfs_delayed_inode_release_metadata(root, delayed_node);
+               btrfs_release_delayed_inode(delayed_node);
+       }
+       mutex_unlock(&delayed_node->mutex);
+}
+
+void btrfs_kill_delayed_inode_items(struct inode *inode)
+{
+       struct btrfs_delayed_node *delayed_node;
+
+       delayed_node = btrfs_get_delayed_node(inode);
+       if (!delayed_node)
+               return;
+
+       __btrfs_kill_delayed_node(delayed_node);
+       btrfs_release_delayed_node(delayed_node);
+}
+
+void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
+{
+       u64 inode_id = 0;
+       struct btrfs_delayed_node *delayed_nodes[8];
+       int i, n;
+
+       while (1) {
+               spin_lock(&root->inode_lock);
+               n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
+                                          (void **)delayed_nodes, inode_id,
+                                          ARRAY_SIZE(delayed_nodes));
+               if (!n) {
+                       spin_unlock(&root->inode_lock);
+                       break;
+               }
+
+               inode_id = delayed_nodes[n - 1]->inode_id + 1;
+
+               for (i = 0; i < n; i++)
+                       atomic_inc(&delayed_nodes[i]->refs);
+               spin_unlock(&root->inode_lock);
+
+               for (i = 0; i < n; i++) {
+                       __btrfs_kill_delayed_node(delayed_nodes[i]);
+                       btrfs_release_delayed_node(delayed_nodes[i]);
+               }
+       }
+}
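
The loop above walks the radix tree in batches: look up a gang of nodes,
remember the largest inode id returned, and restart the lookup just past it, so
the walk makes progress even while entries are being removed. A userspace
sketch of the same pagination pattern, with a sorted array standing in for the
radix tree:

#include <stdio.h>

#define BATCH 8

static int gang_lookup(const unsigned long *ids, int total,
		       unsigned long first, unsigned long *out, int max)
{
	int n = 0;

	for (int i = 0; i < total && n < max; i++)
		if (ids[i] >= first)
			out[n++] = ids[i];
	return n;
}

int main(void)
{
	unsigned long ids[] = { 1, 2, 5, 8, 13, 21, 34, 55, 89, 144 };
	unsigned long batch[BATCH];
	unsigned long next = 0;
	int n;

	while ((n = gang_lookup(ids, 10, next, batch, BATCH)) != 0) {
		for (int i = 0; i < n; i++)
			printf("kill node %lu\n", batch[i]);
		next = batch[n - 1] + 1;        /* resume just past the last one */
	}
	return 0;
}
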
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
new file mode 100644 (file)
index 0000000..eb7d240
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2011 Fujitsu.  All rights reserved.
+ * Written by Miao Xie <miaox@cn.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __DELAYED_TREE_OPERATION_H
+#define __DELAYED_TREE_OPERATION_H
+
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <asm/atomic.h>
+
+#include "ctree.h"
+
+/* types of the delayed item */
+#define BTRFS_DELAYED_INSERTION_ITEM   1
+#define BTRFS_DELAYED_DELETION_ITEM    2
+
+struct btrfs_delayed_root {
+       spinlock_t lock;
+       struct list_head node_list;
+       /*
+        * Used for delayed nodes that are waiting to be dealt with by the
+        * worker. Once a delayed node is inserted into the work queue, we
+        * drop it from this list.
+        */
+       struct list_head prepare_list;
+       atomic_t items;         /* for delayed items */
+       int nodes;              /* for delayed nodes */
+       wait_queue_head_t wait;
+};
+
+struct btrfs_delayed_node {
+       u64 inode_id;
+       u64 bytes_reserved;
+       struct btrfs_root *root;
+       /* Used to add the node into the delayed root's node list. */
+       struct list_head n_list;
+       /*
+        * Used to add the node to the prepare list; the nodes in this list
+        * are waiting to be dealt with by the async worker.
+        */
+       struct list_head p_list;
+       struct rb_root ins_root;
+       struct rb_root del_root;
+       struct mutex mutex;
+       struct btrfs_inode_item inode_item;
+       atomic_t refs;
+       u64 index_cnt;
+       bool in_list;
+       bool inode_dirty;
+       int count;
+};
+
+struct btrfs_delayed_item {
+       struct rb_node rb_node;
+       struct btrfs_key key;
+       struct list_head tree_list;     /* used for batch insert/delete items */
+       struct list_head readdir_list;  /* used for readdir items */
+       u64 bytes_reserved;
+       struct btrfs_block_rsv *block_rsv;
+       struct btrfs_delayed_node *delayed_node;
+       atomic_t refs;
+       int ins_or_del;
+       u32 data_len;
+       char data[0];
+};
+
+static inline void btrfs_init_delayed_root(
+                               struct btrfs_delayed_root *delayed_root)
+{
+       atomic_set(&delayed_root->items, 0);
+       delayed_root->nodes = 0;
+       spin_lock_init(&delayed_root->lock);
+       init_waitqueue_head(&delayed_root->wait);
+       INIT_LIST_HEAD(&delayed_root->node_list);
+       INIT_LIST_HEAD(&delayed_root->prepare_list);
+}
+
+int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
+                                  struct btrfs_root *root, const char *name,
+                                  int name_len, struct inode *dir,
+                                  struct btrfs_disk_key *disk_key, u8 type,
+                                  u64 index);
+
+int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
+                                  struct btrfs_root *root, struct inode *dir,
+                                  u64 index);
+
+int btrfs_inode_delayed_dir_index_count(struct inode *inode);
+
+int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
+                           struct btrfs_root *root);
+
+void btrfs_balance_delayed_items(struct btrfs_root *root);
+
+int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+                                    struct inode *inode);
+/* Used for evicting the inode. */
+void btrfs_remove_delayed_node(struct inode *inode);
+void btrfs_kill_delayed_inode_items(struct inode *inode);
+
+
+int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
+                              struct btrfs_root *root, struct inode *inode);
+
+/* Used for dropping a dead root */
+void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
+
+/* Used for readdir() */
+void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
+                            struct list_head *del_list);
+void btrfs_put_delayed_items(struct list_head *ins_list,
+                            struct list_head *del_list);
+int btrfs_should_delete_dir_index(struct list_head *del_list,
+                                 u64 index);
+int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
+                                   filldir_t filldir,
+                                   struct list_head *ins_list);
+
+/* for init */
+int __init btrfs_delayed_inode_init(void);
+void btrfs_delayed_inode_exit(void);
+#endif
index bce28f6538995bc908a6b2dee331e59aba91f651..125cf76fcd086803d35bd257bdb54168486e0e0b 100644 (file)
@@ -280,44 +280,6 @@ again:
        return 1;
 }
 
-/*
- * This checks to see if there are any delayed refs in the
- * btree for a given bytenr.  It returns one if it finds any
- * and zero otherwise.
- *
- * If it only finds a head node, it returns 0.
- *
- * The idea is to use this when deciding if you can safely delete an
- * extent from the extent allocation tree.  There may be a pending
- * ref in the rbtree that adds or removes references, so as long as this
- * returns one you need to leave the BTRFS_EXTENT_ITEM in the extent
- * allocation tree.
- */
-int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr)
-{
-       struct btrfs_delayed_ref_node *ref;
-       struct btrfs_delayed_ref_root *delayed_refs;
-       struct rb_node *prev_node;
-       int ret = 0;
-
-       delayed_refs = &trans->transaction->delayed_refs;
-       spin_lock(&delayed_refs->lock);
-
-       ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
-       if (ref) {
-               prev_node = rb_prev(&ref->rb_node);
-               if (!prev_node)
-                       goto out;
-               ref = rb_entry(prev_node, struct btrfs_delayed_ref_node,
-                              rb_node);
-               if (ref->bytenr == bytenr)
-                       ret = 1;
-       }
-out:
-       spin_unlock(&delayed_refs->lock);
-       return ret;
-}
-
 /*
  * helper function to update an extent delayed ref in the
  * rbtree.  existing and update must both have the same
@@ -747,79 +709,3 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
                return btrfs_delayed_node_to_head(ref);
        return NULL;
 }
-
-/*
- * add a delayed ref to the tree.  This does all of the accounting required
- * to make sure the delayed ref is eventually processed before this
- * transaction commits.
- *
- * The main point of this call is to add and remove a backreference in a single
- * shot, taking the lock only once, and only searching for the head node once.
- *
- * It is the same as doing a ref add and delete in two separate calls.
- */
-#if 0
-int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
-                         u64 bytenr, u64 num_bytes, u64 orig_parent,
-                         u64 parent, u64 orig_ref_root, u64 ref_root,
-                         u64 orig_ref_generation, u64 ref_generation,
-                         u64 owner_objectid, int pin)
-{
-       struct btrfs_delayed_ref *ref;
-       struct btrfs_delayed_ref *old_ref;
-       struct btrfs_delayed_ref_head *head_ref;
-       struct btrfs_delayed_ref_root *delayed_refs;
-       int ret;
-
-       ref = kmalloc(sizeof(*ref), GFP_NOFS);
-       if (!ref)
-               return -ENOMEM;
-
-       old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS);
-       if (!old_ref) {
-               kfree(ref);
-               return -ENOMEM;
-       }
-
-       /*
-        * the parent = 0 case comes from cases where we don't actually
-        * know the parent yet.  It will get updated later via a add/drop
-        * pair.
-        */
-       if (parent == 0)
-               parent = bytenr;
-       if (orig_parent == 0)
-               orig_parent = bytenr;
-
-       head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
-       if (!head_ref) {
-               kfree(ref);
-               kfree(old_ref);
-               return -ENOMEM;
-       }
-       delayed_refs = &trans->transaction->delayed_refs;
-       spin_lock(&delayed_refs->lock);
-
-       /*
-        * insert both the head node and the new ref without dropping
-        * the spin lock
-        */
-       ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
-                                     (u64)-1, 0, 0, 0,
-                                     BTRFS_UPDATE_DELAYED_HEAD, 0);
-       BUG_ON(ret);
-
-       ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
-                                     parent, ref_root, ref_generation,
-                                     owner_objectid, BTRFS_ADD_DELAYED_REF, 0);
-       BUG_ON(ret);
-
-       ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes,
-                                     orig_parent, orig_ref_root,
-                                     orig_ref_generation, owner_objectid,
-                                     BTRFS_DROP_DELAYED_REF, pin);
-       BUG_ON(ret);
-       spin_unlock(&delayed_refs->lock);
-       return 0;
-}
-#endif
index 50e3cf92fbdac1610261e02c314a580b71c2f9ab..e287e3b0eab0d970d37f0f4c70fd688b22f276ac 100644 (file)
@@ -166,12 +166,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
 
 struct btrfs_delayed_ref_head *
 btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
-int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr);
-int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
-                         u64 bytenr, u64 num_bytes, u64 orig_parent,
-                         u64 parent, u64 orig_ref_root, u64 ref_root,
-                         u64 orig_ref_generation, u64 ref_generation,
-                         u64 owner_objectid, int pin);
 int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head);
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
index c62f02f6ae693728ce9286d963b78708428a86fb..685f2593c4f049559a087cec8b88daa04a0d357d 100644 (file)
@@ -50,7 +50,6 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle
                if (di)
                        return ERR_PTR(-EEXIST);
                ret = btrfs_extend_item(trans, root, path, data_size);
-               WARN_ON(ret > 0);
        }
        if (ret < 0)
                return ERR_PTR(ret);
@@ -124,8 +123,9 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
  * to use for the second index (if one is created).
  */
 int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
-                         *root, const char *name, int name_len, u64 dir,
-                         struct btrfs_key *location, u8 type, u64 index)
+                         *root, const char *name, int name_len,
+                         struct inode *dir, struct btrfs_key *location,
+                         u8 type, u64 index)
 {
        int ret = 0;
        int ret2 = 0;
@@ -137,13 +137,17 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
        struct btrfs_disk_key disk_key;
        u32 data_size;
 
-       key.objectid = dir;
+       key.objectid = btrfs_ino(dir);
        btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
        key.offset = btrfs_name_hash(name, name_len);
 
        path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
        path->leave_spinning = 1;
 
+       btrfs_cpu_key_to_disk(&disk_key, location);
+
        data_size = sizeof(*dir_item) + name_len;
        dir_item = insert_with_overflow(trans, root, path, &key, data_size,
                                        name, name_len);
@@ -155,7 +159,6 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
        }
 
        leaf = path->nodes[0];
-       btrfs_cpu_key_to_disk(&disk_key, location);
        btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
        btrfs_set_dir_type(leaf, dir_item, type);
        btrfs_set_dir_data_len(leaf, dir_item, 0);
@@ -172,29 +175,11 @@ second_insert:
                ret = 0;
                goto out_free;
        }
-       btrfs_release_path(root, path);
-
-       btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
-       key.offset = index;
-       dir_item = insert_with_overflow(trans, root, path, &key, data_size,
-                                       name, name_len);
-       if (IS_ERR(dir_item)) {
-               ret2 = PTR_ERR(dir_item);
-               goto out_free;
-       }
-       leaf = path->nodes[0];
-       btrfs_cpu_key_to_disk(&disk_key, location);
-       btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
-       btrfs_set_dir_type(leaf, dir_item, type);
-       btrfs_set_dir_data_len(leaf, dir_item, 0);
-       btrfs_set_dir_name_len(leaf, dir_item, name_len);
-       btrfs_set_dir_transid(leaf, dir_item, trans->transid);
-       name_ptr = (unsigned long)(dir_item + 1);
-       write_extent_buffer(leaf, name, name_ptr, name_len);
-       btrfs_mark_buffer_dirty(leaf);
+       btrfs_release_path(path);
 
+       ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir,
+                                             &disk_key, type, index);
 out_free:
-
        btrfs_free_path(path);
        if (ret)
                return ret;
@@ -452,7 +437,7 @@ int verify_dir_item(struct btrfs_root *root,
                namelen = XATTR_NAME_MAX;
 
        if (btrfs_dir_name_len(leaf, dir_item) > namelen) {
-               printk(KERN_CRIT "btrfS: invalid dir item name len: %u\n",
+               printk(KERN_CRIT "btrfs: invalid dir item name len: %u\n",
                       (unsigned)btrfs_dir_data_len(leaf, dir_item));
                return 1;
        }
index 228cf36ece8351475d5075f74e15624e4276e112..98b6a71decba105616ef5727b07b18c6d79dd8e0 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/crc32c.h>
 #include <linux/slab.h>
 #include <linux/migrate.h>
+#include <linux/ratelimit.h>
 #include <asm/unaligned.h>
 #include "compat.h"
 #include "ctree.h"
@@ -41,6 +42,7 @@
 #include "locking.h"
 #include "tree-log.h"
 #include "free-space-cache.h"
+#include "inode-map.h"
 
 static struct extent_io_ops btree_extent_io_ops;
 static void end_workqueue_fn(struct btrfs_work *work);
@@ -137,7 +139,7 @@ static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
  * that covers the entire device
  */
 static struct extent_map *btree_get_extent(struct inode *inode,
-               struct page *page, size_t page_offset, u64 start, u64 len,
+               struct page *page, size_t pg_offset, u64 start, u64 len,
                int create)
 {
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
@@ -154,7 +156,7 @@ static struct extent_map *btree_get_extent(struct inode *inode,
        }
        read_unlock(&em_tree->lock);
 
-       em = alloc_extent_map(GFP_NOFS);
+       em = alloc_extent_map();
        if (!em) {
                em = ERR_PTR(-ENOMEM);
                goto out;
@@ -254,14 +256,12 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
                        memcpy(&found, result, csum_size);
 
                        read_extent_buffer(buf, &val, 0, csum_size);
-                       if (printk_ratelimit()) {
-                               printk(KERN_INFO "btrfs: %s checksum verify "
+                       printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
                                       "failed on %llu wanted %X found %X "
                                       "level %d\n",
                                       root->fs_info->sb->s_id,
                                       (unsigned long long)buf->start, val, found,
                                       btrfs_header_level(buf));
-                       }
                        if (result != (char *)&inline_result)
                                kfree(result);
                        return 1;
@@ -296,13 +296,11 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
                ret = 0;
                goto out;
        }
-       if (printk_ratelimit()) {
-               printk("parent transid verify failed on %llu wanted %llu "
+       printk_ratelimited("parent transid verify failed on %llu wanted %llu "
                       "found %llu\n",
                       (unsigned long long)eb->start,
                       (unsigned long long)parent_transid,
                       (unsigned long long)btrfs_header_generation(eb));
-       }
        ret = 1;
        clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
 out:
@@ -380,7 +378,7 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
        len = page->private >> 2;
        WARN_ON(len == 0);
 
-       eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
+       eb = alloc_extent_buffer(tree, start, len, page);
        if (eb == NULL) {
                WARN_ON(1);
                goto out;
@@ -525,7 +523,7 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
        len = page->private >> 2;
        WARN_ON(len == 0);
 
-       eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
+       eb = alloc_extent_buffer(tree, start, len, page);
        if (eb == NULL) {
                ret = -EIO;
                goto out;
@@ -533,12 +531,10 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 
        found_start = btrfs_header_bytenr(eb);
        if (found_start != start) {
-               if (printk_ratelimit()) {
-                       printk(KERN_INFO "btrfs bad tree block start "
+               printk_ratelimited(KERN_INFO "btrfs bad tree block start "
                               "%llu %llu\n",
                               (unsigned long long)found_start,
                               (unsigned long long)eb->start);
-               }
                ret = -EIO;
                goto err;
        }
@@ -550,10 +546,8 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                goto err;
        }
        if (check_tree_block_fsid(root, eb)) {
-               if (printk_ratelimit()) {
-                       printk(KERN_INFO "btrfs bad fsid on block %llu\n",
+               printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
                               (unsigned long long)eb->start);
-               }
                ret = -EIO;
                goto err;
        }
@@ -650,12 +644,6 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
        return 256 * limit;
 }
 
-int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
-{
-       return atomic_read(&info->nr_async_bios) >
-               btrfs_async_submit_limit(info);
-}
-
 static void run_one_async_start(struct btrfs_work *work)
 {
        struct async_submit_bio *async;
@@ -963,7 +951,7 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;
        eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
-                               bytenr, blocksize, GFP_NOFS);
+                               bytenr, blocksize);
        return eb;
 }
 
@@ -974,7 +962,7 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
        struct extent_buffer *eb;
 
        eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
-                                bytenr, blocksize, NULL, GFP_NOFS);
+                                bytenr, blocksize, NULL);
        return eb;
 }
 
@@ -1058,13 +1046,13 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
        root->name = NULL;
        root->in_sysfs = 0;
        root->inode_tree = RB_ROOT;
+       INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
        root->block_rsv = NULL;
        root->orphan_block_rsv = NULL;
 
        INIT_LIST_HEAD(&root->dirty_list);
        INIT_LIST_HEAD(&root->orphan_list);
        INIT_LIST_HEAD(&root->root_list);
-       spin_lock_init(&root->node_lock);
        spin_lock_init(&root->orphan_lock);
        spin_lock_init(&root->inode_lock);
        spin_lock_init(&root->accounting_lock);
@@ -1080,7 +1068,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
        root->log_transid = 0;
        root->last_log_commit = 0;
        extent_io_tree_init(&root->dirty_log_pages,
-                            fs_info->btree_inode->i_mapping, GFP_NOFS);
+                            fs_info->btree_inode->i_mapping);
 
        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
@@ -1283,21 +1271,6 @@ out:
        return root;
 }
 
-struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
-                                       u64 root_objectid)
-{
-       struct btrfs_root *root;
-
-       if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
-               return fs_info->tree_root;
-       if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
-               return fs_info->extent_root;
-
-       root = radix_tree_lookup(&fs_info->fs_roots_radix,
-                                (unsigned long)root_objectid);
-       return root;
-}
-
 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
                                              struct btrfs_key *location)
 {
@@ -1326,6 +1299,19 @@ again:
        if (IS_ERR(root))
                return root;
 
+       root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
+       if (!root->free_ino_ctl)
+               goto fail;
+       root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
+                                       GFP_NOFS);
+       if (!root->free_ino_pinned)
+               goto fail;
+
+       btrfs_init_free_ino_ctl(root);
+       mutex_init(&root->fs_commit_mutex);
+       spin_lock_init(&root->cache_lock);
+       init_waitqueue_head(&root->cache_wait);
+
        set_anon_super(&root->anon_super, NULL);
 
        if (btrfs_root_refs(&root->root_item) == 0) {
@@ -1369,41 +1355,6 @@ fail:
        return ERR_PTR(ret);
 }
 
-struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
-                                     struct btrfs_key *location,
-                                     const char *name, int namelen)
-{
-       return btrfs_read_fs_root_no_name(fs_info, location);
-#if 0
-       struct btrfs_root *root;
-       int ret;
-
-       root = btrfs_read_fs_root_no_name(fs_info, location);
-       if (!root)
-               return NULL;
-
-       if (root->in_sysfs)
-               return root;
-
-       ret = btrfs_set_root_name(root, name, namelen);
-       if (ret) {
-               free_extent_buffer(root->node);
-               kfree(root);
-               return ERR_PTR(ret);
-       }
-
-       ret = btrfs_sysfs_add_root(root);
-       if (ret) {
-               free_extent_buffer(root->node);
-               kfree(root->name);
-               kfree(root);
-               return ERR_PTR(ret);
-       }
-       root->in_sysfs = 1;
-       return root;
-#endif
-}
-
 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
 {
        struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
@@ -1411,7 +1362,8 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
        struct btrfs_device *device;
        struct backing_dev_info *bdi;
 
-       list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
+       rcu_read_lock();
+       list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
                if (!device->bdev)
                        continue;
                bdi = blk_get_backing_dev_info(device->bdev);
@@ -1420,6 +1372,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits)
                        break;
                }
        }
+       rcu_read_unlock();
        return ret;
 }
 
@@ -1522,6 +1475,7 @@ static int cleaner_kthread(void *arg)
                        btrfs_run_delayed_iputs(root);
                        btrfs_clean_old_snapshots(root);
                        mutex_unlock(&root->fs_info->cleaner_mutex);
+                       btrfs_run_defrag_inodes(root->fs_info);
                }
 
                if (freezing(current)) {
@@ -1611,7 +1565,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
                                                 GFP_NOFS);
        struct btrfs_root *tree_root = btrfs_sb(sb);
-       struct btrfs_fs_info *fs_info = tree_root->fs_info;
+       struct btrfs_fs_info *fs_info = NULL;
        struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
                                                GFP_NOFS);
        struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
@@ -1623,11 +1577,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 
        struct btrfs_super_block *disk_super;
 
-       if (!extent_root || !tree_root || !fs_info ||
+       if (!extent_root || !tree_root || !tree_root->fs_info ||
            !chunk_root || !dev_root || !csum_root) {
                err = -ENOMEM;
                goto fail;
        }
+       fs_info = tree_root->fs_info;
 
        ret = init_srcu_struct(&fs_info->subvol_srcu);
        if (ret) {
@@ -1662,6 +1617,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        spin_lock_init(&fs_info->ref_cache_lock);
        spin_lock_init(&fs_info->fs_roots_radix_lock);
        spin_lock_init(&fs_info->delayed_iput_lock);
+       spin_lock_init(&fs_info->defrag_inodes_lock);
 
        init_completion(&fs_info->kobj_unregister);
        fs_info->tree_root = tree_root;
@@ -1684,15 +1640,35 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        atomic_set(&fs_info->async_delalloc_pages, 0);
        atomic_set(&fs_info->async_submit_draining, 0);
        atomic_set(&fs_info->nr_async_bios, 0);
+       atomic_set(&fs_info->defrag_running, 0);
        fs_info->sb = sb;
        fs_info->max_inline = 8192 * 1024;
        fs_info->metadata_ratio = 0;
+       fs_info->defrag_inodes = RB_ROOT;
 
        fs_info->thread_pool_size = min_t(unsigned long,
                                          num_online_cpus() + 2, 8);
 
        INIT_LIST_HEAD(&fs_info->ordered_extents);
        spin_lock_init(&fs_info->ordered_extent_lock);
+       fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
+                                       GFP_NOFS);
+       if (!fs_info->delayed_root) {
+               err = -ENOMEM;
+               goto fail_iput;
+       }
+       btrfs_init_delayed_root(fs_info->delayed_root);
+
+       mutex_init(&fs_info->scrub_lock);
+       atomic_set(&fs_info->scrubs_running, 0);
+       atomic_set(&fs_info->scrub_pause_req, 0);
+       atomic_set(&fs_info->scrubs_paused, 0);
+       atomic_set(&fs_info->scrub_cancel_req, 0);
+       init_waitqueue_head(&fs_info->scrub_pause_wait);
+       init_rwsem(&fs_info->scrub_super_lock);
+       fs_info->scrub_workers_refcnt = 0;
+       btrfs_init_workers(&fs_info->scrub_workers, "scrub",
+                          fs_info->thread_pool_size, &fs_info->generic_worker);
 
        sb->s_blocksize = 4096;
        sb->s_blocksize_bits = blksize_bits(4096);
@@ -1711,10 +1687,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 
        RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
        extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
-                            fs_info->btree_inode->i_mapping,
-                            GFP_NOFS);
-       extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
-                            GFP_NOFS);
+                            fs_info->btree_inode->i_mapping);
+       extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree);
 
        BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
 
@@ -1728,9 +1702,9 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        fs_info->block_group_cache_tree = RB_ROOT;
 
        extent_io_tree_init(&fs_info->freed_extents[0],
-                            fs_info->btree_inode->i_mapping, GFP_NOFS);
+                            fs_info->btree_inode->i_mapping);
        extent_io_tree_init(&fs_info->freed_extents[1],
-                            fs_info->btree_inode->i_mapping, GFP_NOFS);
+                            fs_info->btree_inode->i_mapping);
        fs_info->pinned_extents = &fs_info->freed_extents[0];
        fs_info->do_barriers = 1;
 
@@ -1760,7 +1734,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        bh = btrfs_read_dev_super(fs_devices->latest_bdev);
        if (!bh) {
                err = -EINVAL;
-               goto fail_iput;
+               goto fail_alloc;
        }
 
        memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
@@ -1772,7 +1746,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 
        disk_super = &fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
-               goto fail_iput;
+               goto fail_alloc;
 
        /* check FS state, whether FS is broken. */
        fs_info->fs_state |= btrfs_super_flags(disk_super);
@@ -1788,7 +1762,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        ret = btrfs_parse_options(tree_root, options);
        if (ret) {
                err = ret;
-               goto fail_iput;
+               goto fail_alloc;
        }
 
        features = btrfs_super_incompat_flags(disk_super) &
@@ -1798,7 +1772,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                       "unsupported optional features (%Lx).\n",
                       (unsigned long long)features);
                err = -EINVAL;
-               goto fail_iput;
+               goto fail_alloc;
        }
 
        features = btrfs_super_incompat_flags(disk_super);
@@ -1814,7 +1788,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                       "unsupported option features (%Lx).\n",
                       (unsigned long long)features);
                err = -EINVAL;
-               goto fail_iput;
+               goto fail_alloc;
        }
 
        btrfs_init_workers(&fs_info->generic_worker,
@@ -1861,6 +1835,9 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                           &fs_info->generic_worker);
        btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
                           1, &fs_info->generic_worker);
+       btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
+                          fs_info->thread_pool_size,
+                          &fs_info->generic_worker);
 
        /*
         * endios are largely parallel and should have a very
@@ -1882,6 +1859,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
        btrfs_start_workers(&fs_info->endio_write_workers, 1);
        btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
+       btrfs_start_workers(&fs_info->delayed_workers, 1);
 
        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
        fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
@@ -2138,6 +2116,9 @@ fail_sb_buffer:
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->endio_freespace_worker);
        btrfs_stop_workers(&fs_info->submit_workers);
+       btrfs_stop_workers(&fs_info->delayed_workers);
+fail_alloc:
+       kfree(fs_info->delayed_root);
 fail_iput:
        invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
        iput(fs_info->btree_inode);
@@ -2165,11 +2146,9 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
-               if (printk_ratelimit()) {
-                       printk(KERN_WARNING "lost page write due to "
+               printk_ratelimited(KERN_WARNING "lost page write due to "
                                        "I/O error on %s\n",
                                       bdevname(bh->b_bdev, b));
-               }
                /* note, we don't set_buffer_write_io_error because we have
                 * our own ways of dealing with the IO errors
                 */
@@ -2333,7 +2312,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
 
        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
        head = &root->fs_info->fs_devices->devices;
-       list_for_each_entry(dev, head, dev_list) {
+       list_for_each_entry_rcu(dev, head, dev_list) {
                if (!dev->bdev) {
                        total_errors++;
                        continue;
@@ -2366,7 +2345,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors)
        }
 
        total_errors = 0;
-       list_for_each_entry(dev, head, dev_list) {
+       list_for_each_entry_rcu(dev, head, dev_list) {
                if (!dev->bdev)
                        continue;
                if (!dev->in_fs_metadata || !dev->writeable)
@@ -2404,12 +2383,15 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
        if (btrfs_root_refs(&root->root_item) == 0)
                synchronize_srcu(&fs_info->subvol_srcu);
 
+       __btrfs_remove_free_space_cache(root->free_ino_pinned);
+       __btrfs_remove_free_space_cache(root->free_ino_ctl);
        free_fs_root(root);
        return 0;
 }
 
 static void free_fs_root(struct btrfs_root *root)
 {
+       iput(root->cache_inode);
        WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
        if (root->anon_super.s_dev) {
                down_write(&root->anon_super.s_umount);
@@ -2417,6 +2399,8 @@ static void free_fs_root(struct btrfs_root *root)
        }
        free_extent_buffer(root->node);
        free_extent_buffer(root->commit_root);
+       kfree(root->free_ino_ctl);
+       kfree(root->free_ino_pinned);
        kfree(root->name);
        kfree(root);
 }
@@ -2520,6 +2504,15 @@ int close_ctree(struct btrfs_root *root)
        fs_info->closing = 1;
        smp_mb();
 
+       btrfs_scrub_cancel(root);
+
+       /* wait for any defraggers to finish */
+       wait_event(fs_info->transaction_wait,
+                  (atomic_read(&fs_info->defrag_running) == 0));
+
+       /* clear out the rbtree of defraggable inodes */
+       btrfs_run_defrag_inodes(root->fs_info);
+
        btrfs_put_block_group_cache(fs_info);
 
        /*
@@ -2578,6 +2571,7 @@ int close_ctree(struct btrfs_root *root)
        del_fs_roots(fs_info);
 
        iput(fs_info->btree_inode);
+       kfree(fs_info->delayed_root);
 
        btrfs_stop_workers(&fs_info->generic_worker);
        btrfs_stop_workers(&fs_info->fixup_workers);
@@ -2589,6 +2583,7 @@ int close_ctree(struct btrfs_root *root)
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->endio_freespace_worker);
        btrfs_stop_workers(&fs_info->submit_workers);
+       btrfs_stop_workers(&fs_info->delayed_workers);
 
        btrfs_close_devices(fs_info->fs_devices);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);
@@ -2662,6 +2657,29 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
        u64 num_dirty;
        unsigned long thresh = 32 * 1024 * 1024;
 
+       if (current->flags & PF_MEMALLOC)
+               return;
+
+       btrfs_balance_delayed_items(root);
+
+       num_dirty = root->fs_info->dirty_metadata_bytes;
+
+       if (num_dirty > thresh) {
+               balance_dirty_pages_ratelimited_nr(
+                                  root->fs_info->btree_inode->i_mapping, 1);
+       }
+       return;
+}
+
+void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
+{
+       /*
+        * It looks as though older kernels can get into trouble with
+        * this code; they end up stuck in balance_dirty_pages forever.
+        */
+       u64 num_dirty;
+       unsigned long thresh = 32 * 1024 * 1024;
+
        if (current->flags & PF_MEMALLOC)
                return;
 
@@ -2697,7 +2715,7 @@ int btree_lock_page_hook(struct page *page)
                goto out;
 
        len = page->private >> 2;
-       eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS);
+       eb = find_extent_buffer(io_tree, bytenr, len);
        if (!eb)
                goto out;
 
index 07b20dc2fd9560c0f497ecc3615cf8da747c4688..a0b610a67aaeadf1f564d03e218305997c021a85 100644 (file)
@@ -55,35 +55,20 @@ int btrfs_commit_super(struct btrfs_root *root);
 int btrfs_error_commit_super(struct btrfs_root *root);
 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
                                            u64 bytenr, u32 blocksize);
-struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
-                                       u64 root_objectid);
-struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
-                                     struct btrfs_key *location,
-                                     const char *name, int namelen);
 struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
                                               struct btrfs_key *location);
 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
                                              struct btrfs_key *location);
 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
-int btrfs_insert_dev_radix(struct btrfs_root *root,
-                          struct block_device *bdev,
-                          u64 device_id,
-                          u64 block_start,
-                          u64 num_blocks);
 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
+void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
 int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
 void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
-void btrfs_mark_buffer_dirty_nonblocking(struct extent_buffer *buf);
 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);
 int btrfs_set_buffer_uptodate(struct extent_buffer *buf);
-int wait_on_tree_block_writeback(struct btrfs_root *root,
-                                struct extent_buffer *buf);
 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid);
 u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len);
 void btrfs_csum_final(u32 crc, char *result);
-int btrfs_open_device(struct btrfs_device *dev);
-int btrfs_verify_block_csum(struct btrfs_root *root,
-                           struct extent_buffer *buf);
 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        int metadata);
 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
@@ -91,8 +76,6 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
                        unsigned long bio_flags, u64 bio_offset,
                        extent_submit_bio_hook_t *submit_bio_start,
                        extent_submit_bio_hook_t *submit_bio_done);
-
-int btrfs_congested_async(struct btrfs_fs_info *info, int iodone);
 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info);
 int btrfs_write_tree_block(struct extent_buffer *buf);
 int btrfs_wait_tree_block_writeback(struct extent_buffer *buf);
index b4ffad859adb31a5ad5d8628a1fd6a10adae06ec..1b8dc33778f9c411206cc6ad9467546412431947 100644 (file)
@@ -32,7 +32,7 @@ static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
        len  = BTRFS_FID_SIZE_NON_CONNECTABLE;
        type = FILEID_BTRFS_WITHOUT_PARENT;
 
-       fid->objectid = inode->i_ino;
+       fid->objectid = btrfs_ino(inode);
        fid->root_objectid = BTRFS_I(inode)->root->objectid;
        fid->gen = inode->i_generation;
 
@@ -178,13 +178,13 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
        if (!path)
                return ERR_PTR(-ENOMEM);
 
-       if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+       if (btrfs_ino(dir) == BTRFS_FIRST_FREE_OBJECTID) {
                key.objectid = root->root_key.objectid;
                key.type = BTRFS_ROOT_BACKREF_KEY;
                key.offset = (u64)-1;
                root = root->fs_info->tree_root;
        } else {
-               key.objectid = dir->i_ino;
+               key.objectid = btrfs_ino(dir);
                key.type = BTRFS_INODE_REF_KEY;
                key.offset = (u64)-1;
        }
@@ -244,6 +244,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
        struct btrfs_key key;
        int name_len;
        int ret;
+       u64 ino;
 
        if (!dir || !inode)
                return -EINVAL;
@@ -251,19 +252,21 @@ static int btrfs_get_name(struct dentry *parent, char *name,
        if (!S_ISDIR(dir->i_mode))
                return -EINVAL;
 
+       ino = btrfs_ino(inode);
+
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->leave_spinning = 1;
 
-       if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+       if (ino == BTRFS_FIRST_FREE_OBJECTID) {
                key.objectid = BTRFS_I(inode)->root->root_key.objectid;
                key.type = BTRFS_ROOT_BACKREF_KEY;
                key.offset = (u64)-1;
                root = root->fs_info->tree_root;
        } else {
-               key.objectid = inode->i_ino;
-               key.offset = dir->i_ino;
+               key.objectid = ino;
+               key.offset = btrfs_ino(dir);
                key.type = BTRFS_INODE_REF_KEY;
        }
 
@@ -272,7 +275,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
                btrfs_free_path(path);
                return ret;
        } else if (ret > 0) {
-               if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+               if (ino == BTRFS_FIRST_FREE_OBJECTID) {
                        path->slots[0]--;
                } else {
                        btrfs_free_path(path);
@@ -281,11 +284,11 @@ static int btrfs_get_name(struct dentry *parent, char *name,
        }
        leaf = path->nodes[0];
 
-       if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
-              rref = btrfs_item_ptr(leaf, path->slots[0],
+       if (ino == BTRFS_FIRST_FREE_OBJECTID) {
+               rref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_root_ref);
-              name_ptr = (unsigned long)(rref + 1);
-              name_len = btrfs_root_ref_name_len(leaf, rref);
+               name_ptr = (unsigned long)(rref + 1);
+               name_len = btrfs_root_ref_name_len(leaf, rref);
        } else {
                iref = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_inode_ref);
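The fh-encoding and parent-lookup hunks above replace direct inode->i_ino reads with btrfs_ino(inode) wherever a key objectid is built. A rough sketch of the kind of helper this assumes, not its exact definition: it yields the btrfs objectid tracked with the in-memory inode, which can differ from the VFS inode number.

/*
 * Sketch only, under the assumption that btrfs_ino() resolves to the
 * objectid cached in the btrfs inode; the real helper may be defined
 * differently.
 */
static inline u64 btrfs_ino_sketch(struct inode *inode)
{
        return BTRFS_I(inode)->location.objectid;
}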
index 9ee6bd55e16c5ea4cc9e2f1e31289ba6591730b1..169bd62ce776257e72badbb2f16f4dbb1d0eaa56 100644 (file)
@@ -94,7 +94,7 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
        return (cache->flags & bits) == bits;
 }
 
-void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
+static void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
 {
        atomic_inc(&cache->count);
 }
@@ -105,6 +105,7 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                WARN_ON(cache->reserved_pinned > 0);
+               kfree(cache->free_space_ctl);
                kfree(cache);
        }
 }
@@ -379,7 +380,7 @@ again:
                                break;
 
                        caching_ctl->progress = last;
-                       btrfs_release_path(extent_root, path);
+                       btrfs_release_path(path);
                        up_read(&fs_info->extent_commit_sem);
                        mutex_unlock(&caching_ctl->mutex);
                        if (btrfs_transaction_in_commit(fs_info))
@@ -754,8 +755,12 @@ again:
                        atomic_inc(&head->node.refs);
                        spin_unlock(&delayed_refs->lock);
 
-                       btrfs_release_path(root->fs_info->extent_root, path);
+                       btrfs_release_path(path);
 
+                       /*
+                        * Mutex was contended, block until it's released and try
+                        * again
+                        */
                        mutex_lock(&head->mutex);
                        mutex_unlock(&head->mutex);
                        btrfs_put_delayed_ref(&head->node);
@@ -934,7 +939,7 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                        break;
                }
        }
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 
        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);
@@ -947,7 +952,6 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
        BUG_ON(ret);
 
        ret = btrfs_extend_item(trans, root, path, new_size);
-       BUG_ON(ret);
 
        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
@@ -1042,7 +1046,7 @@ again:
                        return 0;
 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
@@ -1080,7 +1084,7 @@ again:
                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
-                               btrfs_release_path(root, path);
+                               btrfs_release_path(path);
                                goto again;
                        }
                        err = 0;
@@ -1141,7 +1145,7 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
@@ -1167,7 +1171,7 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
 fail:
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
        return ret;
 }
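The recurring one-line change in these extent-tree hunks drops the root argument from btrfs_release_path(); only the path is needed to drop whatever locks and buffer references it holds. The prototype implied by the new call sites:

/* Signature implied by the '+' call sites above. */
void btrfs_release_path(struct btrfs_path *p);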
 
@@ -1293,7 +1297,7 @@ static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
                ret = -ENOENT;
 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (ret == -ENOENT && parent) {
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0)
@@ -1322,7 +1326,7 @@ static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
        }
 
        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
        return ret;
 }
 
@@ -1555,7 +1559,6 @@ int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
        size = btrfs_extent_inline_ref_size(type);
 
        ret = btrfs_extend_item(trans, root, path, size);
-       BUG_ON(ret);
 
        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        refs = btrfs_extent_refs(leaf, ei);
@@ -1608,7 +1611,7 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans,
        if (ret != -ENOENT)
                return ret;
 
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
        *ref_ret = NULL;
 
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
@@ -1684,7 +1687,6 @@ int update_inline_extent_backref(struct btrfs_trans_handle *trans,
                                              end - ptr - size);
                item_size -= size;
                ret = btrfs_truncate_item(trans, root, path, item_size, 1);
-               BUG_ON(ret);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
@@ -1862,7 +1864,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                __run_delayed_extent_op(extent_op, leaf, item);
 
        btrfs_mark_buffer_dirty(leaf);
-       btrfs_release_path(root->fs_info->extent_root, path);
+       btrfs_release_path(path);
 
        path->reada = 1;
        path->leave_spinning = 1;
@@ -2297,6 +2299,10 @@ again:
                                atomic_inc(&ref->refs);
 
                                spin_unlock(&delayed_refs->lock);
+                               /*
+                                * Mutex was contended, block until it's
+                                * released and try again
+                                */
                                mutex_lock(&head->mutex);
                                mutex_unlock(&head->mutex);
 
@@ -2361,8 +2367,12 @@ static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
                atomic_inc(&head->node.refs);
                spin_unlock(&delayed_refs->lock);
 
-               btrfs_release_path(root->fs_info->extent_root, path);
+               btrfs_release_path(path);
 
+               /*
+                * Mutex was contended, block until it's released and let
+                * caller try again
+                */
                mutex_lock(&head->mutex);
                mutex_unlock(&head->mutex);
                btrfs_put_delayed_ref(&head->node);
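The comments added in these hunks describe a lock-and-release idiom: the delayed-ref head's mutex is taken only to block until the current holder finishes, then dropped immediately so the caller can retry from the top. A generic illustration with made-up names:

/*
 * Illustration of the wait-for-holder idiom used above: acquiring and
 * immediately releasing the contended mutex only blocks the caller
 * until the current holder is done; the lock itself is never needed.
 */
static void wait_for_current_holder(struct mutex *lock)
{
        mutex_lock(lock);
        mutex_unlock(lock);
}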
@@ -2510,126 +2520,6 @@ out:
        return ret;
 }
 
-#if 0
-int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-                   struct extent_buffer *buf, u32 nr_extents)
-{
-       struct btrfs_key key;
-       struct btrfs_file_extent_item *fi;
-       u64 root_gen;
-       u32 nritems;
-       int i;
-       int level;
-       int ret = 0;
-       int shared = 0;
-
-       if (!root->ref_cows)
-               return 0;
-
-       if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
-               shared = 0;
-               root_gen = root->root_key.offset;
-       } else {
-               shared = 1;
-               root_gen = trans->transid - 1;
-       }
-
-       level = btrfs_header_level(buf);
-       nritems = btrfs_header_nritems(buf);
-
-       if (level == 0) {
-               struct btrfs_leaf_ref *ref;
-               struct btrfs_extent_info *info;
-
-               ref = btrfs_alloc_leaf_ref(root, nr_extents);
-               if (!ref) {
-                       ret = -ENOMEM;
-                       goto out;
-               }
-
-               ref->root_gen = root_gen;
-               ref->bytenr = buf->start;
-               ref->owner = btrfs_header_owner(buf);
-               ref->generation = btrfs_header_generation(buf);
-               ref->nritems = nr_extents;
-               info = ref->extents;
-
-               for (i = 0; nr_extents > 0 && i < nritems; i++) {
-                       u64 disk_bytenr;
-                       btrfs_item_key_to_cpu(buf, &key, i);
-                       if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
-                               continue;
-                       fi = btrfs_item_ptr(buf, i,
-                                           struct btrfs_file_extent_item);
-                       if (btrfs_file_extent_type(buf, fi) ==
-                           BTRFS_FILE_EXTENT_INLINE)
-                               continue;
-                       disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
-                       if (disk_bytenr == 0)
-                               continue;
-
-                       info->bytenr = disk_bytenr;
-                       info->num_bytes =
-                               btrfs_file_extent_disk_num_bytes(buf, fi);
-                       info->objectid = key.objectid;
-                       info->offset = key.offset;
-                       info++;
-               }
-
-               ret = btrfs_add_leaf_ref(root, ref, shared);
-               if (ret == -EEXIST && shared) {
-                       struct btrfs_leaf_ref *old;
-                       old = btrfs_lookup_leaf_ref(root, ref->bytenr);
-                       BUG_ON(!old);
-                       btrfs_remove_leaf_ref(root, old);
-                       btrfs_free_leaf_ref(root, old);
-                       ret = btrfs_add_leaf_ref(root, ref, shared);
-               }
-               WARN_ON(ret);
-               btrfs_free_leaf_ref(root, ref);
-       }
-out:
-       return ret;
-}
-
-/* when a block goes through cow, we update the reference counts of
- * everything that block points to.  The internal pointers of the block
- * can be in just about any order, and it is likely to have clusters of
- * things that are close together and clusters of things that are not.
- *
- * To help reduce the seeks that come with updating all of these reference
- * counts, sort them by byte number before actual updates are done.
- *
- * struct refsort is used to match byte number to slot in the btree block.
- * we sort based on the byte number and then use the slot to actually
- * find the item.
- *
- * struct refsort is smaller than strcut btrfs_item and smaller than
- * struct btrfs_key_ptr.  Since we're currently limited to the page size
- * for a btree block, there's no way for a kmalloc of refsorts for a
- * single node to be bigger than a page.
- */
-struct refsort {
-       u64 bytenr;
-       u32 slot;
-};
-
-/*
- * for passing into sort()
- */
-static int refsort_cmp(const void *a_void, const void *b_void)
-{
-       const struct refsort *a = a_void;
-       const struct refsort *b = b_void;
-
-       if (a->bytenr < b->bytenr)
-               return -1;
-       if (a->bytenr > b->bytenr)
-               return 1;
-       return 0;
-}
-#endif
-
 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct extent_buffer *buf,
@@ -2732,7 +2622,7 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
        bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
        write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
        btrfs_mark_buffer_dirty(leaf);
-       btrfs_release_path(extent_root, path);
+       btrfs_release_path(path);
 fail:
        if (ret)
                return ret;
@@ -2785,7 +2675,7 @@ again:
        inode = lookup_free_space_inode(root, block_group, path);
        if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
                ret = PTR_ERR(inode);
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
                goto out;
        }
 
@@ -2854,7 +2744,7 @@ again:
 out_put:
        iput(inode);
 out_free:
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 out:
        spin_lock(&block_group->lock);
        block_group->disk_cache_state = dcs;
@@ -3144,7 +3034,8 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
        /* make sure bytes are sectorsize aligned */
        bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
 
-       if (root == root->fs_info->tree_root) {
+       if (root == root->fs_info->tree_root ||
+           BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
                alloc_chunk = 0;
                committed = 1;
        }
@@ -3211,18 +3102,6 @@ commit_trans:
                        goto again;
                }
 
-#if 0 /* I hope we never need this code again, just in case */
-               printk(KERN_ERR "no space left, need %llu, %llu bytes_used, "
-                      "%llu bytes_reserved, " "%llu bytes_pinned, "
-                      "%llu bytes_readonly, %llu may use %llu total\n",
-                      (unsigned long long)bytes,
-                      (unsigned long long)data_sinfo->bytes_used,
-                      (unsigned long long)data_sinfo->bytes_reserved,
-                      (unsigned long long)data_sinfo->bytes_pinned,
-                      (unsigned long long)data_sinfo->bytes_readonly,
-                      (unsigned long long)data_sinfo->bytes_may_use,
-                      (unsigned long long)data_sinfo->total_bytes);
-#endif
                return -ENOSPC;
        }
        data_sinfo->bytes_may_use += bytes;
@@ -3425,6 +3304,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
        if (reserved == 0)
                return 0;
 
+       /* nothing to shrink - nothing to reclaim */
+       if (root->fs_info->delalloc_bytes == 0)
+               return 0;
+
        max_reclaim = min(reserved, to_reclaim);
 
        while (loops < 1024) {
@@ -3651,8 +3534,8 @@ static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
        spin_unlock(&block_rsv->lock);
 }
 
-void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
-                            struct btrfs_block_rsv *dest, u64 num_bytes)
+static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
+                                   struct btrfs_block_rsv *dest, u64 num_bytes)
 {
        struct btrfs_space_info *space_info = block_rsv->space_info;
 
@@ -3855,23 +3738,7 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
        u64 meta_used;
        u64 data_used;
        int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
-#if 0
-       /*
-        * per tree used space accounting can be inaccuracy, so we
-        * can't rely on it.
-        */
-       spin_lock(&fs_info->extent_root->accounting_lock);
-       num_bytes = btrfs_root_used(&fs_info->extent_root->root_item);
-       spin_unlock(&fs_info->extent_root->accounting_lock);
-
-       spin_lock(&fs_info->csum_root->accounting_lock);
-       num_bytes += btrfs_root_used(&fs_info->csum_root->root_item);
-       spin_unlock(&fs_info->csum_root->accounting_lock);
 
-       spin_lock(&fs_info->tree_root->accounting_lock);
-       num_bytes += btrfs_root_used(&fs_info->tree_root->root_item);
-       spin_unlock(&fs_info->tree_root->accounting_lock);
-#endif
        sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
        spin_lock(&sinfo->lock);
        data_used = sinfo->bytes_used;
@@ -3924,10 +3791,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
                block_rsv->reserved = block_rsv->size;
                block_rsv->full = 1;
        }
-#if 0
-       printk(KERN_INFO"global block rsv size %llu reserved %llu\n",
-               block_rsv->size, block_rsv->reserved);
-#endif
+
        spin_unlock(&sinfo->lock);
        spin_unlock(&block_rsv->lock);
 }
@@ -3973,12 +3837,6 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
        WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
 }
 
-static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items)
-{
-       return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
-               3 * num_items;
-}
-
 int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 int num_items)
@@ -3989,7 +3847,7 @@ int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
        if (num_items == 0 || root->fs_info->chunk_root == root)
                return 0;
 
-       num_bytes = calc_trans_metadata_size(root, num_items);
+       num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
        ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
                                  num_bytes);
        if (!ret) {
@@ -4028,14 +3886,14 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
         * If all of the metadata space is used, we can commit
         * transaction and use space it freed.
         */
-       u64 num_bytes = calc_trans_metadata_size(root, 4);
+       u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4);
        return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
 }
 
 void btrfs_orphan_release_metadata(struct inode *inode)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
-       u64 num_bytes = calc_trans_metadata_size(root, 4);
+       u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4);
        btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
 }
 
@@ -4049,7 +3907,7 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
         * two for root back/forward refs, two for directory entries
         * and one for root of the snapshot.
         */
-       u64 num_bytes = calc_trans_metadata_size(root, 5);
+       u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
        dst_rsv->space_info = src_rsv->space_info;
        return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
 }
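The static calc_trans_metadata_size() removed above sized reservations as (leafsize + nodesize * (BTRFS_MAX_LEVEL - 1)) * 3 per item, and the call sites now use the btrfs_-prefixed version instead. A worked example, assuming the common 4 KiB leaf/node size and a BTRFS_MAX_LEVEL of 8 (values not stated in this patch):

/*
 * Worked example, not kernel code: with 4 KiB leaves/nodes and
 * BTRFS_MAX_LEVEL == 8, each reserved item accounts for
 * (4096 + 4096 * 7) * 3 = 98304 bytes, i.e. 96 KiB.
 */
static u64 example_trans_metadata_size(int num_items)
{
        return 98304ULL * num_items;
}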
@@ -4078,7 +3936,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 
        if (nr_extents > reserved_extents) {
                nr_extents -= reserved_extents;
-               to_reserve = calc_trans_metadata_size(root, nr_extents);
+               to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
        } else {
                nr_extents = 0;
                to_reserve = 0;
@@ -4132,7 +3990,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
 
        to_free = calc_csum_metadata_size(inode, num_bytes);
        if (nr_extents > 0)
-               to_free += calc_trans_metadata_size(root, nr_extents);
+               to_free += btrfs_calc_trans_metadata_size(root, nr_extents);
 
        btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
                                to_free);
@@ -4541,7 +4399,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                                    NULL, refs_to_drop,
                                                    is_data);
                        BUG_ON(ret);
-                       btrfs_release_path(extent_root, path);
+                       btrfs_release_path(path);
                        path->leave_spinning = 1;
 
                        key.objectid = bytenr;
@@ -4580,7 +4438,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                             owner_objectid, 0);
                BUG_ON(ret < 0);
 
-               btrfs_release_path(extent_root, path);
+               btrfs_release_path(path);
                path->leave_spinning = 1;
 
                key.objectid = bytenr;
@@ -4650,7 +4508,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
                                      num_to_del);
                BUG_ON(ret);
-               btrfs_release_path(extent_root, path);
+               btrfs_release_path(path);
 
                if (is_data) {
                        ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
@@ -4893,7 +4751,7 @@ wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
                return 0;
 
        wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
-                  (cache->free_space >= num_bytes));
+                  (cache->free_space_ctl->free_space >= num_bytes));
 
        put_caching_control(caching_ctl);
        return 0;
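Together with the kfree(cache->free_space_ctl) added further up, this hunk reflects free-space accounting moving behind a separately allocated control structure instead of living directly in the block group cache. A minimal assumed shape, abbreviated and not the real definition:

/*
 * Assumed shape only; the real control structure carries more state
 * (the free-space tree, thresholds, locking). What matters for the
 * hunks above is that free_space sits behind a pointer that must be
 * freed along with the block group cache.
 */
struct btrfs_free_space_ctl_sketch {
        u64 free_space;
};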
@@ -6480,7 +6338,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
                                trans->block_rsv = block_rsv;
                }
        }
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
        BUG_ON(err);
 
        ret = btrfs_del_root(trans, tree_root, &root->root_key);
@@ -6584,1662 +6442,154 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
        return ret;
 }
 
-#if 0
-static unsigned long calc_ra(unsigned long start, unsigned long last,
-                            unsigned long nr)
-{
-       return min(last, start + nr - 1);
-}
-
-static noinline int relocate_inode_pages(struct inode *inode, u64 start,
-                                        u64 len)
+static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
 {
-       u64 page_start;
-       u64 page_end;
-       unsigned long first_index;
-       unsigned long last_index;
-       unsigned long i;
-       struct page *page;
-       struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
-       struct file_ra_state *ra;
-       struct btrfs_ordered_extent *ordered;
-       unsigned int total_read = 0;
-       unsigned int total_dirty = 0;
-       int ret = 0;
-
-       ra = kzalloc(sizeof(*ra), GFP_NOFS);
-       if (!ra)
-               return -ENOMEM;
+       u64 num_devices;
+       u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
+               BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
 
-       mutex_lock(&inode->i_mutex);
-       first_index = start >> PAGE_CACHE_SHIFT;
-       last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
+       /*
+        * we add in the count of missing devices because we want
+        * to make sure that any RAID levels on a degraded FS
+        * continue to be honored.
+        */
+       num_devices = root->fs_info->fs_devices->rw_devices +
+               root->fs_info->fs_devices->missing_devices;
 
-       /* make sure the dirty trick played by the caller work */
-       ret = invalidate_inode_pages2_range(inode->i_mapping,
-                                           first_index, last_index);
-       if (ret)
-               goto out_unlock;
+       if (num_devices == 1) {
+               stripped |= BTRFS_BLOCK_GROUP_DUP;
+               stripped = flags & ~stripped;
 
-       file_ra_state_init(ra, inode->i_mapping);
+               /* turn raid0 into single device chunks */
+               if (flags & BTRFS_BLOCK_GROUP_RAID0)
+                       return stripped;
 
-       for (i = first_index ; i <= last_index; i++) {
-               if (total_read % ra->ra_pages == 0) {
-                       btrfs_force_ra(inode->i_mapping, ra, NULL, i,
-                                      calc_ra(i, last_index, ra->ra_pages));
-               }
-               total_read++;
-again:
-               if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
-                       BUG_ON(1);
-               page = grab_cache_page(inode->i_mapping, i);
-               if (!page) {
-                       ret = -ENOMEM;
-                       goto out_unlock;
-               }
-               if (!PageUptodate(page)) {
-                       btrfs_readpage(NULL, page);
-                       lock_page(page);
-                       if (!PageUptodate(page)) {
-                               unlock_page(page);
-                               page_cache_release(page);
-                               ret = -EIO;
-                               goto out_unlock;
-                       }
-               }
-               wait_on_page_writeback(page);
-
-               page_start = (u64)page->index << PAGE_CACHE_SHIFT;
-               page_end = page_start + PAGE_CACHE_SIZE - 1;
-               lock_extent(io_tree, page_start, page_end, GFP_NOFS);
-
-               ordered = btrfs_lookup_ordered_extent(inode, page_start);
-               if (ordered) {
-                       unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
-                       unlock_page(page);
-                       page_cache_release(page);
-                       btrfs_start_ordered_extent(inode, ordered, 1);
-                       btrfs_put_ordered_extent(ordered);
-                       goto again;
-               }
-               set_page_extent_mapped(page);
+               /* turn mirroring into duplication */
+               if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
+                            BTRFS_BLOCK_GROUP_RAID10))
+                       return stripped | BTRFS_BLOCK_GROUP_DUP;
+               return flags;
+       } else {
+               /* they already had raid on here, just return */
+               if (flags & stripped)
+                       return flags;
 
-               if (i == first_index)
-                       set_extent_bits(io_tree, page_start, page_end,
-                                       EXTENT_BOUNDARY, GFP_NOFS);
-               btrfs_set_extent_delalloc(inode, page_start, page_end);
+               stripped |= BTRFS_BLOCK_GROUP_DUP;
+               stripped = flags & ~stripped;
 
-               set_page_dirty(page);
-               total_dirty++;
+               /* switch duplicated blocks with raid1 */
+               if (flags & BTRFS_BLOCK_GROUP_DUP)
+                       return stripped | BTRFS_BLOCK_GROUP_RAID1;
 
-               unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
-               unlock_page(page);
-               page_cache_release(page);
+               /* turn single device chunks into raid0 */
+               return stripped | BTRFS_BLOCK_GROUP_RAID0;
        }
-
-out_unlock:
-       kfree(ra);
-       mutex_unlock(&inode->i_mutex);
-       balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
-       return ret;
+       return flags;
 }
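update_block_group_flags() above maps a block group's RAID profile onto what the current device count (rw plus missing) can still honor. A worked call for illustration, with a made-up wrapper name that would have to live in the same file since the helper is static:

/*
 * Illustration only: with num_devices == 1 the function above turns
 * RAID1/RAID10 into DUP and strips RAID0 down to plain chunks.
 */
static u64 example_flags_on_one_device(struct btrfs_root *root)
{
        /* expected result: BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_DUP */
        return update_block_group_flags(root, BTRFS_BLOCK_GROUP_DATA |
                                              BTRFS_BLOCK_GROUP_RAID1);
}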
 
-static noinline int relocate_data_extent(struct inode *reloc_inode,
-                                        struct btrfs_key *extent_key,
-                                        u64 offset)
+static int set_block_group_ro(struct btrfs_block_group_cache *cache)
 {
-       struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
-       struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
-       struct extent_map *em;
-       u64 start = extent_key->objectid - offset;
-       u64 end = start + extent_key->offset - 1;
+       struct btrfs_space_info *sinfo = cache->space_info;
+       u64 num_bytes;
+       int ret = -ENOSPC;
 
-       em = alloc_extent_map(GFP_NOFS);
-       BUG_ON(!em);
+       if (cache->ro)
+               return 0;
 
-       em->start = start;
-       em->len = extent_key->offset;
-       em->block_len = extent_key->offset;
-       em->block_start = extent_key->objectid;
-       em->bdev = root->fs_info->fs_devices->latest_bdev;
-       set_bit(EXTENT_FLAG_PINNED, &em->flags);
+       spin_lock(&sinfo->lock);
+       spin_lock(&cache->lock);
+       num_bytes = cache->key.offset - cache->reserved - cache->pinned -
+                   cache->bytes_super - btrfs_block_group_used(&cache->item);
 
-       /* setup extent map to cheat btrfs_readpage */
-       lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
-       while (1) {
-               int ret;
-               write_lock(&em_tree->lock);
-               ret = add_extent_mapping(em_tree, em);
-               write_unlock(&em_tree->lock);
-               if (ret != -EEXIST) {
-                       free_extent_map(em);
-                       break;
-               }
-               btrfs_drop_extent_cache(reloc_inode, start, end, 0);
+       if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
+           sinfo->bytes_may_use + sinfo->bytes_readonly +
+           cache->reserved_pinned + num_bytes <= sinfo->total_bytes) {
+               sinfo->bytes_readonly += num_bytes;
+               sinfo->bytes_reserved += cache->reserved_pinned;
+               cache->reserved_pinned = 0;
+               cache->ro = 1;
+               ret = 0;
        }
-       unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
 
-       return relocate_inode_pages(reloc_inode, start, extent_key->offset);
+       spin_unlock(&cache->lock);
+       spin_unlock(&sinfo->lock);
+       return ret;
 }
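set_block_group_ro() above only flips the group read-only when counting its unused bytes as read-only still fits within the space_info totals. The same check restated as a standalone predicate, purely for clarity:

/*
 * Illustrative restatement of the check in set_block_group_ro(): the
 * conversion is allowed only while all used, reserved, pinned and
 * read-only bytes, plus this group's unused bytes, fit in the total.
 */
static int ro_conversion_fits(u64 used, u64 reserved, u64 pinned,
                              u64 may_use, u64 readonly,
                              u64 reserved_pinned, u64 unused, u64 total)
{
        return used + reserved + pinned + may_use + readonly +
               reserved_pinned + unused <= total;
}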
 
-struct btrfs_ref_path {
-       u64 extent_start;
-       u64 nodes[BTRFS_MAX_LEVEL];
-       u64 root_objectid;
-       u64 root_generation;
-       u64 owner_objectid;
-       u32 num_refs;
-       int lowest_level;
-       int current_level;
-       int shared_level;
-
-       struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
-       u64 new_nodes[BTRFS_MAX_LEVEL];
-};
-
-struct disk_extent {
-       u64 ram_bytes;
-       u64 disk_bytenr;
-       u64 disk_num_bytes;
-       u64 offset;
-       u64 num_bytes;
-       u8 compression;
-       u8 encryption;
-       u16 other_encoding;
-};
+int btrfs_set_block_group_ro(struct btrfs_root *root,
+                            struct btrfs_block_group_cache *cache)
 
-static int is_cowonly_root(u64 root_objectid)
 {
-       if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
-           root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
-           root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
-           root_objectid == BTRFS_DEV_TREE_OBJECTID ||
-           root_objectid == BTRFS_TREE_LOG_OBJECTID ||
-           root_objectid == BTRFS_CSUM_TREE_OBJECTID)
-               return 1;
-       return 0;
-}
+       struct btrfs_trans_handle *trans;
+       u64 alloc_flags;
+       int ret;
 
-static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
-                                   struct btrfs_root *extent_root,
-                                   struct btrfs_ref_path *ref_path,
-                                   int first_time)
-{
-       struct extent_buffer *leaf;
-       struct btrfs_path *path;
-       struct btrfs_extent_ref *ref;
-       struct btrfs_key key;
-       struct btrfs_key found_key;
-       u64 bytenr;
-       u32 nritems;
-       int level;
-       int ret = 1;
+       BUG_ON(cache->ro);
 
-       path = btrfs_alloc_path();
-       if (!path)
-               return -ENOMEM;
+       trans = btrfs_join_transaction(root, 1);
+       BUG_ON(IS_ERR(trans));
 
-       if (first_time) {
-               ref_path->lowest_level = -1;
-               ref_path->current_level = -1;
-               ref_path->shared_level = -1;
-               goto walk_up;
-       }
-walk_down:
-       level = ref_path->current_level - 1;
-       while (level >= -1) {
-               u64 parent;
-               if (level < ref_path->lowest_level)
-                       break;
+       alloc_flags = update_block_group_flags(root, cache->flags);
+       if (alloc_flags != cache->flags)
+               do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+                              CHUNK_ALLOC_FORCE);
 
-               if (level >= 0)
-                       bytenr = ref_path->nodes[level];
-               else
-                       bytenr = ref_path->extent_start;
-               BUG_ON(bytenr == 0);
+       ret = set_block_group_ro(cache);
+       if (!ret)
+               goto out;
+       alloc_flags = get_alloc_profile(root, cache->space_info->flags);
+       ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+                            CHUNK_ALLOC_FORCE);
+       if (ret < 0)
+               goto out;
+       ret = set_block_group_ro(cache);
+out:
+       btrfs_end_transaction(trans, root);
+       return ret;
+}
 
-               parent = ref_path->nodes[level + 1];
-               ref_path->nodes[level + 1] = 0;
-               ref_path->current_level = level;
-               BUG_ON(parent == 0);
+int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
+                           struct btrfs_root *root, u64 type)
+{
+       u64 alloc_flags = get_alloc_profile(root, type);
+       return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
+                             CHUNK_ALLOC_FORCE);
+}
 
-               key.objectid = bytenr;
-               key.offset = parent + 1;
-               key.type = BTRFS_EXTENT_REF_KEY;
+/*
+ * helper to account the unused space of all the readonly block group in the
+ * list. takes mirrors into account.
+ */
+static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
+{
+       struct btrfs_block_group_cache *block_group;
+       u64 free_bytes = 0;
+       int factor;
 
-               ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
-               if (ret < 0)
-                       goto out;
-               BUG_ON(ret == 0);
+       list_for_each_entry(block_group, groups_list, list) {
+               spin_lock(&block_group->lock);
 
-               leaf = path->nodes[0];
-               nritems = btrfs_header_nritems(leaf);
-               if (path->slots[0] >= nritems) {
-                       ret = btrfs_next_leaf(extent_root, path);
-                       if (ret < 0)
-                               goto out;
-                       if (ret > 0)
-                               goto next;
-                       leaf = path->nodes[0];
+               if (!block_group->ro) {
+                       spin_unlock(&block_group->lock);
+                       continue;
                }
 
-               btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-               if (found_key.objectid == bytenr &&
-                   found_key.type == BTRFS_EXTENT_REF_KEY) {
-                       if (level < ref_path->shared_level)
-                               ref_path->shared_level = level;
-                       goto found;
-               }
-next:
-               level--;
-               btrfs_release_path(extent_root, path);
-               cond_resched();
-       }
-       /* reached lowest level */
-       ret = 1;
-       goto out;
-walk_up:
-       level = ref_path->current_level;
-       while (level < BTRFS_MAX_LEVEL - 1) {
-               u64 ref_objectid;
-
-               if (level >= 0)
-                       bytenr = ref_path->nodes[level];
+               if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
+                                         BTRFS_BLOCK_GROUP_RAID10 |
+                                         BTRFS_BLOCK_GROUP_DUP))
+                       factor = 2;
                else
-                       bytenr = ref_path->extent_start;
+                       factor = 1;
 
-               BUG_ON(bytenr == 0);
+               free_bytes += (block_group->key.offset -
+                              btrfs_block_group_used(&block_group->item)) *
+                              factor;
 
-               key.objectid = bytenr;
-               key.offset = 0;
-               key.type = BTRFS_EXTENT_REF_KEY;
+               spin_unlock(&block_group->lock);
+       }
 
-               ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
-               if (ret < 0)
-                       goto out;
-
-               leaf = path->nodes[0];
-               nritems = btrfs_header_nritems(leaf);
-               if (path->slots[0] >= nritems) {
-                       ret = btrfs_next_leaf(extent_root, path);
-                       if (ret < 0)
-                               goto out;
-                       if (ret > 0) {
-                               /* the extent was freed by someone */
-                               if (ref_path->lowest_level == level)
-                                       goto out;
-                               btrfs_release_path(extent_root, path);
-                               goto walk_down;
-                       }
-                       leaf = path->nodes[0];
-               }
-
-               btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-               if (found_key.objectid != bytenr ||
-                               found_key.type != BTRFS_EXTENT_REF_KEY) {
-                       /* the extent was freed by someone */
-                       if (ref_path->lowest_level == level) {
-                               ret = 1;
-                               goto out;
-                       }
-                       btrfs_release_path(extent_root, path);
-                       goto walk_down;
-               }
-found:
-               ref = btrfs_item_ptr(leaf, path->slots[0],
-                               struct btrfs_extent_ref);
-               ref_objectid = btrfs_ref_objectid(leaf, ref);
-               if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
-                       if (first_time) {
-                               level = (int)ref_objectid;
-                               BUG_ON(level >= BTRFS_MAX_LEVEL);
-                               ref_path->lowest_level = level;
-                               ref_path->current_level = level;
-                               ref_path->nodes[level] = bytenr;
-                       } else {
-                               WARN_ON(ref_objectid != level);
-                       }
-               } else {
-                       WARN_ON(level != -1);
-               }
-               first_time = 0;
-
-               if (ref_path->lowest_level == level) {
-                       ref_path->owner_objectid = ref_objectid;
-                       ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
-               }
-
-               /*
-                * the block is tree root or the block isn't in reference
-                * counted tree.
-                */
-               if (found_key.objectid == found_key.offset ||
-                   is_cowonly_root(btrfs_ref_root(leaf, ref))) {
-                       ref_path->root_objectid = btrfs_ref_root(leaf, ref);
-                       ref_path->root_generation =
-                               btrfs_ref_generation(leaf, ref);
-                       if (level < 0) {
-                               /* special reference from the tree log */
-                               ref_path->nodes[0] = found_key.offset;
-                               ref_path->current_level = 0;
-                       }
-                       ret = 0;
-                       goto out;
-               }
-
-               level++;
-               BUG_ON(ref_path->nodes[level] != 0);
-               ref_path->nodes[level] = found_key.offset;
-               ref_path->current_level = level;
-
-               /*
-                * the reference was created in the running transaction,
-                * no need to continue walking up.
-                */
-               if (btrfs_ref_generation(leaf, ref) == trans->transid) {
-                       ref_path->root_objectid = btrfs_ref_root(leaf, ref);
-                       ref_path->root_generation =
-                               btrfs_ref_generation(leaf, ref);
-                       ret = 0;
-                       goto out;
-               }
-
-               btrfs_release_path(extent_root, path);
-               cond_resched();
-       }
-       /* reached max tree level, but no tree root found. */
-       BUG();
-out:
-       btrfs_free_path(path);
-       return ret;
-}
-
-static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
-                               struct btrfs_root *extent_root,
-                               struct btrfs_ref_path *ref_path,
-                               u64 extent_start)
-{
-       memset(ref_path, 0, sizeof(*ref_path));
-       ref_path->extent_start = extent_start;
-
-       return __next_ref_path(trans, extent_root, ref_path, 1);
-}
-
-static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
-                              struct btrfs_root *extent_root,
-                              struct btrfs_ref_path *ref_path)
-{
-       return __next_ref_path(trans, extent_root, ref_path, 0);
-}
-
-static noinline int get_new_locations(struct inode *reloc_inode,
-                                     struct btrfs_key *extent_key,
-                                     u64 offset, int no_fragment,
-                                     struct disk_extent **extents,
-                                     int *nr_extents)
-{
-       struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
-       struct btrfs_path *path;
-       struct btrfs_file_extent_item *fi;
-       struct extent_buffer *leaf;
-       struct disk_extent *exts = *extents;
-       struct btrfs_key found_key;
-       u64 cur_pos;
-       u64 last_byte;
-       u32 nritems;
-       int nr = 0;
-       int max = *nr_extents;
-       int ret;
-
-       WARN_ON(!no_fragment && *extents);
-       if (!exts) {
-               max = 1;
-               exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
-               if (!exts)
-                       return -ENOMEM;
-       }
-
-       path = btrfs_alloc_path();
-       if (!path) {
-               if (exts != *extents)
-                       kfree(exts);
-               return -ENOMEM;
-       }
-
-       cur_pos = extent_key->objectid - offset;
-       last_byte = extent_key->objectid + extent_key->offset;
-       ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
-                                      cur_pos, 0);
-       if (ret < 0)
-               goto out;
-       if (ret > 0) {
-               ret = -ENOENT;
-               goto out;
-       }
-
-       while (1) {
-               leaf = path->nodes[0];
-               nritems = btrfs_header_nritems(leaf);
-               if (path->slots[0] >= nritems) {
-                       ret = btrfs_next_leaf(root, path);
-                       if (ret < 0)
-                               goto out;
-                       if (ret > 0)
-                               break;
-                       leaf = path->nodes[0];
-               }
-
-               btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
-               if (found_key.offset != cur_pos ||
-                   found_key.type != BTRFS_EXTENT_DATA_KEY ||
-                   found_key.objectid != reloc_inode->i_ino)
-                       break;
-
-               fi = btrfs_item_ptr(leaf, path->slots[0],
-                                   struct btrfs_file_extent_item);
-               if (btrfs_file_extent_type(leaf, fi) !=
-                   BTRFS_FILE_EXTENT_REG ||
-                   btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
-                       break;
-
-               if (nr == max) {
-                       struct disk_extent *old = exts;
-                       max *= 2;
-                       exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
-                       if (!exts) {
-                               ret = -ENOMEM;
-                               goto out;
-                       }
-                       memcpy(exts, old, sizeof(*exts) * nr);
-                       if (old != *extents)
-                               kfree(old);
-               }
-
-               exts[nr].disk_bytenr =
-                       btrfs_file_extent_disk_bytenr(leaf, fi);
-               exts[nr].disk_num_bytes =
-                       btrfs_file_extent_disk_num_bytes(leaf, fi);
-               exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
-               exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
-               exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
-               exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
-               exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
-               exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
-                                                                          fi);
-               BUG_ON(exts[nr].offset > 0);
-               BUG_ON(exts[nr].compression || exts[nr].encryption);
-               BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
-
-               cur_pos += exts[nr].num_bytes;
-               nr++;
-
-               if (cur_pos + offset >= last_byte)
-                       break;
-
-               if (no_fragment) {
-                       ret = 1;
-                       goto out;
-               }
-               path->slots[0]++;
-       }
-
-       BUG_ON(cur_pos + offset > last_byte);
-       if (cur_pos + offset < last_byte) {
-               ret = -ENOENT;
-               goto out;
-       }
-       ret = 0;
-out:
-       btrfs_free_path(path);
-       if (ret) {
-               if (exts != *extents)
-                       kfree(exts);
-       } else {
-               *extents = exts;
-               *nr_extents = nr;
-       }
-       return ret;
-}
-
-static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
-                                       struct btrfs_root *root,
-                                       struct btrfs_path *path,
-                                       struct btrfs_key *extent_key,
-                                       struct btrfs_key *leaf_key,
-                                       struct btrfs_ref_path *ref_path,
-                                       struct disk_extent *new_extents,
-                                       int nr_extents)
-{
-       struct extent_buffer *leaf;
-       struct btrfs_file_extent_item *fi;
-       struct inode *inode = NULL;
-       struct btrfs_key key;
-       u64 lock_start = 0;
-       u64 lock_end = 0;
-       u64 num_bytes;
-       u64 ext_offset;
-       u64 search_end = (u64)-1;
-       u32 nritems;
-       int nr_scaned = 0;
-       int extent_locked = 0;
-       int extent_type;
-       int ret;
-
-       memcpy(&key, leaf_key, sizeof(key));
-       if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
-               if (key.objectid < ref_path->owner_objectid ||
-                   (key.objectid == ref_path->owner_objectid &&
-                    key.type < BTRFS_EXTENT_DATA_KEY)) {
-                       key.objectid = ref_path->owner_objectid;
-                       key.type = BTRFS_EXTENT_DATA_KEY;
-                       key.offset = 0;
-               }
-       }
-
-       while (1) {
-               ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
-               if (ret < 0)
-                       goto out;
-
-               leaf = path->nodes[0];
-               nritems = btrfs_header_nritems(leaf);
-next:
-               if (extent_locked && ret > 0) {
-                       /*
-                        * the file extent item was modified by someone
-                        * before the extent got locked.
-                        */
-                       unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
-                                     lock_end, GFP_NOFS);
-                       extent_locked = 0;
-               }
-
-               if (path->slots[0] >= nritems) {
-                       if (++nr_scaned > 2)
-                               break;
-
-                       BUG_ON(extent_locked);
-                       ret = btrfs_next_leaf(root, path);
-                       if (ret < 0)
-                               goto out;
-                       if (ret > 0)
-                               break;
-                       leaf = path->nodes[0];
-                       nritems = btrfs_header_nritems(leaf);
-               }
-
-               btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-
-               if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
-                       if ((key.objectid > ref_path->owner_objectid) ||
-                           (key.objectid == ref_path->owner_objectid &&
-                            key.type > BTRFS_EXTENT_DATA_KEY) ||
-                           key.offset >= search_end)
-                               break;
-               }
-
-               if (inode && key.objectid != inode->i_ino) {
-                       BUG_ON(extent_locked);
-                       btrfs_release_path(root, path);
-                       mutex_unlock(&inode->i_mutex);
-                       iput(inode);
-                       inode = NULL;
-                       continue;
-               }
-
-               if (key.type != BTRFS_EXTENT_DATA_KEY) {
-                       path->slots[0]++;
-                       ret = 1;
-                       goto next;
-               }
-               fi = btrfs_item_ptr(leaf, path->slots[0],
-                                   struct btrfs_file_extent_item);
-               extent_type = btrfs_file_extent_type(leaf, fi);
-               if ((extent_type != BTRFS_FILE_EXTENT_REG &&
-                    extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
-                   (btrfs_file_extent_disk_bytenr(leaf, fi) !=
-                    extent_key->objectid)) {
-                       path->slots[0]++;
-                       ret = 1;
-                       goto next;
-               }
-
-               num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
-               ext_offset = btrfs_file_extent_offset(leaf, fi);
-
-               if (search_end == (u64)-1) {
-                       search_end = key.offset - ext_offset +
-                               btrfs_file_extent_ram_bytes(leaf, fi);
-               }
-
-               if (!extent_locked) {
-                       lock_start = key.offset;
-                       lock_end = lock_start + num_bytes - 1;
-               } else {
-                       if (lock_start > key.offset ||
-                           lock_end + 1 < key.offset + num_bytes) {
-                               unlock_extent(&BTRFS_I(inode)->io_tree,
-                                             lock_start, lock_end, GFP_NOFS);
-                               extent_locked = 0;
-                       }
-               }
-
-               if (!inode) {
-                       btrfs_release_path(root, path);
-
-                       inode = btrfs_iget_locked(root->fs_info->sb,
-                                                 key.objectid, root);
-                       if (inode->i_state & I_NEW) {
-                               BTRFS_I(inode)->root = root;
-                               BTRFS_I(inode)->location.objectid =
-                                       key.objectid;
-                               BTRFS_I(inode)->location.type =
-                                       BTRFS_INODE_ITEM_KEY;
-                               BTRFS_I(inode)->location.offset = 0;
-                               btrfs_read_locked_inode(inode);
-                               unlock_new_inode(inode);
-                       }
-                       /*
-                        * some code call btrfs_commit_transaction while
-                        * holding the i_mutex, so we can't use mutex_lock
-                        * here.
-                        */
-                       if (is_bad_inode(inode) ||
-                           !mutex_trylock(&inode->i_mutex)) {
-                               iput(inode);
-                               inode = NULL;
-                               key.offset = (u64)-1;
-                               goto skip;
-                       }
-               }
-
-               if (!extent_locked) {
-                       struct btrfs_ordered_extent *ordered;
-
-                       btrfs_release_path(root, path);
-
-                       lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
-                                   lock_end, GFP_NOFS);
-                       ordered = btrfs_lookup_first_ordered_extent(inode,
-                                                                   lock_end);
-                       if (ordered &&
-                           ordered->file_offset <= lock_end &&
-                           ordered->file_offset + ordered->len > lock_start) {
-                               unlock_extent(&BTRFS_I(inode)->io_tree,
-                                             lock_start, lock_end, GFP_NOFS);
-                               btrfs_start_ordered_extent(inode, ordered, 1);
-                               btrfs_put_ordered_extent(ordered);
-                               key.offset += num_bytes;
-                               goto skip;
-                       }
-                       if (ordered)
-                               btrfs_put_ordered_extent(ordered);
-
-                       extent_locked = 1;
-                       continue;
-               }
-
-               if (nr_extents == 1) {
-                       /* update extent pointer in place */
-                       btrfs_set_file_extent_disk_bytenr(leaf, fi,
-                                               new_extents[0].disk_bytenr);
-                       btrfs_set_file_extent_disk_num_bytes(leaf, fi,
-                                               new_extents[0].disk_num_bytes);
-                       btrfs_mark_buffer_dirty(leaf);
-
-                       btrfs_drop_extent_cache(inode, key.offset,
-                                               key.offset + num_bytes - 1, 0);
-
-                       ret = btrfs_inc_extent_ref(trans, root,
-                                               new_extents[0].disk_bytenr,
-                                               new_extents[0].disk_num_bytes,
-                                               leaf->start,
-                                               root->root_key.objectid,
-                                               trans->transid,
-                                               key.objectid);
-                       BUG_ON(ret);
-
-                       ret = btrfs_free_extent(trans, root,
-                                               extent_key->objectid,
-                                               extent_key->offset,
-                                               leaf->start,
-                                               btrfs_header_owner(leaf),
-                                               btrfs_header_generation(leaf),
-                                               key.objectid, 0);
-                       BUG_ON(ret);
-
-                       btrfs_release_path(root, path);
-                       key.offset += num_bytes;
-               } else {
-                       BUG_ON(1);
-#if 0
-                       u64 alloc_hint;
-                       u64 extent_len;
-                       int i;
-                       /*
-                        * drop the old extent pointer first, then insert the
-                        * new pointers one by one
-                        */
-                       btrfs_release_path(root, path);
-                       ret = btrfs_drop_extents(trans, root, inode, key.offset,
-                                                key.offset + num_bytes,
-                                                key.offset, &alloc_hint);
-                       BUG_ON(ret);
-
-                       for (i = 0; i < nr_extents; i++) {
-                               if (ext_offset >= new_extents[i].num_bytes) {
-                                       ext_offset -= new_extents[i].num_bytes;
-                                       continue;
-                               }
-                               extent_len = min(new_extents[i].num_bytes -
-                                                ext_offset, num_bytes);
-
-                               ret = btrfs_insert_empty_item(trans, root,
-                                                             path, &key,
-                                                             sizeof(*fi));
-                               BUG_ON(ret);
-
-                               leaf = path->nodes[0];
-                               fi = btrfs_item_ptr(leaf, path->slots[0],
-                                               struct btrfs_file_extent_item);
-                               btrfs_set_file_extent_generation(leaf, fi,
-                                                       trans->transid);
-                               btrfs_set_file_extent_type(leaf, fi,
-                                                       BTRFS_FILE_EXTENT_REG);
-                               btrfs_set_file_extent_disk_bytenr(leaf, fi,
-                                               new_extents[i].disk_bytenr);
-                               btrfs_set_file_extent_disk_num_bytes(leaf, fi,
-                                               new_extents[i].disk_num_bytes);
-                               btrfs_set_file_extent_ram_bytes(leaf, fi,
-                                               new_extents[i].ram_bytes);
-
-                               btrfs_set_file_extent_compression(leaf, fi,
-                                               new_extents[i].compression);
-                               btrfs_set_file_extent_encryption(leaf, fi,
-                                               new_extents[i].encryption);
-                               btrfs_set_file_extent_other_encoding(leaf, fi,
-                                               new_extents[i].other_encoding);
-
-                               btrfs_set_file_extent_num_bytes(leaf, fi,
-                                                       extent_len);
-                               ext_offset += new_extents[i].offset;
-                               btrfs_set_file_extent_offset(leaf, fi,
-                                                       ext_offset);
-                               btrfs_mark_buffer_dirty(leaf);
-
-                               btrfs_drop_extent_cache(inode, key.offset,
-                                               key.offset + extent_len - 1, 0);
-
-                               ret = btrfs_inc_extent_ref(trans, root,
-                                               new_extents[i].disk_bytenr,
-                                               new_extents[i].disk_num_bytes,
-                                               leaf->start,
-                                               root->root_key.objectid,
-                                               trans->transid, key.objectid);
-                               BUG_ON(ret);
-                               btrfs_release_path(root, path);
-
-                               inode_add_bytes(inode, extent_len);
-
-                               ext_offset = 0;
-                               num_bytes -= extent_len;
-                               key.offset += extent_len;
-
-                               if (num_bytes == 0)
-                                       break;
-                       }
-                       BUG_ON(i >= nr_extents);
-#endif
-               }
-
-               if (extent_locked) {
-                       unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
-                                     lock_end, GFP_NOFS);
-                       extent_locked = 0;
-               }
-skip:
-               if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
-                   key.offset >= search_end)
-                       break;
-
-               cond_resched();
-       }
-       ret = 0;
-out:
-       btrfs_release_path(root, path);
-       if (inode) {
-               mutex_unlock(&inode->i_mutex);
-               if (extent_locked) {
-                       unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
-                                     lock_end, GFP_NOFS);
-               }
-               iput(inode);
-       }
-       return ret;
-}
-
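
The function removed above (replace_one_extent() in the old relocation code) works around a lock-ordering problem: other btrfs paths may commit a transaction while holding an inode's i_mutex, so the relocation code only mutex_trylock()s i_mutex and skips the inode when the lock is contended instead of blocking. A minimal user-space sketch of that trylock-and-skip pattern; pthread mutexes stand in for i_mutex and process_inode() is a made-up name:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t i_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Returns 0 if the inode was processed, -1 if it was skipped because
 * someone else holds the lock (and might in turn be waiting on us). */
static int process_inode(void)
{
        if (pthread_mutex_trylock(&i_mutex) != 0)
                return -1;              /* would risk a deadlock: skip */
        /* ... work that requires i_mutex ... */
        pthread_mutex_unlock(&i_mutex);
        return 0;
}

int main(void)
{
        printf("uncontended: %d\n", process_inode());    /* 0: processed */
        pthread_mutex_lock(&i_mutex);                    /* simulate a holder */
        printf("contended:   %d\n", process_inode());    /* -1: skipped */
        pthread_mutex_unlock(&i_mutex);
        return 0;
}
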
-int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
-                              struct btrfs_root *root,
-                              struct extent_buffer *buf, u64 orig_start)
-{
-       int level;
-       int ret;
-
-       BUG_ON(btrfs_header_generation(buf) != trans->transid);
-       BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
-
-       level = btrfs_header_level(buf);
-       if (level == 0) {
-               struct btrfs_leaf_ref *ref;
-               struct btrfs_leaf_ref *orig_ref;
-
-               orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
-               if (!orig_ref)
-                       return -ENOENT;
-
-               ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
-               if (!ref) {
-                       btrfs_free_leaf_ref(root, orig_ref);
-                       return -ENOMEM;
-               }
-
-               ref->nritems = orig_ref->nritems;
-               memcpy(ref->extents, orig_ref->extents,
-                       sizeof(ref->extents[0]) * ref->nritems);
-
-               btrfs_free_leaf_ref(root, orig_ref);
-
-               ref->root_gen = trans->transid;
-               ref->bytenr = buf->start;
-               ref->owner = btrfs_header_owner(buf);
-               ref->generation = btrfs_header_generation(buf);
-
-               ret = btrfs_add_leaf_ref(root, ref, 0);
-               WARN_ON(ret);
-               btrfs_free_leaf_ref(root, ref);
-       }
-       return 0;
-}
-
-static noinline int invalidate_extent_cache(struct btrfs_root *root,
-                                       struct extent_buffer *leaf,
-                                       struct btrfs_block_group_cache *group,
-                                       struct btrfs_root *target_root)
-{
-       struct btrfs_key key;
-       struct inode *inode = NULL;
-       struct btrfs_file_extent_item *fi;
-       struct extent_state *cached_state = NULL;
-       u64 num_bytes;
-       u64 skip_objectid = 0;
-       u32 nritems;
-       u32 i;
-
-       nritems = btrfs_header_nritems(leaf);
-       for (i = 0; i < nritems; i++) {
-               btrfs_item_key_to_cpu(leaf, &key, i);
-               if (key.objectid == skip_objectid ||
-                   key.type != BTRFS_EXTENT_DATA_KEY)
-                       continue;
-               fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
-               if (btrfs_file_extent_type(leaf, fi) ==
-                   BTRFS_FILE_EXTENT_INLINE)
-                       continue;
-               if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
-                       continue;
-               if (!inode || inode->i_ino != key.objectid) {
-                       iput(inode);
-                       inode = btrfs_ilookup(target_root->fs_info->sb,
-                                             key.objectid, target_root, 1);
-               }
-               if (!inode) {
-                       skip_objectid = key.objectid;
-                       continue;
-               }
-               num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
-
-               lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
-                                key.offset + num_bytes - 1, 0, &cached_state,
-                                GFP_NOFS);
-               btrfs_drop_extent_cache(inode, key.offset,
-                                       key.offset + num_bytes - 1, 1);
-               unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
-                                    key.offset + num_bytes - 1, &cached_state,
-                                    GFP_NOFS);
-               cond_resched();
-       }
-       iput(inode);
-       return 0;
-}
-
-static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
-                                       struct btrfs_root *root,
-                                       struct extent_buffer *leaf,
-                                       struct btrfs_block_group_cache *group,
-                                       struct inode *reloc_inode)
-{
-       struct btrfs_key key;
-       struct btrfs_key extent_key;
-       struct btrfs_file_extent_item *fi;
-       struct btrfs_leaf_ref *ref;
-       struct disk_extent *new_extent;
-       u64 bytenr;
-       u64 num_bytes;
-       u32 nritems;
-       u32 i;
-       int ext_index;
-       int nr_extent;
-       int ret;
-
-       new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
-       if (!new_extent)
-               return -ENOMEM;
-
-       ref = btrfs_lookup_leaf_ref(root, leaf->start);
-       BUG_ON(!ref);
-
-       ext_index = -1;
-       nritems = btrfs_header_nritems(leaf);
-       for (i = 0; i < nritems; i++) {
-               btrfs_item_key_to_cpu(leaf, &key, i);
-               if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
-                       continue;
-               fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
-               if (btrfs_file_extent_type(leaf, fi) ==
-                   BTRFS_FILE_EXTENT_INLINE)
-                       continue;
-               bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
-               num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
-               if (bytenr == 0)
-                       continue;
-
-               ext_index++;
-               if (bytenr >= group->key.objectid + group->key.offset ||
-                   bytenr + num_bytes <= group->key.objectid)
-                       continue;
-
-               extent_key.objectid = bytenr;
-               extent_key.offset = num_bytes;
-               extent_key.type = BTRFS_EXTENT_ITEM_KEY;
-               nr_extent = 1;
-               ret = get_new_locations(reloc_inode, &extent_key,
-                                       group->key.objectid, 1,
-                                       &new_extent, &nr_extent);
-               if (ret > 0)
-                       continue;
-               BUG_ON(ret < 0);
-
-               BUG_ON(ref->extents[ext_index].bytenr != bytenr);
-               BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
-               ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
-               ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
-
-               btrfs_set_file_extent_disk_bytenr(leaf, fi,
-                                               new_extent->disk_bytenr);
-               btrfs_set_file_extent_disk_num_bytes(leaf, fi,
-                                               new_extent->disk_num_bytes);
-               btrfs_mark_buffer_dirty(leaf);
-
-               ret = btrfs_inc_extent_ref(trans, root,
-                                       new_extent->disk_bytenr,
-                                       new_extent->disk_num_bytes,
-                                       leaf->start,
-                                       root->root_key.objectid,
-                                       trans->transid, key.objectid);
-               BUG_ON(ret);
-
-               ret = btrfs_free_extent(trans, root,
-                                       bytenr, num_bytes, leaf->start,
-                                       btrfs_header_owner(leaf),
-                                       btrfs_header_generation(leaf),
-                                       key.objectid, 0);
-               BUG_ON(ret);
-               cond_resched();
-       }
-       kfree(new_extent);
-       BUG_ON(ext_index + 1 != ref->nritems);
-       btrfs_free_leaf_ref(root, ref);
-       return 0;
-}
-
-int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
-                         struct btrfs_root *root)
-{
-       struct btrfs_root *reloc_root;
-       int ret;
-
-       if (root->reloc_root) {
-               reloc_root = root->reloc_root;
-               root->reloc_root = NULL;
-               list_add(&reloc_root->dead_list,
-                        &root->fs_info->dead_reloc_roots);
-
-               btrfs_set_root_bytenr(&reloc_root->root_item,
-                                     reloc_root->node->start);
-               btrfs_set_root_level(&root->root_item,
-                                    btrfs_header_level(reloc_root->node));
-               memset(&reloc_root->root_item.drop_progress, 0,
-                       sizeof(struct btrfs_disk_key));
-               reloc_root->root_item.drop_level = 0;
-
-               ret = btrfs_update_root(trans, root->fs_info->tree_root,
-                                       &reloc_root->root_key,
-                                       &reloc_root->root_item);
-               BUG_ON(ret);
-       }
-       return 0;
-}
-
-int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
-{
-       struct btrfs_trans_handle *trans;
-       struct btrfs_root *reloc_root;
-       struct btrfs_root *prev_root = NULL;
-       struct list_head dead_roots;
-       int ret;
-       unsigned long nr;
-
-       INIT_LIST_HEAD(&dead_roots);
-       list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
-
-       while (!list_empty(&dead_roots)) {
-               reloc_root = list_entry(dead_roots.prev,
-                                       struct btrfs_root, dead_list);
-               list_del_init(&reloc_root->dead_list);
-
-               BUG_ON(reloc_root->commit_root != NULL);
-               while (1) {
-                       trans = btrfs_join_transaction(root, 1);
-                       BUG_ON(IS_ERR(trans));
-
-                       mutex_lock(&root->fs_info->drop_mutex);
-                       ret = btrfs_drop_snapshot(trans, reloc_root);
-                       if (ret != -EAGAIN)
-                               break;
-                       mutex_unlock(&root->fs_info->drop_mutex);
-
-                       nr = trans->blocks_used;
-                       ret = btrfs_end_transaction(trans, root);
-                       BUG_ON(ret);
-                       btrfs_btree_balance_dirty(root, nr);
-               }
-
-               free_extent_buffer(reloc_root->node);
-
-               ret = btrfs_del_root(trans, root->fs_info->tree_root,
-                                    &reloc_root->root_key);
-               BUG_ON(ret);
-               mutex_unlock(&root->fs_info->drop_mutex);
-
-               nr = trans->blocks_used;
-               ret = btrfs_end_transaction(trans, root);
-               BUG_ON(ret);
-               btrfs_btree_balance_dirty(root, nr);
-
-               kfree(prev_root);
-               prev_root = reloc_root;
-       }
-       if (prev_root) {
-               btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
-               kfree(prev_root);
-       }
-       return 0;
-}
-
-int btrfs_add_dead_reloc_root(struct btrfs_root *root)
-{
-       list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
-       return 0;
-}
-
-int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
-{
-       struct btrfs_root *reloc_root;
-       struct btrfs_trans_handle *trans;
-       struct btrfs_key location;
-       int found;
-       int ret;
-
-       mutex_lock(&root->fs_info->tree_reloc_mutex);
-       ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
-       BUG_ON(ret);
-       found = !list_empty(&root->fs_info->dead_reloc_roots);
-       mutex_unlock(&root->fs_info->tree_reloc_mutex);
-
-       if (found) {
-               trans = btrfs_start_transaction(root, 1);
-               BUG_ON(IS_ERR(trans));
-               ret = btrfs_commit_transaction(trans, root);
-               BUG_ON(ret);
-       }
-
-       location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
-       location.offset = (u64)-1;
-       location.type = BTRFS_ROOT_ITEM_KEY;
-
-       reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
-       BUG_ON(!reloc_root);
-       ret = btrfs_orphan_cleanup(reloc_root);
-       BUG_ON(ret);
-       return 0;
-}
-
-static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
-                                   struct btrfs_root *root)
-{
-       struct btrfs_root *reloc_root;
-       struct extent_buffer *eb;
-       struct btrfs_root_item *root_item;
-       struct btrfs_key root_key;
-       int ret;
-
-       BUG_ON(!root->ref_cows);
-       if (root->reloc_root)
-               return 0;
-
-       root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
-       if (!root_item)
-               return -ENOMEM;
-
-       ret = btrfs_copy_root(trans, root, root->commit_root,
-                             &eb, BTRFS_TREE_RELOC_OBJECTID);
-       BUG_ON(ret);
-
-       root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
-       root_key.offset = root->root_key.objectid;
-       root_key.type = BTRFS_ROOT_ITEM_KEY;
-
-       memcpy(root_item, &root->root_item, sizeof(root_item));
-       btrfs_set_root_refs(root_item, 0);
-       btrfs_set_root_bytenr(root_item, eb->start);
-       btrfs_set_root_level(root_item, btrfs_header_level(eb));
-       btrfs_set_root_generation(root_item, trans->transid);
-
-       btrfs_tree_unlock(eb);
-       free_extent_buffer(eb);
-
-       ret = btrfs_insert_root(trans, root->fs_info->tree_root,
-                               &root_key, root_item);
-       BUG_ON(ret);
-       kfree(root_item);
-
-       reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
-                                                &root_key);
-       BUG_ON(IS_ERR(reloc_root));
-       reloc_root->last_trans = trans->transid;
-       reloc_root->commit_root = NULL;
-       reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
-
-       root->reloc_root = reloc_root;
-       return 0;
-}
-
-/*
- * Core function of space balance.
- *
- * The idea is to use reloc trees to relocate tree blocks in reference
- * counted roots. There is one reloc tree for each subvol, and all
- * reloc trees share the same root key objectid. Reloc trees are
- * snapshots of the latest committed roots of subvols (root->commit_root).
- *
- * To relocate a tree block referenced by a subvol, there are two steps:
- * COW the block through the subvol's reloc tree, then update the block
- * pointer in the subvol to point to the new block. Since all reloc trees
- * share the same root key objectid, special handling for tree blocks
- * owned by them is easy. Once a tree block has been COWed in one reloc
- * tree, the resulting new block can be used directly when the same block
- * needs to be COWed again through another reloc tree. This way, relocated
- * tree blocks are shared between reloc trees, and therefore also shared
- * between subvols.
- */
-static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
-                                     struct btrfs_root *root,
-                                     struct btrfs_path *path,
-                                     struct btrfs_key *first_key,
-                                     struct btrfs_ref_path *ref_path,
-                                     struct btrfs_block_group_cache *group,
-                                     struct inode *reloc_inode)
-{
-       struct btrfs_root *reloc_root;
-       struct extent_buffer *eb = NULL;
-       struct btrfs_key *keys;
-       u64 *nodes;
-       int level;
-       int shared_level;
-       int lowest_level = 0;
-       int ret;
-
-       if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
-               lowest_level = ref_path->owner_objectid;
-
-       if (!root->ref_cows) {
-               path->lowest_level = lowest_level;
-               ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
-               BUG_ON(ret < 0);
-               path->lowest_level = 0;
-               btrfs_release_path(root, path);
-               return 0;
-       }
-
-       mutex_lock(&root->fs_info->tree_reloc_mutex);
-       ret = init_reloc_tree(trans, root);
-       BUG_ON(ret);
-       reloc_root = root->reloc_root;
-
-       shared_level = ref_path->shared_level;
-       ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
-
-       keys = ref_path->node_keys;
-       nodes = ref_path->new_nodes;
-       memset(&keys[shared_level + 1], 0,
-              sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
-       memset(&nodes[shared_level + 1], 0,
-              sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
-
-       if (nodes[lowest_level] == 0) {
-               path->lowest_level = lowest_level;
-               ret = btrfs_search_slot(trans, reloc_root, first_key, path,
-                                       0, 1);
-               BUG_ON(ret);
-               for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
-                       eb = path->nodes[level];
-                       if (!eb || eb == reloc_root->node)
-                               break;
-                       nodes[level] = eb->start;
-                       if (level == 0)
-                               btrfs_item_key_to_cpu(eb, &keys[level], 0);
-                       else
-                               btrfs_node_key_to_cpu(eb, &keys[level], 0);
-               }
-               if (nodes[0] &&
-                   ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
-                       eb = path->nodes[0];
-                       ret = replace_extents_in_leaf(trans, reloc_root, eb,
-                                                     group, reloc_inode);
-                       BUG_ON(ret);
-               }
-               btrfs_release_path(reloc_root, path);
-       } else {
-               ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
-                                      lowest_level);
-               BUG_ON(ret);
-       }
-
-       /*
-        * replace tree blocks in the fs tree with tree blocks in
-        * the reloc tree.
-        */
-       ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
-       BUG_ON(ret < 0);
-
-       if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
-               ret = btrfs_search_slot(trans, reloc_root, first_key, path,
-                                       0, 0);
-               BUG_ON(ret);
-               extent_buffer_get(path->nodes[0]);
-               eb = path->nodes[0];
-               btrfs_release_path(reloc_root, path);
-               ret = invalidate_extent_cache(reloc_root, eb, group, root);
-               BUG_ON(ret);
-               free_extent_buffer(eb);
-       }
-
-       mutex_unlock(&root->fs_info->tree_reloc_mutex);
-       path->lowest_level = 0;
-       return 0;
-}
-
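
The block comment above relocate_one_path() describes the key property of the reloc-tree scheme removed in this diff: once a tree block has been COWed through one reloc tree, the new copy is reused whenever another reloc tree needs to COW the same block, so relocated blocks stay shared between subvols. A toy user-space sketch of that sharing idea only, not btrfs code; struct reloc_map, relocate_block() and the addresses are all made up:

#include <stdio.h>

#define MAX_BLOCKS 16

struct reloc_map {                      /* shared by all "reloc trees" */
        unsigned long orig[MAX_BLOCKS];
        unsigned long copy[MAX_BLOCKS];
        int nr;
        unsigned long next_free;        /* where the next copy would land */
};

static unsigned long relocate_block(struct reloc_map *map, unsigned long bytenr)
{
        /* reuse an existing copy if any reloc tree already COWed this block */
        for (int i = 0; i < map->nr; i++)
                if (map->orig[i] == bytenr)
                        return map->copy[i];

        /* otherwise "COW" it to a new location and remember the mapping
         * (no bounds checking: toy only) */
        map->orig[map->nr] = bytenr;
        map->copy[map->nr] = map->next_free;
        map->next_free += 4096;
        return map->copy[map->nr++];
}

int main(void)
{
        struct reloc_map map = { .nr = 0, .next_free = 0x100000 };

        /* two subvols referencing the same block share one relocated copy */
        printf("subvol A: %#lx -> %#lx\n", 0x5000UL, relocate_block(&map, 0x5000));
        printf("subvol B: %#lx -> %#lx\n", 0x5000UL, relocate_block(&map, 0x5000));
        return 0;
}
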
-static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
-                                       struct btrfs_root *root,
-                                       struct btrfs_path *path,
-                                       struct btrfs_key *first_key,
-                                       struct btrfs_ref_path *ref_path)
-{
-       int ret;
-
-       ret = relocate_one_path(trans, root, path, first_key,
-                               ref_path, NULL, NULL);
-       BUG_ON(ret);
-
-       return 0;
-}
-
-static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
-                                   struct btrfs_root *extent_root,
-                                   struct btrfs_path *path,
-                                   struct btrfs_key *extent_key)
-{
-       int ret;
-
-       ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
-       if (ret)
-               goto out;
-       ret = btrfs_del_item(trans, extent_root, path);
-out:
-       btrfs_release_path(extent_root, path);
-       return ret;
-}
-
-static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
-                                               struct btrfs_ref_path *ref_path)
-{
-       struct btrfs_key root_key;
-
-       root_key.objectid = ref_path->root_objectid;
-       root_key.type = BTRFS_ROOT_ITEM_KEY;
-       if (is_cowonly_root(ref_path->root_objectid))
-               root_key.offset = 0;
-       else
-               root_key.offset = (u64)-1;
-
-       return btrfs_read_fs_root_no_name(fs_info, &root_key);
-}
-
-static noinline int relocate_one_extent(struct btrfs_root *extent_root,
-                                       struct btrfs_path *path,
-                                       struct btrfs_key *extent_key,
-                                       struct btrfs_block_group_cache *group,
-                                       struct inode *reloc_inode, int pass)
-{
-       struct btrfs_trans_handle *trans;
-       struct btrfs_root *found_root;
-       struct btrfs_ref_path *ref_path = NULL;
-       struct disk_extent *new_extents = NULL;
-       int nr_extents = 0;
-       int loops;
-       int ret;
-       int level;
-       struct btrfs_key first_key;
-       u64 prev_block = 0;
-
-
-       trans = btrfs_start_transaction(extent_root, 1);
-       BUG_ON(IS_ERR(trans));
-
-       if (extent_key->objectid == 0) {
-               ret = del_extent_zero(trans, extent_root, path, extent_key);
-               goto out;
-       }
-
-       ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
-       if (!ref_path) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       for (loops = 0; ; loops++) {
-               if (loops == 0) {
-                       ret = btrfs_first_ref_path(trans, extent_root, ref_path,
-                                                  extent_key->objectid);
-               } else {
-                       ret = btrfs_next_ref_path(trans, extent_root, ref_path);
-               }
-               if (ret < 0)
-                       goto out;
-               if (ret > 0)
-                       break;
-
-               if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
-                   ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
-                       continue;
-
-               found_root = read_ref_root(extent_root->fs_info, ref_path);
-               BUG_ON(!found_root);
-               /*
-                * for reference counted trees, only process reference paths
-                * rooted at the latest committed root.
-                */
-               if (found_root->ref_cows &&
-                   ref_path->root_generation != found_root->root_key.offset)
-                       continue;
-
-               if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
-                       if (pass == 0) {
-                               /*
-                                * copy data extents to new locations
-                                */
-                               u64 group_start = group->key.objectid;
-                               ret = relocate_data_extent(reloc_inode,
-                                                          extent_key,
-                                                          group_start);
-                               if (ret < 0)
-                                       goto out;
-                               break;
-                       }
-                       level = 0;
-               } else {
-                       level = ref_path->owner_objectid;
-               }
-
-               if (prev_block != ref_path->nodes[level]) {
-                       struct extent_buffer *eb;
-                       u64 block_start = ref_path->nodes[level];
-                       u64 block_size = btrfs_level_size(found_root, level);
-
-                       eb = read_tree_block(found_root, block_start,
-                                            block_size, 0);
-                       if (!eb) {
-                               ret = -EIO;
-                               goto out;
-                       }
-                       btrfs_tree_lock(eb);
-                       BUG_ON(level != btrfs_header_level(eb));
-
-                       if (level == 0)
-                               btrfs_item_key_to_cpu(eb, &first_key, 0);
-                       else
-                               btrfs_node_key_to_cpu(eb, &first_key, 0);
-
-                       btrfs_tree_unlock(eb);
-                       free_extent_buffer(eb);
-                       prev_block = block_start;
-               }
-
-               mutex_lock(&extent_root->fs_info->trans_mutex);
-               btrfs_record_root_in_trans(found_root);
-               mutex_unlock(&extent_root->fs_info->trans_mutex);
-               if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
-                       /*
-                        * try to update data extent references while
-                        * keeping metadata shared between snapshots.
-                        */
-                       if (pass == 1) {
-                               ret = relocate_one_path(trans, found_root,
-                                               path, &first_key, ref_path,
-                                               group, reloc_inode);
-                               if (ret < 0)
-                                       goto out;
-                               continue;
-                       }
-                       /*
-                        * use fallback method to process the remaining
-                        * references.
-                        */
-                       if (!new_extents) {
-                               u64 group_start = group->key.objectid;
-                               new_extents = kmalloc(sizeof(*new_extents),
-                                                     GFP_NOFS);
-                               if (!new_extents) {
-                                       ret = -ENOMEM;
-                                       goto out;
-                               }
-                               nr_extents = 1;
-                               ret = get_new_locations(reloc_inode,
-                                                       extent_key,
-                                                       group_start, 1,
-                                                       &new_extents,
-                                                       &nr_extents);
-                               if (ret)
-                                       goto out;
-                       }
-                       ret = replace_one_extent(trans, found_root,
-                                               path, extent_key,
-                                               &first_key, ref_path,
-                                               new_extents, nr_extents);
-               } else {
-                       ret = relocate_tree_block(trans, found_root, path,
-                                                 &first_key, ref_path);
-               }
-               if (ret < 0)
-                       goto out;
-       }
-       ret = 0;
-out:
-       btrfs_end_transaction(trans, extent_root);
-       kfree(new_extents);
-       kfree(ref_path);
-       return ret;
-}
-#endif
-
-static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
-{
-       u64 num_devices;
-       u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
-               BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
-
-       /*
-        * we add in the count of missing devices because we want
-        * to make sure that any RAID levels on a degraded FS
-        * continue to be honored.
-        */
-       num_devices = root->fs_info->fs_devices->rw_devices +
-               root->fs_info->fs_devices->missing_devices;
-
-       if (num_devices == 1) {
-               stripped |= BTRFS_BLOCK_GROUP_DUP;
-               stripped = flags & ~stripped;
-
-               /* turn raid0 into single device chunks */
-               if (flags & BTRFS_BLOCK_GROUP_RAID0)
-                       return stripped;
-
-               /* turn mirroring into duplication */
-               if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
-                            BTRFS_BLOCK_GROUP_RAID10))
-                       return stripped | BTRFS_BLOCK_GROUP_DUP;
-               return flags;
-       } else {
-               /* they already had raid on here, just return */
-               if (flags & stripped)
-                       return flags;
-
-               stripped |= BTRFS_BLOCK_GROUP_DUP;
-               stripped = flags & ~stripped;
-
-               /* switch duplicated blocks with raid1 */
-               if (flags & BTRFS_BLOCK_GROUP_DUP)
-                       return stripped | BTRFS_BLOCK_GROUP_RAID1;
-
-               /* turn single device chunks into raid0 */
-               return stripped | BTRFS_BLOCK_GROUP_RAID0;
-       }
-       return flags;
-}
-
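
update_block_group_flags() above picks a replacement RAID profile when the device count no longer supports the current one, and it counts missing devices so a degraded filesystem keeps its redundancy instead of being silently converted. A standalone sketch of the same decision table; the flag values below are placeholders, not the on-disk BTRFS_BLOCK_GROUP_* bits:

#include <stdio.h>

/* placeholder bits, for illustration only */
#define BG_RAID0  (1ULL << 0)
#define BG_RAID1  (1ULL << 1)
#define BG_DUP    (1ULL << 2)
#define BG_RAID10 (1ULL << 3)

static unsigned long long convert_flags(unsigned long long flags,
                                        unsigned long long num_devices)
{
        unsigned long long stripped = BG_RAID0 | BG_RAID1 | BG_RAID10;

        if (num_devices == 1) {
                stripped |= BG_DUP;
                stripped = flags & ~stripped;
                if (flags & BG_RAID0)                   /* raid0 -> single */
                        return stripped;
                if (flags & (BG_RAID1 | BG_RAID10))     /* mirroring -> dup */
                        return stripped | BG_DUP;
                return flags;
        }
        if (flags & stripped)                           /* already raid: keep */
                return flags;
        stripped |= BG_DUP;
        stripped = flags & ~stripped;
        if (flags & BG_DUP)                             /* dup -> raid1 */
                return stripped | BG_RAID1;
        return stripped | BG_RAID0;                     /* single -> raid0 */
}

int main(void)
{
        /* RAID1 group, one rw device plus one missing device: stays RAID1 */
        printf("%llx\n", convert_flags(BG_RAID1, 1 + 1));
        /* RAID1 group, really only one device left: falls back to DUP */
        printf("%llx\n", convert_flags(BG_RAID1, 1));
        return 0;
}
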
-static int set_block_group_ro(struct btrfs_block_group_cache *cache)
-{
-       struct btrfs_space_info *sinfo = cache->space_info;
-       u64 num_bytes;
-       int ret = -ENOSPC;
-
-       if (cache->ro)
-               return 0;
-
-       spin_lock(&sinfo->lock);
-       spin_lock(&cache->lock);
-       num_bytes = cache->key.offset - cache->reserved - cache->pinned -
-                   cache->bytes_super - btrfs_block_group_used(&cache->item);
-
-       if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
-           sinfo->bytes_may_use + sinfo->bytes_readonly +
-           cache->reserved_pinned + num_bytes <= sinfo->total_bytes) {
-               sinfo->bytes_readonly += num_bytes;
-               sinfo->bytes_reserved += cache->reserved_pinned;
-               cache->reserved_pinned = 0;
-               cache->ro = 1;
-               ret = 0;
-       }
-
-       spin_unlock(&cache->lock);
-       spin_unlock(&sinfo->lock);
-       return ret;
-}
-
-int btrfs_set_block_group_ro(struct btrfs_root *root,
-                            struct btrfs_block_group_cache *cache)
-
-{
-       struct btrfs_trans_handle *trans;
-       u64 alloc_flags;
-       int ret;
-
-       BUG_ON(cache->ro);
-
-       trans = btrfs_join_transaction(root, 1);
-       BUG_ON(IS_ERR(trans));
-
-       alloc_flags = update_block_group_flags(root, cache->flags);
-       if (alloc_flags != cache->flags)
-               do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
-                              CHUNK_ALLOC_FORCE);
-
-       ret = set_block_group_ro(cache);
-       if (!ret)
-               goto out;
-       alloc_flags = get_alloc_profile(root, cache->space_info->flags);
-       ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
-                            CHUNK_ALLOC_FORCE);
-       if (ret < 0)
-               goto out;
-       ret = set_block_group_ro(cache);
-out:
-       btrfs_end_transaction(trans, root);
-       return ret;
-}
-
-int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
-                           struct btrfs_root *root, u64 type)
-{
-       u64 alloc_flags = get_alloc_profile(root, type);
-       return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags,
-                             CHUNK_ALLOC_FORCE);
-}
-
-/*
- * helper to account the unused space of all the readonly block groups in the
- * list. takes mirrors into account.
- */
-static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
-{
-       struct btrfs_block_group_cache *block_group;
-       u64 free_bytes = 0;
-       int factor;
-
-       list_for_each_entry(block_group, groups_list, list) {
-               spin_lock(&block_group->lock);
-
-               if (!block_group->ro) {
-                       spin_unlock(&block_group->lock);
-                       continue;
-               }
-
-               if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
-                                         BTRFS_BLOCK_GROUP_RAID10 |
-                                         BTRFS_BLOCK_GROUP_DUP))
-                       factor = 2;
-               else
-                       factor = 1;
-
-               free_bytes += (block_group->key.offset -
-                              btrfs_block_group_used(&block_group->item)) *
-                              factor;
-
-               spin_unlock(&block_group->lock);
-       }
-
-       return free_bytes;
-}
+       return free_bytes;
+}
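
The helper above sums, for every read-only block group, its unused space multiplied by a factor of 2 for mirrored profiles (RAID1, RAID10, DUP), because each unused logical byte still occupies two raw bytes on disk. A minimal sketch of that accounting with an assumed flat struct, not the kernel's btrfs_block_group_cache:

#include <stdio.h>

struct ro_group {
        unsigned long long size;        /* cache->key.offset */
        unsigned long long used;        /* btrfs_block_group_used(&cache->item) */
        int mirrored;                   /* RAID1 / RAID10 / DUP profile */
};

static unsigned long long ro_free_space(const struct ro_group *g, int n)
{
        unsigned long long free_bytes = 0;

        for (int i = 0; i < n; i++) {
                int factor = g[i].mirrored ? 2 : 1;
                free_bytes += (g[i].size - g[i].used) * factor;
        }
        return free_bytes;
}

int main(void)
{
        struct ro_group groups[] = {
                { .size = 1ULL << 30, .used = 1ULL << 29, .mirrored = 1 },
                { .size = 1ULL << 30, .used = 1ULL << 28, .mirrored = 0 },
        };

        printf("ro free space: %llu bytes\n", ro_free_space(groups, 2));
        return 0;
}
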
 
 /*
  * helper to account the unused space of all the readonly block groups in the
@@ -8555,10 +6905,16 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                        ret = -ENOMEM;
                        goto error;
                }
+               cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
+                                               GFP_NOFS);
+               if (!cache->free_space_ctl) {
+                       kfree(cache);
+                       ret = -ENOMEM;
+                       goto error;
+               }
 
                atomic_set(&cache->count, 1);
                spin_lock_init(&cache->lock);
-               spin_lock_init(&cache->tree_lock);
                cache->fs_info = info;
                INIT_LIST_HEAD(&cache->list);
                INIT_LIST_HEAD(&cache->cluster_list);
@@ -8566,24 +6922,18 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                if (need_clear)
                        cache->disk_cache_state = BTRFS_DC_CLEAR;
 
-               /*
-                * we only want to have 32k of ram per block group for keeping
-                * track of free space, and if we pass 1/2 of that we want to
-                * start converting things over to using bitmaps
-                */
-               cache->extents_thresh = ((1024 * 32) / 2) /
-                       sizeof(struct btrfs_free_space);
-
                read_extent_buffer(leaf, &cache->item,
                                   btrfs_item_ptr_offset(leaf, path->slots[0]),
                                   sizeof(cache->item));
                memcpy(&cache->key, &found_key, sizeof(found_key));
 
                key.objectid = found_key.objectid + found_key.offset;
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
                cache->flags = btrfs_block_group_flags(&cache->item);
                cache->sectorsize = root->sectorsize;
 
+               btrfs_init_free_space_ctl(cache);
+
                /*
                 * We need to exclude the super stripes now so that the space
                 * info has super bytes accounted for, otherwise we'll think
@@ -8670,6 +7020,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
        cache = kzalloc(sizeof(*cache), GFP_NOFS);
        if (!cache)
                return -ENOMEM;
+       cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
+                                       GFP_NOFS);
+       if (!cache->free_space_ctl) {
+               kfree(cache);
+               return -ENOMEM;
+       }
 
        cache->key.objectid = chunk_offset;
        cache->key.offset = size;
@@ -8677,19 +7033,13 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
        cache->sectorsize = root->sectorsize;
        cache->fs_info = root->fs_info;
 
-       /*
-        * we only want to have 32k of ram per block group for keeping track
-        * of free space, and if we pass 1/2 of that we want to start
-        * converting things over to using bitmaps
-        */
-       cache->extents_thresh = ((1024 * 32) / 2) /
-               sizeof(struct btrfs_free_space);
        atomic_set(&cache->count, 1);
        spin_lock_init(&cache->lock);
-       spin_lock_init(&cache->tree_lock);
        INIT_LIST_HEAD(&cache->list);
        INIT_LIST_HEAD(&cache->cluster_list);
 
+       btrfs_init_free_space_ctl(cache);
+
        btrfs_set_block_group_used(&cache->item, bytes_used);
        btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
        cache->flags = type;
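
Both hunks above drop the per-block-group extents_thresh computation, which the removed comments explain: budget roughly 32k of RAM per block group for free-space entries and start converting to bitmaps once half of that is consumed, i.e. threshold = 16384 / sizeof(struct btrfs_free_space). A quick sketch of the arithmetic; the 32-byte entry size is an assumption, not the kernel's real sizeof():

#include <stdio.h>

int main(void)
{
        /* budget: ~32k of RAM per block group for free-space entries */
        unsigned int ram_budget = 1024 * 32;
        /* assumed entry size; stands in for sizeof(struct btrfs_free_space) */
        unsigned int entry_size = 32;
        unsigned int extents_thresh = (ram_budget / 2) / entry_size;

        /* 16384 / 32 = 512 extent entries before converting to bitmaps */
        printf("extents_thresh = %u\n", extents_thresh);
        return 0;
}
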
@@ -8802,12 +7152,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        if (ret < 0)
                goto out;
        if (ret > 0)
-               btrfs_release_path(tree_root, path);
+               btrfs_release_path(path);
        if (ret == 0) {
                ret = btrfs_del_item(trans, tree_root, path);
                if (ret)
                        goto out;
-               btrfs_release_path(tree_root, path);
+               btrfs_release_path(path);
        }
 
        spin_lock(&root->fs_info->block_group_cache_lock);
index 96fcfa522dab72f837d991d5afaaacd4e73d8f74..c5d9fbb92bc31b50ec9c7e2fd955a267661afe2d 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/writeback.h>
 #include <linux/pagevec.h>
 #include <linux/prefetch.h>
+#include <linux/cleancache.h>
 #include "extent_io.h"
 #include "extent_map.h"
 #include "compat.h"
@@ -102,7 +103,7 @@ void extent_io_exit(void)
 }
 
 void extent_io_tree_init(struct extent_io_tree *tree,
-                         struct address_space *mapping, gfp_t mask)
+                        struct address_space *mapping)
 {
        tree->state = RB_ROOT;
        INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
@@ -440,6 +441,15 @@ static int clear_state_bit(struct extent_io_tree *tree,
        return ret;
 }
 
+static struct extent_state *
+alloc_extent_state_atomic(struct extent_state *prealloc)
+{
+       if (!prealloc)
+               prealloc = alloc_extent_state(GFP_ATOMIC);
+
+       return prealloc;
+}
+
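
The alloc_extent_state_atomic() helper added above supports a two-stage allocation pattern visible throughout the rest of this file's diff: allocate the extent_state up front while sleeping is still allowed, and fall back to a GFP_ATOMIC allocation only if another one is needed while the tree spinlock is held. A rough user-space sketch of the pattern; sleeping_alloc()/atomic_alloc() are stand-ins for the two GFP modes and a pthread mutex stands in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct state { long start, end; };

static struct state *sleeping_alloc(void) { return malloc(sizeof(struct state)); }
static struct state *atomic_alloc(void)   { return malloc(sizeof(struct state)); }

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

static int set_bits(long start, long end)
{
        /* stage 1: allocate before taking the lock, where blocking is fine */
        struct state *prealloc = sleeping_alloc();

        pthread_mutex_lock(&tree_lock);
        /* stage 2: if one is still needed under the lock, use the
         * non-blocking path */
        if (!prealloc)
                prealloc = atomic_alloc();
        if (!prealloc) {
                pthread_mutex_unlock(&tree_lock);
                return -1;
        }
        prealloc->start = start;
        prealloc->end = end;
        /* ... insert 'prealloc' into the tree here ... */
        pthread_mutex_unlock(&tree_lock);
        free(prealloc);                 /* the real code keeps it in the tree */
        return 0;
}

int main(void)
{
        printf("set_bits: %d\n", set_bits(0, 4095));
        return 0;
}
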
 /*
  * clear some bits on a range in the tree.  This may require splitting
  * or inserting elements in the tree, so the gfp mask is used to
@@ -530,8 +540,8 @@ hit_next:
         */
 
        if (state->start < start) {
-               if (!prealloc)
-                       prealloc = alloc_extent_state(GFP_ATOMIC);
+               prealloc = alloc_extent_state_atomic(prealloc);
+               BUG_ON(!prealloc);
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
@@ -552,8 +562,8 @@ hit_next:
         * on the first half
         */
        if (state->start <= end && state->end > end) {
-               if (!prealloc)
-                       prealloc = alloc_extent_state(GFP_ATOMIC);
+               prealloc = alloc_extent_state_atomic(prealloc);
+               BUG_ON(!prealloc);
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);
                if (wake)
@@ -726,8 +736,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 again:
        if (!prealloc && (mask & __GFP_WAIT)) {
                prealloc = alloc_extent_state(mask);
-               if (!prealloc)
-                       return -ENOMEM;
+               BUG_ON(!prealloc);
        }
 
        spin_lock(&tree->lock);
@@ -744,6 +753,8 @@ again:
         */
        node = tree_search(tree, start);
        if (!node) {
+               prealloc = alloc_extent_state_atomic(prealloc);
+               BUG_ON(!prealloc);
                err = insert_state(tree, prealloc, start, end, &bits);
                prealloc = NULL;
                BUG_ON(err == -EEXIST);
@@ -772,20 +783,18 @@ hit_next:
                if (err)
                        goto out;
 
+               next_node = rb_next(node);
                cache_state(state, cached_state);
                merge_state(tree, state);
                if (last_end == (u64)-1)
                        goto out;
 
                start = last_end + 1;
-               if (start < end && prealloc && !need_resched()) {
-                       next_node = rb_next(node);
-                       if (next_node) {
-                               state = rb_entry(next_node, struct extent_state,
-                                                rb_node);
-                               if (state->start == start)
-                                       goto hit_next;
-                       }
+               if (next_node && start < end && prealloc && !need_resched()) {
+                       state = rb_entry(next_node, struct extent_state,
+                                        rb_node);
+                       if (state->start == start)
+                               goto hit_next;
                }
                goto search_again;
        }
@@ -812,6 +821,9 @@ hit_next:
                        err = -EEXIST;
                        goto out;
                }
+
+               prealloc = alloc_extent_state_atomic(prealloc);
+               BUG_ON(!prealloc);
                err = split_state(tree, state, prealloc, start);
                BUG_ON(err == -EEXIST);
                prealloc = NULL;
@@ -842,14 +854,25 @@ hit_next:
                        this_end = end;
                else
                        this_end = last_start - 1;
+
+               prealloc = alloc_extent_state_atomic(prealloc);
+               BUG_ON(!prealloc);
+
+               /*
+                * Avoid freeing 'prealloc' if it can be merged with
+                * the later extent.
+                */
+               atomic_inc(&prealloc->refs);
                err = insert_state(tree, prealloc, start, this_end,
                                   &bits);
                BUG_ON(err == -EEXIST);
                if (err) {
+                       free_extent_state(prealloc);
                        prealloc = NULL;
                        goto out;
                }
                cache_state(prealloc, cached_state);
+               free_extent_state(prealloc);
                prealloc = NULL;
                start = this_end + 1;
                goto search_again;
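
The atomic_inc()/free_extent_state() pair added in this hunk pins 'prealloc' with an extra reference across insert_state(), so that the following cache_state(prealloc) call is not a use-after-free if the state gets merged with a neighbour inside insert_state(). A small sketch of that pin-with-an-extra-reference pattern; struct obj and its helpers are illustrative, not kernel code:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        atomic_int refs;
        int value;
};

static struct obj *obj_alloc(int value)
{
        struct obj *o = malloc(sizeof(*o));

        atomic_init(&o->refs, 1);
        o->value = value;
        return o;
}

static void obj_put(struct obj *o)
{
        if (atomic_fetch_sub(&o->refs, 1) == 1) {       /* dropped the last ref */
                printf("freeing obj %d\n", o->value);
                free(o);
        }
}

/* stands in for insert_state(): may consume the caller's reference,
 * e.g. because the object gets merged into a neighbour */
static void insert_and_maybe_merge(struct obj *o)
{
        obj_put(o);
}

int main(void)
{
        struct obj *prealloc = obj_alloc(42);

        atomic_fetch_add(&prealloc->refs, 1);           /* extra ref pins the object */
        insert_and_maybe_merge(prealloc);               /* may drop the original ref */
        printf("still valid: %d\n", prealloc->value);   /* safe: pinned above */
        obj_put(prealloc);                              /* drop the extra ref */
        return 0;
}
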
@@ -866,6 +889,9 @@ hit_next:
                        err = -EEXIST;
                        goto out;
                }
+
+               prealloc = alloc_extent_state_atomic(prealloc);
+               BUG_ON(!prealloc);
                err = split_state(tree, state, prealloc, end + 1);
                BUG_ON(err == -EEXIST);
 
@@ -942,13 +968,6 @@ int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
                              NULL, mask);
 }
 
-static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
-                      gfp_t mask)
-{
-       return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0,
-                               NULL, mask);
-}
-
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
                        struct extent_state **cached_state, gfp_t mask)
 {
@@ -964,11 +983,6 @@ static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
                                cached_state, mask);
 }
 
-int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
-{
-       return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
-}
-
 /*
  * either insert or lock state struct between start and end. Use mask to tell
  * us if waiting is desired.
@@ -1028,25 +1042,6 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
                                mask);
 }
 
-/*
- * helper function to set pages and extents in the tree dirty
- */
-int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
-{
-       unsigned long index = start >> PAGE_CACHE_SHIFT;
-       unsigned long end_index = end >> PAGE_CACHE_SHIFT;
-       struct page *page;
-
-       while (index <= end_index) {
-               page = find_get_page(tree->mapping, index);
-               BUG_ON(!page);
-               __set_page_dirty_nobuffers(page);
-               page_cache_release(page);
-               index++;
-       }
-       return 0;
-}
-
 /*
  * helper function to set both pages and extents in the tree writeback
  */
@@ -1820,46 +1815,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
        bio_put(bio);
 }
 
-/*
- * IO done from prepare_write is pretty simple, we just unlock
- * the structs in the extent tree when done, and set the uptodate bits
- * as appropriate.
- */
-static void end_bio_extent_preparewrite(struct bio *bio, int err)
-{
-       const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-       struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
-       struct extent_io_tree *tree;
-       u64 start;
-       u64 end;
-
-       do {
-               struct page *page = bvec->bv_page;
-               struct extent_state *cached = NULL;
-               tree = &BTRFS_I(page->mapping->host)->io_tree;
-
-               start = ((u64)page->index << PAGE_CACHE_SHIFT) +
-                       bvec->bv_offset;
-               end = start + bvec->bv_len - 1;
-
-               if (--bvec >= bio->bi_io_vec)
-                       prefetchw(&bvec->bv_page->flags);
-
-               if (uptodate) {
-                       set_extent_uptodate(tree, start, end, &cached,
-                                           GFP_ATOMIC);
-               } else {
-                       ClearPageUptodate(page);
-                       SetPageError(page);
-               }
-
-               unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
-
-       } while (bvec >= bio->bi_io_vec);
-
-       bio_put(bio);
-}
-
 struct bio *
 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
                gfp_t gfp_flags)
@@ -2008,7 +1963,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
        struct btrfs_ordered_extent *ordered;
        int ret;
        int nr = 0;
-       size_t page_offset = 0;
+       size_t pg_offset = 0;
        size_t iosize;
        size_t disk_io_size;
        size_t blocksize = inode->i_sb->s_blocksize;
@@ -2016,6 +1971,13 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 
        set_page_extent_mapped(page);
 
+       if (!PageUptodate(page)) {
+               if (cleancache_get_page(page) == 0) {
+                       BUG_ON(blocksize != PAGE_SIZE);
+                       goto out;
+               }
+       }
+
        end = page_end;
        while (1) {
                lock_extent(tree, start, end, GFP_NOFS);
@@ -2044,9 +2006,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                        char *userpage;
                        struct extent_state *cached = NULL;
 
-                       iosize = PAGE_CACHE_SIZE - page_offset;
+                       iosize = PAGE_CACHE_SIZE - pg_offset;
                        userpage = kmap_atomic(page, KM_USER0);
-                       memset(userpage + page_offset, 0, iosize);
+                       memset(userpage + pg_offset, 0, iosize);
                        flush_dcache_page(page);
                        kunmap_atomic(userpage, KM_USER0);
                        set_extent_uptodate(tree, cur, cur + iosize - 1,
@@ -2055,9 +2017,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                                             &cached, GFP_NOFS);
                        break;
                }
-               em = get_extent(inode, page, page_offset, cur,
+               em = get_extent(inode, page, pg_offset, cur,
                                end - cur + 1, 0);
-               if (IS_ERR(em) || !em) {
+               if (IS_ERR_OR_NULL(em)) {
                        SetPageError(page);
                        unlock_extent(tree, cur, end, GFP_NOFS);
                        break;
@@ -2095,7 +2057,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                        struct extent_state *cached = NULL;
 
                        userpage = kmap_atomic(page, KM_USER0);
-                       memset(userpage + page_offset, 0, iosize);
+                       memset(userpage + pg_offset, 0, iosize);
                        flush_dcache_page(page);
                        kunmap_atomic(userpage, KM_USER0);
 
@@ -2104,7 +2066,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                        unlock_extent_cached(tree, cur, cur + iosize - 1,
                                             &cached, GFP_NOFS);
                        cur = cur + iosize;
-                       page_offset += iosize;
+                       pg_offset += iosize;
                        continue;
                }
                /* the get_extent function already copied into the page */
@@ -2113,7 +2075,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                        check_page_uptodate(tree, page);
                        unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
                        cur = cur + iosize;
-                       page_offset += iosize;
+                       pg_offset += iosize;
                        continue;
                }
                /* we have an inline extent but it didn't get marked up
@@ -2123,7 +2085,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                        SetPageError(page);
                        unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
                        cur = cur + iosize;
-                       page_offset += iosize;
+                       pg_offset += iosize;
                        continue;
                }
 
@@ -2136,7 +2098,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                        unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
                        pnr -= page->index;
                        ret = submit_extent_page(READ, tree, page,
-                                        sector, disk_io_size, page_offset,
+                                        sector, disk_io_size, pg_offset,
                                         bdev, bio, pnr,
                                         end_bio_extent_readpage, mirror_num,
                                         *bio_flags,
@@ -2147,8 +2109,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                if (ret)
                        SetPageError(page);
                cur = cur + iosize;
-               page_offset += iosize;
+               pg_offset += iosize;
        }
+out:
        if (!nr) {
                if (!PageError(page))
                        SetPageUptodate(page);
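
The page_offset -> pg_offset renames in __extent_read_full_page() are presumably there so the local byte counter no longer shadows the page_offset() helper from include/linux/pagemap.h, which turns a page's index into a file offset, roughly:

static inline loff_t page_offset(struct page *page)
{
        /* byte offset of this page within its mapping */
        return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}
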
@@ -2342,7 +2305,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                }
                em = epd->get_extent(inode, page, pg_offset, cur,
                                     end - cur + 1, 1);
-               if (IS_ERR(em) || !em) {
+               if (IS_ERR_OR_NULL(em)) {
                        SetPageError(page);
                        break;
                }
@@ -2720,128 +2683,6 @@ int extent_invalidatepage(struct extent_io_tree *tree,
        return 0;
 }
 
-/*
- * simple commit_write call, set_range_dirty is used to mark both
- * the pages and the extent records as dirty
- */
-int extent_commit_write(struct extent_io_tree *tree,
-                       struct inode *inode, struct page *page,
-                       unsigned from, unsigned to)
-{
-       loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
-
-       set_page_extent_mapped(page);
-       set_page_dirty(page);
-
-       if (pos > inode->i_size) {
-               i_size_write(inode, pos);
-               mark_inode_dirty(inode);
-       }
-       return 0;
-}
-
-int extent_prepare_write(struct extent_io_tree *tree,
-                        struct inode *inode, struct page *page,
-                        unsigned from, unsigned to, get_extent_t *get_extent)
-{
-       u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
-       u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
-       u64 block_start;
-       u64 orig_block_start;
-       u64 block_end;
-       u64 cur_end;
-       struct extent_map *em;
-       unsigned blocksize = 1 << inode->i_blkbits;
-       size_t page_offset = 0;
-       size_t block_off_start;
-       size_t block_off_end;
-       int err = 0;
-       int iocount = 0;
-       int ret = 0;
-       int isnew;
-
-       set_page_extent_mapped(page);
-
-       block_start = (page_start + from) & ~((u64)blocksize - 1);
-       block_end = (page_start + to - 1) | (blocksize - 1);
-       orig_block_start = block_start;
-
-       lock_extent(tree, page_start, page_end, GFP_NOFS);
-       while (block_start <= block_end) {
-               em = get_extent(inode, page, page_offset, block_start,
-                               block_end - block_start + 1, 1);
-               if (IS_ERR(em) || !em)
-                       goto err;
-
-               cur_end = min(block_end, extent_map_end(em) - 1);
-               block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
-               block_off_end = block_off_start + blocksize;
-               isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
-
-               if (!PageUptodate(page) && isnew &&
-                   (block_off_end > to || block_off_start < from)) {
-                       void *kaddr;
-
-                       kaddr = kmap_atomic(page, KM_USER0);
-                       if (block_off_end > to)
-                               memset(kaddr + to, 0, block_off_end - to);
-                       if (block_off_start < from)
-                               memset(kaddr + block_off_start, 0,
-                                      from - block_off_start);
-                       flush_dcache_page(page);
-                       kunmap_atomic(kaddr, KM_USER0);
-               }
-               if ((em->block_start != EXTENT_MAP_HOLE &&
-                    em->block_start != EXTENT_MAP_INLINE) &&
-                   !isnew && !PageUptodate(page) &&
-                   (block_off_end > to || block_off_start < from) &&
-                   !test_range_bit(tree, block_start, cur_end,
-                                   EXTENT_UPTODATE, 1, NULL)) {
-                       u64 sector;
-                       u64 extent_offset = block_start - em->start;
-                       size_t iosize;
-                       sector = (em->block_start + extent_offset) >> 9;
-                       iosize = (cur_end - block_start + blocksize) &
-                               ~((u64)blocksize - 1);
-                       /*
-                        * we've already got the extent locked, but we
-                        * need to split the state such that our end_bio
-                        * handler can clear the lock.
-                        */
-                       set_extent_bit(tree, block_start,
-                                      block_start + iosize - 1,
-                                      EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS);
-                       ret = submit_extent_page(READ, tree, page,
-                                        sector, iosize, page_offset, em->bdev,
-                                        NULL, 1,
-                                        end_bio_extent_preparewrite, 0,
-                                        0, 0);
-                       if (ret && !err)
-                               err = ret;
-                       iocount++;
-                       block_start = block_start + iosize;
-               } else {
-                       struct extent_state *cached = NULL;
-
-                       set_extent_uptodate(tree, block_start, cur_end, &cached,
-                                           GFP_NOFS);
-                       unlock_extent_cached(tree, block_start, cur_end,
-                                            &cached, GFP_NOFS);
-                       block_start = cur_end + 1;
-               }
-               page_offset = block_start & (PAGE_CACHE_SIZE - 1);
-               free_extent_map(em);
-       }
-       if (iocount) {
-               wait_extent_bit(tree, orig_block_start,
-                               block_end, EXTENT_LOCKED);
-       }
-       check_page_uptodate(tree, page);
-err:
-       /* FIXME, zero out newly allocated blocks on error */
-       return err;
-}
-
 /*
  * a helper for releasepage, this tests for areas of the page that
  * are locked or under IO and drops the related state bits if it is safe
@@ -2900,7 +2741,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
                        len = end - start + 1;
                        write_lock(&map->lock);
                        em = lookup_extent_mapping(map, start, len);
-                       if (!em || IS_ERR(em)) {
+                       if (IS_ERR_OR_NULL(em)) {
                                write_unlock(&map->lock);
                                break;
                        }
@@ -2928,33 +2769,6 @@ int try_release_extent_mapping(struct extent_map_tree *map,
        return try_release_extent_state(map, tree, page, mask);
 }
 
-sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
-               get_extent_t *get_extent)
-{
-       struct inode *inode = mapping->host;
-       struct extent_state *cached_state = NULL;
-       u64 start = iblock << inode->i_blkbits;
-       sector_t sector = 0;
-       size_t blksize = (1 << inode->i_blkbits);
-       struct extent_map *em;
-
-       lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1,
-                        0, &cached_state, GFP_NOFS);
-       em = get_extent(inode, NULL, 0, start, blksize, 0);
-       unlock_extent_cached(&BTRFS_I(inode)->io_tree, start,
-                            start + blksize - 1, &cached_state, GFP_NOFS);
-       if (!em || IS_ERR(em))
-               return 0;
-
-       if (em->block_start > EXTENT_MAP_LAST_BYTE)
-               goto out;
-
-       sector = (em->block_start + start - em->start) >> inode->i_blkbits;
-out:
-       free_extent_map(em);
-       return sector;
-}
-
 /*
  * helper function for fiemap, which doesn't want to see any holes.
  * This maps until we find something past 'last'
@@ -2977,7 +2791,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
                        break;
                len = (len + sectorsize - 1) & ~(sectorsize - 1);
                em = get_extent(inode, NULL, 0, offset, len, 0);
-               if (!em || IS_ERR(em))
+               if (IS_ERR_OR_NULL(em))
                        return em;
 
                /* if this isn't a hole return it */
@@ -3031,7 +2845,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
         * because there might be preallocation past i_size
         */
        ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
-                                      path, inode->i_ino, -1, 0);
+                                      path, btrfs_ino(inode), -1, 0);
        if (ret < 0) {
                btrfs_free_path(path);
                return ret;
@@ -3044,7 +2858,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
        found_type = btrfs_key_type(&found_key);
 
        /* No extents, but there might be delalloc bits */
-       if (found_key.objectid != inode->i_ino ||
+       if (found_key.objectid != btrfs_ino(inode) ||
            found_type != BTRFS_EXTENT_DATA_KEY) {
                /* have to trust i_size as the end */
                last = (u64)-1;
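
btrfs_ino() replaces raw inode->i_ino reads because btrfs object ids are 64-bit while i_ino is an unsigned long and can be truncated on 32-bit hosts. A sketch of the idea; the exact guard conditions live in btrfs_inode.h and are an assumption here:

static inline u64 btrfs_ino_sketch(struct inode *inode)
{
        u64 ino = BTRFS_I(inode)->location.objectid;

        if (!ino)               /* special inodes without a key objectid */
                ino = inode->i_ino;
        return ino;
}
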
@@ -3267,8 +3081,7 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 
 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
                                          u64 start, unsigned long len,
-                                         struct page *page0,
-                                         gfp_t mask)
+                                         struct page *page0)
 {
        unsigned long num_pages = num_extent_pages(start, len);
        unsigned long i;
@@ -3289,7 +3102,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
        }
        rcu_read_unlock();
 
-       eb = __alloc_extent_buffer(tree, start, len, mask);
+       eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS);
        if (!eb)
                return NULL;
 
@@ -3306,7 +3119,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
                i = 0;
        }
        for (; i < num_pages; i++, index++) {
-               p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
+               p = find_or_create_page(mapping, index, GFP_NOFS | __GFP_HIGHMEM);
                if (!p) {
                        WARN_ON(1);
                        goto free_eb;
@@ -3378,8 +3191,7 @@ free_eb:
 }
 
 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
-                                        u64 start, unsigned long len,
-                                         gfp_t mask)
+                                        u64 start, unsigned long len)
 {
        struct extent_buffer *eb;
 
@@ -3440,13 +3252,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
        return 0;
 }
 
-int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
-                                   struct extent_buffer *eb)
-{
-       return wait_on_extent_writeback(tree, eb->start,
-                                       eb->start + eb->len - 1);
-}
-
 int set_extent_buffer_dirty(struct extent_io_tree *tree,
                             struct extent_buffer *eb)
 {
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index af2d7179c37288f50371bfd2ffa5dd307b16af00..4e8445a4757c0a14a991fc1ca037411e0426c94e 100644 (file)
@@ -153,23 +153,14 @@ static inline int extent_compress_type(unsigned long bio_flags)
 
 struct extent_map_tree;
 
-static inline struct extent_state *extent_state_next(struct extent_state *state)
-{
-       struct rb_node *node;
-       node = rb_next(&state->rb_node);
-       if (!node)
-               return NULL;
-       return rb_entry(node, struct extent_state, rb_node);
-}
-
 typedef struct extent_map *(get_extent_t)(struct inode *inode,
                                          struct page *page,
-                                         size_t page_offset,
+                                         size_t pg_offset,
                                          u64 start, u64 len,
                                          int create);
 
 void extent_io_tree_init(struct extent_io_tree *tree,
-                         struct address_space *mapping, gfp_t mask);
+                        struct address_space *mapping);
 int try_release_extent_mapping(struct extent_map_tree *map,
                               struct extent_io_tree *tree, struct page *page,
                               gfp_t mask);
@@ -215,14 +206,8 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                     gfp_t mask);
 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                       gfp_t mask);
-int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
-                      gfp_t mask);
-int clear_extent_ordered_metadata(struct extent_io_tree *tree, u64 start,
-                                 u64 end, gfp_t mask);
 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
                        struct extent_state **cached_state, gfp_t mask);
-int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
-                    gfp_t mask);
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
                          u64 *start_ret, u64 *end_ret, int bits);
 struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
@@ -243,28 +228,17 @@ int extent_readpages(struct extent_io_tree *tree,
                     struct address_space *mapping,
                     struct list_head *pages, unsigned nr_pages,
                     get_extent_t get_extent);
-int extent_prepare_write(struct extent_io_tree *tree,
-                        struct inode *inode, struct page *page,
-                        unsigned from, unsigned to, get_extent_t *get_extent);
-int extent_commit_write(struct extent_io_tree *tree,
-                       struct inode *inode, struct page *page,
-                       unsigned from, unsigned to);
-sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
-               get_extent_t *get_extent);
 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                __u64 start, __u64 len, get_extent_t *get_extent);
-int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end);
 int set_state_private(struct extent_io_tree *tree, u64 start, u64 private);
 int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
 void set_page_extent_mapped(struct page *page);
 
 struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
                                          u64 start, unsigned long len,
-                                         struct page *page0,
-                                         gfp_t mask);
+                                         struct page *page0);
 struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
-                                        u64 start, unsigned long len,
-                                         gfp_t mask);
+                                        u64 start, unsigned long len);
 void free_extent_buffer(struct extent_buffer *eb);
 int read_extent_buffer_pages(struct extent_io_tree *tree,
                             struct extent_buffer *eb, u64 start, int wait,
@@ -292,16 +266,11 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
                           unsigned long src_offset, unsigned long len);
 void memset_extent_buffer(struct extent_buffer *eb, char c,
                          unsigned long start, unsigned long len);
-int wait_on_extent_buffer_writeback(struct extent_io_tree *tree,
-                                   struct extent_buffer *eb);
-int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end);
 int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits);
 int clear_extent_buffer_dirty(struct extent_io_tree *tree,
                              struct extent_buffer *eb);
 int set_extent_buffer_dirty(struct extent_io_tree *tree,
                             struct extent_buffer *eb);
-int test_extent_buffer_dirty(struct extent_io_tree *tree,
-                            struct extent_buffer *eb);
 int set_extent_buffer_uptodate(struct extent_io_tree *tree,
                               struct extent_buffer *eb);
 int clear_extent_buffer_uptodate(struct extent_io_tree *tree,
@@ -319,7 +288,6 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset,
                      unsigned long *map_start,
                      unsigned long *map_len, int km);
 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km);
-int release_extent_buffer_tail_pages(struct extent_buffer *eb);
 int extent_range_uptodate(struct extent_io_tree *tree,
                          u64 start, u64 end);
 int extent_clear_unlock_delalloc(struct inode *inode,
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index a24a3f2fa13e6fe4a1670073933aa1de61490dc4..2d0410344ea3667a7d505b2c543e7ba212dd01ae 100644 (file)
@@ -28,12 +28,11 @@ void extent_map_exit(void)
 /**
  * extent_map_tree_init - initialize extent map tree
  * @tree:              tree to initialize
- * @mask:              flags for memory allocations during tree operations
  *
  * Initialize the extent tree @tree.  Should be called for each new inode
  * or other user of the extent_map interface.
  */
-void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
+void extent_map_tree_init(struct extent_map_tree *tree)
 {
        tree->map = RB_ROOT;
        rwlock_init(&tree->lock);
@@ -41,16 +40,15 @@ void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
 
 /**
  * alloc_extent_map - allocate new extent map structure
- * @mask:      memory allocation flags
  *
  * Allocate a new extent_map structure.  The new structure is
  * returned with a reference count of one and needs to be
  * freed using free_extent_map()
  */
-struct extent_map *alloc_extent_map(gfp_t mask)
+struct extent_map *alloc_extent_map(void)
 {
        struct extent_map *em;
-       em = kmem_cache_alloc(extent_map_cache, mask);
+       em = kmem_cache_alloc(extent_map_cache, GFP_NOFS);
        if (!em)
                return NULL;
        em->in_tree = 0;
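
With the gfp_t parameters gone, the GFP_NOFS choice lives inside the extent-map helpers themselves. A hypothetical caller updated for the new signatures:

struct extent_map *em;

em = alloc_extent_map();        /* was: alloc_extent_map(GFP_NOFS) */
if (!em)
        return -ENOMEM;
/* ... fill in and insert em, then drop the reference ... */
free_extent_map(em);
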
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h
index 28b44dbd1e3508c37bd30a1ef990fd071b1096d0..33a7890b1f4091479df52d7a13c1532ba911ce76 100644 (file)
@@ -49,14 +49,14 @@ static inline u64 extent_map_block_end(struct extent_map *em)
        return em->block_start + em->block_len;
 }
 
-void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask);
+void extent_map_tree_init(struct extent_map_tree *tree);
 struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
                                         u64 start, u64 len);
 int add_extent_mapping(struct extent_map_tree *tree,
                       struct extent_map *em);
 int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em);
 
-struct extent_map *alloc_extent_map(gfp_t mask);
+struct extent_map *alloc_extent_map(void);
 void free_extent_map(struct extent_map *em);
 int __init extent_map_init(void);
 void extent_map_exit(void);
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index a6a9d4e8b491eee488316c97f6f975e167bf13db..90d4ee52cd458ac9f7bf87dfe4a34c99be27bc30 100644 (file)
@@ -193,7 +193,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
                        u32 item_size;
 
                        if (item)
-                               btrfs_release_path(root, path);
+                               btrfs_release_path(path);
                        item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
                                                 path, disk_bytenr, 0);
                        if (IS_ERR(item)) {
@@ -208,12 +208,13 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
                                                EXTENT_NODATASUM, GFP_NOFS);
                                } else {
                                        printk(KERN_INFO "btrfs no csum found "
-                                              "for inode %lu start %llu\n",
-                                              inode->i_ino,
+                                              "for inode %llu start %llu\n",
+                                              (unsigned long long)
+                                              btrfs_ino(inode),
                                               (unsigned long long)offset);
                                }
                                item = NULL;
-                               btrfs_release_path(root, path);
+                               btrfs_release_path(path);
                                goto found;
                        }
                        btrfs_item_key_to_cpu(path->nodes[0], &found_key,
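
The btrfs_release_path() conversions here and throughout the series drop the root argument, which the function did not use; callers now pass only the path:

/* before:  btrfs_release_path(root, path);   after: */
btrfs_release_path(path);
btrfs_free_path(path);
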
@@ -266,7 +267,7 @@ int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
 }
 
 int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
-                            struct list_head *list)
+                            struct list_head *list, int search_commit)
 {
        struct btrfs_key key;
        struct btrfs_path *path;
@@ -283,6 +284,12 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
        path = btrfs_alloc_path();
        BUG_ON(!path);
 
+       if (search_commit) {
+               path->skip_locking = 1;
+               path->reada = 2;
+               path->search_commit_root = 1;
+       }
+
        key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
        key.offset = start;
        key.type = BTRFS_EXTENT_CSUM_KEY;
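
The new search_commit flag makes the csum walk read the last committed root without tree locking and with readahead enabled. Both call styles, with the flag-setting user assumed to be added elsewhere in this merge rather than shown here:

LIST_HEAD(ordered_sums);        /* hypothetical result list */

/* ordinary lookup through the live tree */
ret = btrfs_lookup_csums_range(csum_root, start, end, &ordered_sums, 0);

/* lockless lookup against the commit root (assumed new caller) */
ret = btrfs_lookup_csums_range(csum_root, start, end, &ordered_sums, 1);
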
@@ -495,7 +502,6 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
                u32 new_size = (bytenr - key->offset) >> blocksize_bits;
                new_size *= csum_size;
                ret = btrfs_truncate_item(trans, root, path, new_size, 1);
-               BUG_ON(ret);
        } else if (key->offset >= bytenr && csum_end > end_byte &&
                   end_byte > key->offset) {
                /*
@@ -508,7 +514,6 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans,
                new_size *= csum_size;
 
                ret = btrfs_truncate_item(trans, root, path, new_size, 0);
-               BUG_ON(ret);
 
                key->offset = end_byte;
                ret = btrfs_set_item_key_safe(trans, root, path, key);
@@ -551,10 +556,10 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0) {
                        if (path->slots[0] == 0)
-                               goto out;
+                               break;
                        path->slots[0]--;
                } else if (ret < 0) {
-                       goto out;
+                       break;
                }
 
                leaf = path->nodes[0];
@@ -579,7 +584,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
                /* delete the entire item, it is inside our range */
                if (key.offset >= bytenr && csum_end <= end_byte) {
                        ret = btrfs_del_item(trans, root, path);
-                       BUG_ON(ret);
+                       if (ret)
+                               goto out;
                        if (key.offset == bytenr)
                                break;
                } else if (key.offset < bytenr && csum_end > end_byte) {
@@ -631,11 +637,12 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
                        if (key.offset < bytenr)
                                break;
                }
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
        }
+       ret = 0;
 out:
        btrfs_free_path(path);
-       return 0;
+       return ret;
 }
 
 int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
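
After the hunks above, btrfs_del_csums() returns real errors instead of a hard-coded 0, and the BUG_ON() after btrfs_del_item() becomes an error path. A hypothetical caller, with the trailing arguments assumed since the full prototype is not visible in this hunk:

ret = btrfs_del_csums(trans, root->fs_info->csum_root, bytenr, num_bytes);
if (ret)
        return ret;     /* e.g. -ENOMEM from the deletion path */
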
@@ -722,7 +729,7 @@ again:
         * at this point, we know the tree has an item, but it isn't big
         * enough yet to put our csum in.  Grow it
         */
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
        ret = btrfs_search_slot(trans, root, &file_key, path,
                                csum_size, 1);
        if (ret < 0)
@@ -761,12 +768,11 @@ again:
                        goto insert;
 
                ret = btrfs_extend_item(trans, root, path, diff);
-               BUG_ON(ret);
                goto csum;
        }
 
 insert:
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
        csum_offset = 0;
        if (found_next) {
                u64 tmp = total_bytes + root->sectorsize;
@@ -850,7 +856,7 @@ next_sector:
        }
        btrfs_mark_buffer_dirty(path->nodes[0]);
        if (total_bytes < sums->len) {
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
                cond_resched();
                goto again;
        }
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 75899a01dded75042b4c99f15a44cc83882730a4..c6a22d783c35576253d96f1c3380b01d949f9eca 100644 (file)
 #include "locking.h"
 #include "compat.h"
 
+/*
+ * when auto defrag is enabled we
+ * queue up these defrag structs to remember which
+ * inodes need defragging passes
+ */
+struct inode_defrag {
+       struct rb_node rb_node;
+       /* objectid */
+       u64 ino;
+       /*
+        * transid where the defrag was added, we search for
+        * extents newer than this
+        */
+       u64 transid;
+
+       /* root objectid */
+       u64 root;
+
+       /* last offset we were able to defrag */
+       u64 last_offset;
+
+       /* if we've wrapped around back to zero once already */
+       int cycled;
+};
+
+/* insert a record for an inode into the defrag tree.  The caller
+ * must already hold the defrag_inodes lock.
+ *
+ * If you're inserting a record for an older transid than an
+ * existing record, the transid already in the tree is lowered.
+ *
+ * If an existing record is found, the defrag item you
+ * pass in is freed.
+ */
+static int __btrfs_add_inode_defrag(struct inode *inode,
+                                   struct inode_defrag *defrag)
+{
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct inode_defrag *entry;
+       struct rb_node **p;
+       struct rb_node *parent = NULL;
+
+       p = &root->fs_info->defrag_inodes.rb_node;
+       while (*p) {
+               parent = *p;
+               entry = rb_entry(parent, struct inode_defrag, rb_node);
+
+               if (defrag->ino < entry->ino)
+                       p = &parent->rb_left;
+               else if (defrag->ino > entry->ino)
+                       p = &parent->rb_right;
+               else {
+                       /* if we're reinserting an entry for
+                        * an old defrag run, make sure to
+                        * lower the transid of our existing record
+                        */
+                       if (defrag->transid < entry->transid)
+                               entry->transid = defrag->transid;
+                       if (defrag->last_offset > entry->last_offset)
+                               entry->last_offset = defrag->last_offset;
+                       goto exists;
+               }
+       }
+       BTRFS_I(inode)->in_defrag = 1;
+       rb_link_node(&defrag->rb_node, parent, p);
+       rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
+       return 0;
+
+exists:
+       kfree(defrag);
+       return 0;
+}
+
+/*
+ * insert a defrag record for this inode if auto defrag is
+ * enabled
+ */
+int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
+                          struct inode *inode)
+{
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       struct inode_defrag *defrag;
+       int ret = 0;
+       u64 transid;
+
+       if (!btrfs_test_opt(root, AUTO_DEFRAG))
+               return 0;
+
+       if (root->fs_info->closing)
+               return 0;
+
+       if (BTRFS_I(inode)->in_defrag)
+               return 0;
+
+       if (trans)
+               transid = trans->transid;
+       else
+               transid = BTRFS_I(inode)->root->last_trans;
+
+       defrag = kzalloc(sizeof(*defrag), GFP_NOFS);
+       if (!defrag)
+               return -ENOMEM;
+
+       defrag->ino = inode->i_ino;
+       defrag->transid = transid;
+       defrag->root = root->root_key.objectid;
+
+       spin_lock(&root->fs_info->defrag_inodes_lock);
+       if (!BTRFS_I(inode)->in_defrag)
+               ret = __btrfs_add_inode_defrag(inode, defrag);
+       spin_unlock(&root->fs_info->defrag_inodes_lock);
+       return ret;
+}
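
btrfs_test_opt(root, AUTO_DEFRAG) ties this to the new autodefrag mount option introduced elsewhere in this series (the option spelling is an assumption here). A hypothetical call site in the buffered-write path, to show how the queue gets fed:

/* hypothetical: after a small buffered write, remember the inode so
 * btrfs_run_defrag_inodes() revisits it; the helper re-checks the
 * mount option and the in_defrag flag, so this is cheap when disabled */
ret = btrfs_add_inode_defrag(trans, inode);
if (ret)                /* only -ENOMEM in the code above */
        goto out_err;   /* hypothetical error label */
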
+
+/*
+ * must be called with the defrag_inodes lock held
+ */
+struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info, u64 ino,
+                                            struct rb_node **next)
+{
+       struct inode_defrag *entry = NULL;
+       struct rb_node *p;
+       struct rb_node *parent = NULL;
+
+       p = info->defrag_inodes.rb_node;
+       while (p) {
+               parent = p;
+               entry = rb_entry(parent, struct inode_defrag, rb_node);
+
+               if (ino < entry->ino)
+                       p = parent->rb_left;
+               else if (ino > entry->ino)
+                       p = parent->rb_right;
+               else
+                       return entry;
+       }
+
+       if (next) {
+               while (parent && ino > entry->ino) {
+                       parent = rb_next(parent);
+                       entry = rb_entry(parent, struct inode_defrag, rb_node);
+               }
+               *next = parent;
+       }
+       return NULL;
+}
+
+/*
+ * run through the list of inodes in the FS that need
+ * defragging
+ */
+int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
+{
+       struct inode_defrag *defrag;
+       struct btrfs_root *inode_root;
+       struct inode *inode;
+       struct rb_node *n;
+       struct btrfs_key key;
+       struct btrfs_ioctl_defrag_range_args range;
+       u64 first_ino = 0;
+       int num_defrag;
+       int defrag_batch = 1024;
+
+       memset(&range, 0, sizeof(range));
+       range.len = (u64)-1;
+
+       atomic_inc(&fs_info->defrag_running);
+       spin_lock(&fs_info->defrag_inodes_lock);
+       while (1) {
+               n = NULL;
+
+               /* find an inode to defrag */
+               defrag = btrfs_find_defrag_inode(fs_info, first_ino, &n);
+               if (!defrag) {
+                       if (n)
+                               defrag = rb_entry(n, struct inode_defrag, rb_node);
+                       else if (first_ino) {
+                               first_ino = 0;
+                               continue;
+                       } else {
+                               break;
+                       }
+               }
+
+               /* remove it from the rbtree */
+               first_ino = defrag->ino + 1;
+               rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);
+
+               if (fs_info->closing)
+                       goto next_free;
+
+               spin_unlock(&fs_info->defrag_inodes_lock);
+
+               /* get the inode */
+               key.objectid = defrag->root;
+               btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
+               key.offset = (u64)-1;
+               inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
+               if (IS_ERR(inode_root))
+                       goto next;
+
+               key.objectid = defrag->ino;
+               btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
+               key.offset = 0;
+
+               inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
+               if (IS_ERR(inode))
+                       goto next;
+
+               /* do a chunk of defrag */
+               BTRFS_I(inode)->in_defrag = 0;
+               range.start = defrag->last_offset;
+               num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
+                                              defrag_batch);
+               /*
+                * if we filled the whole defrag batch, there
+                * must be more work to do.  Queue this defrag
+                * again
+                */
+               if (num_defrag == defrag_batch) {
+                       defrag->last_offset = range.start;
+                       __btrfs_add_inode_defrag(inode, defrag);
+                       /*
+                        * we don't want to kfree defrag, we added it back to
+                        * the rbtree
+                        */
+                       defrag = NULL;
+               } else if (defrag->last_offset && !defrag->cycled) {
+                       /*
+                        * we didn't fill our defrag batch, but
+                        * we didn't start at zero.  Make sure we loop
+                        * around to the start of the file.
+                        */
+                       defrag->last_offset = 0;
+                       defrag->cycled = 1;
+                       __btrfs_add_inode_defrag(inode, defrag);
+                       defrag = NULL;
+               }
+
+               iput(inode);
+next:
+               spin_lock(&fs_info->defrag_inodes_lock);
+next_free:
+               kfree(defrag);
+       }
+       spin_unlock(&fs_info->defrag_inodes_lock);
+
+       atomic_dec(&fs_info->defrag_running);
+
+       /*
+        * during unmount, we use the transaction_wait queue to
+        * wait for the defragger to stop
+        */
+       wake_up(&fs_info->transaction_wait);
+       return 0;
+}
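
For orientation, the defrag code above relies on fields added to struct btrfs_fs_info and struct btrfs_inode in other hunks of this merge. A declaration sketch inferred from the usage here, not the actual layout:

struct btrfs_fs_info {
        /* ... existing fields ... */
        struct rb_root defrag_inodes;   /* inode_defrag records, keyed by ino */
        spinlock_t defrag_inodes_lock;  /* protects defrag_inodes */
        atomic_t defrag_running;        /* passes in flight; unmount waits on this */
        /* ... */
};

/* plus a per-inode in_defrag flag in struct btrfs_inode so an inode is
 * only queued once between passes */
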
 
 /* simple helper to fault in pages and copy.  This should go away
  * and be replaced with calls into generic code.
@@ -191,9 +448,9 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
        }
        while (1) {
                if (!split)
-                       split = alloc_extent_map(GFP_NOFS);
+                       split = alloc_extent_map();
                if (!split2)
-                       split2 = alloc_extent_map(GFP_NOFS);
+                       split2 = alloc_extent_map();
                BUG_ON(!split || !split2);
 
                write_lock(&em_tree->lock);
@@ -298,6 +555,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_key new_key;
+       u64 ino = btrfs_ino(inode);
        u64 search_start = start;
        u64 disk_bytenr = 0;
        u64 num_bytes = 0;
@@ -318,14 +576,14 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
 
        while (1) {
                recow = 0;
-               ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+               ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               search_start, -1);
                if (ret < 0)
                        break;
                if (ret > 0 && path->slots[0] > 0 && search_start == start) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
-                       if (key.objectid == inode->i_ino &&
+                       if (key.objectid == ino &&
                            key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
@@ -346,7 +604,7 @@ next_slot:
                }
 
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-               if (key.objectid > inode->i_ino ||
+               if (key.objectid > ino ||
                    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
                        break;
 
@@ -376,7 +634,7 @@ next_slot:
 
                search_start = max(key.offset, start);
                if (recow) {
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        continue;
                }
 
@@ -393,7 +651,7 @@ next_slot:
                        ret = btrfs_duplicate_item(trans, root, path,
                                                   &new_key);
                        if (ret == -EAGAIN) {
-                               btrfs_release_path(root, path);
+                               btrfs_release_path(path);
                                continue;
                        }
                        if (ret < 0)
@@ -516,7 +774,7 @@ next_slot:
                        del_nr = 0;
                        del_slot = 0;
 
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        continue;
                }
 
@@ -592,6 +850,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
        int del_slot = 0;
        int recow;
        int ret;
+       u64 ino = btrfs_ino(inode);
 
        btrfs_drop_extent_cache(inode, start, end - 1, 0);
 
@@ -600,7 +859,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
 again:
        recow = 0;
        split = start;
-       key.objectid = inode->i_ino;
+       key.objectid = ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = split;
 
@@ -612,8 +871,7 @@ again:
 
        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-       BUG_ON(key.objectid != inode->i_ino ||
-              key.type != BTRFS_EXTENT_DATA_KEY);
+       BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        BUG_ON(btrfs_file_extent_type(leaf, fi) !=
@@ -630,7 +888,7 @@ again:
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1,
-                                    inode->i_ino, bytenr, orig_offset,
+                                    ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);
@@ -653,7 +911,7 @@ again:
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1,
-                                    inode->i_ino, bytenr, orig_offset,
+                                    ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
@@ -681,7 +939,7 @@ again:
                new_key.offset = split;
                ret = btrfs_duplicate_item(trans, root, path, &new_key);
                if (ret == -EAGAIN) {
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        goto again;
                }
                BUG_ON(ret < 0);
@@ -702,7 +960,7 @@ again:
 
                ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                           root->root_key.objectid,
-                                          inode->i_ino, orig_offset);
+                                          ino, orig_offset);
                BUG_ON(ret);
 
                if (split == start) {
@@ -718,10 +976,10 @@ again:
        other_start = end;
        other_end = 0;
        if (extent_mergeable(leaf, path->slots[0] + 1,
-                            inode->i_ino, bytenr, orig_offset,
+                            ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        goto again;
                }
                extent_end = other_end;
@@ -729,16 +987,16 @@ again:
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
-                                       inode->i_ino, orig_offset);
+                                       ino, orig_offset);
                BUG_ON(ret);
        }
        other_start = 0;
        other_end = start;
        if (extent_mergeable(leaf, path->slots[0] - 1,
-                            inode->i_ino, bytenr, orig_offset,
+                            ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        goto again;
                }
                key.offset = other_start;
@@ -746,7 +1004,7 @@ again:
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
-                                       inode->i_ino, orig_offset);
+                                       ino, orig_offset);
                BUG_ON(ret);
        }
        if (del_nr == 0) {
@@ -1375,7 +1633,7 @@ static long btrfs_fallocate(struct file *file, int mode,
        while (1) {
                em = btrfs_get_extent(inode, NULL, 0, cur_offset,
                                      alloc_end - cur_offset, 0);
-               BUG_ON(IS_ERR(em) || !em);
+               BUG_ON(IS_ERR_OR_NULL(em));
                last_byte = min(extent_map_end(em), alloc_end);
                last_byte = (last_byte + mask) & ~mask;
                if (em->block_start == EXTENT_MAP_HOLE ||
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 63731a1fb0a1f9004e892a31e121ae977a9eff3a..70d45795d758e63cd6303327245e80aeffcfa7ae 100644 (file)
 #include "transaction.h"
 #include "disk-io.h"
 #include "extent_io.h"
+#include "inode-map.h"
 
 #define BITS_PER_BITMAP                (PAGE_CACHE_SIZE * 8)
 #define MAX_CACHE_BYTES_PER_GIG        (32 * 1024)
 
-static void recalculate_thresholds(struct btrfs_block_group_cache
-                                  *block_group);
-static int link_free_space(struct btrfs_block_group_cache *block_group,
+static int link_free_space(struct btrfs_free_space_ctl *ctl,
                           struct btrfs_free_space *info);
 
-struct inode *lookup_free_space_inode(struct btrfs_root *root,
-                                     struct btrfs_block_group_cache
-                                     *block_group, struct btrfs_path *path)
+static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
+                                              struct btrfs_path *path,
+                                              u64 offset)
 {
        struct btrfs_key key;
        struct btrfs_key location;
@@ -46,22 +45,15 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
        struct inode *inode = NULL;
        int ret;
 
-       spin_lock(&block_group->lock);
-       if (block_group->inode)
-               inode = igrab(block_group->inode);
-       spin_unlock(&block_group->lock);
-       if (inode)
-               return inode;
-
        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
-       key.offset = block_group->key.objectid;
+       key.offset = offset;
        key.type = 0;
 
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                return ERR_PTR(ret);
        if (ret > 0) {
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
                return ERR_PTR(-ENOENT);
        }
 
@@ -70,7 +62,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
                                struct btrfs_free_space_header);
        btrfs_free_space_key(leaf, header, &disk_key);
        btrfs_disk_key_to_cpu(&location, &disk_key);
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 
        inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
        if (!inode)
@@ -84,6 +76,27 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
 
        inode->i_mapping->flags &= ~__GFP_FS;
 
+       return inode;
+}
+
+struct inode *lookup_free_space_inode(struct btrfs_root *root,
+                                     struct btrfs_block_group_cache
+                                     *block_group, struct btrfs_path *path)
+{
+       struct inode *inode = NULL;
+
+       spin_lock(&block_group->lock);
+       if (block_group->inode)
+               inode = igrab(block_group->inode);
+       spin_unlock(&block_group->lock);
+       if (inode)
+               return inode;
+
+       inode = __lookup_free_space_inode(root, path,
+                                         block_group->key.objectid);
+       if (IS_ERR(inode))
+               return inode;
+
        spin_lock(&block_group->lock);
        if (!root->fs_info->closing) {
                block_group->inode = igrab(inode);
@@ -94,24 +107,18 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
        return inode;
 }
 
-int create_free_space_inode(struct btrfs_root *root,
-                           struct btrfs_trans_handle *trans,
-                           struct btrfs_block_group_cache *block_group,
-                           struct btrfs_path *path)
+int __create_free_space_inode(struct btrfs_root *root,
+                             struct btrfs_trans_handle *trans,
+                             struct btrfs_path *path, u64 ino, u64 offset)
 {
        struct btrfs_key key;
        struct btrfs_disk_key disk_key;
        struct btrfs_free_space_header *header;
        struct btrfs_inode_item *inode_item;
        struct extent_buffer *leaf;
-       u64 objectid;
        int ret;
 
-       ret = btrfs_find_free_objectid(trans, root, 0, &objectid);
-       if (ret < 0)
-               return ret;
-
-       ret = btrfs_insert_empty_inode(trans, root, path, objectid);
+       ret = btrfs_insert_empty_inode(trans, root, path, ino);
        if (ret)
                return ret;
 
@@ -131,19 +138,18 @@ int create_free_space_inode(struct btrfs_root *root,
                              BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM);
        btrfs_set_inode_nlink(leaf, inode_item, 1);
        btrfs_set_inode_transid(leaf, inode_item, trans->transid);
-       btrfs_set_inode_block_group(leaf, inode_item,
-                                   block_group->key.objectid);
+       btrfs_set_inode_block_group(leaf, inode_item, offset);
        btrfs_mark_buffer_dirty(leaf);
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 
        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
-       key.offset = block_group->key.objectid;
+       key.offset = offset;
        key.type = 0;
 
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(struct btrfs_free_space_header));
        if (ret < 0) {
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
                return ret;
        }
        leaf = path->nodes[0];
@@ -152,11 +158,27 @@ int create_free_space_inode(struct btrfs_root *root,
        memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
        btrfs_set_free_space_key(leaf, header, &disk_key);
        btrfs_mark_buffer_dirty(leaf);
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 
        return 0;
 }
 
+int create_free_space_inode(struct btrfs_root *root,
+                           struct btrfs_trans_handle *trans,
+                           struct btrfs_block_group_cache *block_group,
+                           struct btrfs_path *path)
+{
+       int ret;
+       u64 ino;
+
+       ret = btrfs_find_free_objectid(root, &ino);
+       if (ret < 0)
+               return ret;
+
+       return __create_free_space_inode(root, trans, path, ino,
+                                        block_group->key.objectid);
+}
+
 int btrfs_truncate_free_space_cache(struct btrfs_root *root,
                                    struct btrfs_trans_handle *trans,
                                    struct btrfs_path *path,
@@ -187,7 +209,8 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
                return ret;
        }
 
-       return btrfs_update_inode(trans, root, inode);
+       ret = btrfs_update_inode(trans, root, inode);
+       return ret;
 }
 
 static int readahead_cache(struct inode *inode)
@@ -209,15 +232,13 @@ static int readahead_cache(struct inode *inode)
        return 0;
 }
 
-int load_free_space_cache(struct btrfs_fs_info *fs_info,
-                         struct btrfs_block_group_cache *block_group)
+int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+                           struct btrfs_free_space_ctl *ctl,
+                           struct btrfs_path *path, u64 offset)
 {
-       struct btrfs_root *root = fs_info->tree_root;
-       struct inode *inode;
        struct btrfs_free_space_header *header;
        struct extent_buffer *leaf;
        struct page *page;
-       struct btrfs_path *path;
        u32 *checksums = NULL, *crc;
        char *disk_crcs = NULL;
        struct btrfs_key key;
@@ -225,76 +246,47 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
        u64 num_entries;
        u64 num_bitmaps;
        u64 generation;
-       u64 used = btrfs_block_group_used(&block_group->item);
        u32 cur_crc = ~(u32)0;
        pgoff_t index = 0;
        unsigned long first_page_offset;
        int num_checksums;
-       int ret = 0;
-
-       /*
-        * If we're unmounting then just return, since this does a search on the
-        * normal root and not the commit root and we could deadlock.
-        */
-       smp_mb();
-       if (fs_info->closing)
-               return 0;
-
-       /*
-        * If this block group has been marked to be cleared for one reason or
-        * another then we can't trust the on disk cache, so just return.
-        */
-       spin_lock(&block_group->lock);
-       if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
-               spin_unlock(&block_group->lock);
-               return 0;
-       }
-       spin_unlock(&block_group->lock);
+       int ret = 0, ret2;
 
        INIT_LIST_HEAD(&bitmaps);
 
-       path = btrfs_alloc_path();
-       if (!path)
-               return 0;
-
-       inode = lookup_free_space_inode(root, block_group, path);
-       if (IS_ERR(inode)) {
-               btrfs_free_path(path);
-               return 0;
-       }
-
        /* Nothing in the space cache, goodbye */
-       if (!i_size_read(inode)) {
-               btrfs_free_path(path);
+       if (!i_size_read(inode))
                goto out;
-       }
 
        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
-       key.offset = block_group->key.objectid;
+       key.offset = offset;
        key.type = 0;
 
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-       if (ret) {
-               btrfs_free_path(path);
+       if (ret < 0)
+               goto out;
+       else if (ret > 0) {
+               btrfs_release_path(path);
+               ret = 0;
                goto out;
        }
 
+       ret = -1;
+
        leaf = path->nodes[0];
        header = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_free_space_header);
        num_entries = btrfs_free_space_entries(leaf, header);
        num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
        generation = btrfs_free_space_generation(leaf, header);
-       btrfs_free_path(path);
+       btrfs_release_path(path);
 
        if (BTRFS_I(inode)->generation != generation) {
                printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
-                      " not match free space cache generation (%llu) for "
-                      "block group %llu\n",
+                      " not match free space cache generation (%llu)\n",
                       (unsigned long long)BTRFS_I(inode)->generation,
-                      (unsigned long long)generation,
-                      (unsigned long long)block_group->key.objectid);
-               goto free_cache;
+                      (unsigned long long)generation);
+               goto out;
        }
 
        if (!num_entries)
@@ -311,10 +303,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
                goto out;
 
        ret = readahead_cache(inode);
-       if (ret) {
-               ret = 0;
+       if (ret)
                goto out;
-       }
 
        while (1) {
                struct btrfs_free_space_entry *entry;
@@ -333,10 +323,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
                }
 
                page = grab_cache_page(inode->i_mapping, index);
-               if (!page) {
-                       ret = 0;
+               if (!page)
                        goto free_cache;
-               }
 
                if (!PageUptodate(page)) {
                        btrfs_readpage(NULL, page);
@@ -345,9 +333,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
                                unlock_page(page);
                                page_cache_release(page);
                                printk(KERN_ERR "btrfs: error reading free "
-                                      "space cache: %llu\n",
-                                      (unsigned long long)
-                                      block_group->key.objectid);
+                                      "space cache\n");
                                goto free_cache;
                        }
                }
@@ -360,13 +346,10 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
                        gen = addr + (sizeof(u32) * num_checksums);
                        if (*gen != BTRFS_I(inode)->generation) {
                                printk(KERN_ERR "btrfs: space cache generation"
-                                      " (%llu) does not match inode (%llu) "
-                                      "for block group %llu\n",
+                                      " (%llu) does not match inode (%llu)\n",
                                       (unsigned long long)*gen,
                                       (unsigned long long)
-                                      BTRFS_I(inode)->generation,
-                                      (unsigned long long)
-                                      block_group->key.objectid);
+                                      BTRFS_I(inode)->generation);
                                kunmap(page);
                                unlock_page(page);
                                page_cache_release(page);
@@ -382,9 +365,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
                                          PAGE_CACHE_SIZE - start_offset);
                btrfs_csum_final(cur_crc, (char *)&cur_crc);
                if (cur_crc != *crc) {
-                       printk(KERN_ERR "btrfs: crc mismatch for page %lu in "
-                              "block group %llu\n", index,
-                              (unsigned long long)block_group->key.objectid);
+                       printk(KERN_ERR "btrfs: crc mismatch for page %lu\n",
+                              index);
                        kunmap(page);
                        unlock_page(page);
                        page_cache_release(page);
@@ -417,9 +399,9 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
                        }
 
                        if (entry->type == BTRFS_FREE_SPACE_EXTENT) {
-                               spin_lock(&block_group->tree_lock);
-                               ret = link_free_space(block_group, e);
-                               spin_unlock(&block_group->tree_lock);
+                               spin_lock(&ctl->tree_lock);
+                               ret = link_free_space(ctl, e);
+                               spin_unlock(&ctl->tree_lock);
                                BUG_ON(ret);
                        } else {
                                e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
@@ -431,11 +413,11 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
                                        page_cache_release(page);
                                        goto free_cache;
                                }
-                               spin_lock(&block_group->tree_lock);
-                               ret = link_free_space(block_group, e);
-                               block_group->total_bitmaps++;
-                               recalculate_thresholds(block_group);
-                               spin_unlock(&block_group->tree_lock);
+                               spin_lock(&ctl->tree_lock);
+                               ret2 = link_free_space(ctl, e);
+                               ctl->total_bitmaps++;
+                               ctl->op->recalc_thresholds(ctl);
+                               spin_unlock(&ctl->tree_lock);
                                list_add_tail(&e->list, &bitmaps);
                        }
 
@@ -471,41 +453,97 @@ next:
                index++;
        }
 
-       spin_lock(&block_group->tree_lock);
-       if (block_group->free_space != (block_group->key.offset - used -
-                                       block_group->bytes_super)) {
-               spin_unlock(&block_group->tree_lock);
-               printk(KERN_ERR "block group %llu has an wrong amount of free "
-                      "space\n", block_group->key.objectid);
-               ret = 0;
-               goto free_cache;
-       }
-       spin_unlock(&block_group->tree_lock);
-
        ret = 1;
 out:
        kfree(checksums);
        kfree(disk_crcs);
-       iput(inode);
        return ret;
-
 free_cache:
-       /* This cache is bogus, make sure it gets cleared */
+       __btrfs_remove_free_space_cache(ctl);
+       goto out;
+}
+
+int load_free_space_cache(struct btrfs_fs_info *fs_info,
+                         struct btrfs_block_group_cache *block_group)
+{
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+       struct btrfs_root *root = fs_info->tree_root;
+       struct inode *inode;
+       struct btrfs_path *path;
+       int ret;
+       bool matched;
+       u64 used = btrfs_block_group_used(&block_group->item);
+
+       /*
+        * If we're unmounting then just return, since this does a search on the
+        * normal root and not the commit root and we could deadlock.
+        */
+       smp_mb();
+       if (fs_info->closing)
+               return 0;
+
+       /*
+        * If this block group has been marked to be cleared for one reason or
+        * another then we can't trust the on disk cache, so just return.
+        */
        spin_lock(&block_group->lock);
-       block_group->disk_cache_state = BTRFS_DC_CLEAR;
+       if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
+               spin_unlock(&block_group->lock);
+               return 0;
+       }
        spin_unlock(&block_group->lock);
-       btrfs_remove_free_space_cache(block_group);
-       goto out;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return 0;
+
+       inode = lookup_free_space_inode(root, block_group, path);
+       if (IS_ERR(inode)) {
+               btrfs_free_path(path);
+               return 0;
+       }
+
+       ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
+                                     path, block_group->key.objectid);
+       btrfs_free_path(path);
+       if (ret <= 0)
+               goto out;
+
+       spin_lock(&ctl->tree_lock);
+       matched = (ctl->free_space == (block_group->key.offset - used -
+                                      block_group->bytes_super));
+       spin_unlock(&ctl->tree_lock);
+
+       if (!matched) {
+               __btrfs_remove_free_space_cache(ctl);
+               printk(KERN_ERR "block group %llu has the wrong amount of free "
+                      "space\n", block_group->key.objectid);
+               ret = -1;
+       }
+out:
+       if (ret < 0) {
+               /* This cache is bogus, make sure it gets cleared */
+               spin_lock(&block_group->lock);
+               block_group->disk_cache_state = BTRFS_DC_CLEAR;
+               spin_unlock(&block_group->lock);
+               ret = 0;
+
+               printk(KERN_ERR "btrfs: failed to load free space cache "
+                      "for block group %llu\n", block_group->key.objectid);
+       }
+
+       iput(inode);
+       return ret;
 }
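/*
 * Editorial illustration (not part of the commit): the "matched" check above
 * validates the loaded cache against the block-group item -- the cached free
 * space must equal the group size minus allocated bytes minus the space
 * reserved for superblock copies.  A self-contained userspace sketch of that
 * arithmetic, with made-up numbers:
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool ex_cache_matches(uint64_t bg_size, uint64_t used,
			     uint64_t bytes_super, uint64_t cached_free)
{
	/* mirrors: ctl->free_space == key.offset - used - bytes_super */
	return cached_free == bg_size - used - bytes_super;
}

int main(void)
{
	uint64_t gib = 1ULL << 30;

	/* 1 GiB group, 600 MiB allocated, 8 MiB of superblock reservation */
	printf("%d\n", ex_cache_matches(gib, 600ULL << 20, 8ULL << 20,
					gib - (608ULL << 20)));	/* prints 1 */
	return 0;
}
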
 
-int btrfs_write_out_cache(struct btrfs_root *root,
-                         struct btrfs_trans_handle *trans,
-                         struct btrfs_block_group_cache *block_group,
-                         struct btrfs_path *path)
+int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
+                           struct btrfs_free_space_ctl *ctl,
+                           struct btrfs_block_group_cache *block_group,
+                           struct btrfs_trans_handle *trans,
+                           struct btrfs_path *path, u64 offset)
 {
        struct btrfs_free_space_header *header;
        struct extent_buffer *leaf;
-       struct inode *inode;
        struct rb_node *node;
        struct list_head *pos, *n;
        struct page **pages;
@@ -522,35 +560,18 @@ int btrfs_write_out_cache(struct btrfs_root *root,
        int index = 0, num_pages = 0;
        int entries = 0;
        int bitmaps = 0;
-       int ret = 0;
+       int ret = -1;
        bool next_page = false;
        bool out_of_space = false;
 
-       root = root->fs_info->tree_root;
-
        INIT_LIST_HEAD(&bitmap_list);
 
-       spin_lock(&block_group->lock);
-       if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
-               spin_unlock(&block_group->lock);
-               return 0;
-       }
-       spin_unlock(&block_group->lock);
-
-       inode = lookup_free_space_inode(root, block_group, path);
-       if (IS_ERR(inode))
-               return 0;
-
-       if (!i_size_read(inode)) {
-               iput(inode);
+       node = rb_first(&ctl->free_space_offset);
+       if (!node)
                return 0;
-       }
 
-       node = rb_first(&block_group->free_space_offset);
-       if (!node) {
-               iput(inode);
-               return 0;
-       }
+       if (!i_size_read(inode))
+               return -1;
 
        num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
                PAGE_CACHE_SHIFT;
@@ -560,16 +581,13 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 
        /* We need a checksum per page. */
        crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
-       if (!crc) {
-               iput(inode);
-               return 0;
-       }
+       if (!crc)
+               return -1;
 
        pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
        if (!pages) {
                kfree(crc);
-               iput(inode);
-               return 0;
+               return -1;
        }
 
        /* Since the first page has all of our checksums and our generation we
@@ -579,7 +597,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
        first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
 
        /* Get the cluster for this block_group if it exists */
-       if (!list_empty(&block_group->cluster_list))
+       if (block_group && !list_empty(&block_group->cluster_list))
                cluster = list_entry(block_group->cluster_list.next,
                                     struct btrfs_free_cluster,
                                     block_group_list);
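/*
 * Editorial note (not part of the commit): first_page_offset above reflects
 * the on-disk layout of the cache file -- page 0 begins with one u32 crc per
 * page followed by a u64 generation, and free-space entries start right after
 * that header.  For example, a cache file of 64 pages would have its first
 * entry at byte 64 * 4 + 8 = 264 of page 0.
 */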
@@ -621,7 +639,8 @@ int btrfs_write_out_cache(struct btrfs_root *root,
         * When searching for pinned extents, we need to start at our start
         * offset.
         */
-       start = block_group->key.objectid;
+       if (block_group)
+               start = block_group->key.objectid;
 
        /* Write out the extent entries */
        do {
@@ -679,8 +698,9 @@ int btrfs_write_out_cache(struct btrfs_root *root,
                 * We want to add any pinned extents to our free space cache
                 * so we don't leak the space
                 */
-               while (!next_page && (start < block_group->key.objectid +
-                                     block_group->key.offset)) {
+               while (block_group && !next_page &&
+                      (start < block_group->key.objectid +
+                       block_group->key.offset)) {
                        ret = find_first_extent_bit(unpin, start, &start, &end,
                                                    EXTENT_DIRTY);
                        if (ret) {
@@ -798,12 +818,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
        filemap_write_and_wait(inode->i_mapping);
 
        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
-       key.offset = block_group->key.objectid;
+       key.offset = offset;
        key.type = 0;
 
        ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
        if (ret < 0) {
-               ret = 0;
+               ret = -1;
                clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
                                 EXTENT_DIRTY | EXTENT_DELALLOC |
                                 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
@@ -816,13 +836,13 @@ int btrfs_write_out_cache(struct btrfs_root *root,
                path->slots[0]--;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
-                   found_key.offset != block_group->key.objectid) {
-                       ret = 0;
+                   found_key.offset != offset) {
+                       ret = -1;
                        clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
                                         EXTENT_DIRTY | EXTENT_DELALLOC |
                                         EXTENT_DO_ACCOUNTING, 0, 0, NULL,
                                         GFP_NOFS);
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        goto out_free;
                }
        }
@@ -832,49 +852,83 @@ int btrfs_write_out_cache(struct btrfs_root *root,
        btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
        btrfs_set_free_space_generation(leaf, header, trans->transid);
        btrfs_mark_buffer_dirty(leaf);
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 
        ret = 1;
 
 out_free:
-       if (ret == 0) {
+       if (ret != 1) {
                invalidate_inode_pages2_range(inode->i_mapping, 0, index);
-               spin_lock(&block_group->lock);
-               block_group->disk_cache_state = BTRFS_DC_ERROR;
-               spin_unlock(&block_group->lock);
                BTRFS_I(inode)->generation = 0;
        }
        kfree(checksums);
        kfree(pages);
        btrfs_update_inode(trans, root, inode);
+       return ret;
+}
+
+int btrfs_write_out_cache(struct btrfs_root *root,
+                         struct btrfs_trans_handle *trans,
+                         struct btrfs_block_group_cache *block_group,
+                         struct btrfs_path *path)
+{
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+       struct inode *inode;
+       int ret = 0;
+
+       root = root->fs_info->tree_root;
+
+       spin_lock(&block_group->lock);
+       if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
+               spin_unlock(&block_group->lock);
+               return 0;
+       }
+       spin_unlock(&block_group->lock);
+
+       inode = lookup_free_space_inode(root, block_group, path);
+       if (IS_ERR(inode))
+               return 0;
+
+       ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
+                                     path, block_group->key.objectid);
+       if (ret < 0) {
+               spin_lock(&block_group->lock);
+               block_group->disk_cache_state = BTRFS_DC_ERROR;
+               spin_unlock(&block_group->lock);
+               ret = 0;
+
+               printk(KERN_ERR "btrfs: failed to write free space cache "
+                      "for block group %llu\n", block_group->key.objectid);
+       }
+
        iput(inode);
        return ret;
 }
 
-static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize,
+static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
                                          u64 offset)
 {
        BUG_ON(offset < bitmap_start);
        offset -= bitmap_start;
-       return (unsigned long)(div64_u64(offset, sectorsize));
+       return (unsigned long)(div_u64(offset, unit));
 }
 
-static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize)
+static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
 {
-       return (unsigned long)(div64_u64(bytes, sectorsize));
+       return (unsigned long)(div_u64(bytes, unit));
 }
 
-static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group,
+static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
                                   u64 offset)
 {
        u64 bitmap_start;
        u64 bytes_per_bitmap;
 
-       bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize;
-       bitmap_start = offset - block_group->key.objectid;
+       bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
+       bitmap_start = offset - ctl->start;
        bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
        bitmap_start *= bytes_per_bitmap;
-       bitmap_start += block_group->key.objectid;
+       bitmap_start += ctl->start;
 
        return bitmap_start;
 }
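/*
 * Editorial illustration (not part of the commit): offset_to_bitmap() above
 * rounds an offset down to the start of the bitmap that would cover it,
 * measured from ctl->start.  Assuming 4 KiB sectors and 4 KiB pages (so
 * BITS_PER_BITMAP == 32768 -- an assumption, not stated in this diff), each
 * bitmap covers 128 MiB.  A self-contained userspace version of the same
 * arithmetic:
 */
#include <stdint.h>
#include <stdio.h>

#define EX_BITS_PER_BITMAP 32768ULL	/* assumed: PAGE_SIZE * 8 with 4 KiB pages */

static uint64_t ex_offset_to_bitmap(uint64_t start, uint32_t unit, uint64_t offset)
{
	uint64_t bytes_per_bitmap = EX_BITS_PER_BITMAP * unit;

	/* round (offset - start) down to a bitmap boundary, then re-base */
	return (offset - start) / bytes_per_bitmap * bytes_per_bitmap + start;
}

int main(void)
{
	/* block group starting at 1 GiB, 4 KiB sectors: 128 MiB per bitmap */
	uint64_t start = 1ULL << 30;

	printf("%llu\n", (unsigned long long)
	       ex_offset_to_bitmap(start, 4096, start + (200ULL << 20)));
	/* prints the 128 MiB boundary below start + 200 MiB, i.e. start + 128 MiB */
	return 0;
}
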
@@ -932,10 +986,10 @@ static int tree_insert_offset(struct rb_root *root, u64 offset,
  * offset.
  */
 static struct btrfs_free_space *
-tree_search_offset(struct btrfs_block_group_cache *block_group,
+tree_search_offset(struct btrfs_free_space_ctl *ctl,
                   u64 offset, int bitmap_only, int fuzzy)
 {
-       struct rb_node *n = block_group->free_space_offset.rb_node;
+       struct rb_node *n = ctl->free_space_offset.rb_node;
        struct btrfs_free_space *entry, *prev = NULL;
 
        /* find entry that is closest to the 'offset' */
@@ -1031,8 +1085,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
                                break;
                        }
                }
-               if (entry->offset + BITS_PER_BITMAP *
-                   block_group->sectorsize > offset)
+               if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
                        return entry;
        } else if (entry->offset + entry->bytes > offset)
                return entry;
@@ -1043,7 +1096,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
        while (1) {
                if (entry->bitmap) {
                        if (entry->offset + BITS_PER_BITMAP *
-                           block_group->sectorsize > offset)
+                           ctl->unit > offset)
                                break;
                } else {
                        if (entry->offset + entry->bytes > offset)
@@ -1059,42 +1112,47 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
 }
 
 static inline void
-__unlink_free_space(struct btrfs_block_group_cache *block_group,
+__unlink_free_space(struct btrfs_free_space_ctl *ctl,
                    struct btrfs_free_space *info)
 {
-       rb_erase(&info->offset_index, &block_group->free_space_offset);
-       block_group->free_extents--;
+       rb_erase(&info->offset_index, &ctl->free_space_offset);
+       ctl->free_extents--;
 }
 
-static void unlink_free_space(struct btrfs_block_group_cache *block_group,
+static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *info)
 {
-       __unlink_free_space(block_group, info);
-       block_group->free_space -= info->bytes;
+       __unlink_free_space(ctl, info);
+       ctl->free_space -= info->bytes;
 }
 
-static int link_free_space(struct btrfs_block_group_cache *block_group,
+static int link_free_space(struct btrfs_free_space_ctl *ctl,
                           struct btrfs_free_space *info)
 {
        int ret = 0;
 
        BUG_ON(!info->bitmap && !info->bytes);
-       ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
+       ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
                                 &info->offset_index, (info->bitmap != NULL));
        if (ret)
                return ret;
 
-       block_group->free_space += info->bytes;
-       block_group->free_extents++;
+       ctl->free_space += info->bytes;
+       ctl->free_extents++;
        return ret;
 }
 
-static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
+static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
 {
+       struct btrfs_block_group_cache *block_group = ctl->private;
        u64 max_bytes;
        u64 bitmap_bytes;
        u64 extent_bytes;
        u64 size = block_group->key.offset;
+       u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
+       int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
+
+       BUG_ON(ctl->total_bitmaps > max_bitmaps);
 
        /*
         * The goal is to keep the total amount of memory used per 1gb of space
@@ -1112,10 +1170,10 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
         * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
         * we add more bitmaps.
         */
-       bitmap_bytes = (block_group->total_bitmaps + 1) * PAGE_CACHE_SIZE;
+       bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;
 
        if (bitmap_bytes >= max_bytes) {
-               block_group->extents_thresh = 0;
+               ctl->extents_thresh = 0;
                return;
        }
 
@@ -1126,47 +1184,43 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
        extent_bytes = max_bytes - bitmap_bytes;
        extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));
 
-       block_group->extents_thresh =
+       ctl->extents_thresh =
                div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
 }
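/*
 * Editorial illustration (not part of the commit): recalculate_thresholds()
 * splits a fixed per-block-group memory budget between bitmap pages and plain
 * extent entries, shrinking extents_thresh as bitmaps accumulate.  A rough
 * userspace rendition of that arithmetic; the 32 KiB budget, 4 KiB page size
 * and 64-byte entry size are assumptions for the sake of the numbers, not
 * values taken from this diff:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t max_bytes = 32 * 1024;	/* assumed budget for a 1 GiB group */
	uint64_t page = 4096, entry_size = 64;
	uint64_t total_bitmaps = 2;
	uint64_t bitmap_bytes = (total_bitmaps + 1) * page;
	uint64_t extent_bytes;

	if (bitmap_bytes >= max_bytes) {
		printf("extents_thresh=0\n");	/* bitmaps already eat the budget */
		return 0;
	}

	/* cap extent entries at half the budget so bitmaps can keep growing */
	extent_bytes = max_bytes - bitmap_bytes;
	if (extent_bytes > max_bytes / 2)
		extent_bytes = max_bytes / 2;

	printf("extents_thresh=%llu\n",
	       (unsigned long long)(extent_bytes / entry_size));	/* prints 256 */
	return 0;
}
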
 
-static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group,
+static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *info, u64 offset,
                              u64 bytes)
 {
-       unsigned long start, end;
-       unsigned long i;
+       unsigned long start, count;
 
-       start = offset_to_bit(info->offset, block_group->sectorsize, offset);
-       end = start + bytes_to_bits(bytes, block_group->sectorsize);
-       BUG_ON(end > BITS_PER_BITMAP);
+       start = offset_to_bit(info->offset, ctl->unit, offset);
+       count = bytes_to_bits(bytes, ctl->unit);
+       BUG_ON(start + count > BITS_PER_BITMAP);
 
-       for (i = start; i < end; i++)
-               clear_bit(i, info->bitmap);
+       bitmap_clear(info->bitmap, start, count);
 
        info->bytes -= bytes;
-       block_group->free_space -= bytes;
+       ctl->free_space -= bytes;
 }
 
-static void bitmap_set_bits(struct btrfs_block_group_cache *block_group,
+static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
                            struct btrfs_free_space *info, u64 offset,
                            u64 bytes)
 {
-       unsigned long start, end;
-       unsigned long i;
+       unsigned long start, count;
 
-       start = offset_to_bit(info->offset, block_group->sectorsize, offset);
-       end = start + bytes_to_bits(bytes, block_group->sectorsize);
-       BUG_ON(end > BITS_PER_BITMAP);
+       start = offset_to_bit(info->offset, ctl->unit, offset);
+       count = bytes_to_bits(bytes, ctl->unit);
+       BUG_ON(start + count > BITS_PER_BITMAP);
 
-       for (i = start; i < end; i++)
-               set_bit(i, info->bitmap);
+       bitmap_set(info->bitmap, start, count);
 
        info->bytes += bytes;
-       block_group->free_space += bytes;
+       ctl->free_space += bytes;
 }
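/*
 * Editorial illustration (not part of the commit): the two helpers above now
 * translate a byte range into a (start bit, bit count) pair and hand it to
 * bitmap_set()/bitmap_clear() instead of looping one bit at a time.  A minimal
 * userspace illustration of that translation, assuming a 4 KiB unit:
 */
#include <stdint.h>
#include <stdio.h>

static unsigned long ex_offset_to_bit(uint64_t bitmap_start, uint32_t unit,
				      uint64_t offset)
{
	return (unsigned long)((offset - bitmap_start) / unit);
}

static unsigned long ex_bytes_to_bits(uint64_t bytes, uint32_t unit)
{
	return (unsigned long)(bytes / unit);
}

int main(void)
{
	uint64_t bitmap_start = 0, offset = 64 * 1024, bytes = 16 * 1024;

	/* a 16 KiB range 64 KiB into the bitmap -> 4 bits starting at bit 16 */
	printf("start=%lu count=%lu\n",
	       ex_offset_to_bit(bitmap_start, 4096, offset),
	       ex_bytes_to_bits(bytes, 4096));
	return 0;
}
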
 
-static int search_bitmap(struct btrfs_block_group_cache *block_group,
+static int search_bitmap(struct btrfs_free_space_ctl *ctl,
                         struct btrfs_free_space *bitmap_info, u64 *offset,
                         u64 *bytes)
 {
@@ -1174,9 +1228,9 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group,
        unsigned long bits, i;
        unsigned long next_zero;
 
-       i = offset_to_bit(bitmap_info->offset, block_group->sectorsize,
+       i = offset_to_bit(bitmap_info->offset, ctl->unit,
                          max_t(u64, *offset, bitmap_info->offset));
-       bits = bytes_to_bits(*bytes, block_group->sectorsize);
+       bits = bytes_to_bits(*bytes, ctl->unit);
 
        for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
             i < BITS_PER_BITMAP;
@@ -1191,29 +1245,25 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group,
        }
 
        if (found_bits) {
-               *offset = (u64)(i * block_group->sectorsize) +
-                       bitmap_info->offset;
-               *bytes = (u64)(found_bits) * block_group->sectorsize;
+               *offset = (u64)(i * ctl->unit) + bitmap_info->offset;
+               *bytes = (u64)(found_bits) * ctl->unit;
                return 0;
        }
 
        return -1;
 }
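/*
 * Editorial illustration (not part of the commit): search_bitmap() above scans
 * for a run of at least `bits` consecutive set bits at or after the requested
 * offset, then converts the winning bit index back to bytes with ctl->unit.
 * A compact userspace analogue over a byte-per-bit array (the kernel version
 * uses find_next_bit()/find_next_zero_bit() over a real bitmap):
 */
#include <stddef.h>
#include <stdio.h>

static long ex_find_run(const unsigned char *bit, size_t nbits,
			size_t from, size_t want)
{
	size_t i, run;

	for (i = from; i < nbits; i++) {
		if (!bit[i])
			continue;
		for (run = 0; i + run < nbits && bit[i + run]; run++)
			;
		if (run >= want)
			return (long)i;	/* first bit of a long-enough run */
		i += run;		/* skip past this short run */
	}
	return -1;
}

int main(void)
{
	/* bits 2-3 form a short run, bits 6-9 a long one */
	unsigned char map[12] = { 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0 };

	printf("%ld\n", ex_find_run(map, 12, 0, 3));	/* prints 6 */
	return 0;
}
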
 
-static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
-                                               *block_group, u64 *offset,
-                                               u64 *bytes, int debug)
+static struct btrfs_free_space *
+find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
 {
        struct btrfs_free_space *entry;
        struct rb_node *node;
        int ret;
 
-       if (!block_group->free_space_offset.rb_node)
+       if (!ctl->free_space_offset.rb_node)
                return NULL;
 
-       entry = tree_search_offset(block_group,
-                                  offset_to_bitmap(block_group, *offset),
-                                  0, 1);
+       entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
        if (!entry)
                return NULL;
 
@@ -1223,7 +1273,7 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
                        continue;
 
                if (entry->bitmap) {
-                       ret = search_bitmap(block_group, entry, offset, bytes);
+                       ret = search_bitmap(ctl, entry, offset, bytes);
                        if (!ret)
                                return entry;
                        continue;
@@ -1237,33 +1287,28 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
        return NULL;
 }
 
-static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
+static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
                           struct btrfs_free_space *info, u64 offset)
 {
-       u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
-       int max_bitmaps = (int)div64_u64(block_group->key.offset +
-                                        bytes_per_bg - 1, bytes_per_bg);
-       BUG_ON(block_group->total_bitmaps >= max_bitmaps);
-
-       info->offset = offset_to_bitmap(block_group, offset);
+       info->offset = offset_to_bitmap(ctl, offset);
        info->bytes = 0;
-       link_free_space(block_group, info);
-       block_group->total_bitmaps++;
+       link_free_space(ctl, info);
+       ctl->total_bitmaps++;
 
-       recalculate_thresholds(block_group);
+       ctl->op->recalc_thresholds(ctl);
 }
 
-static void free_bitmap(struct btrfs_block_group_cache *block_group,
+static void free_bitmap(struct btrfs_free_space_ctl *ctl,
                        struct btrfs_free_space *bitmap_info)
 {
-       unlink_free_space(block_group, bitmap_info);
+       unlink_free_space(ctl, bitmap_info);
        kfree(bitmap_info->bitmap);
        kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
-       block_group->total_bitmaps--;
-       recalculate_thresholds(block_group);
+       ctl->total_bitmaps--;
+       ctl->op->recalc_thresholds(ctl);
 }
 
-static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
+static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *bitmap_info,
                              u64 *offset, u64 *bytes)
 {
@@ -1272,8 +1317,7 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro
        int ret;
 
 again:
-       end = bitmap_info->offset +
-               (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1;
+       end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
 
        /*
         * XXX - this can go away after a few releases.
@@ -1288,24 +1332,22 @@ again:
        search_start = *offset;
        search_bytes = *bytes;
        search_bytes = min(search_bytes, end - search_start + 1);
-       ret = search_bitmap(block_group, bitmap_info, &search_start,
-                           &search_bytes);
+       ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
        BUG_ON(ret < 0 || search_start != *offset);
 
        if (*offset > bitmap_info->offset && *offset + *bytes > end) {
-               bitmap_clear_bits(block_group, bitmap_info, *offset,
-                                 end - *offset + 1);
+               bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
                *bytes -= end - *offset + 1;
                *offset = end + 1;
        } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
-               bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes);
+               bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
                *bytes = 0;
        }
 
        if (*bytes) {
                struct rb_node *next = rb_next(&bitmap_info->offset_index);
                if (!bitmap_info->bytes)
-                       free_bitmap(block_group, bitmap_info);
+                       free_bitmap(ctl, bitmap_info);
 
                /*
                 * no entry after this bitmap, but we still have bytes to
@@ -1332,31 +1374,28 @@ again:
                 */
                search_start = *offset;
                search_bytes = *bytes;
-               ret = search_bitmap(block_group, bitmap_info, &search_start,
+               ret = search_bitmap(ctl, bitmap_info, &search_start,
                                    &search_bytes);
                if (ret < 0 || search_start != *offset)
                        return -EAGAIN;
 
                goto again;
        } else if (!bitmap_info->bytes)
-               free_bitmap(block_group, bitmap_info);
+               free_bitmap(ctl, bitmap_info);
 
        return 0;
 }
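/*
 * Editorial note (not part of the commit): when the range being removed runs
 * past the end of the current bitmap, the code above clears only the tail of
 * this bitmap, advances *offset and shrinks *bytes, then loops into the next
 * bitmap entry.  With an assumed 128 MiB bitmap span (32768 bits x 4 KiB),
 * removing 4 MiB that starts 2 MiB before a bitmap boundary proceeds as:
 *
 *	pass 1: clear the last 2 MiB of bitmap N,   *bytes 4 MiB -> 2 MiB,
 *	        *offset moves to the start of bitmap N+1
 *	pass 2: clear the first 2 MiB of bitmap N+1, *bytes -> 0, done
 */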
 
-static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
-                             struct btrfs_free_space *info)
+static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
+                     struct btrfs_free_space *info)
 {
-       struct btrfs_free_space *bitmap_info;
-       int added = 0;
-       u64 bytes, offset, end;
-       int ret;
+       struct btrfs_block_group_cache *block_group = ctl->private;
 
        /*
         * If we are below the extents threshold then we can add this as an
         * extent, and don't have to deal with the bitmap
         */
-       if (block_group->free_extents < block_group->extents_thresh) {
+       if (ctl->free_extents < ctl->extents_thresh) {
                /*
                 * If this block group has some small extents we don't want to
                 * use up all of our free slots in the cache with them, we want
@@ -1365,11 +1404,10 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
                 * the overhead of a bitmap if we don't have to.
                 */
                if (info->bytes <= block_group->sectorsize * 4) {
-                       if (block_group->free_extents * 2 <=
-                           block_group->extents_thresh)
-                               return 0;
+                       if (ctl->free_extents * 2 <= ctl->extents_thresh)
+                               return false;
                } else {
-                       return 0;
+                       return false;
                }
        }
 
@@ -1379,31 +1417,42 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
         */
        if (BITS_PER_BITMAP * block_group->sectorsize >
            block_group->key.offset)
-               return 0;
+               return false;
+
+       return true;
+}
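/*
 * Editorial illustration (not part of the commit): the decision above keeps
 * free space as plain extent entries while the cache is under its entry
 * budget -- large frees always, small frees (<= 4 sectors) only until half the
 * budget is used -- and never uses a bitmap when a single bitmap would span
 * more than the whole block group.  A standalone restatement, assuming 4 KiB
 * sectors and 32768 bits per bitmap (both assumptions, not from this diff):
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_BITS_PER_BITMAP 32768ULL	/* assumed: PAGE_SIZE * 8 */

bool ex_use_bitmap(uint64_t free_extents, uint64_t extents_thresh,
		   uint64_t info_bytes, uint32_t sectorsize, uint64_t bg_size)
{
	if (free_extents < extents_thresh) {
		if (info_bytes <= (uint64_t)sectorsize * 4) {
			/* small free: stay an extent while slots are plentiful */
			if (free_extents * 2 <= extents_thresh)
				return false;
		} else {
			/* large free: always worth a dedicated extent entry */
			return false;
		}
	}

	/* a single bitmap would cover more than the block group: pointless */
	if (EX_BITS_PER_BITMAP * sectorsize > bg_size)
		return false;

	return true;
}
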
+
+static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
+                             struct btrfs_free_space *info)
+{
+       struct btrfs_free_space *bitmap_info;
+       int added = 0;
+       u64 bytes, offset, end;
+       int ret;
 
        bytes = info->bytes;
        offset = info->offset;
 
+       if (!ctl->op->use_bitmap(ctl, info))
+               return 0;
+
 again:
-       bitmap_info = tree_search_offset(block_group,
-                                        offset_to_bitmap(block_group, offset),
+       bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
                                         1, 0);
        if (!bitmap_info) {
                BUG_ON(added);
                goto new_bitmap;
        }
 
-       end = bitmap_info->offset +
-               (u64)(BITS_PER_BITMAP * block_group->sectorsize);
+       end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
 
        if (offset >= bitmap_info->offset && offset + bytes > end) {
-               bitmap_set_bits(block_group, bitmap_info, offset,
-                               end - offset);
+               bitmap_set_bits(ctl, bitmap_info, offset, end - offset);
                bytes -= end - offset;
                offset = end;
                added = 0;
        } else if (offset >= bitmap_info->offset && offset + bytes <= end) {
-               bitmap_set_bits(block_group, bitmap_info, offset, bytes);
+               bitmap_set_bits(ctl, bitmap_info, offset, bytes);
                bytes = 0;
        } else {
                BUG();
@@ -1417,19 +1466,19 @@ again:
 
 new_bitmap:
        if (info && info->bitmap) {
-               add_new_bitmap(block_group, info, offset);
+               add_new_bitmap(ctl, info, offset);
                added = 1;
                info = NULL;
                goto again;
        } else {
-               spin_unlock(&block_group->tree_lock);
+               spin_unlock(&ctl->tree_lock);
 
                /* no pre-allocated info, allocate a new one */
                if (!info) {
                        info = kmem_cache_zalloc(btrfs_free_space_cachep,
                                                 GFP_NOFS);
                        if (!info) {
-                               spin_lock(&block_group->tree_lock);
+                               spin_lock(&ctl->tree_lock);
                                ret = -ENOMEM;
                                goto out;
                        }
@@ -1437,7 +1486,7 @@ new_bitmap:
 
                /* allocate the bitmap */
                info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
-               spin_lock(&block_group->tree_lock);
+               spin_lock(&ctl->tree_lock);
                if (!info->bitmap) {
                        ret = -ENOMEM;
                        goto out;
@@ -1455,7 +1504,7 @@ out:
        return ret;
 }
 
-bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
+static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
                          struct btrfs_free_space *info, bool update_stat)
 {
        struct btrfs_free_space *left_info;
@@ -1469,18 +1518,18 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
         * are adding, if there is remove that struct and add a new one to
         * cover the entire range
         */
-       right_info = tree_search_offset(block_group, offset + bytes, 0, 0);
+       right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
        if (right_info && rb_prev(&right_info->offset_index))
                left_info = rb_entry(rb_prev(&right_info->offset_index),
                                     struct btrfs_free_space, offset_index);
        else
-               left_info = tree_search_offset(block_group, offset - 1, 0, 0);
+               left_info = tree_search_offset(ctl, offset - 1, 0, 0);
 
        if (right_info && !right_info->bitmap) {
                if (update_stat)
-                       unlink_free_space(block_group, right_info);
+                       unlink_free_space(ctl, right_info);
                else
-                       __unlink_free_space(block_group, right_info);
+                       __unlink_free_space(ctl, right_info);
                info->bytes += right_info->bytes;
                kmem_cache_free(btrfs_free_space_cachep, right_info);
                merged = true;
@@ -1489,9 +1538,9 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
        if (left_info && !left_info->bitmap &&
            left_info->offset + left_info->bytes == offset) {
                if (update_stat)
-                       unlink_free_space(block_group, left_info);
+                       unlink_free_space(ctl, left_info);
                else
-                       __unlink_free_space(block_group, left_info);
+                       __unlink_free_space(ctl, left_info);
                info->offset = left_info->offset;
                info->bytes += left_info->bytes;
                kmem_cache_free(btrfs_free_space_cachep, left_info);
@@ -1501,8 +1550,8 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
        return merged;
 }
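/*
 * Editorial illustration (not part of the commit): try_merge_free_space()
 * coalesces a freed range with the entry that starts exactly where it ends and
 * with the entry that ends exactly where it starts, so the tree holds maximal
 * extents.  A minimal restatement of the two merge conditions, ignoring the
 * bitmap checks and rb-tree bookkeeping of the real function (names here are
 * illustrative only):
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ex_range { uint64_t offset, bytes; };

static bool ex_try_merge(struct ex_range *info,
			 const struct ex_range *left,
			 const struct ex_range *right)
{
	bool merged = false;

	/* right neighbour begins exactly at our end: absorb it */
	if (right && right->offset == info->offset + info->bytes) {
		info->bytes += right->bytes;
		merged = true;
	}

	/* left neighbour ends exactly at our start: absorb it */
	if (left && left->offset + left->bytes == info->offset) {
		info->offset = left->offset;
		info->bytes += left->bytes;
		merged = true;
	}

	return merged;
}

int main(void)
{
	struct ex_range info = { 8192, 4096 };
	struct ex_range left = { 4096, 4096 }, right = { 12288, 4096 };

	ex_try_merge(&info, &left, &right);
	printf("offset=%llu bytes=%llu\n",
	       (unsigned long long)info.offset,
	       (unsigned long long)info.bytes);	/* offset=4096 bytes=12288 */
	return 0;
}
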
 
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-                        u64 offset, u64 bytes)
+int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
+                          u64 offset, u64 bytes)
 {
        struct btrfs_free_space *info;
        int ret = 0;
@@ -1514,9 +1563,9 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
        info->offset = offset;
        info->bytes = bytes;
 
-       spin_lock(&block_group->tree_lock);
+       spin_lock(&ctl->tree_lock);
 
-       if (try_merge_free_space(block_group, info, true))
+       if (try_merge_free_space(ctl, info, true))
                goto link;
 
        /*
@@ -1524,7 +1573,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
         * extent then we know we're going to have to allocate a new extent, so
         * before we do that see if we need to drop this into a bitmap
         */
-       ret = insert_into_bitmap(block_group, info);
+       ret = insert_into_bitmap(ctl, info);
        if (ret < 0) {
                goto out;
        } else if (ret) {
@@ -1532,11 +1581,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
                goto out;
        }
 link:
-       ret = link_free_space(block_group, info);
+       ret = link_free_space(ctl, info);
        if (ret)
                kmem_cache_free(btrfs_free_space_cachep, info);
 out:
-       spin_unlock(&block_group->tree_lock);
+       spin_unlock(&ctl->tree_lock);
 
        if (ret) {
                printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
@@ -1549,21 +1598,21 @@ out:
 int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                            u64 offset, u64 bytes)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *info;
        struct btrfs_free_space *next_info = NULL;
        int ret = 0;
 
-       spin_lock(&block_group->tree_lock);
+       spin_lock(&ctl->tree_lock);
 
 again:
-       info = tree_search_offset(block_group, offset, 0, 0);
+       info = tree_search_offset(ctl, offset, 0, 0);
        if (!info) {
                /*
                 * oops didn't find an extent that matched the space we wanted
                 * to remove, look for a bitmap instead
                 */
-               info = tree_search_offset(block_group,
-                                         offset_to_bitmap(block_group, offset),
+               info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
                                          1, 0);
                if (!info) {
                        WARN_ON(1);
@@ -1578,8 +1627,8 @@ again:
                                             offset_index);
 
                if (next_info->bitmap)
-                       end = next_info->offset + BITS_PER_BITMAP *
-                               block_group->sectorsize - 1;
+                       end = next_info->offset +
+                             BITS_PER_BITMAP * ctl->unit - 1;
                else
                        end = next_info->offset + next_info->bytes;
 
@@ -1599,20 +1648,20 @@ again:
        }
 
        if (info->bytes == bytes) {
-               unlink_free_space(block_group, info);
+               unlink_free_space(ctl, info);
                if (info->bitmap) {
                        kfree(info->bitmap);
-                       block_group->total_bitmaps--;
+                       ctl->total_bitmaps--;
                }
                kmem_cache_free(btrfs_free_space_cachep, info);
                goto out_lock;
        }
 
        if (!info->bitmap && info->offset == offset) {
-               unlink_free_space(block_group, info);
+               unlink_free_space(ctl, info);
                info->offset += bytes;
                info->bytes -= bytes;
-               link_free_space(block_group, info);
+               link_free_space(ctl, info);
                goto out_lock;
        }
 
@@ -1626,13 +1675,13 @@ again:
                 * first unlink the old info and then
                 * insert it again after the hole we're creating
                 */
-               unlink_free_space(block_group, info);
+               unlink_free_space(ctl, info);
                if (offset + bytes < info->offset + info->bytes) {
                        u64 old_end = info->offset + info->bytes;
 
                        info->offset = offset + bytes;
                        info->bytes = old_end - info->offset;
-                       ret = link_free_space(block_group, info);
+                       ret = link_free_space(ctl, info);
                        WARN_ON(ret);
                        if (ret)
                                goto out_lock;
@@ -1642,7 +1691,7 @@ again:
                         */
                        kmem_cache_free(btrfs_free_space_cachep, info);
                }
-               spin_unlock(&block_group->tree_lock);
+               spin_unlock(&ctl->tree_lock);
 
                /* step two, insert a new info struct to cover
                 * anything before the hole
@@ -1653,12 +1702,12 @@ again:
                goto out;
        }
 
-       ret = remove_from_bitmap(block_group, info, &offset, &bytes);
+       ret = remove_from_bitmap(ctl, info, &offset, &bytes);
        if (ret == -EAGAIN)
                goto again;
        BUG_ON(ret);
 out_lock:
-       spin_unlock(&block_group->tree_lock);
+       spin_unlock(&ctl->tree_lock);
 out:
        return ret;
 }
@@ -1666,11 +1715,12 @@ out:
 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
                           u64 bytes)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *info;
        struct rb_node *n;
        int count = 0;
 
-       for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) {
+       for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                if (info->bytes >= bytes)
                        count++;
@@ -1685,19 +1735,28 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
               "\n", count);
 }
 
-u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group)
+static struct btrfs_free_space_op free_space_op = {
+       .recalc_thresholds      = recalculate_thresholds,
+       .use_bitmap             = use_bitmap,
+};
+
+void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
 {
-       struct btrfs_free_space *info;
-       struct rb_node *n;
-       u64 ret = 0;
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 
-       for (n = rb_first(&block_group->free_space_offset); n;
-            n = rb_next(n)) {
-               info = rb_entry(n, struct btrfs_free_space, offset_index);
-               ret += info->bytes;
-       }
+       spin_lock_init(&ctl->tree_lock);
+       ctl->unit = block_group->sectorsize;
+       ctl->start = block_group->key.objectid;
+       ctl->private = block_group;
+       ctl->op = &free_space_op;
 
-       return ret;
+       /*
+        * we only want to have 32k of ram per block group for keeping
+        * track of free space, and if we pass 1/2 of that we want to
+        * start converting things over to using bitmaps
+        */
+       ctl->extents_thresh = ((1024 * 32) / 2) /
+                               sizeof(struct btrfs_free_space);
 }
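/*
 * Editorial note (not part of the commit): the initialisation above budgets
 * 32 KiB of RAM per block group and starts converting to bitmaps once half of
 * that could be consumed by extent entries.  Assuming a 64-byte
 * struct btrfs_free_space on a 64-bit build (rb_node + two u64s + a pointer +
 * a list_head -- an assumption, not stated in this diff), that works out to
 *
 *	((32 * 1024) / 2) / 64 == 256 extent entries
 *
 * before recalculate_thresholds()/use_bitmap() begin steering new free space
 * into bitmaps.
 */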
 
 /*
@@ -1711,6 +1770,7 @@ __btrfs_return_cluster_to_free_space(
                             struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry;
        struct rb_node *node;
 
@@ -1732,8 +1792,8 @@ __btrfs_return_cluster_to_free_space(
 
                bitmap = (entry->bitmap != NULL);
                if (!bitmap)
-                       try_merge_free_space(block_group, entry, false);
-               tree_insert_offset(&block_group->free_space_offset,
+                       try_merge_free_space(ctl, entry, false);
+               tree_insert_offset(&ctl->free_space_offset,
                                   entry->offset, &entry->offset_index, bitmap);
        }
        cluster->root = RB_ROOT;
@@ -1744,14 +1804,38 @@ out:
        return 0;
 }
 
-void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
+void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
 {
        struct btrfs_free_space *info;
        struct rb_node *node;
+
+       while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
+               info = rb_entry(node, struct btrfs_free_space, offset_index);
+               unlink_free_space(ctl, info);
+               kfree(info->bitmap);
+               kmem_cache_free(btrfs_free_space_cachep, info);
+               if (need_resched()) {
+                       spin_unlock(&ctl->tree_lock);
+                       cond_resched();
+                       spin_lock(&ctl->tree_lock);
+               }
+       }
+}
+
+void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
+{
+       spin_lock(&ctl->tree_lock);
+       __btrfs_remove_free_space_cache_locked(ctl);
+       spin_unlock(&ctl->tree_lock);
+}
+
+void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
+{
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_cluster *cluster;
        struct list_head *head;
 
-       spin_lock(&block_group->tree_lock);
+       spin_lock(&ctl->tree_lock);
        while ((head = block_group->cluster_list.next) !=
               &block_group->cluster_list) {
                cluster = list_entry(head, struct btrfs_free_cluster,
@@ -1760,60 +1844,46 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
                WARN_ON(cluster->block_group != block_group);
                __btrfs_return_cluster_to_free_space(block_group, cluster);
                if (need_resched()) {
-                       spin_unlock(&block_group->tree_lock);
+                       spin_unlock(&ctl->tree_lock);
                        cond_resched();
-                       spin_lock(&block_group->tree_lock);
+                       spin_lock(&ctl->tree_lock);
                }
        }
+       __btrfs_remove_free_space_cache_locked(ctl);
+       spin_unlock(&ctl->tree_lock);
 
-       while ((node = rb_last(&block_group->free_space_offset)) != NULL) {
-               info = rb_entry(node, struct btrfs_free_space, offset_index);
-               if (!info->bitmap) {
-                       unlink_free_space(block_group, info);
-                       kmem_cache_free(btrfs_free_space_cachep, info);
-               } else {
-                       free_bitmap(block_group, info);
-               }
-
-               if (need_resched()) {
-                       spin_unlock(&block_group->tree_lock);
-                       cond_resched();
-                       spin_lock(&block_group->tree_lock);
-               }
-       }
-
-       spin_unlock(&block_group->tree_lock);
 }
 
 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
                               u64 offset, u64 bytes, u64 empty_size)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry = NULL;
        u64 bytes_search = bytes + empty_size;
        u64 ret = 0;
 
-       spin_lock(&block_group->tree_lock);
-       entry = find_free_space(block_group, &offset, &bytes_search, 0);
+       spin_lock(&ctl->tree_lock);
+       entry = find_free_space(ctl, &offset, &bytes_search);
        if (!entry)
                goto out;
 
        ret = offset;
        if (entry->bitmap) {
-               bitmap_clear_bits(block_group, entry, offset, bytes);
+               bitmap_clear_bits(ctl, entry, offset, bytes);
                if (!entry->bytes)
-                       free_bitmap(block_group, entry);
+                       free_bitmap(ctl, entry);
        } else {
-               unlink_free_space(block_group, entry);
+               unlink_free_space(ctl, entry);
                entry->offset += bytes;
                entry->bytes -= bytes;
                if (!entry->bytes)
                        kmem_cache_free(btrfs_free_space_cachep, entry);
                else
-                       link_free_space(block_group, entry);
+                       link_free_space(ctl, entry);
        }
 
 out:
-       spin_unlock(&block_group->tree_lock);
+       spin_unlock(&ctl->tree_lock);
 
        return ret;
 }
@@ -1830,6 +1900,7 @@ int btrfs_return_cluster_to_free_space(
                               struct btrfs_block_group_cache *block_group,
                               struct btrfs_free_cluster *cluster)
 {
+       struct btrfs_free_space_ctl *ctl;
        int ret;
 
        /* first, get a safe pointer to the block group */
@@ -1848,10 +1919,12 @@ int btrfs_return_cluster_to_free_space(
        atomic_inc(&block_group->count);
        spin_unlock(&cluster->lock);
 
+       ctl = block_group->free_space_ctl;
+
        /* now return any extents the cluster had on it */
-       spin_lock(&block_group->tree_lock);
+       spin_lock(&ctl->tree_lock);
        ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
-       spin_unlock(&block_group->tree_lock);
+       spin_unlock(&ctl->tree_lock);
 
        /* finally drop our ref */
        btrfs_put_block_group(block_group);
@@ -1863,6 +1936,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
                                   struct btrfs_free_space *entry,
                                   u64 bytes, u64 min_start)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        int err;
        u64 search_start = cluster->window_start;
        u64 search_bytes = bytes;
@@ -1871,13 +1945,12 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
        search_start = min_start;
        search_bytes = bytes;
 
-       err = search_bitmap(block_group, entry, &search_start,
-                           &search_bytes);
+       err = search_bitmap(ctl, entry, &search_start, &search_bytes);
        if (err)
                return 0;
 
        ret = search_start;
-       bitmap_clear_bits(block_group, entry, ret, bytes);
+       bitmap_clear_bits(ctl, entry, ret, bytes);
 
        return ret;
 }
@@ -1891,6 +1964,7 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster, u64 bytes,
                             u64 min_start)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry = NULL;
        struct rb_node *node;
        u64 ret = 0;
@@ -1910,8 +1984,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
        while(1) {
                if (entry->bytes < bytes ||
                    (!entry->bitmap && entry->offset < min_start)) {
-                       struct rb_node *node;
-
                        node = rb_next(&entry->offset_index);
                        if (!node)
                                break;
@@ -1925,7 +1997,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                                                      cluster, entry, bytes,
                                                      min_start);
                        if (ret == 0) {
-                               struct rb_node *node;
                                node = rb_next(&entry->offset_index);
                                if (!node)
                                        break;
@@ -1951,20 +2022,20 @@ out:
        if (!ret)
                return 0;
 
-       spin_lock(&block_group->tree_lock);
+       spin_lock(&ctl->tree_lock);
 
-       block_group->free_space -= bytes;
+       ctl->free_space -= bytes;
        if (entry->bytes == 0) {
-               block_group->free_extents--;
+               ctl->free_extents--;
                if (entry->bitmap) {
                        kfree(entry->bitmap);
-                       block_group->total_bitmaps--;
-                       recalculate_thresholds(block_group);
+                       ctl->total_bitmaps--;
+                       ctl->op->recalc_thresholds(ctl);
                }
                kmem_cache_free(btrfs_free_space_cachep, entry);
        }
 
-       spin_unlock(&block_group->tree_lock);
+       spin_unlock(&ctl->tree_lock);
 
        return ret;
 }
@@ -1974,6 +2045,7 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
                                struct btrfs_free_cluster *cluster,
                                u64 offset, u64 bytes, u64 min_bytes)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        unsigned long next_zero;
        unsigned long i;
        unsigned long search_bits;
@@ -2028,7 +2100,7 @@ again:
 
        cluster->window_start = start * block_group->sectorsize +
                entry->offset;
-       rb_erase(&entry->offset_index, &block_group->free_space_offset);
+       rb_erase(&entry->offset_index, &ctl->free_space_offset);
        ret = tree_insert_offset(&cluster->root, entry->offset,
                                 &entry->offset_index, 1);
        BUG_ON(ret);
@@ -2043,6 +2115,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
                                   struct btrfs_free_cluster *cluster,
                                   u64 offset, u64 bytes, u64 min_bytes)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *first = NULL;
        struct btrfs_free_space *entry = NULL;
        struct btrfs_free_space *prev = NULL;
@@ -2053,7 +2126,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
        u64 max_extent;
        u64 max_gap = 128 * 1024;
 
-       entry = tree_search_offset(block_group, offset, 0, 1);
+       entry = tree_search_offset(ctl, offset, 0, 1);
        if (!entry)
                return -ENOSPC;
 
@@ -2119,7 +2192,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
                if (entry->bitmap)
                        continue;
 
-               rb_erase(&entry->offset_index, &block_group->free_space_offset);
+               rb_erase(&entry->offset_index, &ctl->free_space_offset);
                ret = tree_insert_offset(&cluster->root, entry->offset,
                                         &entry->offset_index, 0);
                BUG_ON(ret);
@@ -2138,16 +2211,15 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
                                struct btrfs_free_cluster *cluster,
                                u64 offset, u64 bytes, u64 min_bytes)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry;
        struct rb_node *node;
        int ret = -ENOSPC;
 
-       if (block_group->total_bitmaps == 0)
+       if (ctl->total_bitmaps == 0)
                return -ENOSPC;
 
-       entry = tree_search_offset(block_group,
-                                  offset_to_bitmap(block_group, offset),
-                                  0, 1);
+       entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
        if (!entry)
                return -ENOSPC;
 
@@ -2180,6 +2252,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
                             struct btrfs_free_cluster *cluster,
                             u64 offset, u64 bytes, u64 empty_size)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        u64 min_bytes;
        int ret;
 
@@ -2199,14 +2272,14 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
        } else
                min_bytes = max(bytes, (bytes + empty_size) >> 2);
 
-       spin_lock(&block_group->tree_lock);
+       spin_lock(&ctl->tree_lock);
 
        /*
         * If we know we don't have enough space to make a cluster don't even
         * bother doing all the work to try and find one.
         */
-       if (block_group->free_space < min_bytes) {
-               spin_unlock(&block_group->tree_lock);
+       if (ctl->free_space < min_bytes) {
+               spin_unlock(&ctl->tree_lock);
                return -ENOSPC;
        }
 
@@ -2232,7 +2305,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
        }
 out:
        spin_unlock(&cluster->lock);
-       spin_unlock(&block_group->tree_lock);
+       spin_unlock(&ctl->tree_lock);
 
        return ret;
 }
@@ -2253,6 +2326,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
 int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
                           u64 *trimmed, u64 start, u64 end, u64 minlen)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry = NULL;
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        u64 bytes = 0;
@@ -2262,52 +2336,50 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
        *trimmed = 0;
 
        while (start < end) {
-               spin_lock(&block_group->tree_lock);
+               spin_lock(&ctl->tree_lock);
 
-               if (block_group->free_space < minlen) {
-                       spin_unlock(&block_group->tree_lock);
+               if (ctl->free_space < minlen) {
+                       spin_unlock(&ctl->tree_lock);
                        break;
                }
 
-               entry = tree_search_offset(block_group, start, 0, 1);
+               entry = tree_search_offset(ctl, start, 0, 1);
                if (!entry)
-                       entry = tree_search_offset(block_group,
-                                                  offset_to_bitmap(block_group,
-                                                                   start),
+                       entry = tree_search_offset(ctl,
+                                                  offset_to_bitmap(ctl, start),
                                                   1, 1);
 
                if (!entry || entry->offset >= end) {
-                       spin_unlock(&block_group->tree_lock);
+                       spin_unlock(&ctl->tree_lock);
                        break;
                }
 
                if (entry->bitmap) {
-                       ret = search_bitmap(block_group, entry, &start, &bytes);
+                       ret = search_bitmap(ctl, entry, &start, &bytes);
                        if (!ret) {
                                if (start >= end) {
-                                       spin_unlock(&block_group->tree_lock);
+                                       spin_unlock(&ctl->tree_lock);
                                        break;
                                }
                                bytes = min(bytes, end - start);
-                               bitmap_clear_bits(block_group, entry,
-                                                 start, bytes);
+                               bitmap_clear_bits(ctl, entry, start, bytes);
                                if (entry->bytes == 0)
-                                       free_bitmap(block_group, entry);
+                                       free_bitmap(ctl, entry);
                        } else {
                                start = entry->offset + BITS_PER_BITMAP *
                                        block_group->sectorsize;
-                               spin_unlock(&block_group->tree_lock);
+                               spin_unlock(&ctl->tree_lock);
                                ret = 0;
                                continue;
                        }
                } else {
                        start = entry->offset;
                        bytes = min(entry->bytes, end - start);
-                       unlink_free_space(block_group, entry);
+                       unlink_free_space(ctl, entry);
                        kmem_cache_free(btrfs_free_space_cachep, entry);
                }
 
-               spin_unlock(&block_group->tree_lock);
+               spin_unlock(&ctl->tree_lock);
 
                if (bytes >= minlen) {
                        int update_ret;
@@ -2319,8 +2391,7 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
                                                         bytes,
                                                         &actually_trimmed);
 
-                       btrfs_add_free_space(block_group,
-                                            start, bytes);
+                       btrfs_add_free_space(block_group, start, bytes);
                        if (!update_ret)
                                btrfs_update_reserved_bytes(block_group,
                                                            bytes, 0, 1);
@@ -2342,3 +2413,145 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
 
        return ret;
 }
+
+/*
+ * Find the left-most item in the cache tree, and then return the
+ * smallest inode number in the item.
+ *
+ * Note: the returned inode number may not be the smallest one in
+ * the tree, if the left-most item is a bitmap.
+ */
+u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
+{
+       struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
+       struct btrfs_free_space *entry = NULL;
+       u64 ino = 0;
+
+       spin_lock(&ctl->tree_lock);
+
+       if (RB_EMPTY_ROOT(&ctl->free_space_offset))
+               goto out;
+
+       entry = rb_entry(rb_first(&ctl->free_space_offset),
+                        struct btrfs_free_space, offset_index);
+
+       if (!entry->bitmap) {
+               ino = entry->offset;
+
+               unlink_free_space(ctl, entry);
+               entry->offset++;
+               entry->bytes--;
+               if (!entry->bytes)
+                       kmem_cache_free(btrfs_free_space_cachep, entry);
+               else
+                       link_free_space(ctl, entry);
+       } else {
+               u64 offset = 0;
+               u64 count = 1;
+               int ret;
+
+               ret = search_bitmap(ctl, entry, &offset, &count);
+               BUG_ON(ret);
+
+               ino = offset;
+               bitmap_clear_bits(ctl, entry, offset, 1);
+               if (entry->bytes == 0)
+                       free_bitmap(ctl, entry);
+       }
+out:
+       spin_unlock(&ctl->tree_lock);
+
+       return ino;
+}
+
+struct inode *lookup_free_ino_inode(struct btrfs_root *root,
+                                   struct btrfs_path *path)
+{
+       struct inode *inode = NULL;
+
+       spin_lock(&root->cache_lock);
+       if (root->cache_inode)
+               inode = igrab(root->cache_inode);
+       spin_unlock(&root->cache_lock);
+       if (inode)
+               return inode;
+
+       inode = __lookup_free_space_inode(root, path, 0);
+       if (IS_ERR(inode))
+               return inode;
+
+       spin_lock(&root->cache_lock);
+       if (!root->fs_info->closing)
+               root->cache_inode = igrab(inode);
+       spin_unlock(&root->cache_lock);
+
+       return inode;
+}
+
+int create_free_ino_inode(struct btrfs_root *root,
+                         struct btrfs_trans_handle *trans,
+                         struct btrfs_path *path)
+{
+       return __create_free_space_inode(root, trans, path,
+                                        BTRFS_FREE_INO_OBJECTID, 0);
+}
+
+int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
+{
+       struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+       struct btrfs_path *path;
+       struct inode *inode;
+       int ret = 0;
+       u64 root_gen = btrfs_root_generation(&root->root_item);
+
+       /*
+        * If we're unmounting then just return, since this does a search on the
+        * normal root and not the commit root and we could deadlock.
+        */
+       smp_mb();
+       if (fs_info->closing)
+               return 0;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return 0;
+
+       inode = lookup_free_ino_inode(root, path);
+       if (IS_ERR(inode))
+               goto out;
+
+       if (root_gen != BTRFS_I(inode)->generation)
+               goto out_put;
+
+       ret = __load_free_space_cache(root, inode, ctl, path, 0);
+
+       if (ret < 0)
+               printk(KERN_ERR "btrfs: failed to load free ino cache for "
+                      "root %llu\n", root->root_key.objectid);
+out_put:
+       iput(inode);
+out:
+       btrfs_free_path(path);
+       return ret;
+}
+
+int btrfs_write_out_ino_cache(struct btrfs_root *root,
+                             struct btrfs_trans_handle *trans,
+                             struct btrfs_path *path)
+{
+       struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+       struct inode *inode;
+       int ret;
+
+       inode = lookup_free_ino_inode(root, path);
+       if (IS_ERR(inode))
+               return 0;
+
+       ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
+       if (ret < 0)
+               printk(KERN_ERR "btrfs: failed to write free ino cache "
+                      "for root %llu\n", root->root_key.objectid);
+
+       iput(inode);
+       return ret;
+}
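
Taken together, the free-ino cache helpers added above are meant to be driven roughly as sketched below; the wrapper and its error handling are illustrative, not part of the patch. As the start_caching() hunk further down shows, load_free_ino_cache() returning 1 means the on-disk cache was usable.

static int example_sync_ino_cache(struct btrfs_fs_info *fs_info,
                                  struct btrfs_root *root,
                                  struct btrfs_trans_handle *trans,
                                  struct btrfs_path *path)
{
        int ret;

        /* seed root->free_ino_ctl from the cache inode, if it is valid */
        ret = load_free_ino_cache(fs_info, root);
        if (ret < 0)
                return ret;

        /* ... hand out numbers via btrfs_find_ino_for_alloc() ... */

        /* persist the current contents of the ctl for the next mount */
        return btrfs_write_out_ino_cache(root, trans, path);
}
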
index 65c3b935289f3814747947e8967e091cf0e4b1a3..8f2613f779edc6bfb2dfcac1e6884f2d7ad156b7 100644 (file)
@@ -27,6 +27,25 @@ struct btrfs_free_space {
        struct list_head list;
 };
 
+struct btrfs_free_space_ctl {
+       spinlock_t tree_lock;
+       struct rb_root free_space_offset;
+       u64 free_space;
+       int extents_thresh;
+       int free_extents;
+       int total_bitmaps;
+       int unit;
+       u64 start;
+       struct btrfs_free_space_op *op;
+       void *private;
+};
+
+struct btrfs_free_space_op {
+       void (*recalc_thresholds)(struct btrfs_free_space_ctl *ctl);
+       bool (*use_bitmap)(struct btrfs_free_space_ctl *ctl,
+                          struct btrfs_free_space *info);
+};
+
 struct inode *lookup_free_space_inode(struct btrfs_root *root,
                                      struct btrfs_block_group_cache
                                      *block_group, struct btrfs_path *path);
@@ -45,17 +64,38 @@ int btrfs_write_out_cache(struct btrfs_root *root,
                          struct btrfs_trans_handle *trans,
                          struct btrfs_block_group_cache *block_group,
                          struct btrfs_path *path);
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-                        u64 bytenr, u64 size);
+
+struct inode *lookup_free_ino_inode(struct btrfs_root *root,
+                                   struct btrfs_path *path);
+int create_free_ino_inode(struct btrfs_root *root,
+                         struct btrfs_trans_handle *trans,
+                         struct btrfs_path *path);
+int load_free_ino_cache(struct btrfs_fs_info *fs_info,
+                       struct btrfs_root *root);
+int btrfs_write_out_ino_cache(struct btrfs_root *root,
+                             struct btrfs_trans_handle *trans,
+                             struct btrfs_path *path);
+
+void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group);
+int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
+                          u64 bytenr, u64 size);
+static inline int
+btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+                    u64 bytenr, u64 size)
+{
+       return __btrfs_add_free_space(block_group->free_space_ctl,
+                                     bytenr, size);
+}
 int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                            u64 bytenr, u64 size);
+void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl);
 void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
-                                  *block_group);
+                                    *block_group);
 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
                               u64 offset, u64 bytes, u64 empty_size);
+u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
                           u64 bytes);
-u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group);
 int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             struct btrfs_block_group_cache *block_group,
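
The header diff above introduces btrfs_free_space_ctl plus a small op table, so the same rb-tree/bitmap machinery can serve both block groups and the inode-number allocator. A minimal sketch of plugging in custom callbacks, mirroring the free_ino_op wiring that appears later in inode-map.c (all example_* names are illustrative):

static void example_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
{
        /* a real user recomputes ctl->extents_thresh here */
}

static bool example_use_bitmap(struct btrfs_free_space_ctl *ctl,
                               struct btrfs_free_space *info)
{
        /* switch to a bitmap once we track too many small extents */
        return ctl->free_extents >= ctl->extents_thresh;
}

static struct btrfs_free_space_op example_ops = {
        .recalc_thresholds      = example_recalc_thresholds,
        .use_bitmap             = example_use_bitmap,
};

static void example_init_ctl(struct btrfs_free_space_ctl *ctl, int unit)
{
        spin_lock_init(&ctl->tree_lock);
        ctl->unit = unit;       /* sectorsize for block groups, 1 for inode numbers */
        ctl->start = 0;
        ctl->private = NULL;
        ctl->op = &example_ops;
}
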
index 64f1150bb48d1fb1cd5b7c7d29742b3f9afff7cd..baa74f3db6911fb4ff6f2497964a2f69caef95a0 100644 (file)
@@ -130,7 +130,6 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
                              item_size - (ptr + sub_item_len - item_start));
        ret = btrfs_truncate_item(trans, root, path,
                                  item_size - sub_item_len, 1);
-       BUG_ON(ret);
 out:
        btrfs_free_path(path);
        return ret;
@@ -167,7 +166,6 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
 
                old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
                ret = btrfs_extend_item(trans, root, path, ins_len);
-               BUG_ON(ret);
                ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                     struct btrfs_inode_ref);
                ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size);
index c05a08f4c4111fdfa4157e9447005523f256fb67..3262cd17a12f89192ce0c3e5394d5936ce003049 100644 (file)
  * Boston, MA 021110-1307, USA.
  */
 
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+
 #include "ctree.h"
 #include "disk-io.h"
+#include "free-space-cache.h"
+#include "inode-map.h"
 #include "transaction.h"
 
-int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid)
+static int caching_kthread(void *data)
+{
+       struct btrfs_root *root = data;
+       struct btrfs_fs_info *fs_info = root->fs_info;
+       struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+       struct btrfs_key key;
+       struct btrfs_path *path;
+       struct extent_buffer *leaf;
+       u64 last = (u64)-1;
+       int slot;
+       int ret;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       /* Since the commit root is read-only, we can safely skip locking. */
+       path->skip_locking = 1;
+       path->search_commit_root = 1;
+       path->reada = 2;
+
+       key.objectid = BTRFS_FIRST_FREE_OBJECTID;
+       key.offset = 0;
+       key.type = BTRFS_INODE_ITEM_KEY;
+again:
+       /* need to make sure the commit_root doesn't disappear */
+       mutex_lock(&root->fs_commit_mutex);
+
+       ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+       if (ret < 0)
+               goto out;
+
+       while (1) {
+               smp_mb();
+               if (fs_info->closing)
+                       goto out;
+
+               leaf = path->nodes[0];
+               slot = path->slots[0];
+               if (slot >= btrfs_header_nritems(leaf)) {
+                       ret = btrfs_next_leaf(root, path);
+                       if (ret < 0)
+                               goto out;
+                       else if (ret > 0)
+                               break;
+
+                       if (need_resched() ||
+                           btrfs_transaction_in_commit(fs_info)) {
+                               leaf = path->nodes[0];
+
+                               if (btrfs_header_nritems(leaf) == 0) {
+                                       WARN_ON(1);
+                                       break;
+                               }
+
+                               /*
+                                * Save the key so we can advance forward
+                                * in the next search.
+                                */
+                               btrfs_item_key_to_cpu(leaf, &key, 0);
+                               btrfs_release_path(path);
+                               root->cache_progress = last;
+                               mutex_unlock(&root->fs_commit_mutex);
+                               schedule_timeout(1);
+                               goto again;
+                       } else
+                               continue;
+               }
+
+               btrfs_item_key_to_cpu(leaf, &key, slot);
+
+               if (key.type != BTRFS_INODE_ITEM_KEY)
+                       goto next;
+
+               if (key.objectid >= root->highest_objectid)
+                       break;
+
+               if (last != (u64)-1 && last + 1 != key.objectid) {
+                       __btrfs_add_free_space(ctl, last + 1,
+                                              key.objectid - last - 1);
+                       wake_up(&root->cache_wait);
+               }
+
+               last = key.objectid;
+next:
+               path->slots[0]++;
+       }
+
+       if (last < root->highest_objectid - 1) {
+               __btrfs_add_free_space(ctl, last + 1,
+                                      root->highest_objectid - last - 1);
+       }
+
+       spin_lock(&root->cache_lock);
+       root->cached = BTRFS_CACHE_FINISHED;
+       spin_unlock(&root->cache_lock);
+
+       root->cache_progress = (u64)-1;
+       btrfs_unpin_free_ino(root);
+out:
+       wake_up(&root->cache_wait);
+       mutex_unlock(&root->fs_commit_mutex);
+
+       btrfs_free_path(path);
+
+       return ret;
+}
+
+static void start_caching(struct btrfs_root *root)
+{
+       struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+       struct task_struct *tsk;
+       int ret;
+       u64 objectid;
+
+       spin_lock(&root->cache_lock);
+       if (root->cached != BTRFS_CACHE_NO) {
+               spin_unlock(&root->cache_lock);
+               return;
+       }
+
+       root->cached = BTRFS_CACHE_STARTED;
+       spin_unlock(&root->cache_lock);
+
+       ret = load_free_ino_cache(root->fs_info, root);
+       if (ret == 1) {
+               spin_lock(&root->cache_lock);
+               root->cached = BTRFS_CACHE_FINISHED;
+               spin_unlock(&root->cache_lock);
+               return;
+       }
+
+       /*
+        * It can be quite time-consuming to fill the cache by searching
+        * through the extent tree, and this can keep the ino allocation
+        * path waiting. Therefore at start we quickly find out the highest
+        * inode number, so we know we can use inode numbers which fall in
+        * [highest_ino + 1, BTRFS_LAST_FREE_OBJECTID].
+        */
+       ret = btrfs_find_free_objectid(root, &objectid);
+       if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) {
+               __btrfs_add_free_space(ctl, objectid,
+                                      BTRFS_LAST_FREE_OBJECTID - objectid + 1);
+       }
+
+       tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n",
+                         root->root_key.objectid);
+       BUG_ON(IS_ERR(tsk));
+}
+
+int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
+{
+again:
+       *objectid = btrfs_find_ino_for_alloc(root);
+
+       if (*objectid != 0)
+               return 0;
+
+       start_caching(root);
+
+       wait_event(root->cache_wait,
+                  root->cached == BTRFS_CACHE_FINISHED ||
+                  root->free_ino_ctl->free_space > 0);
+
+       if (root->cached == BTRFS_CACHE_FINISHED &&
+           root->free_ino_ctl->free_space == 0)
+               return -ENOSPC;
+       else
+               goto again;
+}
+
+void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
+{
+       struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+       struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
+again:
+       if (root->cached == BTRFS_CACHE_FINISHED) {
+               __btrfs_add_free_space(ctl, objectid, 1);
+       } else {
+               /*
+                * If we are in the process of caching free ino chunks,
+                * then to avoid adding the same inode number to the
+                * free_ino tree twice across transactions, we leave it
+                * in the pinned tree until a transaction is committed
+                * or the caching work is done.
+                */
+
+               mutex_lock(&root->fs_commit_mutex);
+               spin_lock(&root->cache_lock);
+               if (root->cached == BTRFS_CACHE_FINISHED) {
+                       spin_unlock(&root->cache_lock);
+                       mutex_unlock(&root->fs_commit_mutex);
+                       goto again;
+               }
+               spin_unlock(&root->cache_lock);
+
+               start_caching(root);
+
+               if (objectid <= root->cache_progress ||
+                   objectid > root->highest_objectid)
+                       __btrfs_add_free_space(ctl, objectid, 1);
+               else
+                       __btrfs_add_free_space(pinned, objectid, 1);
+
+               mutex_unlock(&root->fs_commit_mutex);
+       }
+}
+
+/*
+ * When a transaction is committed, we'll move those inode numbers which
+ * are smaller than root->cache_progress from pinned tree to free_ino tree,
+ * and others will just be dropped, because the commit root we were
+ * searching has changed.
+ *
+ * Must be called with root->fs_commit_mutex held
+ */
+void btrfs_unpin_free_ino(struct btrfs_root *root)
+{
+       struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+       struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
+       struct btrfs_free_space *info;
+       struct rb_node *n;
+       u64 count;
+
+       while (1) {
+               n = rb_first(rbroot);
+               if (!n)
+                       break;
+
+               info = rb_entry(n, struct btrfs_free_space, offset_index);
+               BUG_ON(info->bitmap);
+
+               if (info->offset > root->cache_progress)
+                       goto free;
+               else if (info->offset + info->bytes > root->cache_progress)
+                       count = root->cache_progress - info->offset + 1;
+               else
+                       count = info->bytes;
+
+               __btrfs_add_free_space(ctl, info->offset, count);
+free:
+               rb_erase(&info->offset_index, rbroot);
+               kfree(info);
+       }
+}
+
+#define INIT_THRESHOLD (((1024 * 32) / 2) / sizeof(struct btrfs_free_space))
+#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8)
+
+/*
+ * The goal is to keep the memory used by the free_ino tree from
+ * exceeding the memory we would use with bitmaps only.
+ */
+static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
+{
+       struct btrfs_free_space *info;
+       struct rb_node *n;
+       int max_ino;
+       int max_bitmaps;
+
+       n = rb_last(&ctl->free_space_offset);
+       if (!n) {
+               ctl->extents_thresh = INIT_THRESHOLD;
+               return;
+       }
+       info = rb_entry(n, struct btrfs_free_space, offset_index);
+
+       /*
+        * Find the maximum inode number in the filesystem. Note we
+        * ignore the fact that this can be a bitmap, because we are
+        * not doing a precise calculation.
+        */
+       max_ino = info->bytes - 1;
+
+       max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
+       if (max_bitmaps <= ctl->total_bitmaps) {
+               ctl->extents_thresh = 0;
+               return;
+       }
+
+       ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
+                               PAGE_CACHE_SIZE / sizeof(*info);
+}
+
+/*
+ * We don't fall back to bitmaps if we are below the extents threshold
+ * or if this chunk of inode numbers is a big one.
+ */
+static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
+                      struct btrfs_free_space *info)
+{
+       if (ctl->free_extents < ctl->extents_thresh ||
+           info->bytes > INODES_PER_BITMAP / 10)
+               return false;
+
+       return true;
+}
+
+static struct btrfs_free_space_op free_ino_op = {
+       .recalc_thresholds      = recalculate_thresholds,
+       .use_bitmap             = use_bitmap,
+};
+
+static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
+{
+}
+
+static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
+                             struct btrfs_free_space *info)
+{
+       /*
+        * We always use extents for two reasons:
+        *
+        * - The pinned tree is only used while the caching work is in
+        *   progress.
+        * - It keeps the code simpler. See btrfs_unpin_free_ino().
+        */
+       return false;
+}
+
+static struct btrfs_free_space_op pinned_free_ino_op = {
+       .recalc_thresholds      = pinned_recalc_thresholds,
+       .use_bitmap             = pinned_use_bitmap,
+};
+
+void btrfs_init_free_ino_ctl(struct btrfs_root *root)
+{
+       struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+       struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
+
+       spin_lock_init(&ctl->tree_lock);
+       ctl->unit = 1;
+       ctl->start = 0;
+       ctl->private = NULL;
+       ctl->op = &free_ino_op;
+
+       /*
+        * Initially we allow 16K of RAM to be used to cache chunks of
+        * inode numbers before we resort to bitmaps. This is somewhat
+        * arbitrary, but it will be adjusted at runtime.
+        */
+       ctl->extents_thresh = INIT_THRESHOLD;
+
+       spin_lock_init(&pinned->tree_lock);
+       pinned->unit = 1;
+       pinned->start = 0;
+       pinned->private = NULL;
+       pinned->extents_thresh = 0;
+       pinned->op = &pinned_free_ino_op;
+}
+
+int btrfs_save_ino_cache(struct btrfs_root *root,
+                        struct btrfs_trans_handle *trans)
+{
+       struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+       struct btrfs_path *path;
+       struct inode *inode;
+       u64 alloc_hint = 0;
+       int ret;
+       int prealloc;
+       bool retry = false;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+again:
+       inode = lookup_free_ino_inode(root, path);
+       if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
+               ret = PTR_ERR(inode);
+               goto out;
+       }
+
+       if (IS_ERR(inode)) {
+               BUG_ON(retry);
+               retry = true;
+
+               ret = create_free_ino_inode(root, trans, path);
+               if (ret)
+                       goto out;
+               goto again;
+       }
+
+       BTRFS_I(inode)->generation = 0;
+       ret = btrfs_update_inode(trans, root, inode);
+       WARN_ON(ret);
+
+       if (i_size_read(inode) > 0) {
+               ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
+               if (ret)
+                       goto out_put;
+       }
+
+       spin_lock(&root->cache_lock);
+       if (root->cached != BTRFS_CACHE_FINISHED) {
+               ret = -1;
+               spin_unlock(&root->cache_lock);
+               goto out_put;
+       }
+       spin_unlock(&root->cache_lock);
+
+       spin_lock(&ctl->tree_lock);
+       prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
+       prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE);
+       prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE;
+       spin_unlock(&ctl->tree_lock);
+
+       /* Just to make sure we have enough space */
+       prealloc += 8 * PAGE_CACHE_SIZE;
+
+       ret = btrfs_check_data_free_space(inode, prealloc);
+       if (ret)
+               goto out_put;
+
+       ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
+                                             prealloc, prealloc, &alloc_hint);
+       if (ret)
+               goto out_put;
+       btrfs_free_reserved_data_space(inode, prealloc);
+
+out_put:
+       iput(inode);
+out:
+       if (ret == 0)
+               ret = btrfs_write_out_ino_cache(root, trans, path);
+
+       btrfs_free_path(path);
+       return ret;
+}
+
+static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
 {
        struct btrfs_path *path;
        int ret;
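
To make the sizing logic in the inode-map additions above concrete: assuming a 4K PAGE_CACHE_SIZE and, hypothetically, sizeof(struct btrfs_free_space) == 64 bytes (the real size depends on the build), the constants work out as follows.

        INODES_PER_BITMAP = PAGE_CACHE_SIZE * 8        = 32768 inode numbers per bitmap page
        INIT_THRESHOLD    = ((1024 * 32) / 2) / 64     = 256 extent entries, i.e. ~16K of RAM

        recalculate_thresholds() then allows

        extents_thresh = (max_bitmaps - total_bitmaps) * PAGE_CACHE_SIZE / sizeof(*info)

        so the extent entries may consume at most the memory that the not-yet-
        allocated bitmap pages would, which is the "no more memory than bitmaps
        only" goal stated above recalculate_thresholds().
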
@@ -55,15 +490,14 @@ error:
        return ret;
 }
 
-int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
-                            struct btrfs_root *root,
-                            u64 dirid, u64 *objectid)
+int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
 {
        int ret;
        mutex_lock(&root->objectid_mutex);
 
        if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
-               ret = btrfs_find_highest_inode(root, &root->highest_objectid);
+               ret = btrfs_find_highest_objectid(root,
+                                                 &root->highest_objectid);
                if (ret)
                        goto out;
        }
diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h
new file mode 100644 (file)
index 0000000..ddb347b
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef __BTRFS_INODE_MAP
+#define __BTRFS_INODE_MAP
+
+void btrfs_init_free_ino_ctl(struct btrfs_root *root);
+void btrfs_unpin_free_ino(struct btrfs_root *root);
+void btrfs_return_ino(struct btrfs_root *root, u64 objectid);
+int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid);
+int btrfs_save_ino_cache(struct btrfs_root *root,
+                        struct btrfs_trans_handle *trans);
+
+int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid);
+
+#endif
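
A sketch of the call sequence this new header implies for inode creation; example_insert_inode_item() is a hypothetical stand-in for the step that actually creates the inode item, and the whole function is illustrative:

static int example_new_inode_number(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root)
{
        u64 objectid;
        int ret;

        /* may start the caching kthread and wait for free numbers */
        ret = btrfs_find_free_ino(root, &objectid);
        if (ret)
                return ret;

        /* hypothetical helper that creates the inode item for 'objectid' */
        ret = example_insert_inode_item(trans, root, objectid);
        if (ret)
                btrfs_return_ino(root, objectid);       /* give the number back */

        return ret;
}
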
index 7cd8ab0ef04d5b3e95ccd572f96ca858457e8c18..bb51bb1fa44f836ffaa519eaa3c49b96a6ebac03 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/posix_acl.h>
 #include <linux/falloc.h>
 #include <linux/slab.h>
+#include <linux/ratelimit.h>
 #include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -51,6 +52,7 @@
 #include "compression.h"
 #include "locking.h"
 #include "free-space-cache.h"
+#include "inode-map.h"
 
 struct btrfs_iget_args {
        u64 ino;
@@ -138,7 +140,7 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
        path->leave_spinning = 1;
        btrfs_set_trans_block_group(trans, inode);
 
-       key.objectid = inode->i_ino;
+       key.objectid = btrfs_ino(inode);
        key.offset = start;
        btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
        datasize = btrfs_file_extent_calc_inline_size(cur_size);
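
The inode->i_ino to btrfs_ino(inode) conversions in this and the following hunks rely on a small helper defined elsewhere in the series (not shown in this diff). It returns the 64-bit btrfs objectid rather than the VFS inode number, which matters because i_ino is an unsigned long and cannot hold every objectid on 32-bit kernels; hence the switch to %llu with unsigned long long casts in the printk hunks below. A rough, assumed sketch of its shape:

static inline u64 example_btrfs_ino(struct inode *inode)
{
        /*
         * Assumption: the real helper also falls back to inode->i_ino for
         * special/system inodes; only the common case is shown here.
         */
        return BTRFS_I(inode)->location.objectid;
}
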
@@ -340,6 +342,10 @@ static noinline int compress_file_range(struct inode *inode,
        int will_compress;
        int compress_type = root->fs_info->compress_type;
 
+       /* if this is a small write inside eof, kick off a defragbot */
+       if (end <= BTRFS_I(inode)->disk_i_size && (end - start + 1) < 16 * 1024)
+               btrfs_add_inode_defrag(NULL, inode);
+
        actual_end = min_t(u64, isize, end + 1);
 again:
        will_compress = 0;
@@ -649,7 +655,7 @@ retry:
                                        async_extent->start +
                                        async_extent->ram_size - 1, 0);
 
-               em = alloc_extent_map(GFP_NOFS);
+               em = alloc_extent_map();
                BUG_ON(!em);
                em->start = async_extent->start;
                em->len = async_extent->ram_size;
@@ -745,6 +751,15 @@ static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
        return alloc_hint;
 }
 
+static inline bool is_free_space_inode(struct btrfs_root *root,
+                                      struct inode *inode)
+{
+       if (root == root->fs_info->tree_root ||
+           BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID)
+               return true;
+       return false;
+}
+
 /*
  * when extent_io.c finds a delayed allocation range in the file,
  * the call backs end up in this code.  The basic idea is to
@@ -777,7 +792,7 @@ static noinline int cow_file_range(struct inode *inode,
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        int ret = 0;
 
-       BUG_ON(root == root->fs_info->tree_root);
+       BUG_ON(is_free_space_inode(root, inode));
        trans = btrfs_join_transaction(root, 1);
        BUG_ON(IS_ERR(trans));
        btrfs_set_trans_block_group(trans, inode);
@@ -788,6 +803,10 @@ static noinline int cow_file_range(struct inode *inode,
        disk_num_bytes = num_bytes;
        ret = 0;
 
+       /* if this is a small write inside eof, kick off defrag */
+       if (end <= BTRFS_I(inode)->disk_i_size && num_bytes < 64 * 1024)
+               btrfs_add_inode_defrag(trans, inode);
+
        if (start == 0) {
                /* lets try to make an inline extent */
                ret = cow_file_range_inline(trans, root, inode,
@@ -826,7 +845,7 @@ static noinline int cow_file_range(struct inode *inode,
                                           (u64)-1, &ins, 1);
                BUG_ON(ret);
 
-               em = alloc_extent_map(GFP_NOFS);
+               em = alloc_extent_map();
                BUG_ON(!em);
                em->start = start;
                em->orig_start = em->start;
@@ -1008,7 +1027,7 @@ static noinline int csum_exist_in_range(struct btrfs_root *root,
        LIST_HEAD(list);
 
        ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
-                                      bytenr + num_bytes - 1, &list);
+                                      bytenr + num_bytes - 1, &list, 0);
        if (ret == 0 && list_empty(&list))
                return 0;
 
@@ -1049,29 +1068,31 @@ static noinline int run_delalloc_nocow(struct inode *inode,
        int type;
        int nocow;
        int check_prev = 1;
-       bool nolock = false;
+       bool nolock;
+       u64 ino = btrfs_ino(inode);
 
        path = btrfs_alloc_path();
        BUG_ON(!path);
-       if (root == root->fs_info->tree_root) {
-               nolock = true;
+
+       nolock = is_free_space_inode(root, inode);
+
+       if (nolock)
                trans = btrfs_join_transaction_nolock(root, 1);
-       } else {
+       else
                trans = btrfs_join_transaction(root, 1);
-       }
        BUG_ON(IS_ERR(trans));
 
        cow_start = (u64)-1;
        cur_offset = start;
        while (1) {
-               ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+               ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               cur_offset, 0);
                BUG_ON(ret < 0);
                if (ret > 0 && path->slots[0] > 0 && check_prev) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0] - 1);
-                       if (found_key.objectid == inode->i_ino &&
+                       if (found_key.objectid == ino &&
                            found_key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
@@ -1092,7 +1113,7 @@ next_slot:
                num_bytes = 0;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
 
-               if (found_key.objectid > inode->i_ino ||
+               if (found_key.objectid > ino ||
                    found_key.type > BTRFS_EXTENT_DATA_KEY ||
                    found_key.offset > end)
                        break;
@@ -1127,7 +1148,7 @@ next_slot:
                                goto out_check;
                        if (btrfs_extent_readonly(root, disk_bytenr))
                                goto out_check;
-                       if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
+                       if (btrfs_cross_ref_exist(trans, root, ino,
                                                  found_key.offset -
                                                  extent_offset, disk_bytenr))
                                goto out_check;
@@ -1164,7 +1185,7 @@ out_check:
                        goto next_slot;
                }
 
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
                if (cow_start != (u64)-1) {
                        ret = cow_file_range(inode, locked_page, cow_start,
                                        found_key.offset - 1, page_started,
@@ -1177,7 +1198,7 @@ out_check:
                        struct extent_map *em;
                        struct extent_map_tree *em_tree;
                        em_tree = &BTRFS_I(inode)->extent_tree;
-                       em = alloc_extent_map(GFP_NOFS);
+                       em = alloc_extent_map();
                        BUG_ON(!em);
                        em->start = cur_offset;
                        em->orig_start = em->start;
@@ -1222,7 +1243,7 @@ out_check:
                if (cur_offset > end)
                        break;
        }
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 
        if (cur_offset <= end && cow_start == (u64)-1)
                cow_start = cur_offset;
@@ -1310,14 +1331,13 @@ static int btrfs_set_bit_hook(struct inode *inode,
 
        /*
         * set_bit and clear bit hooks normally require _irqsave/restore
-        * but in this case, we are only testeing for the DELALLOC
+        * but in this case, we are only testing for the DELALLOC
         * bit, which is only set or cleared with irqs on
         */
        if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
                struct btrfs_root *root = BTRFS_I(inode)->root;
                u64 len = state->end + 1 - state->start;
-               int do_list = (root->root_key.objectid !=
-                              BTRFS_ROOT_TREE_OBJECTID);
+               bool do_list = !is_free_space_inode(root, inode);
 
                if (*bits & EXTENT_FIRST_DELALLOC)
                        *bits &= ~EXTENT_FIRST_DELALLOC;
@@ -1344,14 +1364,13 @@ static int btrfs_clear_bit_hook(struct inode *inode,
 {
        /*
         * set_bit and clear bit hooks normally require _irqsave/restore
-        * but in this case, we are only testeing for the DELALLOC
+        * but in this case, we are only testing for the DELALLOC
         * bit, which is only set or cleared with irqs on
         */
        if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
                struct btrfs_root *root = BTRFS_I(inode)->root;
                u64 len = state->end + 1 - state->start;
-               int do_list = (root->root_key.objectid !=
-                              BTRFS_ROOT_TREE_OBJECTID);
+               bool do_list = !is_free_space_inode(root, inode);
 
                if (*bits & EXTENT_FIRST_DELALLOC)
                        *bits &= ~EXTENT_FIRST_DELALLOC;
@@ -1458,7 +1477,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 
        skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
-       if (root == root->fs_info->tree_root)
+       if (is_free_space_inode(root, inode))
                ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2);
        else
                ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
@@ -1644,7 +1663,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
                                 &hint, 0);
        BUG_ON(ret);
 
-       ins.objectid = inode->i_ino;
+       ins.objectid = btrfs_ino(inode);
        ins.offset = file_pos;
        ins.type = BTRFS_EXTENT_DATA_KEY;
        ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
@@ -1675,7 +1694,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
        ins.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_alloc_reserved_file_extent(trans, root,
                                        root->root_key.objectid,
-                                       inode->i_ino, file_pos, &ins);
+                                       btrfs_ino(inode), file_pos, &ins);
        BUG_ON(ret);
        btrfs_free_path(path);
 
@@ -1701,7 +1720,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
        struct extent_state *cached_state = NULL;
        int compress_type = 0;
        int ret;
-       bool nolock = false;
+       bool nolock;
 
        ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
                                             end - start + 1);
@@ -1709,7 +1728,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
                return 0;
        BUG_ON(!ordered_extent);
 
-       nolock = (root == root->fs_info->tree_root);
+       nolock = is_free_space_inode(root, inode);
 
        if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
                BUG_ON(!list_empty(&ordered_extent->list));
@@ -1855,7 +1874,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
                }
                read_unlock(&em_tree->lock);
 
-               if (!em || IS_ERR(em)) {
+               if (IS_ERR_OR_NULL(em)) {
                        kfree(failrec);
                        return -EIO;
                }
@@ -2004,12 +2023,11 @@ good:
        return 0;
 
 zeroit:
-       if (printk_ratelimit()) {
-               printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
-                      "private %llu\n", page->mapping->host->i_ino,
+       printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
+                      "private %llu\n",
+                      (unsigned long long)btrfs_ino(page->mapping->host),
                       (unsigned long long)start, csum,
                       (unsigned long long)private);
-       }
        memset(kaddr + offset, 1, end - start + 1);
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);
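
The zeroit hunk above replaces the open-coded printk_ratelimit() guard with printk_ratelimited() (hence the new <linux/ratelimit.h> include at the top of the file). A minimal usage sketch with an illustrative helper:

static void example_report_bad_csum(struct inode *inode, u64 off, u32 csum)
{
        /* drops excess messages once the rate limit is exceeded */
        printk_ratelimited(KERN_INFO
                "btrfs: csum failed ino %llu off %llu csum %u\n",
                (unsigned long long)btrfs_ino(inode),
                (unsigned long long)off, csum);
}
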
@@ -2244,7 +2262,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
 
        /* insert an orphan item to track this unlinked/truncated file */
        if (insert >= 1) {
-               ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
+               ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
                BUG_ON(ret);
        }
 
@@ -2281,7 +2299,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
        spin_unlock(&root->orphan_lock);
 
        if (trans && delete_item) {
-               ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
+               ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
                BUG_ON(ret);
        }
 
@@ -2346,7 +2364,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                        break;
 
                /* release the path since we're done with it */
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
 
                /*
                 * this is where we are basically btrfs_lookup, without the
@@ -2543,7 +2561,8 @@ static void btrfs_read_locked_inode(struct inode *inode)
         * try to precache a NULL acl entry for files that don't have
         * any xattrs or acls
         */
-       maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
+       maybe_acls = acls_after_inode_item(leaf, path->slots[0],
+                                          btrfs_ino(inode));
        if (!maybe_acls)
                cache_no_acl(inode);
 
@@ -2647,11 +2666,26 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
        struct extent_buffer *leaf;
        int ret;
 
+       /*
+        * If the root is the tree root, this inode is used to store
+        * free space information. These inodes are updated when the
+        * transaction is committed, so their updates must not be
+        * delayed, or a deadlock will occur.
+        */
+       if (!is_free_space_inode(root, inode)) {
+               ret = btrfs_delayed_update_inode(trans, root, inode);
+               if (!ret)
+                       btrfs_set_inode_last_trans(trans, inode);
+               return ret;
+       }
+
        path = btrfs_alloc_path();
-       BUG_ON(!path);
+       if (!path)
+               return -ENOMEM;
+
        path->leave_spinning = 1;
-       ret = btrfs_lookup_inode(trans, root, path,
-                                &BTRFS_I(inode)->location, 1);
+       ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
+                                1);
        if (ret) {
                if (ret > 0)
                        ret = -ENOENT;
@@ -2661,7 +2695,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
        btrfs_unlock_up_safe(path, 1);
        leaf = path->nodes[0];
        inode_item = btrfs_item_ptr(leaf, path->slots[0],
-                                 struct btrfs_inode_item);
+                                   struct btrfs_inode_item);
 
        fill_inode_item(trans, leaf, inode_item, inode);
        btrfs_mark_buffer_dirty(leaf);
@@ -2672,7 +2706,6 @@ failed:
        return ret;
 }
 
-
 /*
  * unlink helper that gets used here in inode.c and in the tree logging
  * recovery code.  It remove a link in a directory with a given name, and
@@ -2689,6 +2722,8 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
        struct btrfs_dir_item *di;
        struct btrfs_key key;
        u64 index;
+       u64 ino = btrfs_ino(inode);
+       u64 dir_ino = btrfs_ino(dir);
 
        path = btrfs_alloc_path();
        if (!path) {
@@ -2697,7 +2732,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
        }
 
        path->leave_spinning = 1;
-       di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
+       di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
                                    name, name_len, -1);
        if (IS_ERR(di)) {
                ret = PTR_ERR(di);
@@ -2712,33 +2747,23 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
        ret = btrfs_delete_one_dir_name(trans, root, path, di);
        if (ret)
                goto err;
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 
-       ret = btrfs_del_inode_ref(trans, root, name, name_len,
-                                 inode->i_ino,
-                                 dir->i_ino, &index);
+       ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
+                                 dir_ino, &index);
        if (ret) {
                printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
-                      "inode %lu parent %lu\n", name_len, name,
-                      inode->i_ino, dir->i_ino);
+                      "inode %llu parent %llu\n", name_len, name,
+                      (unsigned long long)ino, (unsigned long long)dir_ino);
                goto err;
        }
 
-       di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
-                                        index, name, name_len, -1);
-       if (IS_ERR(di)) {
-               ret = PTR_ERR(di);
-               goto err;
-       }
-       if (!di) {
-               ret = -ENOENT;
+       ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
+       if (ret)
                goto err;
-       }
-       ret = btrfs_delete_one_dir_name(trans, root, path, di);
-       btrfs_release_path(root, path);
 
        ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
-                                        inode, dir->i_ino);
+                                        inode, dir_ino);
        BUG_ON(ret != 0 && ret != -ENOENT);
 
        ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
@@ -2816,12 +2841,14 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
        int check_link = 1;
        int err = -ENOSPC;
        int ret;
+       u64 ino = btrfs_ino(inode);
+       u64 dir_ino = btrfs_ino(dir);
 
        trans = btrfs_start_transaction(root, 10);
        if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
                return trans;
 
-       if (inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
+       if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
                return ERR_PTR(-ENOSPC);
 
        /* check if there is someone else holds reference */
@@ -2862,7 +2889,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
        } else {
                check_link = 0;
        }
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 
        ret = btrfs_lookup_inode(trans, root, path,
                                &BTRFS_I(inode)->location, 0);
@@ -2876,11 +2903,11 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
        } else {
                check_link = 0;
        }
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 
        if (ret == 0 && S_ISREG(inode->i_mode)) {
                ret = btrfs_lookup_file_extent(trans, root, path,
-                                              inode->i_ino, (u64)-1, 0);
+                                              ino, (u64)-1, 0);
                if (ret < 0) {
                        err = ret;
                        goto out;
@@ -2888,7 +2915,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
                BUG_ON(ret == 0);
                if (check_path_shared(root, path))
                        goto out;
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
        }
 
        if (!check_link) {
@@ -2896,7 +2923,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
                goto out;
        }
 
-       di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
+       di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
                                dentry->d_name.name, dentry->d_name.len, 0);
        if (IS_ERR(di)) {
                err = PTR_ERR(di);
@@ -2909,11 +2936,11 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
                err = 0;
                goto out;
        }
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 
        ref = btrfs_lookup_inode_ref(trans, root, path,
                                dentry->d_name.name, dentry->d_name.len,
-                               inode->i_ino, dir->i_ino, 0);
+                               ino, dir_ino, 0);
        if (IS_ERR(ref)) {
                err = PTR_ERR(ref);
                goto out;
@@ -2922,9 +2949,17 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
        if (check_path_shared(root, path))
                goto out;
        index = btrfs_inode_ref_index(path->nodes[0], ref);
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 
-       di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index,
+       /*
+        * This is a commit root search; if we can look up the inode item
+        * and other related items in the commit root, it means the
+        * transaction that created the dir/file has been committed, and
+        * the dir index item whose insertion we delayed has also been
+        * inserted into the commit root. So we needn't worry about the
+        * delayed insertion of the dir index item here.
+        */
+       di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
                                dentry->d_name.name, dentry->d_name.len, 0);
        if (IS_ERR(di)) {
                err = PTR_ERR(di);
@@ -2999,54 +3034,47 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
        struct btrfs_key key;
        u64 index;
        int ret;
+       u64 dir_ino = btrfs_ino(dir);
 
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
 
-       di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
+       di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
                                   name, name_len, -1);
-       BUG_ON(!di || IS_ERR(di));
+       BUG_ON(IS_ERR_OR_NULL(di));
 
        leaf = path->nodes[0];
        btrfs_dir_item_key_to_cpu(leaf, di, &key);
        WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
        ret = btrfs_delete_one_dir_name(trans, root, path, di);
        BUG_ON(ret);
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 
        ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
                                 objectid, root->root_key.objectid,
-                                dir->i_ino, &index, name, name_len);
+                                dir_ino, &index, name, name_len);
        if (ret < 0) {
                BUG_ON(ret != -ENOENT);
-               di = btrfs_search_dir_index_item(root, path, dir->i_ino,
+               di = btrfs_search_dir_index_item(root, path, dir_ino,
                                                 name, name_len);
-               BUG_ON(!di || IS_ERR(di));
+               BUG_ON(IS_ERR_OR_NULL(di));
 
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
                index = key.offset;
        }
+       btrfs_release_path(path);
 
-       di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
-                                        index, name, name_len, -1);
-       BUG_ON(!di || IS_ERR(di));
-
-       leaf = path->nodes[0];
-       btrfs_dir_item_key_to_cpu(leaf, di, &key);
-       WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
-       ret = btrfs_delete_one_dir_name(trans, root, path, di);
+       ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
        BUG_ON(ret);
-       btrfs_release_path(root, path);
 
        btrfs_i_size_write(dir, dir->i_size - name_len * 2);
        dir->i_mtime = dir->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode(trans, root, dir);
        BUG_ON(ret);
 
-       btrfs_free_path(path);
        return 0;
 }
 
@@ -3059,7 +3087,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
        unsigned long nr = 0;
 
        if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
-           inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
+           btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
                return -ENOTEMPTY;
 
        trans = __unlink_start_trans(dir, dentry);
@@ -3068,7 +3096,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
 
        btrfs_set_trans_block_group(trans, dir);
 
-       if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
+       if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
                err = btrfs_unlink_subvol(trans, root, dir,
                                          BTRFS_I(inode)->location.objectid,
                                          dentry->d_name.name,
@@ -3093,178 +3121,6 @@ out:
        return err;
 }
 
-#if 0
-/*
- * when truncating bytes in a file, it is possible to avoid reading
- * the leaves that contain only checksum items.  This can be the
- * majority of the IO required to delete a large file, but it must
- * be done carefully.
- *
- * The keys in the level just above the leaves are checked to make sure
- * the lowest key in a given leaf is a csum key, and starts at an offset
- * after the new  size.
- *
- * Then the key for the next leaf is checked to make sure it also has
- * a checksum item for the same file.  If it does, we know our target leaf
- * contains only checksum items, and it can be safely freed without reading
- * it.
- *
- * This is just an optimization targeted at large files.  It may do
- * nothing.  It will return 0 unless things went badly.
- */
-static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
-                                    struct btrfs_root *root,
-                                    struct btrfs_path *path,
-                                    struct inode *inode, u64 new_size)
-{
-       struct btrfs_key key;
-       int ret;
-       int nritems;
-       struct btrfs_key found_key;
-       struct btrfs_key other_key;
-       struct btrfs_leaf_ref *ref;
-       u64 leaf_gen;
-       u64 leaf_start;
-
-       path->lowest_level = 1;
-       key.objectid = inode->i_ino;
-       key.type = BTRFS_CSUM_ITEM_KEY;
-       key.offset = new_size;
-again:
-       ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
-       if (ret < 0)
-               goto out;
-
-       if (path->nodes[1] == NULL) {
-               ret = 0;
-               goto out;
-       }
-       ret = 0;
-       btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
-       nritems = btrfs_header_nritems(path->nodes[1]);
-
-       if (!nritems)
-               goto out;
-
-       if (path->slots[1] >= nritems)
-               goto next_node;
-
-       /* did we find a key greater than anything we want to delete? */
-       if (found_key.objectid > inode->i_ino ||
-          (found_key.objectid == inode->i_ino && found_key.type > key.type))
-               goto out;
-
-       /* we check the next key in the node to make sure the leave contains
-        * only checksum items.  This comparison doesn't work if our
-        * leaf is the last one in the node
-        */
-       if (path->slots[1] + 1 >= nritems) {
-next_node:
-               /* search forward from the last key in the node, this
-                * will bring us into the next node in the tree
-                */
-               btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
-
-               /* unlikely, but we inc below, so check to be safe */
-               if (found_key.offset == (u64)-1)
-                       goto out;
-
-               /* search_forward needs a path with locks held, do the
-                * search again for the original key.  It is possible
-                * this will race with a balance and return a path that
-                * we could modify, but this drop is just an optimization
-                * and is allowed to miss some leaves.
-                */
-               btrfs_release_path(root, path);
-               found_key.offset++;
-
-               /* setup a max key for search_forward */
-               other_key.offset = (u64)-1;
-               other_key.type = key.type;
-               other_key.objectid = key.objectid;
-
-               path->keep_locks = 1;
-               ret = btrfs_search_forward(root, &found_key, &other_key,
-                                          path, 0, 0);
-               path->keep_locks = 0;
-               if (ret || found_key.objectid != key.objectid ||
-                   found_key.type != key.type) {
-                       ret = 0;
-                       goto out;
-               }
-
-               key.offset = found_key.offset;
-               btrfs_release_path(root, path);
-               cond_resched();
-               goto again;
-       }
-
-       /* we know there's one more slot after us in the tree,
-        * read that key so we can verify it is also a checksum item
-        */
-       btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
-
-       if (found_key.objectid < inode->i_ino)
-               goto next_key;
-
-       if (found_key.type != key.type || found_key.offset < new_size)
-               goto next_key;
-
-       /*
-        * if the key for the next leaf isn't a csum key from this objectid,
-        * we can't be sure there aren't good items inside this leaf.
-        * Bail out
-        */
-       if (other_key.objectid != inode->i_ino || other_key.type != key.type)
-               goto out;
-
-       leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
-       leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
-       /*
-        * it is safe to delete this leaf, it contains only
-        * csum items from this inode at an offset >= new_size
-        */
-       ret = btrfs_del_leaf(trans, root, path, leaf_start);
-       BUG_ON(ret);
-
-       if (root->ref_cows && leaf_gen < trans->transid) {
-               ref = btrfs_alloc_leaf_ref(root, 0);
-               if (ref) {
-                       ref->root_gen = root->root_key.offset;
-                       ref->bytenr = leaf_start;
-                       ref->owner = 0;
-                       ref->generation = leaf_gen;
-                       ref->nritems = 0;
-
-                       btrfs_sort_leaf_ref(ref);
-
-                       ret = btrfs_add_leaf_ref(root, ref, 0);
-                       WARN_ON(ret);
-                       btrfs_free_leaf_ref(root, ref);
-               } else {
-                       WARN_ON(1);
-               }
-       }
-next_key:
-       btrfs_release_path(root, path);
-
-       if (other_key.objectid == inode->i_ino &&
-           other_key.type == key.type && other_key.offset > key.offset) {
-               key.offset = other_key.offset;
-               cond_resched();
-               goto again;
-       }
-       ret = 0;
-out:
-       /* fixup any changes we've made to the path */
-       path->lowest_level = 0;
-       path->keep_locks = 0;
-       btrfs_release_path(root, path);
-       return ret;
-}
-
-#endif
-
 /*
  * this can truncate away extent items, csum items and directory items.
  * It starts at a high offset and removes keys until it can't find
@@ -3300,17 +3156,27 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
        int encoding;
        int ret;
        int err = 0;
+       u64 ino = btrfs_ino(inode);
 
        BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
 
        if (root->ref_cows || root == root->fs_info->tree_root)
                btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
 
+       /*
+        * This function is also used to drop the items in the log tree before
+        * we relog the inode, so if root != BTRFS_I(inode)->root, it means
+        * it is being used to drop the logged items, so we shouldn't kill
+        * the delayed items.
+        */
+       if (min_type == 0 && root == BTRFS_I(inode)->root)
+               btrfs_kill_delayed_inode_items(inode);
+
        path = btrfs_alloc_path();
        BUG_ON(!path);
        path->reada = -1;
 
-       key.objectid = inode->i_ino;
+       key.objectid = ino;
        key.offset = (u64)-1;
        key.type = (u8)-1;
 
@@ -3338,7 +3204,7 @@ search_again:
                found_type = btrfs_key_type(&found_key);
                encoding = 0;
 
-               if (found_key.objectid != inode->i_ino)
+               if (found_key.objectid != ino)
                        break;
 
                if (found_type < min_type)
@@ -3428,7 +3294,6 @@ search_again:
                                    btrfs_file_extent_calc_inline_size(size);
                                ret = btrfs_truncate_item(trans, root, path,
                                                          size, 1);
-                               BUG_ON(ret);
                        } else if (root->ref_cows) {
                                inode_sub_bytes(inode, item_end + 1 -
                                                found_key.offset);
@@ -3457,7 +3322,7 @@ delete:
                        ret = btrfs_free_extent(trans, root, extent_start,
                                                extent_num_bytes, 0,
                                                btrfs_header_owner(leaf),
-                                               inode->i_ino, extent_offset);
+                                               ino, extent_offset);
                        BUG_ON(ret);
                }
 
@@ -3466,7 +3331,9 @@ delete:
 
                if (path->slots[0] == 0 ||
                    path->slots[0] != pending_del_slot) {
-                       if (root->ref_cows) {
+                       if (root->ref_cows &&
+                           BTRFS_I(inode)->location.objectid !=
+                                               BTRFS_FREE_INO_OBJECTID) {
                                err = -EAGAIN;
                                goto out;
                        }
@@ -3477,7 +3344,7 @@ delete:
                                BUG_ON(ret);
                                pending_del_nr = 0;
                        }
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        goto search_again;
                } else {
                        path->slots[0]--;
@@ -3635,7 +3502,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
        while (1) {
                em = btrfs_get_extent(inode, NULL, 0, cur_offset,
                                block_end - cur_offset, 0);
-               BUG_ON(IS_ERR(em) || !em);
+               BUG_ON(IS_ERR_OR_NULL(em));
                last_byte = min(extent_map_end(em), block_end);
                last_byte = (last_byte + mask) & ~mask;
                if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
@@ -3656,7 +3523,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                                break;
 
                        err = btrfs_insert_file_extent(trans, root,
-                                       inode->i_ino, cur_offset, 0,
+                                       btrfs_ino(inode), cur_offset, 0,
                                        0, hole_size, 0, hole_size,
                                        0, 0, 0);
                        if (err)
@@ -3758,7 +3625,7 @@ void btrfs_evict_inode(struct inode *inode)
 
        truncate_inode_pages(&inode->i_data, 0);
        if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
-                              root == root->fs_info->tree_root))
+                              is_free_space_inode(root, inode)))
                goto no_delete;
 
        if (is_bad_inode(inode)) {
@@ -3811,6 +3678,10 @@ void btrfs_evict_inode(struct inode *inode)
                BUG_ON(ret);
        }
 
+       if (!(root == root->fs_info->tree_root ||
+             root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
+               btrfs_return_ino(root, btrfs_ino(inode));
+
        nr = trans->blocks_used;
        btrfs_end_transaction(trans, root);
        btrfs_btree_balance_dirty(root, nr);
@@ -3836,12 +3707,12 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
        path = btrfs_alloc_path();
        BUG_ON(!path);
 
-       di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
+       di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
                                    namelen, 0);
        if (IS_ERR(di))
                ret = PTR_ERR(di);
 
-       if (!di || IS_ERR(di))
+       if (IS_ERR_OR_NULL(di))
                goto out_err;
 
        btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
@@ -3889,7 +3760,7 @@ static int fixup_tree_root_location(struct btrfs_root *root,
 
        leaf = path->nodes[0];
        ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
-       if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino ||
+       if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
            btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
                goto out;
 
@@ -3899,7 +3770,7 @@ static int fixup_tree_root_location(struct btrfs_root *root,
        if (ret)
                goto out;
 
-       btrfs_release_path(root->fs_info->tree_root, path);
+       btrfs_release_path(path);
 
        new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
        if (IS_ERR(new_root)) {
@@ -3928,6 +3799,7 @@ static void inode_tree_add(struct inode *inode)
        struct btrfs_inode *entry;
        struct rb_node **p;
        struct rb_node *parent;
+       u64 ino = btrfs_ino(inode);
 again:
        p = &root->inode_tree.rb_node;
        parent = NULL;
@@ -3940,9 +3812,9 @@ again:
                parent = *p;
                entry = rb_entry(parent, struct btrfs_inode, rb_node);
 
-               if (inode->i_ino < entry->vfs_inode.i_ino)
+               if (ino < btrfs_ino(&entry->vfs_inode))
                        p = &parent->rb_left;
-               else if (inode->i_ino > entry->vfs_inode.i_ino)
+               else if (ino > btrfs_ino(&entry->vfs_inode))
                        p = &parent->rb_right;
                else {
                        WARN_ON(!(entry->vfs_inode.i_state &
@@ -4006,9 +3878,9 @@ again:
                prev = node;
                entry = rb_entry(node, struct btrfs_inode, rb_node);
 
-               if (objectid < entry->vfs_inode.i_ino)
+               if (objectid < btrfs_ino(&entry->vfs_inode))
                        node = node->rb_left;
-               else if (objectid > entry->vfs_inode.i_ino)
+               else if (objectid > btrfs_ino(&entry->vfs_inode))
                        node = node->rb_right;
                else
                        break;
@@ -4016,7 +3888,7 @@ again:
        if (!node) {
                while (prev) {
                        entry = rb_entry(prev, struct btrfs_inode, rb_node);
-                       if (objectid <= entry->vfs_inode.i_ino) {
+                       if (objectid <= btrfs_ino(&entry->vfs_inode)) {
                                node = prev;
                                break;
                        }
@@ -4025,7 +3897,7 @@ again:
        }
        while (node) {
                entry = rb_entry(node, struct btrfs_inode, rb_node);
-               objectid = entry->vfs_inode.i_ino + 1;
+               objectid = btrfs_ino(&entry->vfs_inode) + 1;
                inode = igrab(&entry->vfs_inode);
                if (inode) {
                        spin_unlock(&root->inode_lock);
@@ -4063,7 +3935,7 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p)
 static int btrfs_find_actor(struct inode *inode, void *opaque)
 {
        struct btrfs_iget_args *args = opaque;
-       return args->ino == inode->i_ino &&
+       return args->ino == btrfs_ino(inode) &&
                args->root == BTRFS_I(inode)->root;
 }
 
@@ -4208,7 +4080,7 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
        return d_splice_alias(inode, dentry);
 }
 
-static unsigned char btrfs_filetype_table[] = {
+unsigned char btrfs_filetype_table[] = {
        DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
 };
 
@@ -4222,6 +4094,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_path *path;
+       struct list_head ins_list;
+       struct list_head del_list;
        int ret;
        struct extent_buffer *leaf;
        int slot;
@@ -4234,6 +4108,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
        char tmp_name[32];
        char *name_ptr;
        int name_len;
+       int is_curr = 0;        /* filp->f_pos points to the current index? */
 
        /* FIXME, use a real flag for deciding about the key type */
        if (root->fs_info->tree_root == root)
@@ -4241,9 +4116,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
 
        /* special case for "." */
        if (filp->f_pos == 0) {
-               over = filldir(dirent, ".", 1,
-                              1, inode->i_ino,
-                              DT_DIR);
+               over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR);
                if (over)
                        return 0;
                filp->f_pos = 1;
@@ -4258,11 +4131,19 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
                filp->f_pos = 2;
        }
        path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
        path->reada = 2;
 
+       if (key_type == BTRFS_DIR_INDEX_KEY) {
+               INIT_LIST_HEAD(&ins_list);
+               INIT_LIST_HEAD(&del_list);
+               btrfs_get_delayed_items(inode, &ins_list, &del_list);
+       }
+
        btrfs_set_key_type(&key, key_type);
        key.offset = filp->f_pos;
-       key.objectid = inode->i_ino;
+       key.objectid = btrfs_ino(inode);
 
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
@@ -4289,8 +4170,13 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
                        break;
                if (found_key.offset < filp->f_pos)
                        goto next;
+               if (key_type == BTRFS_DIR_INDEX_KEY &&
+                   btrfs_should_delete_dir_index(&del_list,
+                                                 found_key.offset))
+                       goto next;
 
                filp->f_pos = found_key.offset;
+               is_curr = 1;
 
                di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
                di_cur = 0;
@@ -4345,6 +4231,15 @@ next:
                path->slots[0]++;
        }
 
+       if (key_type == BTRFS_DIR_INDEX_KEY) {
+               if (is_curr)
+                       filp->f_pos++;
+               ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
+                                                     &ins_list);
+               if (ret)
+                       goto nopos;
+       }
+
        /* Reached end of directory/root. Bump pos past the last item. */
        if (key_type == BTRFS_DIR_INDEX_KEY)
                /*
@@ -4357,6 +4252,8 @@ next:
 nopos:
        ret = 0;
 err:
+       if (key_type == BTRFS_DIR_INDEX_KEY)
+               btrfs_put_delayed_items(&ins_list, &del_list);
        btrfs_free_path(path);
        return ret;
 }
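
The readdir hunks above interleave three sources of directory entries: DIR_INDEX items already committed in the tree, pending insertions gathered into ins_list, and pending deletions in del_list. A small standalone model of that merge order, using invented names and plain arrays instead of the kernel's delayed-item structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model only: committed on-disk indexes plus delayed insertions and
 * deletions that have not reached the tree yet. */
static bool index_pending_delete(const uint64_t *del, size_t ndel, uint64_t idx)
{
        for (size_t i = 0; i < ndel; i++)
                if (del[i] == idx)
                        return true;
        return false;
}

int main(void)
{
        uint64_t committed[]   = { 2, 3, 4, 5 };  /* already in the tree */
        uint64_t delayed_ins[] = { 6, 7 };        /* queued for insertion */
        uint64_t delayed_del[] = { 3 };           /* queued for deletion */

        /* walk committed entries, skipping ones queued for deletion
         * (btrfs_should_delete_dir_index plays this role above) */
        for (size_t i = 0; i < sizeof(committed) / sizeof(committed[0]); i++) {
                if (index_pending_delete(delayed_del, 1, committed[i]))
                        continue;
                printf("emit index %llu\n", (unsigned long long)committed[i]);
        }

        /* then emit the pending insertions, as the delayed readdir call
         * does once the tree walk finishes */
        for (size_t i = 0; i < sizeof(delayed_ins) / sizeof(delayed_ins[0]); i++)
                printf("emit index %llu\n", (unsigned long long)delayed_ins[i]);
        return 0;
}
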
@@ -4372,7 +4269,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
                return 0;
 
        smp_mb();
-       nolock = (root->fs_info->closing && root == root->fs_info->tree_root);
+       if (root->fs_info->closing && is_free_space_inode(root, inode))
+               nolock = true;
 
        if (wbc->sync_mode == WB_SYNC_ALL) {
                if (nolock)
@@ -4415,25 +4313,25 @@ void btrfs_dirty_inode(struct inode *inode)
                btrfs_end_transaction(trans, root);
                trans = btrfs_start_transaction(root, 1);
                if (IS_ERR(trans)) {
-                       if (printk_ratelimit()) {
-                               printk(KERN_ERR "btrfs: fail to "
-                                      "dirty  inode %lu error %ld\n",
-                                      inode->i_ino, PTR_ERR(trans));
-                       }
+                       printk_ratelimited(KERN_ERR "btrfs: failed to "
+                                      "dirty inode %llu error %ld\n",
+                                      (unsigned long long)btrfs_ino(inode),
+                                      PTR_ERR(trans));
                        return;
                }
                btrfs_set_trans_block_group(trans, inode);
 
                ret = btrfs_update_inode(trans, root, inode);
                if (ret) {
-                       if (printk_ratelimit()) {
-                               printk(KERN_ERR "btrfs: fail to "
-                                      "dirty  inode %lu error %d\n",
-                                      inode->i_ino, ret);
-                       }
+                       printk_ratelimited(KERN_ERR "btrfs: failed to "
+                                      "dirty inode %llu error %d\n",
+                                      (unsigned long long)btrfs_ino(inode),
+                                      ret);
                }
        }
        btrfs_end_transaction(trans, root);
+       if (BTRFS_I(inode)->delayed_node)
+               btrfs_balance_delayed_items(root);
 }
 
 /*
@@ -4449,7 +4347,7 @@ static int btrfs_set_inode_index_count(struct inode *inode)
        struct extent_buffer *leaf;
        int ret;
 
-       key.objectid = inode->i_ino;
+       key.objectid = btrfs_ino(inode);
        btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
        key.offset = (u64)-1;
 
@@ -4481,7 +4379,7 @@ static int btrfs_set_inode_index_count(struct inode *inode)
        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
 
-       if (found_key.objectid != inode->i_ino ||
+       if (found_key.objectid != btrfs_ino(inode) ||
            btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
                BTRFS_I(inode)->index_cnt = 2;
                goto out;
@@ -4502,9 +4400,12 @@ int btrfs_set_inode_index(struct inode *dir, u64 *index)
        int ret = 0;
 
        if (BTRFS_I(dir)->index_cnt == (u64)-1) {
-               ret = btrfs_set_inode_index_count(dir);
-               if (ret)
-                       return ret;
+               ret = btrfs_inode_delayed_dir_index_count(dir);
+               if (ret) {
+                       ret = btrfs_set_inode_index_count(dir);
+                       if (ret)
+                               return ret;
+               }
        }
 
        *index = BTRFS_I(dir)->index_cnt;
@@ -4540,6 +4441,12 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                return ERR_PTR(-ENOMEM);
        }
 
+       /*
+        * we have to initialize this early, so we can reclaim the inode
+        * number if we fail afterwards in this function.
+        */
+       inode->i_ino = objectid;
+
        if (dir) {
                trace_btrfs_inode_request(dir);
 
@@ -4585,7 +4492,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                goto fail;
 
        inode_init_owner(inode, dir, mode);
-       inode->i_ino = objectid;
        inode_set_bytes(inode, 0);
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
        inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
@@ -4649,29 +4555,29 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
        int ret = 0;
        struct btrfs_key key;
        struct btrfs_root *root = BTRFS_I(parent_inode)->root;
+       u64 ino = btrfs_ino(inode);
+       u64 parent_ino = btrfs_ino(parent_inode);
 
-       if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+       if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
                memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
        } else {
-               key.objectid = inode->i_ino;
+               key.objectid = ino;
                btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
                key.offset = 0;
        }
 
-       if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+       if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
                ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
                                         key.objectid, root->root_key.objectid,
-                                        parent_inode->i_ino,
-                                        index, name, name_len);
+                                        parent_ino, index, name, name_len);
        } else if (add_backref) {
-               ret = btrfs_insert_inode_ref(trans, root,
-                                            name, name_len, inode->i_ino,
-                                            parent_inode->i_ino, index);
+               ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
+                                            parent_ino, index);
        }
 
        if (ret == 0) {
                ret = btrfs_insert_dir_item(trans, root, name, name_len,
-                                           parent_inode->i_ino, &key,
+                                           parent_inode, &key,
                                            btrfs_inode_type(inode), index);
                BUG_ON(ret);
 
@@ -4714,10 +4620,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
        if (!new_valid_dev(rdev))
                return -EINVAL;
 
-       err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
-       if (err)
-               return err;
-
        /*
         * 2 for inode item and ref
         * 2 for dir items
@@ -4729,8 +4631,12 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
 
        btrfs_set_trans_block_group(trans, dir);
 
+       err = btrfs_find_free_ino(root, &objectid);
+       if (err)
+               goto out_unlock;
+
        inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
-                               dentry->d_name.len, dir->i_ino, objectid,
+                               dentry->d_name.len, btrfs_ino(dir), objectid,
                                BTRFS_I(dir)->block_group, mode, &index);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
@@ -4777,9 +4683,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
        u64 objectid;
        u64 index = 0;
 
-       err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
-       if (err)
-               return err;
        /*
         * 2 for inode item and ref
         * 2 for dir items
@@ -4791,8 +4694,12 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
 
        btrfs_set_trans_block_group(trans, dir);
 
+       err = btrfs_find_free_ino(root, &objectid);
+       if (err)
+               goto out_unlock;
+
        inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
-                               dentry->d_name.len, dir->i_ino, objectid,
+                               dentry->d_name.len, btrfs_ino(dir), objectid,
                                BTRFS_I(dir)->block_group, mode, &index);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
@@ -4903,10 +4810,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        u64 index = 0;
        unsigned long nr = 1;
 
-       err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
-       if (err)
-               return err;
-
        /*
         * 2 items for inode and ref
         * 2 items for dir items
@@ -4917,8 +4820,12 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
                return PTR_ERR(trans);
        btrfs_set_trans_block_group(trans, dir);
 
+       err = btrfs_find_free_ino(root, &objectid);
+       if (err)
+               goto out_fail;
+
        inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
-                               dentry->d_name.len, dir->i_ino, objectid,
+                               dentry->d_name.len, btrfs_ino(dir), objectid,
                                BTRFS_I(dir)->block_group, S_IFDIR | mode,
                                &index);
        if (IS_ERR(inode)) {
@@ -5041,7 +4948,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
        u64 bytenr;
        u64 extent_start = 0;
        u64 extent_end = 0;
-       u64 objectid = inode->i_ino;
+       u64 objectid = btrfs_ino(inode);
        u32 found_type;
        struct btrfs_path *path = NULL;
        struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -5069,7 +4976,7 @@ again:
                else
                        goto out;
        }
-       em = alloc_extent_map(GFP_NOFS);
+       em = alloc_extent_map();
        if (!em) {
                err = -ENOMEM;
                goto out;
@@ -5223,7 +5130,7 @@ again:
                                kunmap(page);
                                free_extent_map(em);
                                em = NULL;
-                               btrfs_release_path(root, path);
+                               btrfs_release_path(path);
                                trans = btrfs_join_transaction(root, 1);
                                if (IS_ERR(trans))
                                        return ERR_CAST(trans);
@@ -5249,7 +5156,7 @@ not_found_em:
        em->block_start = EXTENT_MAP_HOLE;
        set_bit(EXTENT_FLAG_VACANCY, &em->flags);
 insert:
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
        if (em->start > start || extent_map_end(em) <= start) {
                printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
                       "[%llu %llu]\n", (unsigned long long)em->start,
@@ -5382,7 +5289,7 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag
                u64 hole_start = start;
                u64 hole_len = len;
 
-               em = alloc_extent_map(GFP_NOFS);
+               em = alloc_extent_map();
                if (!em) {
                        err = -ENOMEM;
                        goto out;
@@ -5472,6 +5379,9 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
        if (IS_ERR(trans))
                return ERR_CAST(trans);
 
+       if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024)
+               btrfs_add_inode_defrag(trans, inode);
+
        trans->block_rsv = &root->fs_info->delalloc_block_rsv;
 
        alloc_hint = get_extent_allocation_hint(inode, start, len);
@@ -5483,7 +5393,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
        }
 
        if (!em) {
-               em = alloc_extent_map(GFP_NOFS);
+               em = alloc_extent_map();
                if (!em) {
                        em = ERR_PTR(-ENOMEM);
                        goto out;
@@ -5549,7 +5459,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
        if (!path)
                return -ENOMEM;
 
-       ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+       ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
                                       offset, 0);
        if (ret < 0)
                goto out;
@@ -5566,7 +5476,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
        ret = 0;
        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, slot);
-       if (key.objectid != inode->i_ino ||
+       if (key.objectid != btrfs_ino(inode) ||
            key.type != BTRFS_EXTENT_DATA_KEY) {
                /* not our file or wrong item type, must cow */
                goto out;
@@ -5600,7 +5510,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
         * look for other files referencing this extent, if we
         * find any we must cow
         */
-       if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
+       if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
                                  key.offset - backref_offset, disk_bytenr))
                goto out;
 
@@ -5790,9 +5700,10 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
 
                        flush_dcache_page(bvec->bv_page);
                        if (csum != *private) {
-                               printk(KERN_ERR "btrfs csum failed ino %lu off"
+                               printk(KERN_ERR "btrfs csum failed ino %llu off"
                                      " %llu csum %u private %u\n",
-                                     inode->i_ino, (unsigned long long)start,
+                                     (unsigned long long)btrfs_ino(inode),
+                                     (unsigned long long)start,
                                      csum, *private);
                                err = -EIO;
                        }
@@ -5939,9 +5850,9 @@ static void btrfs_end_dio_bio(struct bio *bio, int err)
        struct btrfs_dio_private *dip = bio->bi_private;
 
        if (err) {
-               printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu "
+               printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
                      "sector %#Lx len %u err no %d\n",
-                     dip->inode->i_ino, bio->bi_rw,
+                     (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
                      (unsigned long long)bio->bi_sector, bio->bi_size, err);
                dip->errors = 1;
 
@@ -6782,12 +6693,15 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
        ei->ordered_data_close = 0;
        ei->orphan_meta_reserved = 0;
        ei->dummy_inode = 0;
+       ei->in_defrag = 0;
        ei->force_compress = BTRFS_COMPRESS_NONE;
 
+       ei->delayed_node = NULL;
+
        inode = &ei->vfs_inode;
-       extent_map_tree_init(&ei->extent_tree, GFP_NOFS);
-       extent_io_tree_init(&ei->io_tree, &inode->i_data, GFP_NOFS);
-       extent_io_tree_init(&ei->io_failure_tree, &inode->i_data, GFP_NOFS);
+       extent_map_tree_init(&ei->extent_tree);
+       extent_io_tree_init(&ei->io_tree, &inode->i_data);
+       extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
        mutex_init(&ei->log_mutex);
        btrfs_ordered_inode_tree_init(&ei->ordered_tree);
        INIT_LIST_HEAD(&ei->i_orphan);
@@ -6851,8 +6765,8 @@ void btrfs_destroy_inode(struct inode *inode)
 
        spin_lock(&root->orphan_lock);
        if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
-               printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n",
-                      inode->i_ino);
+               printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
+                      (unsigned long long)btrfs_ino(inode));
                list_del_init(&BTRFS_I(inode)->i_orphan);
        }
        spin_unlock(&root->orphan_lock);
@@ -6874,6 +6788,7 @@ void btrfs_destroy_inode(struct inode *inode)
        inode_tree_del(inode);
        btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
 free:
+       btrfs_remove_delayed_node(inode);
        call_rcu(&inode->i_rcu, btrfs_i_callback);
 }
 
@@ -6882,7 +6797,7 @@ int btrfs_drop_inode(struct inode *inode)
        struct btrfs_root *root = BTRFS_I(inode)->root;
 
        if (btrfs_root_refs(&root->root_item) == 0 &&
-           root != root->fs_info->tree_root)
+           !is_free_space_inode(root, inode))
                return 1;
        else
                return generic_drop_inode(inode);
@@ -6991,16 +6906,17 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        u64 index = 0;
        u64 root_objectid;
        int ret;
+       u64 old_ino = btrfs_ino(old_inode);
 
-       if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
+       if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
                return -EPERM;
 
        /* we only allow rename subvolume link between subvolumes */
-       if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
+       if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
                return -EXDEV;
 
-       if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
-           (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID))
+       if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
+           (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
                return -ENOTEMPTY;
 
        if (S_ISDIR(old_inode->i_mode) && new_inode &&
@@ -7016,7 +6932,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                filemap_flush(old_inode->i_mapping);
 
        /* close the racy window with snapshot create/destroy ioctl */
-       if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
+       if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
                down_read(&root->fs_info->subvol_sem);
        /*
         * We want to reserve the absolute worst case amount of items.  So if
@@ -7041,15 +6957,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (ret)
                goto out_fail;
 
-       if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+       if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
                /* force full log commit if subvolume involved. */
                root->fs_info->last_trans_log_full_commit = trans->transid;
        } else {
                ret = btrfs_insert_inode_ref(trans, dest,
                                             new_dentry->d_name.name,
                                             new_dentry->d_name.len,
-                                            old_inode->i_ino,
-                                            new_dir->i_ino, index);
+                                            old_ino,
+                                            btrfs_ino(new_dir), index);
                if (ret)
                        goto out_fail;
                /*
@@ -7065,10 +6981,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
         * make sure the inode gets flushed if it is replacing
         * something.
         */
-       if (new_inode && new_inode->i_size &&
-           old_inode && S_ISREG(old_inode->i_mode)) {
+       if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
                btrfs_add_ordered_operation(trans, root, old_inode);
-       }
 
        old_dir->i_ctime = old_dir->i_mtime = ctime;
        new_dir->i_ctime = new_dir->i_mtime = ctime;
@@ -7077,7 +6991,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (old_dentry->d_parent != new_dentry->d_parent)
                btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
 
-       if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+       if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
                root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
                ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
                                        old_dentry->d_name.name,
@@ -7094,7 +7008,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        if (new_inode) {
                new_inode->i_ctime = CURRENT_TIME;
-               if (unlikely(new_inode->i_ino ==
+               if (unlikely(btrfs_ino(new_inode) ==
                             BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
                        root_objectid = BTRFS_I(new_inode)->location.objectid;
                        ret = btrfs_unlink_subvol(trans, dest, new_dir,
@@ -7122,7 +7036,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                             new_dentry->d_name.len, 0, index);
        BUG_ON(ret);
 
-       if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
+       if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
                struct dentry *parent = dget_parent(new_dentry);
                btrfs_log_new_name(trans, old_inode, old_dir, parent);
                dput(parent);
@@ -7131,7 +7045,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 out_fail:
        btrfs_end_transaction_throttle(trans, root);
 out_notrans:
-       if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
+       if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&root->fs_info->subvol_sem);
 
        return ret;
@@ -7185,58 +7099,6 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
        return 0;
 }
 
-int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput,
-                                  int sync)
-{
-       struct btrfs_inode *binode;
-       struct inode *inode = NULL;
-
-       spin_lock(&root->fs_info->delalloc_lock);
-       while (!list_empty(&root->fs_info->delalloc_inodes)) {
-               binode = list_entry(root->fs_info->delalloc_inodes.next,
-                                   struct btrfs_inode, delalloc_inodes);
-               inode = igrab(&binode->vfs_inode);
-               if (inode) {
-                       list_move_tail(&binode->delalloc_inodes,
-                                      &root->fs_info->delalloc_inodes);
-                       break;
-               }
-
-               list_del_init(&binode->delalloc_inodes);
-               cond_resched_lock(&root->fs_info->delalloc_lock);
-       }
-       spin_unlock(&root->fs_info->delalloc_lock);
-
-       if (inode) {
-               if (sync) {
-                       filemap_write_and_wait(inode->i_mapping);
-                       /*
-                        * We have to do this because compression doesn't
-                        * actually set PG_writeback until it submits the pages
-                        * for IO, which happens in an async thread, so we could
-                        * race and not actually wait for any writeback pages
-                        * because they've not been submitted yet.  Technically
-                        * this could still be the case for the ordered stuff
-                        * since the async thread may not have started to do its
-                        * work yet.  If this becomes the case then we need to
-                        * figure out a way to make sure that in writepage we
-                        * wait for any async pages to be submitted before
-                        * returning so that fdatawait does what its supposed to
-                        * do.
-                        */
-                       btrfs_wait_ordered_range(inode, 0, (u64)-1);
-               } else {
-                       filemap_flush(inode->i_mapping);
-               }
-               if (delay_iput)
-                       btrfs_add_delayed_iput(inode);
-               else
-                       iput(inode);
-               return 1;
-       }
-       return 0;
-}
-
 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                         const char *symname)
 {
@@ -7260,9 +7122,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
                return -ENAMETOOLONG;
 
-       err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
-       if (err)
-               return err;
        /*
         * 2 items for inode item and ref
         * 2 items for dir items
@@ -7274,8 +7133,12 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 
        btrfs_set_trans_block_group(trans, dir);
 
+       err = btrfs_find_free_ino(root, &objectid);
+       if (err)
+               goto out_unlock;
+
        inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
-                               dentry->d_name.len, dir->i_ino, objectid,
+                               dentry->d_name.len, btrfs_ino(dir), objectid,
                                BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
                                &index);
        if (IS_ERR(inode)) {
@@ -7307,7 +7170,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 
        path = btrfs_alloc_path();
        BUG_ON(!path);
-       key.objectid = inode->i_ino;
+       key.objectid = btrfs_ino(inode);
        key.offset = 0;
        btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
        datasize = btrfs_file_extent_calc_inline_size(name_len);
@@ -7315,6 +7178,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
                                      datasize);
        if (err) {
                drop_inode = 1;
+               btrfs_free_path(path);
                goto out_unlock;
        }
        leaf = path->nodes[0];
index 2616f7ed47996a21df8b0c5de224c4b6c1aebf70..85e818ce00c5df0be84c6ce3c1894ab6c5303615 100644 (file)
@@ -50,6 +50,7 @@
 #include "print-tree.h"
 #include "volumes.h"
 #include "locking.h"
+#include "inode-map.h"
 
 /* Mask out flags that are inappropriate for the given type of inode. */
 static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -281,8 +282,9 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
-       mutex_lock(&fs_info->fs_devices->device_list_mutex);
-       list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) {
+       rcu_read_lock();
+       list_for_each_entry_rcu(device, &fs_info->fs_devices->devices,
+                               dev_list) {
                if (!device->bdev)
                        continue;
                q = bdev_get_queue(device->bdev);
@@ -292,7 +294,7 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
                                     minlen);
                }
        }
-       mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+       rcu_read_unlock();
        if (!num_devices)
                return -EOPNOTSUPP;
 
@@ -329,8 +331,7 @@ static noinline int create_subvol(struct btrfs_root *root,
        u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
        u64 index = 0;
 
-       ret = btrfs_find_free_objectid(NULL, root->fs_info->tree_root,
-                                      0, &objectid);
+       ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid);
        if (ret) {
                dput(parent);
                return ret;
@@ -422,7 +423,7 @@ static noinline int create_subvol(struct btrfs_root *root,
        BUG_ON(ret);
 
        ret = btrfs_insert_dir_item(trans, root,
-                                   name, namelen, dir->i_ino, &key,
+                                   name, namelen, dir, &key,
                                    BTRFS_FT_DIR, index);
        if (ret)
                goto fail;
@@ -433,7 +434,7 @@ static noinline int create_subvol(struct btrfs_root *root,
 
        ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
                                 objectid, root->root_key.objectid,
-                                dir->i_ino, index, name, namelen);
+                                btrfs_ino(dir), index, name, namelen);
 
        BUG_ON(ret);
 
@@ -655,6 +656,106 @@ out_unlock:
        return error;
 }
 
+/*
+ * When we're defragging a range, we don't want to kick it off again
+ * if it is really just waiting for delalloc to send it down.
+ * If we find a nice big extent or delalloc range for the bytes in the
+ * file you want to defrag, we return 0 to let you know to skip this
+ * part of the file
+ */
+static int check_defrag_in_cache(struct inode *inode, u64 offset, int thresh)
+{
+       struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+       struct extent_map *em = NULL;
+       struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+       u64 end;
+
+       read_lock(&em_tree->lock);
+       em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
+       read_unlock(&em_tree->lock);
+
+       if (em) {
+               end = extent_map_end(em);
+               free_extent_map(em);
+               if (end - offset > thresh)
+                       return 0;
+       }
+       /* if we already have a nice delalloc here, just stop */
+       thresh /= 2;
+       end = count_range_bits(io_tree, &offset, offset + thresh,
+                              thresh, EXTENT_DELALLOC, 1);
+       if (end >= thresh)
+               return 0;
+       return 1;
+}
+
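
check_defrag_in_cache() above skips an offset when the cached extent map already extends more than thresh bytes past it, or when the window that follows is already delalloc and will be merged by writeback anyway. A standalone model of that decision, with invented names and the 256 KiB default this patch uses elsewhere:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model only: not the kernel helper. */
static bool defrag_can_skip(uint64_t extent_bytes_past_offset,
                            uint64_t delalloc_bytes_in_next_window,
                            uint64_t thresh)
{
        if (extent_bytes_past_offset > thresh)
                return true;            /* already one nice big extent */
        /* the next thresh/2 bytes are delalloc and will merge on their own */
        return delalloc_bytes_in_next_window >= thresh / 2;
}

int main(void)
{
        uint64_t thresh = 256 * 1024;

        printf("%d\n", defrag_can_skip(300 * 1024, 0, thresh));          /* 1 */
        printf("%d\n", defrag_can_skip(64 * 1024, 128 * 1024, thresh));  /* 1 */
        printf("%d\n", defrag_can_skip(64 * 1024, 32 * 1024, thresh));   /* 0 */
        return 0;
}
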
+/*
+ * helper function to walk through a file and find extents
+ * newer than a specific transid, and smaller than thresh.
+ *
+ * This is used by the defragging code to find new and small
+ * extents
+ */
+static int find_new_extents(struct btrfs_root *root,
+                           struct inode *inode, u64 newer_than,
+                           u64 *off, int thresh)
+{
+       struct btrfs_path *path;
+       struct btrfs_key min_key;
+       struct btrfs_key max_key;
+       struct extent_buffer *leaf;
+       struct btrfs_file_extent_item *extent;
+       int type;
+       int ret;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       min_key.objectid = inode->i_ino;
+       min_key.type = BTRFS_EXTENT_DATA_KEY;
+       min_key.offset = *off;
+
+       max_key.objectid = inode->i_ino;
+       max_key.type = (u8)-1;
+       max_key.offset = (u64)-1;
+
+       path->keep_locks = 1;
+
+       while (1) {
+               ret = btrfs_search_forward(root, &min_key, &max_key,
+                                          path, 0, newer_than);
+               if (ret != 0)
+                       goto none;
+               if (min_key.objectid != inode->i_ino)
+                       goto none;
+               if (min_key.type != BTRFS_EXTENT_DATA_KEY)
+                       goto none;
+
+               leaf = path->nodes[0];
+               extent = btrfs_item_ptr(leaf, path->slots[0],
+                                       struct btrfs_file_extent_item);
+
+               type = btrfs_file_extent_type(leaf, extent);
+               if (type == BTRFS_FILE_EXTENT_REG &&
+                   btrfs_file_extent_num_bytes(leaf, extent) < thresh &&
+                   check_defrag_in_cache(inode, min_key.offset, thresh)) {
+                       *off = min_key.offset;
+                       btrfs_free_path(path);
+                       return 0;
+               }
+
+               if (min_key.offset == (u64)-1)
+                       goto none;
+
+               min_key.offset++;
+               btrfs_release_path(path);
+       }
+none:
+       btrfs_free_path(path);
+       return -ENOENT;
+}
+
 static int should_defrag_range(struct inode *inode, u64 start, u64 len,
                               int thresh, u64 *last_len, u64 *skip,
                               u64 *defrag_end)
@@ -664,10 +765,6 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        int ret = 1;
 
-
-       if (thresh == 0)
-               thresh = 256 * 1024;
-
        /*
         * make sure that once we start defragging an extent, we keep on
         * defragging it
@@ -726,27 +823,176 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
        return ret;
 }
 
-static int btrfs_defrag_file(struct file *file,
-                            struct btrfs_ioctl_defrag_range_args *range)
+/*
+ * it doesn't do much good to defrag one or two pages
+ * at a time.  This pulls in a nice chunk of pages
+ * to COW and defrag.
+ *
+ * It also makes sure the delalloc code has enough
+ * dirty data to avoid making new small extents as part
+ * of the defrag
+ *
+ * It's a good idea to start RA on this range
+ * before calling this.
+ */
+static int cluster_pages_for_defrag(struct inode *inode,
+                                   struct page **pages,
+                                   unsigned long start_index,
+                                   int num_pages)
 {
-       struct inode *inode = fdentry(file)->d_inode;
-       struct btrfs_root *root = BTRFS_I(inode)->root;
-       struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
+       unsigned long file_end;
+       u64 isize = i_size_read(inode);
+       u64 page_start;
+       u64 page_end;
+       int ret;
+       int i;
+       int i_done;
        struct btrfs_ordered_extent *ordered;
-       struct page *page;
+       struct extent_state *cached_state = NULL;
+
+       if (isize == 0)
+               return 0;
+       file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
+
+       ret = btrfs_delalloc_reserve_space(inode,
+                                          num_pages << PAGE_CACHE_SHIFT);
+       if (ret)
+               return ret;
+again:
+       ret = 0;
+       i_done = 0;
+
+       /* step one, lock all the pages */
+       for (i = 0; i < num_pages; i++) {
+               struct page *page;
+               page = grab_cache_page(inode->i_mapping,
+                                           start_index + i);
+               if (!page)
+                       break;
+
+               if (!PageUptodate(page)) {
+                       btrfs_readpage(NULL, page);
+                       lock_page(page);
+                       if (!PageUptodate(page)) {
+                               unlock_page(page);
+                               page_cache_release(page);
+                               ret = -EIO;
+                               break;
+                       }
+               }
+               isize = i_size_read(inode);
+               file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
+               if (!isize || page->index > file_end ||
+                   page->mapping != inode->i_mapping) {
+                       /* whoops, we blew past eof, skip this page */
+                       unlock_page(page);
+                       page_cache_release(page);
+                       break;
+               }
+               pages[i] = page;
+               i_done++;
+       }
+       if (!i_done || ret)
+               goto out;
+
+       if (!(inode->i_sb->s_flags & MS_ACTIVE))
+               goto out;
+
+       /*
+        * so now we have a nice long stream of locked
+        * and up to date pages, lets wait on them
+        * and up to date pages, let's wait on them
+       for (i = 0; i < i_done; i++)
+               wait_on_page_writeback(pages[i]);
+
+       page_start = page_offset(pages[0]);
+       page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE;
+
+       lock_extent_bits(&BTRFS_I(inode)->io_tree,
+                        page_start, page_end - 1, 0, &cached_state,
+                        GFP_NOFS);
+       ordered = btrfs_lookup_first_ordered_extent(inode, page_end - 1);
+       if (ordered &&
+           ordered->file_offset + ordered->len > page_start &&
+           ordered->file_offset < page_end) {
+               btrfs_put_ordered_extent(ordered);
+               unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+                                    page_start, page_end - 1,
+                                    &cached_state, GFP_NOFS);
+               for (i = 0; i < i_done; i++) {
+                       unlock_page(pages[i]);
+                       page_cache_release(pages[i]);
+               }
+               btrfs_wait_ordered_range(inode, page_start,
+                                        page_end - page_start);
+               goto again;
+       }
+       if (ordered)
+               btrfs_put_ordered_extent(ordered);
+
+       clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
+                         page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
+                         EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
+                         GFP_NOFS);
+
+       if (i_done != num_pages) {
+               atomic_inc(&BTRFS_I(inode)->outstanding_extents);
+               btrfs_delalloc_release_space(inode,
+                                    (num_pages - i_done) << PAGE_CACHE_SHIFT);
+       }
+
+
+       btrfs_set_extent_delalloc(inode, page_start, page_end - 1,
+                                 &cached_state);
+
+       unlock_extent_cached(&BTRFS_I(inode)->io_tree,
+                            page_start, page_end - 1, &cached_state,
+                            GFP_NOFS);
+
+       for (i = 0; i < i_done; i++) {
+               clear_page_dirty_for_io(pages[i]);
+               ClearPageChecked(pages[i]);
+               set_page_extent_mapped(pages[i]);
+               set_page_dirty(pages[i]);
+               unlock_page(pages[i]);
+               page_cache_release(pages[i]);
+       }
+       return i_done;
+out:
+       for (i = 0; i < i_done; i++) {
+               unlock_page(pages[i]);
+               page_cache_release(pages[i]);
+       }
+       btrfs_delalloc_release_space(inode, num_pages << PAGE_CACHE_SHIFT);
+       return ret;
+
+}
+
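
cluster_pages_for_defrag() above reserves delalloc space for the whole requested cluster up front and, when only i_done of the num_pages pages could be locked, gives the unused portion back. The arithmetic, shown standalone with an assumed 4 KiB page size:

#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SHIFT 12     /* assumption: 4 KiB pages */

int main(void)
{
        unsigned long num_pages = 64;   /* a 256 KiB cluster at 4 KiB/page */
        unsigned long i_done = 50;      /* pages actually locked */

        uint64_t reserved = (uint64_t)num_pages << MODEL_PAGE_SHIFT;
        uint64_t released = (uint64_t)(num_pages - i_done) << MODEL_PAGE_SHIFT;

        printf("reserved %llu bytes, released %llu, kept %llu\n",
               (unsigned long long)reserved,
               (unsigned long long)released,
               (unsigned long long)(reserved - released));
        return 0;
}
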
+int btrfs_defrag_file(struct inode *inode, struct file *file,
+                     struct btrfs_ioctl_defrag_range_args *range,
+                     u64 newer_than, unsigned long max_to_defrag)
+{
+       struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_super_block *disk_super;
+       struct file_ra_state *ra = NULL;
        unsigned long last_index;
-       unsigned long ra_pages = root->fs_info->bdi.ra_pages;
-       unsigned long total_read = 0;
        u64 features;
-       u64 page_start;
-       u64 page_end;
        u64 last_len = 0;
        u64 skip = 0;
        u64 defrag_end = 0;
+       u64 newer_off = range->start;
+       int newer_left = 0;
        unsigned long i;
        int ret;
+       int defrag_count = 0;
        int compress_type = BTRFS_COMPRESS_ZLIB;
+       int extent_thresh = range->extent_thresh;
+       int newer_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT;
+       u64 new_align = ~((u64)128 * 1024 - 1);
+       struct page **pages = NULL;
+
+       if (extent_thresh == 0)
+               extent_thresh = 256 * 1024;
 
        if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) {
                if (range->compress_type > BTRFS_COMPRESS_TYPES)
@@ -758,6 +1004,27 @@ static int btrfs_defrag_file(struct file *file,
        if (inode->i_size == 0)
                return 0;
 
+       /*
+        * if we were not given a file, allocate a readahead
+        * context
+        */
+       if (!file) {
+               ra = kzalloc(sizeof(*ra), GFP_NOFS);
+               if (!ra)
+                       return -ENOMEM;
+               file_ra_state_init(ra, inode->i_mapping);
+       } else {
+               ra = &file->f_ra;
+       }
+
+       pages = kmalloc(sizeof(struct page *) * newer_cluster,
+                       GFP_NOFS);
+       if (!pages) {
+               ret = -ENOMEM;
+               goto out_ra;
+       }
+
+       /* find the last page to defrag */
        if (range->start + range->len > range->start) {
                last_index = min_t(u64, inode->i_size - 1,
                         range->start + range->len - 1) >> PAGE_CACHE_SHIFT;
@@ -765,11 +1032,37 @@ static int btrfs_defrag_file(struct file *file,
                last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT;
        }
 
-       i = range->start >> PAGE_CACHE_SHIFT;
-       while (i <= last_index) {
-               if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
+       if (newer_than) {
+               ret = find_new_extents(root, inode, newer_than,
+                                      &newer_off, 64 * 1024);
+               if (!ret) {
+                       range->start = newer_off;
+                       /*
+                        * we always align our defrag to help keep
+                        * the extents in the file evenly spaced
+                        */
+                       i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
+                       newer_left = newer_cluster;
+               } else
+                       goto out_ra;
+       } else {
+               i = range->start >> PAGE_CACHE_SHIFT;
+       }
+       if (!max_to_defrag)
+               max_to_defrag = last_index - 1;
+
+       while (i <= last_index && defrag_count < max_to_defrag) {
+               /*
+                * make sure we stop running if someone unmounts
+                * the FS
+                */
+               if (!(inode->i_sb->s_flags & MS_ACTIVE))
+                       break;
+
+               if (!newer_than &&
+                   !should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
                                        PAGE_CACHE_SIZE,
-                                       range->extent_thresh,
+                                       extent_thresh,
                                        &last_len, &skip,
                                        &defrag_end)) {
                        unsigned long next;
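
find_new_extents() hands back a byte offset; the loop above masks it with new_align to round it down to a 128 KiB boundary and shifts by PAGE_CACHE_SHIFT to get the page index where the next cluster starts. The same arithmetic standalone, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SHIFT 12     /* assumption: 4 KiB pages */

int main(void)
{
        uint64_t new_align = ~((uint64_t)128 * 1024 - 1);       /* 128 KiB mask */
        uint64_t newer_off = 913 * 1024 + 100;                  /* arbitrary offset */

        uint64_t aligned = newer_off & new_align;               /* 896 KiB */
        uint64_t page_index = aligned >> MODEL_PAGE_SHIFT;      /* 224 */

        printf("offset %llu -> aligned %llu -> page index %llu\n",
               (unsigned long long)newer_off,
               (unsigned long long)aligned,
               (unsigned long long)page_index);
        return 0;
}
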
@@ -781,92 +1074,39 @@ static int btrfs_defrag_file(struct file *file,
                        i = max(i + 1, next);
                        continue;
                }
-
-               if (total_read % ra_pages == 0) {
-                       btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i,
-                                      min(last_index, i + ra_pages - 1));
-               }
-               total_read++;
-               mutex_lock(&inode->i_mutex);
                if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)
                        BTRFS_I(inode)->force_compress = compress_type;
 
-               ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
-               if (ret)
-                       goto err_unlock;
-again:
-               if (inode->i_size == 0 ||
-                   i > ((inode->i_size - 1) >> PAGE_CACHE_SHIFT)) {
-                       ret = 0;
-                       goto err_reservations;
-               }
+               btrfs_force_ra(inode->i_mapping, ra, file, i, newer_cluster);
 
-               page = grab_cache_page(inode->i_mapping, i);
-               if (!page) {
-                       ret = -ENOMEM;
-                       goto err_reservations;
-               }
-
-               if (!PageUptodate(page)) {
-                       btrfs_readpage(NULL, page);
-                       lock_page(page);
-                       if (!PageUptodate(page)) {
-                               unlock_page(page);
-                               page_cache_release(page);
-                               ret = -EIO;
-                               goto err_reservations;
-                       }
-               }
-
-               if (page->mapping != inode->i_mapping) {
-                       unlock_page(page);
-                       page_cache_release(page);
-                       goto again;
-               }
+               ret = cluster_pages_for_defrag(inode, pages, i, newer_cluster);
+               if (ret < 0)
+                       goto out_ra;
 
-               wait_on_page_writeback(page);
+               defrag_count += ret;
+               balance_dirty_pages_ratelimited_nr(inode->i_mapping, ret);
+               i += ret;
 
-               if (PageDirty(page)) {
-                       btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
-                       goto loop_unlock;
-               }
-
-               page_start = (u64)page->index << PAGE_CACHE_SHIFT;
-               page_end = page_start + PAGE_CACHE_SIZE - 1;
-               lock_extent(io_tree, page_start, page_end, GFP_NOFS);
+               if (newer_than) {
+                       if (newer_off == (u64)-1)
+                               break;
 
-               ordered = btrfs_lookup_ordered_extent(inode, page_start);
-               if (ordered) {
-                       unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
-                       unlock_page(page);
-                       page_cache_release(page);
-                       btrfs_start_ordered_extent(inode, ordered, 1);
-                       btrfs_put_ordered_extent(ordered);
-                       goto again;
+                       newer_off = max(newer_off + 1,
+                                       (u64)i << PAGE_CACHE_SHIFT);
+
+                       ret = find_new_extents(root, inode,
+                                              newer_than, &newer_off,
+                                              64 * 1024);
+                       if (!ret) {
+                               range->start = newer_off;
+                               i = (newer_off & new_align) >> PAGE_CACHE_SHIFT;
+                               newer_left = newer_cluster;
+                       } else {
+                               break;
+                       }
+               } else {
+                       i++;
                }
-               set_page_extent_mapped(page);
-
-               /*
-                * this makes sure page_mkwrite is called on the
-                * page if it is dirtied again later
-                */
-               clear_page_dirty_for_io(page);
-               clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start,
-                                 page_end, EXTENT_DIRTY | EXTENT_DELALLOC |
-                                 EXTENT_DO_ACCOUNTING, GFP_NOFS);
-
-               btrfs_set_extent_delalloc(inode, page_start, page_end, NULL);
-               ClearPageChecked(page);
-               set_page_dirty(page);
-               unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
-
-loop_unlock:
-               unlock_page(page);
-               page_cache_release(page);
-               mutex_unlock(&inode->i_mutex);
-
-               balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1);
-               i++;
        }
 
        if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO))
@@ -898,12 +1138,14 @@ loop_unlock:
                btrfs_set_super_incompat_flags(disk_super, features);
        }
 
-       return 0;
+       if (!file)
+               kfree(ra);
+       return defrag_count;
 
-err_reservations:
-       btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
-err_unlock:
-       mutex_unlock(&inode->i_mutex);
+out_ra:
+       if (!file)
+               kfree(ra);
+       kfree(pages);
        return ret;
 }
 
@@ -1129,7 +1371,7 @@ static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
        int ret = 0;
        u64 flags = 0;
 
-       if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID)
+       if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
                return -EINVAL;
 
        down_read(&root->fs_info->subvol_sem);
@@ -1156,7 +1398,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
        if (root->fs_info->sb->s_flags & MS_RDONLY)
                return -EROFS;
 
-       if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID)
+       if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
                return -EINVAL;
 
        if (copy_from_user(&flags, arg, sizeof(flags)))
@@ -1279,7 +1521,6 @@ static noinline int copy_to_sk(struct btrfs_root *root,
        int nritems;
        int i;
        int slot;
-       int found = 0;
        int ret = 0;
 
        leaf = path->nodes[0];
@@ -1326,7 +1567,7 @@ static noinline int copy_to_sk(struct btrfs_root *root,
                                           item_off, item_len);
                        *sk_offset += item_len;
                }
-               found++;
+               (*num_found)++;
 
                if (*num_found >= sk->nr_items)
                        break;
@@ -1345,7 +1586,6 @@ advance_key:
        } else
                ret = 1;
 overflow:
-       *num_found += found;
        return ret;
 }
 
@@ -1402,7 +1642,7 @@ static noinline int search_ioctl(struct inode *inode,
                }
                ret = copy_to_sk(root, path, &key, sk, args->buf,
                                 &sk_offset, &num_found);
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
                if (ret || num_found >= sk->nr_items)
                        break;
 
@@ -1509,7 +1749,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info,
                if (key.offset == BTRFS_FIRST_FREE_OBJECTID)
                        break;
 
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
                key.objectid = key.offset;
                key.offset = (u64)-1;
                dirid = key.objectid;
@@ -1639,7 +1879,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
                        goto out_dput;
        }
 
-       if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
+       if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
                err = -EINVAL;
                goto out_dput;
        }
@@ -1757,7 +1997,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
                        /* the rest are all set to zero by kzalloc */
                        range->len = (u64)-1;
                }
-               ret = btrfs_defrag_file(file, range);
+               ret = btrfs_defrag_file(fdentry(file)->d_inode, file,
+                                       range, 0, 0);
+               if (ret > 0)
+                       ret = 0;
                kfree(range);
                break;
        default:
@@ -1809,6 +2052,75 @@ static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
        return ret;
 }
 
+static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
+{
+       struct btrfs_ioctl_fs_info_args fi_args;
+       struct btrfs_device *device;
+       struct btrfs_device *next;
+       struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       fi_args.num_devices = fs_devices->num_devices;
+       fi_args.max_id = 0;
+       memcpy(&fi_args.fsid, root->fs_info->fsid, sizeof(fi_args.fsid));
+
+       mutex_lock(&fs_devices->device_list_mutex);
+       list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
+               if (device->devid > fi_args.max_id)
+                       fi_args.max_id = device->devid;
+       }
+       mutex_unlock(&fs_devices->device_list_mutex);
+
+       if (copy_to_user(arg, &fi_args, sizeof(fi_args)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
+{
+       struct btrfs_ioctl_dev_info_args *di_args;
+       struct btrfs_device *dev;
+       struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+       int ret = 0;
+       char *s_uuid = NULL;
+       char empty_uuid[BTRFS_UUID_SIZE] = {0};
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       di_args = memdup_user(arg, sizeof(*di_args));
+       if (IS_ERR(di_args))
+               return PTR_ERR(di_args);
+
+       if (memcmp(empty_uuid, di_args->uuid, BTRFS_UUID_SIZE) != 0)
+               s_uuid = di_args->uuid;
+
+       mutex_lock(&fs_devices->device_list_mutex);
+       dev = btrfs_find_device(root, di_args->devid, s_uuid, NULL);
+       mutex_unlock(&fs_devices->device_list_mutex);
+
+       if (!dev) {
+               ret = -ENODEV;
+               goto out;
+       }
+
+       di_args->devid = dev->devid;
+       di_args->bytes_used = dev->bytes_used;
+       di_args->total_bytes = dev->total_bytes;
+       memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
+       strncpy(di_args->path, dev->name, sizeof(di_args->path));
+
+out:
+       if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
+               ret = -EFAULT;
+
+       kfree(di_args);
+       return ret;
+}
+
 static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                                       u64 off, u64 olen, u64 destoff)
 {
@@ -1925,7 +2237,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
        }
 
        /* clone data */
-       key.objectid = src->i_ino;
+       key.objectid = btrfs_ino(src);
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = 0;
 
@@ -1952,7 +2264,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 
                btrfs_item_key_to_cpu(leaf, &key, slot);
                if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
-                   key.objectid != src->i_ino)
+                   key.objectid != btrfs_ino(src))
                        break;
 
                if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
@@ -1988,14 +2300,14 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                                datal = btrfs_file_extent_ram_bytes(leaf,
                                                                    extent);
                        }
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
 
                        if (key.offset + datal <= off ||
                            key.offset >= off+len)
                                goto next;
 
                        memcpy(&new_key, &key, sizeof(new_key));
-                       new_key.objectid = inode->i_ino;
+                       new_key.objectid = btrfs_ino(inode);
                        if (off <= key.offset)
                                new_key.offset = key.offset + destoff - off;
                        else
@@ -2049,7 +2361,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                                        ret = btrfs_inc_extent_ref(trans, root,
                                                        disko, diskl, 0,
                                                        root->root_key.objectid,
-                                                       inode->i_ino,
+                                                       btrfs_ino(inode),
                                                        new_key.offset - datao);
                                        BUG_ON(ret);
                                }
@@ -2098,7 +2410,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                        }
 
                        btrfs_mark_buffer_dirty(leaf);
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
 
                        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 
@@ -2119,12 +2431,12 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                        btrfs_end_transaction(trans, root);
                }
 next:
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
                key.offset++;
        }
        ret = 0;
 out:
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
        unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS);
 out_unlock:
        mutex_unlock(&src->i_mutex);
@@ -2471,6 +2783,58 @@ static noinline long btrfs_ioctl_wait_sync(struct file *file, void __user *argp)
        return btrfs_wait_for_commit(root, transid);
 }
 
+static long btrfs_ioctl_scrub(struct btrfs_root *root, void __user *arg)
+{
+       int ret;
+       struct btrfs_ioctl_scrub_args *sa;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       sa = memdup_user(arg, sizeof(*sa));
+       if (IS_ERR(sa))
+               return PTR_ERR(sa);
+
+       ret = btrfs_scrub_dev(root, sa->devid, sa->start, sa->end,
+                             &sa->progress, sa->flags & BTRFS_SCRUB_READONLY);
+
+       if (copy_to_user(arg, sa, sizeof(*sa)))
+               ret = -EFAULT;
+
+       kfree(sa);
+       return ret;
+}
+
+static long btrfs_ioctl_scrub_cancel(struct btrfs_root *root, void __user *arg)
+{
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       return btrfs_scrub_cancel(root);
+}
+
+static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
+                                      void __user *arg)
+{
+       struct btrfs_ioctl_scrub_args *sa;
+       int ret;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       sa = memdup_user(arg, sizeof(*sa));
+       if (IS_ERR(sa))
+               return PTR_ERR(sa);
+
+       ret = btrfs_scrub_progress(root, sa->devid, &sa->progress);
+
+       if (copy_to_user(arg, sa, sizeof(*sa)))
+               ret = -EFAULT;
+
+       kfree(sa);
+       return ret;
+}
+
 long btrfs_ioctl(struct file *file, unsigned int
                cmd, unsigned long arg)
 {
@@ -2510,6 +2874,10 @@ long btrfs_ioctl(struct file *file, unsigned int
                return btrfs_ioctl_add_dev(root, argp);
        case BTRFS_IOC_RM_DEV:
                return btrfs_ioctl_rm_dev(root, argp);
+       case BTRFS_IOC_FS_INFO:
+               return btrfs_ioctl_fs_info(root, argp);
+       case BTRFS_IOC_DEV_INFO:
+               return btrfs_ioctl_dev_info(root, argp);
        case BTRFS_IOC_BALANCE:
                return btrfs_balance(root->fs_info->dev_root);
        case BTRFS_IOC_CLONE:
@@ -2533,6 +2901,12 @@ long btrfs_ioctl(struct file *file, unsigned int
                return btrfs_ioctl_start_sync(file, argp);
        case BTRFS_IOC_WAIT_SYNC:
                return btrfs_ioctl_wait_sync(file, argp);
+       case BTRFS_IOC_SCRUB:
+               return btrfs_ioctl_scrub(root, argp);
+       case BTRFS_IOC_SCRUB_CANCEL:
+               return btrfs_ioctl_scrub_cancel(root, argp);
+       case BTRFS_IOC_SCRUB_PROGRESS:
+               return btrfs_ioctl_scrub_progress(root, argp);
        }
 
        return -ENOTTY;
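
For orientation, a minimal userspace sketch of how the scrub ioctls dispatched above might be driven. It assumes the structures and BTRFS_IOC_SCRUB* numbers from fs/btrfs/ioctl.h (further down in this diff) have been copied into a local "btrfs-ioctl.h", and the mount point and device id are hypothetical. BTRFS_IOC_SCRUB blocks inside btrfs_scrub_dev() until the scrub finishes, so BTRFS_IOC_SCRUB_PROGRESS or BTRFS_IOC_SCRUB_CANCEL would normally be issued from a second thread or process against the same filesystem.

        /* sketch only: "btrfs-ioctl.h" is a hypothetical copy of fs/btrfs/ioctl.h */
        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <unistd.h>
        #include <linux/types.h>
        #include "btrfs-ioctl.h"

        static int scrub_whole_device(const char *mnt, __u64 devid)
        {
                struct btrfs_ioctl_scrub_args sa;
                int fd = open(mnt, O_RDONLY);

                if (fd < 0)
                        return -1;

                memset(&sa, 0, sizeof(sa));
                sa.devid = devid;
                sa.start = 0;
                sa.end   = (__u64)-1;           /* scrub the whole device */
                sa.flags = 0;                   /* or BTRFS_SCRUB_READONLY */

                /* returns when the scrub finished, failed or was cancelled */
                if (ioctl(fd, BTRFS_IOC_SCRUB, &sa) < 0) {
                        close(fd);
                        return -1;
                }

                printf("corrected %llu, uncorrectable %llu errors\n",
                       (unsigned long long)sa.progress.corrected_errors,
                       (unsigned long long)sa.progress.uncorrectable_errors);
                close(fd);
                return 0;
        }
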
index 8fb382167b13b55670e6411785f006defccc9d0d..ad1ea789fcb4d281dcd80398bd75bbec2b1e30d9 100644 (file)
@@ -32,6 +32,8 @@ struct btrfs_ioctl_vol_args {
 
 #define BTRFS_SUBVOL_CREATE_ASYNC      (1ULL << 0)
 #define BTRFS_SUBVOL_RDONLY            (1ULL << 1)
+#define BTRFS_FSID_SIZE 16
+#define BTRFS_UUID_SIZE 16
 
 #define BTRFS_SUBVOL_NAME_MAX 4039
 struct btrfs_ioctl_vol_args_v2 {
@@ -42,6 +44,71 @@ struct btrfs_ioctl_vol_args_v2 {
        char name[BTRFS_SUBVOL_NAME_MAX + 1];
 };
 
+/*
+ * structure to report errors and progress to userspace, either as a
+ * result of a finished scrub, a canceled scrub or a progress inquiry
+ */
+struct btrfs_scrub_progress {
+       __u64 data_extents_scrubbed;    /* # of data extents scrubbed */
+       __u64 tree_extents_scrubbed;    /* # of tree extents scrubbed */
+       __u64 data_bytes_scrubbed;      /* # of data bytes scrubbed */
+       __u64 tree_bytes_scrubbed;      /* # of tree bytes scrubbed */
+       __u64 read_errors;              /* # of read errors encountered (EIO) */
+       __u64 csum_errors;              /* # of failed csum checks */
+       __u64 verify_errors;            /* # of occurrences where the metadata
+                                        * of a tree block did not match the
+                                        * expected values, like generation or
+                                        * logical */
+       __u64 no_csum;                  /* # of 4k data blocks for which no csum
+                                        * is present, probably the result of
+                                        * data written with nodatasum */
+       __u64 csum_discards;            /* # of csums for which no data was found
+                                        * in the extent tree. */
+       __u64 super_errors;             /* # of bad super blocks encountered */
+       __u64 malloc_errors;            /* # of internal kmalloc errors. These
+                                        * will likely cause an incomplete
+                                        * scrub */
+       __u64 uncorrectable_errors;     /* # of errors where either no intact
+                                        * copy was found or the writeback
+                                        * failed */
+       __u64 corrected_errors;         /* # of errors corrected */
+       __u64 last_physical;            /* last physical address scrubbed. In
+                                        * case a scrub was aborted, this can
+                                        * be used to restart the scrub */
+       __u64 unverified_errors;        /* # of occurrences where a read for a
+                                        * full (64k) bio failed, but the re-
+                                        * check succeeded for each 4k piece.
+                                        * Intermittent error. */
+};
+
+#define BTRFS_SCRUB_READONLY   1
+struct btrfs_ioctl_scrub_args {
+       __u64 devid;                            /* in */
+       __u64 start;                            /* in */
+       __u64 end;                              /* in */
+       __u64 flags;                            /* in */
+       struct btrfs_scrub_progress progress;   /* out */
+       /* pad to 1k */
+       __u64 unused[(1024-32-sizeof(struct btrfs_scrub_progress))/8];
+};
+
+#define BTRFS_DEVICE_PATH_NAME_MAX 1024
+struct btrfs_ioctl_dev_info_args {
+       __u64 devid;                            /* in/out */
+       __u8 uuid[BTRFS_UUID_SIZE];             /* in/out */
+       __u64 bytes_used;                       /* out */
+       __u64 total_bytes;                      /* out */
+       __u64 unused[379];                      /* pad to 4k */
+       __u8 path[BTRFS_DEVICE_PATH_NAME_MAX];  /* out */
+};
+
+struct btrfs_ioctl_fs_info_args {
+       __u64 max_id;                           /* out */
+       __u64 num_devices;                      /* out */
+       __u8 fsid[BTRFS_FSID_SIZE];             /* out */
+       __u64 reserved[124];                    /* pad to 1k */
+};
+
 #define BTRFS_INO_LOOKUP_PATH_MAX 4080
 struct btrfs_ioctl_ino_lookup_args {
        __u64 treeid;
@@ -114,37 +181,6 @@ struct btrfs_ioctl_clone_range_args {
 #define BTRFS_DEFRAG_RANGE_COMPRESS 1
 #define BTRFS_DEFRAG_RANGE_START_IO 2
 
-struct btrfs_ioctl_defrag_range_args {
-       /* start of the defrag operation */
-       __u64 start;
-
-       /* number of bytes to defrag, use (u64)-1 to say all */
-       __u64 len;
-
-       /*
-        * flags for the operation, which can include turning
-        * on compression for this one defrag
-        */
-       __u64 flags;
-
-       /*
-        * any extent bigger than this will be considered
-        * already defragged.  Use 0 to take the kernel default
-        * Use 1 to say every single extent must be rewritten
-        */
-       __u32 extent_thresh;
-
-       /*
-        * which compression method to use if turning on compression
-        * for this defrag operation.  If unspecified, zlib will
-        * be used
-        */
-       __u32 compress_type;
-
-       /* spare for later */
-       __u32 unused[4];
-};
-
 struct btrfs_ioctl_space_info {
        __u64 flags;
        __u64 total_bytes;
@@ -203,4 +239,13 @@ struct btrfs_ioctl_space_args {
                                   struct btrfs_ioctl_vol_args_v2)
 #define BTRFS_IOC_SUBVOL_GETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 25, __u64)
 #define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64)
+#define BTRFS_IOC_SCRUB _IOWR(BTRFS_IOCTL_MAGIC, 27, \
+                             struct btrfs_ioctl_scrub_args)
+#define BTRFS_IOC_SCRUB_CANCEL _IO(BTRFS_IOCTL_MAGIC, 28)
+#define BTRFS_IOC_SCRUB_PROGRESS _IOWR(BTRFS_IOCTL_MAGIC, 29, \
+                                      struct btrfs_ioctl_scrub_args)
+#define BTRFS_IOC_DEV_INFO _IOWR(BTRFS_IOCTL_MAGIC, 30, \
+                                struct btrfs_ioctl_dev_info_args)
+#define BTRFS_IOC_FS_INFO _IOR(BTRFS_IOCTL_MAGIC, 31, \
+                              struct btrfs_ioctl_fs_info_args)
 #endif
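
To illustrate the FS_INFO/DEV_INFO pair defined above, a similarly hedged userspace sketch; the header include and the mount path are assumptions. A zeroed uuid field asks btrfs_ioctl_dev_info() to match by devid alone, and since device ids need not be contiguous the loop probes every id up to max_id, skipping the ones the kernel rejects with ENODEV.

        /* sketch only: "btrfs-ioctl.h" is a hypothetical copy of the header above */
        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include <unistd.h>
        #include <linux/types.h>
        #include "btrfs-ioctl.h"

        static int dump_devices(const char *mnt)
        {
                struct btrfs_ioctl_fs_info_args fi;
                struct btrfs_ioctl_dev_info_args di;
                __u64 devid;
                int fd = open(mnt, O_RDONLY);

                if (fd < 0)
                        return -1;

                if (ioctl(fd, BTRFS_IOC_FS_INFO, &fi) < 0) {
                        close(fd);
                        return -1;
                }
                printf("%llu devices, highest devid %llu\n",
                       (unsigned long long)fi.num_devices,
                       (unsigned long long)fi.max_id);

                for (devid = 1; devid <= fi.max_id; devid++) {
                        memset(&di, 0, sizeof(di));
                        di.devid = devid;       /* all-zero uuid: look up by devid */
                        if (ioctl(fd, BTRFS_IOC_DEV_INFO, &di) < 0)
                                continue;       /* typically ENODEV for unused ids */
                        printf("  devid %llu (%s): %llu of %llu bytes used\n",
                               (unsigned long long)di.devid, (char *)di.path,
                               (unsigned long long)di.bytes_used,
                               (unsigned long long)di.total_bytes);
                }
                close(fd);
                return 0;
        }
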
index 6151f2ea38bb193eaeed1f45bc7f2d1f60c0b1d7..66fa43dc3f0f9ff8b5c67e5330120fd663bf4054 100644 (file)
@@ -185,31 +185,6 @@ sleep:
        return 0;
 }
 
-/*
- * Very quick trylock, this does not spin or schedule.  It returns
- * 1 with the spinlock held if it was able to take the lock, or it
- * returns zero if it was unable to take the lock.
- *
- * After this call, scheduling is not safe without first calling
- * btrfs_set_lock_blocking()
- */
-int btrfs_try_tree_lock(struct extent_buffer *eb)
-{
-       if (spin_trylock(&eb->lock)) {
-               if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) {
-                       /*
-                        * we've got the spinlock, but the real owner is
-                        * blocking.  Drop the spinlock and return failure
-                        */
-                       spin_unlock(&eb->lock);
-                       return 0;
-               }
-               return 1;
-       }
-       /* someone else has the spinlock giveup */
-       return 0;
-}
-
 int btrfs_tree_unlock(struct extent_buffer *eb)
 {
        /*
index 6c4ce457168cd41cc0b92a58453f5c8f2b2fe2f9..5c33a560a2f100c2454797d122875a76921403ab 100644 (file)
@@ -21,8 +21,6 @@
 
 int btrfs_tree_lock(struct extent_buffer *eb);
 int btrfs_tree_unlock(struct extent_buffer *eb);
-
-int btrfs_try_tree_lock(struct extent_buffer *eb);
 int btrfs_try_spin_lock(struct extent_buffer *eb);
 
 void btrfs_set_lock_blocking(struct extent_buffer *eb);
index a97314cf6bd6ef7ac44aa60a4485261a74e241a7..82d569cb62675c76947dfcc580db4bab8347e917 100644 (file)
 #include "ref-cache.h"
 #include "transaction.h"
 
-/*
- * leaf refs are used to cache the information about which extents
- * a given leaf has references on.  This allows us to process that leaf
- * in btrfs_drop_snapshot without needing to read it back from disk.
- */
-
-/*
- * kmalloc a leaf reference struct and update the counters for the
- * total ref cache size
- */
-struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
-                                           int nr_extents)
-{
-       struct btrfs_leaf_ref *ref;
-       size_t size = btrfs_leaf_ref_size(nr_extents);
-
-       ref = kmalloc(size, GFP_NOFS);
-       if (ref) {
-               spin_lock(&root->fs_info->ref_cache_lock);
-               root->fs_info->total_ref_cache_size += size;
-               spin_unlock(&root->fs_info->ref_cache_lock);
-
-               memset(ref, 0, sizeof(*ref));
-               atomic_set(&ref->usage, 1);
-               INIT_LIST_HEAD(&ref->list);
-       }
-       return ref;
-}
-
-/*
- * free a leaf reference struct and update the counters for the
- * total ref cache size
- */
-void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
-{
-       if (!ref)
-               return;
-       WARN_ON(atomic_read(&ref->usage) == 0);
-       if (atomic_dec_and_test(&ref->usage)) {
-               size_t size = btrfs_leaf_ref_size(ref->nritems);
-
-               BUG_ON(ref->in_tree);
-               kfree(ref);
-
-               spin_lock(&root->fs_info->ref_cache_lock);
-               root->fs_info->total_ref_cache_size -= size;
-               spin_unlock(&root->fs_info->ref_cache_lock);
-       }
-}
-
 static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr,
                                   struct rb_node *node)
 {
@@ -116,117 +66,3 @@ static struct rb_node *tree_search(struct rb_root *root, u64 bytenr)
        }
        return NULL;
 }
-
-int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
-                          int shared)
-{
-       struct btrfs_leaf_ref *ref = NULL;
-       struct btrfs_leaf_ref_tree *tree = root->ref_tree;
-
-       if (shared)
-               tree = &root->fs_info->shared_ref_tree;
-       if (!tree)
-               return 0;
-
-       spin_lock(&tree->lock);
-       while (!list_empty(&tree->list)) {
-               ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list);
-               BUG_ON(ref->tree != tree);
-               if (ref->root_gen > max_root_gen)
-                       break;
-               if (!xchg(&ref->in_tree, 0)) {
-                       cond_resched_lock(&tree->lock);
-                       continue;
-               }
-
-               rb_erase(&ref->rb_node, &tree->root);
-               list_del_init(&ref->list);
-
-               spin_unlock(&tree->lock);
-               btrfs_free_leaf_ref(root, ref);
-               cond_resched();
-               spin_lock(&tree->lock);
-       }
-       spin_unlock(&tree->lock);
-       return 0;
-}
-
-/*
- * find the leaf ref for a given extent.  This returns the ref struct with
- * a usage reference incremented
- */
-struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
-                                            u64 bytenr)
-{
-       struct rb_node *rb;
-       struct btrfs_leaf_ref *ref = NULL;
-       struct btrfs_leaf_ref_tree *tree = root->ref_tree;
-again:
-       if (tree) {
-               spin_lock(&tree->lock);
-               rb = tree_search(&tree->root, bytenr);
-               if (rb)
-                       ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node);
-               if (ref)
-                       atomic_inc(&ref->usage);
-               spin_unlock(&tree->lock);
-               if (ref)
-                       return ref;
-       }
-       if (tree != &root->fs_info->shared_ref_tree) {
-               tree = &root->fs_info->shared_ref_tree;
-               goto again;
-       }
-       return NULL;
-}
-
-/*
- * add a fully filled in leaf ref struct
- * remove all the refs older than a given root generation
- */
-int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
-                      int shared)
-{
-       int ret = 0;
-       struct rb_node *rb;
-       struct btrfs_leaf_ref_tree *tree = root->ref_tree;
-
-       if (shared)
-               tree = &root->fs_info->shared_ref_tree;
-
-       spin_lock(&tree->lock);
-       rb = tree_insert(&tree->root, ref->bytenr, &ref->rb_node);
-       if (rb) {
-               ret = -EEXIST;
-       } else {
-               atomic_inc(&ref->usage);
-               ref->tree = tree;
-               ref->in_tree = 1;
-               list_add_tail(&ref->list, &tree->list);
-       }
-       spin_unlock(&tree->lock);
-       return ret;
-}
-
-/*
- * remove a single leaf ref from the tree.  This drops the ref held by the tree
- * only
- */
-int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref)
-{
-       struct btrfs_leaf_ref_tree *tree;
-
-       if (!xchg(&ref->in_tree, 0))
-               return 0;
-
-       tree = ref->tree;
-       spin_lock(&tree->lock);
-
-       rb_erase(&ref->rb_node, &tree->root);
-       list_del_init(&ref->list);
-
-       spin_unlock(&tree->lock);
-
-       btrfs_free_leaf_ref(root, ref);
-       return 0;
-}
index e2a55cb2072bda576d6f30b6079b30b2e497d8e7..24f7001f6387d501f07ceb6a605074474b0a463d 100644 (file)
@@ -49,28 +49,4 @@ static inline size_t btrfs_leaf_ref_size(int nr_extents)
        return sizeof(struct btrfs_leaf_ref) +
               sizeof(struct btrfs_extent_info) * nr_extents;
 }
-
-static inline void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree)
-{
-       tree->root = RB_ROOT;
-       INIT_LIST_HEAD(&tree->list);
-       spin_lock_init(&tree->lock);
-}
-
-static inline int btrfs_leaf_ref_tree_empty(struct btrfs_leaf_ref_tree *tree)
-{
-       return RB_EMPTY_ROOT(&tree->root);
-}
-
-void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree);
-struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root,
-                                           int nr_extents);
-void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
-struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root,
-                                            u64 bytenr);
-int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref,
-                      int shared);
-int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen,
-                          int shared);
-int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref);
 #endif
index f340f7c99d09d2126ac20a40b6285fa031f84a63..ca38eca70af0b3b552c60cc8615b70925c575635 100644 (file)
@@ -30,6 +30,7 @@
 #include "btrfs_inode.h"
 #include "async-thread.h"
 #include "free-space-cache.h"
+#include "inode-map.h"
 
 /*
  * backref_node, mapping_node and tree_block start with this
@@ -507,6 +508,7 @@ static int update_backref_cache(struct btrfs_trans_handle *trans,
        return 1;
 }
 
+
 static int should_ignore_root(struct btrfs_root *root)
 {
        struct btrfs_root *reloc_root;
@@ -529,7 +531,6 @@ static int should_ignore_root(struct btrfs_root *root)
         */
        return 1;
 }
-
 /*
  * find reloc tree by address of tree root
  */
@@ -961,7 +962,7 @@ again:
                        lower = upper;
                        upper = NULL;
                }
-               btrfs_release_path(root, path2);
+               btrfs_release_path(path2);
 next:
                if (ptr < end) {
                        ptr += btrfs_extent_inline_ref_size(key.type);
@@ -974,7 +975,7 @@ next:
                if (ptr >= end)
                        path1->slots[0]++;
        }
-       btrfs_release_path(rc->extent_root, path1);
+       btrfs_release_path(path1);
 
        cur->checked = 1;
        WARN_ON(exist);
@@ -1409,9 +1410,9 @@ again:
                prev = node;
                entry = rb_entry(node, struct btrfs_inode, rb_node);
 
-               if (objectid < entry->vfs_inode.i_ino)
+               if (objectid < btrfs_ino(&entry->vfs_inode))
                        node = node->rb_left;
-               else if (objectid > entry->vfs_inode.i_ino)
+               else if (objectid > btrfs_ino(&entry->vfs_inode))
                        node = node->rb_right;
                else
                        break;
@@ -1419,7 +1420,7 @@ again:
        if (!node) {
                while (prev) {
                        entry = rb_entry(prev, struct btrfs_inode, rb_node);
-                       if (objectid <= entry->vfs_inode.i_ino) {
+                       if (objectid <= btrfs_ino(&entry->vfs_inode)) {
                                node = prev;
                                break;
                        }
@@ -1434,7 +1435,7 @@ again:
                        return inode;
                }
 
-               objectid = entry->vfs_inode.i_ino + 1;
+               objectid = btrfs_ino(&entry->vfs_inode) + 1;
                if (cond_resched_lock(&root->inode_lock))
                        goto again;
 
@@ -1470,7 +1471,7 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
                return -ENOMEM;
 
        bytenr -= BTRFS_I(reloc_inode)->index_cnt;
-       ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
+       ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode),
                                       bytenr, 0);
        if (ret < 0)
                goto out;
@@ -1558,11 +1559,11 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
                        if (first) {
                                inode = find_next_inode(root, key.objectid);
                                first = 0;
-                       } else if (inode && inode->i_ino < key.objectid) {
+                       } else if (inode && btrfs_ino(inode) < key.objectid) {
                                btrfs_add_delayed_iput(inode);
                                inode = find_next_inode(root, key.objectid);
                        }
-                       if (inode && inode->i_ino == key.objectid) {
+                       if (inode && btrfs_ino(inode) == key.objectid) {
                                end = key.offset +
                                      btrfs_file_extent_num_bytes(leaf, fi);
                                WARN_ON(!IS_ALIGNED(key.offset,
@@ -1749,7 +1750,7 @@ again:
 
                btrfs_node_key_to_cpu(path->nodes[level], &key,
                                      path->slots[level]);
-               btrfs_release_path(src, path);
+               btrfs_release_path(path);
 
                path->lowest_level = level;
                ret = btrfs_search_slot(trans, src, &key, path, 0, 1);
@@ -1893,6 +1894,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
        struct inode *inode = NULL;
        u64 objectid;
        u64 start, end;
+       u64 ino;
 
        objectid = min_key->objectid;
        while (1) {
@@ -1905,17 +1907,18 @@ static int invalidate_extent_cache(struct btrfs_root *root,
                inode = find_next_inode(root, objectid);
                if (!inode)
                        break;
+               ino = btrfs_ino(inode);
 
-               if (inode->i_ino > max_key->objectid) {
+               if (ino > max_key->objectid) {
                        iput(inode);
                        break;
                }
 
-               objectid = inode->i_ino + 1;
+               objectid = ino + 1;
                if (!S_ISREG(inode->i_mode))
                        continue;
 
-               if (unlikely(min_key->objectid == inode->i_ino)) {
+               if (unlikely(min_key->objectid == ino)) {
                        if (min_key->type > BTRFS_EXTENT_DATA_KEY)
                                continue;
                        if (min_key->type < BTRFS_EXTENT_DATA_KEY)
@@ -1928,7 +1931,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
                        start = 0;
                }
 
-               if (unlikely(max_key->objectid == inode->i_ino)) {
+               if (unlikely(max_key->objectid == ino)) {
                        if (max_key->type < BTRFS_EXTENT_DATA_KEY)
                                continue;
                        if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
@@ -2496,7 +2499,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
                        path->locks[upper->level] = 0;
 
                        slot = path->slots[upper->level];
-                       btrfs_release_path(NULL, path);
+                       btrfs_release_path(path);
                } else {
                        ret = btrfs_bin_search(upper->eb, key, upper->level,
                                               &slot);
@@ -2737,7 +2740,7 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans,
                } else {
                        path->lowest_level = node->level;
                        ret = btrfs_search_slot(trans, root, key, path, 0, 1);
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        if (ret > 0)
                                ret = 0;
                }
@@ -2870,7 +2873,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end,
        struct extent_map *em;
        int ret = 0;
 
-       em = alloc_extent_map(GFP_NOFS);
+       em = alloc_extent_map();
        if (!em)
                return -ENOMEM;
 
@@ -3119,7 +3122,7 @@ static int add_tree_block(struct reloc_control *rc,
 #endif
        }
 
-       btrfs_release_path(rc->extent_root, path);
+       btrfs_release_path(path);
 
        BUG_ON(level == -1);
 
@@ -3220,7 +3223,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
        key.offset = 0;
 
        inode = btrfs_iget(fs_info->sb, &key, root, NULL);
-       if (!inode || IS_ERR(inode) || is_bad_inode(inode)) {
+       if (IS_ERR_OR_NULL(inode) || is_bad_inode(inode)) {
                if (inode && !IS_ERR(inode))
                        iput(inode);
                return -ENOENT;
@@ -3505,7 +3508,7 @@ int add_data_references(struct reloc_control *rc,
                }
                path->slots[0]++;
        }
-       btrfs_release_path(rc->extent_root, path);
+       btrfs_release_path(path);
        if (err)
                free_block_list(blocks);
        return err;
@@ -3568,7 +3571,7 @@ next:
                                            EXTENT_DIRTY);
 
                if (ret == 0 && start <= key.objectid) {
-                       btrfs_release_path(rc->extent_root, path);
+                       btrfs_release_path(path);
                        rc->search_start = end + 1;
                } else {
                        rc->search_start = key.objectid + key.offset;
@@ -3576,7 +3579,7 @@ next:
                        return 0;
                }
        }
-       btrfs_release_path(rc->extent_root, path);
+       btrfs_release_path(path);
        return ret;
 }
 
@@ -3713,7 +3716,7 @@ restart:
                                flags = BTRFS_EXTENT_FLAG_DATA;
 
                        if (path_change) {
-                               btrfs_release_path(rc->extent_root, path);
+                               btrfs_release_path(path);
 
                                path->search_commit_root = 1;
                                path->skip_locking = 1;
@@ -3736,7 +3739,7 @@ restart:
                           (flags & BTRFS_EXTENT_FLAG_DATA)) {
                        ret = add_data_references(rc, &key, path, &blocks);
                } else {
-                       btrfs_release_path(rc->extent_root, path);
+                       btrfs_release_path(path);
                        ret = 0;
                }
                if (ret < 0) {
@@ -3799,7 +3802,7 @@ restart:
                }
        }
 
-       btrfs_release_path(rc->extent_root, path);
+       btrfs_release_path(path);
        clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
                          GFP_NOFS);
 
@@ -3867,7 +3870,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
        btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS |
                                          BTRFS_INODE_PREALLOC);
        btrfs_mark_buffer_dirty(leaf);
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 out:
        btrfs_free_path(path);
        return ret;
@@ -3897,7 +3900,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
        if (IS_ERR(trans))
                return ERR_CAST(trans);
 
-       err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
+       err = btrfs_find_free_objectid(root, &objectid);
        if (err)
                goto out;
 
@@ -3935,7 +3938,7 @@ static struct reloc_control *alloc_reloc_control(void)
        INIT_LIST_HEAD(&rc->reloc_roots);
        backref_cache_init(&rc->backref_cache);
        mapping_tree_init(&rc->reloc_root_tree);
-       extent_io_tree_init(&rc->processed_blocks, NULL, GFP_NOFS);
+       extent_io_tree_init(&rc->processed_blocks, NULL);
        return rc;
 }
 
@@ -4109,7 +4112,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
                }
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-               btrfs_release_path(root->fs_info->tree_root, path);
+               btrfs_release_path(path);
 
                if (key.objectid != BTRFS_TREE_RELOC_OBJECTID ||
                    key.type != BTRFS_ROOT_ITEM_KEY)
@@ -4141,7 +4144,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
 
                key.offset--;
        }
-       btrfs_release_path(root->fs_info->tree_root, path);
+       btrfs_release_path(path);
 
        if (list_empty(&reloc_roots))
                goto out;
@@ -4242,7 +4245,7 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
 
        disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
        ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
-                                      disk_bytenr + len - 1, &list);
+                                      disk_bytenr + len - 1, &list, 0);
 
        while (!list_empty(&list)) {
                sums = list_entry(list.next, struct btrfs_ordered_sum, list);
index 6928bff62daa8370b4d9efce397fac35208c9611..ebe45443de064471fba64ae4e176b2f7a1b66f93 100644 (file)
 #include "disk-io.h"
 #include "print-tree.h"
 
-/*
- *  search forward for a root, starting with objectid 'search_start'
- *  if a root key is found, the objectid we find is filled into 'found_objectid'
- *  and 0 is returned.  < 0 is returned on error, 1 if there is nothing
- *  left in the tree.
- */
-int btrfs_search_root(struct btrfs_root *root, u64 search_start,
-                     u64 *found_objectid)
-{
-       struct btrfs_path *path;
-       struct btrfs_key search_key;
-       int ret;
-
-       root = root->fs_info->tree_root;
-       search_key.objectid = search_start;
-       search_key.type = (u8)-1;
-       search_key.offset = (u64)-1;
-
-       path = btrfs_alloc_path();
-       BUG_ON(!path);
-again:
-       ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
-       if (ret < 0)
-               goto out;
-       if (ret == 0) {
-               ret = 1;
-               goto out;
-       }
-       if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
-               ret = btrfs_next_leaf(root, path);
-               if (ret)
-                       goto out;
-       }
-       btrfs_item_key_to_cpu(path->nodes[0], &search_key, path->slots[0]);
-       if (search_key.type != BTRFS_ROOT_ITEM_KEY) {
-               search_key.offset++;
-               btrfs_release_path(root, path);
-               goto again;
-       }
-       ret = 0;
-       *found_objectid = search_key.objectid;
-
-out:
-       btrfs_free_path(path);
-       return ret;
-}
-
 /*
  * lookup the root with the highest offset for a given objectid.  The key we do
  * find is copied into 'key'.  If we find something return 0, otherwise 1, < 0
@@ -230,7 +183,7 @@ again:
 
                memcpy(&found_key, &key, sizeof(key));
                key.offset++;
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
                dead_root =
                        btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
                                                    &found_key);
@@ -292,7 +245,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root)
                }
 
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-               btrfs_release_path(tree_root, path);
+               btrfs_release_path(path);
 
                if (key.objectid != BTRFS_ORPHAN_OBJECTID ||
                    key.type != BTRFS_ORPHAN_ITEM_KEY)
@@ -385,18 +338,22 @@ again:
                *sequence = btrfs_root_ref_sequence(leaf, ref);
 
                ret = btrfs_del_item(trans, tree_root, path);
-               BUG_ON(ret);
+               if (ret) {
+                       err = ret;
+                       goto out;
+               }
        } else
                err = -ENOENT;
 
        if (key.type == BTRFS_ROOT_BACKREF_KEY) {
-               btrfs_release_path(tree_root, path);
+               btrfs_release_path(path);
                key.objectid = ref_id;
                key.type = BTRFS_ROOT_REF_KEY;
                key.offset = root_id;
                goto again;
        }
 
+out:
        btrfs_free_path(path);
        return err;
 }
@@ -463,7 +420,7 @@ again:
        btrfs_mark_buffer_dirty(leaf);
 
        if (key.type == BTRFS_ROOT_BACKREF_KEY) {
-               btrfs_release_path(tree_root, path);
+               btrfs_release_path(path);
                key.objectid = ref_id;
                key.type = BTRFS_ROOT_REF_KEY;
                key.offset = root_id;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
new file mode 100644 (file)
index 0000000..6dfed0c
--- /dev/null
@@ -0,0 +1,1369 @@
+/*
+ * Copyright (C) 2011 STRATO.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/sched.h>
+#include <linux/pagemap.h>
+#include <linux/writeback.h>
+#include <linux/blkdev.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include "ctree.h"
+#include "volumes.h"
+#include "disk-io.h"
+#include "ordered-data.h"
+
+/*
+ * This is only the first step towards a full-featured scrub. It reads all
+ * extents and super blocks and verifies the checksums. In case a bad checksum
+ * is found or the extent cannot be read, good data will be written back if
+ * any can be found.
+ *
+ * Future enhancements:
+ *  - To enhance the performance, better read-ahead strategies for the
+ *    extent-tree can be employed.
+ *  - In case an unrepairable extent is encountered, track which files are
+ *    affected and report them
+ *  - In case of a read error on files with nodatasum, map the file and read
+ *    the extent to trigger a writeback of the good copy
+ *  - track and record media errors, throw out bad devices
+ *  - add a mode to also read unallocated space
+ *  - make the prefetch cancellable
+ */
+
+struct scrub_bio;
+struct scrub_page;
+struct scrub_dev;
+static void scrub_bio_end_io(struct bio *bio, int err);
+static void scrub_checksum(struct btrfs_work *work);
+static int scrub_checksum_data(struct scrub_dev *sdev,
+                              struct scrub_page *spag, void *buffer);
+static int scrub_checksum_tree_block(struct scrub_dev *sdev,
+                                    struct scrub_page *spag, u64 logical,
+                                    void *buffer);
+static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer);
+static int scrub_fixup_check(struct scrub_bio *sbio, int ix);
+static void scrub_fixup_end_io(struct bio *bio, int err);
+static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
+                         struct page *page);
+static void scrub_fixup(struct scrub_bio *sbio, int ix);
+
+#define SCRUB_PAGES_PER_BIO    16      /* 64k per bio */
+#define SCRUB_BIOS_PER_DEV     16      /* 1 MB per device in flight */
+
+struct scrub_page {
+       u64                     flags;  /* extent flags */
+       u64                     generation;
+       u64                     mirror_num;
+       int                     have_csum;
+       u8                      csum[BTRFS_CSUM_SIZE];
+};
+
+struct scrub_bio {
+       int                     index;
+       struct scrub_dev        *sdev;
+       struct bio              *bio;
+       int                     err;
+       u64                     logical;
+       u64                     physical;
+       struct scrub_page       spag[SCRUB_PAGES_PER_BIO];
+       u64                     count;
+       int                     next_free;
+       struct btrfs_work       work;
+};
+
+struct scrub_dev {
+       struct scrub_bio        *bios[SCRUB_BIOS_PER_DEV];
+       struct btrfs_device     *dev;
+       int                     first_free;
+       int                     curr;
+       atomic_t                in_flight;
+       spinlock_t              list_lock;
+       wait_queue_head_t       list_wait;
+       u16                     csum_size;
+       struct list_head        csum_list;
+       atomic_t                cancel_req;
+       int                     readonly;
+       /*
+        * statistics
+        */
+       struct btrfs_scrub_progress stat;
+       spinlock_t              stat_lock;
+};
+
+static void scrub_free_csums(struct scrub_dev *sdev)
+{
+       while (!list_empty(&sdev->csum_list)) {
+               struct btrfs_ordered_sum *sum;
+               sum = list_first_entry(&sdev->csum_list,
+                                      struct btrfs_ordered_sum, list);
+               list_del(&sum->list);
+               kfree(sum);
+       }
+}
+
+static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
+{
+       int i;
+       int j;
+       struct page *last_page;
+
+       if (!sdev)
+               return;
+
+       for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
+               struct scrub_bio *sbio = sdev->bios[i];
+               struct bio *bio;
+
+               if (!sbio)
+                       break;
+
+               bio = sbio->bio;
+               if (bio) {
+                       last_page = NULL;
+                       for (j = 0; j < bio->bi_vcnt; ++j) {
+                               if (bio->bi_io_vec[j].bv_page == last_page)
+                                       continue;
+                               last_page = bio->bi_io_vec[j].bv_page;
+                               __free_page(last_page);
+                       }
+                       bio_put(bio);
+               }
+               kfree(sbio);
+       }
+
+       scrub_free_csums(sdev);
+       kfree(sdev);
+}
+
+static noinline_for_stack
+struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
+{
+       struct scrub_dev *sdev;
+       int             i;
+       int             j;
+       int             ret;
+       struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
+
+       sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
+       if (!sdev)
+               goto nomem;
+       sdev->dev = dev;
+       for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
+               struct bio *bio;
+               struct scrub_bio *sbio;
+
+               sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
+               if (!sbio)
+                       goto nomem;
+               sdev->bios[i] = sbio;
+
+               bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
+               if (!bio)
+                       goto nomem;
+
+               sbio->index = i;
+               sbio->sdev = sdev;
+               sbio->bio = bio;
+               sbio->count = 0;
+               sbio->work.func = scrub_checksum;
+               bio->bi_private = sdev->bios[i];
+               bio->bi_end_io = scrub_bio_end_io;
+               bio->bi_sector = 0;
+               bio->bi_bdev = dev->bdev;
+               bio->bi_size = 0;
+
+               for (j = 0; j < SCRUB_PAGES_PER_BIO; ++j) {
+                       struct page *page;
+                       page = alloc_page(GFP_NOFS);
+                       if (!page)
+                               goto nomem;
+
+                       ret = bio_add_page(bio, page, PAGE_SIZE, 0);
+                       if (!ret)
+                               goto nomem;
+               }
+               WARN_ON(bio->bi_vcnt != SCRUB_PAGES_PER_BIO);
+
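+               /* chain the bios into a simple free list, terminated by -1 */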
+               if (i != SCRUB_BIOS_PER_DEV-1)
+                       sdev->bios[i]->next_free = i + 1;
+               else
+                       sdev->bios[i]->next_free = -1;
+       }
+       sdev->first_free = 0;
+       sdev->curr = -1;
+       atomic_set(&sdev->in_flight, 0);
+       atomic_set(&sdev->cancel_req, 0);
+       sdev->csum_size = btrfs_super_csum_size(&fs_info->super_copy);
+       INIT_LIST_HEAD(&sdev->csum_list);
+
+       spin_lock_init(&sdev->list_lock);
+       spin_lock_init(&sdev->stat_lock);
+       init_waitqueue_head(&sdev->list_wait);
+       return sdev;
+
+nomem:
+       scrub_free_dev(sdev);
+       return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * scrub_recheck_error gets called when either verification of the page
+ * failed or the bio failed to read, e.g. with EIO. In the latter case,
+ * recheck_error gets called for every page in the bio, even though only
+ * one may be bad
+ */
+static void scrub_recheck_error(struct scrub_bio *sbio, int ix)
+{
+       if (sbio->err) {
+               if (scrub_fixup_io(READ, sbio->sdev->dev->bdev,
+                                  (sbio->physical + ix * PAGE_SIZE) >> 9,
+                                  sbio->bio->bi_io_vec[ix].bv_page) == 0) {
+                       if (scrub_fixup_check(sbio, ix) == 0)
+                               return;
+               }
+       }
+
+       scrub_fixup(sbio, ix);
+}
+
+static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
+{
+       int ret = 1;
+       struct page *page;
+       void *buffer;
+       u64 flags = sbio->spag[ix].flags;
+
+       page = sbio->bio->bi_io_vec[ix].bv_page;
+       buffer = kmap_atomic(page, KM_USER0);
+       if (flags & BTRFS_EXTENT_FLAG_DATA) {
+               ret = scrub_checksum_data(sbio->sdev,
+                                         sbio->spag + ix, buffer);
+       } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+               ret = scrub_checksum_tree_block(sbio->sdev,
+                                               sbio->spag + ix,
+                                               sbio->logical + ix * PAGE_SIZE,
+                                               buffer);
+       } else {
+               WARN_ON(1);
+       }
+       kunmap_atomic(buffer, KM_USER0);
+
+       return ret;
+}
+
+static void scrub_fixup_end_io(struct bio *bio, int err)
+{
+       complete((struct completion *)bio->bi_private);
+}
+
+static void scrub_fixup(struct scrub_bio *sbio, int ix)
+{
+       struct scrub_dev *sdev = sbio->sdev;
+       struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
+       struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
+       struct btrfs_multi_bio *multi = NULL;
+       u64 logical = sbio->logical + ix * PAGE_SIZE;
+       u64 length;
+       int i;
+       int ret;
+       DECLARE_COMPLETION_ONSTACK(complete);
+
+       if ((sbio->spag[ix].flags & BTRFS_EXTENT_FLAG_DATA) &&
+           (sbio->spag[ix].have_csum == 0)) {
+               /*
+                * nodatasum, don't try to fix anything
+                * FIXME: we can do better, open the inode and trigger a
+                * writeback
+                */
+               goto uncorrectable;
+       }
+
+       length = PAGE_SIZE;
+       ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length,
+                             &multi, 0);
+       if (ret || !multi || length < PAGE_SIZE) {
+               printk(KERN_ERR
+                      "scrub_fixup: btrfs_map_block failed us for %llu\n",
+                      (unsigned long long)logical);
+               WARN_ON(1);
+               return;
+       }
+
+       if (multi->num_stripes == 1)
+               /* there aren't any replicas */
+               goto uncorrectable;
+
+       /*
+        * first find a good copy
+        */
+       for (i = 0; i < multi->num_stripes; ++i) {
+               if (i == sbio->spag[ix].mirror_num)
+                       continue;
+
+               if (scrub_fixup_io(READ, multi->stripes[i].dev->bdev,
+                                  multi->stripes[i].physical >> 9,
+                                  sbio->bio->bi_io_vec[ix].bv_page)) {
+                       /* I/O-error, this is not a good copy */
+                       continue;
+               }
+
+               if (scrub_fixup_check(sbio, ix) == 0)
+                       break;
+       }
+       if (i == multi->num_stripes)
+               goto uncorrectable;
+
+       if (!sdev->readonly) {
+               /*
+                * bi_io_vec[ix].bv_page now contains good data, write it back
+                */
+               if (scrub_fixup_io(WRITE, sdev->dev->bdev,
+                                  (sbio->physical + ix * PAGE_SIZE) >> 9,
+                                  sbio->bio->bi_io_vec[ix].bv_page)) {
+                       /* I/O-error, writeback failed, give up */
+                       goto uncorrectable;
+               }
+       }
+
+       kfree(multi);
+       spin_lock(&sdev->stat_lock);
+       ++sdev->stat.corrected_errors;
+       spin_unlock(&sdev->stat_lock);
+
+       if (printk_ratelimit())
+               printk(KERN_ERR "btrfs: fixed up at %llu\n",
+                      (unsigned long long)logical);
+       return;
+
+uncorrectable:
+       kfree(multi);
+       spin_lock(&sdev->stat_lock);
+       ++sdev->stat.uncorrectable_errors;
+       spin_unlock(&sdev->stat_lock);
+
+       if (printk_ratelimit())
+               printk(KERN_ERR "btrfs: unable to fixup at %llu\n",
+                        (unsigned long long)logical);
+}
+
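+/*
+ * synchronous helper that reads or writes a single page at the given
+ * sector and returns non-zero if the bio did not complete successfully
+ */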
+static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
+                        struct page *page)
+{
+       struct bio *bio = NULL;
+       int ret;
+       DECLARE_COMPLETION_ONSTACK(complete);
+
+       /* we are going to wait on this IO */
+       rw |= REQ_SYNC;
+
+       bio = bio_alloc(GFP_NOFS, 1);
+       bio->bi_bdev = bdev;
+       bio->bi_sector = sector;
+       bio_add_page(bio, page, PAGE_SIZE, 0);
+       bio->bi_end_io = scrub_fixup_end_io;
+       bio->bi_private = &complete;
+       submit_bio(rw, bio);
+
+       wait_for_completion(&complete);
+
+       ret = !test_bit(BIO_UPTODATE, &bio->bi_flags);
+       bio_put(bio);
+       return ret;
+}
+
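+/*
+ * bio completion callback for scrub reads: record the error and hand the
+ * verification work off to the scrub worker threads
+ */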
+static void scrub_bio_end_io(struct bio *bio, int err)
+{
+       struct scrub_bio *sbio = bio->bi_private;
+       struct scrub_dev *sdev = sbio->sdev;
+       struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
+
+       sbio->err = err;
+
+       btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
+}
+
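+/*
+ * worker function that verifies all pages of a completed scrub bio. On a
+ * read error every page is handed to scrub_recheck_error and the bio is
+ * reset; otherwise each page is checksummed and failing pages are
+ * rechecked. Finally the sbio is returned to the free list and waiters
+ * are woken.
+ */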
+static void scrub_checksum(struct btrfs_work *work)
+{
+       struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
+       struct scrub_dev *sdev = sbio->sdev;
+       struct page *page;
+       void *buffer;
+       int i;
+       u64 flags;
+       u64 logical;
+       int ret;
+
+       if (sbio->err) {
+               for (i = 0; i < sbio->count; ++i)
+                       scrub_recheck_error(sbio, i);
+
+               sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1);
+               sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
+               sbio->bio->bi_phys_segments = 0;
+               sbio->bio->bi_idx = 0;
+
+               for (i = 0; i < sbio->count; i++) {
+                       struct bio_vec *bi;
+                       bi = &sbio->bio->bi_io_vec[i];
+                       bi->bv_offset = 0;
+                       bi->bv_len = PAGE_SIZE;
+               }
+
+               spin_lock(&sdev->stat_lock);
+               ++sdev->stat.read_errors;
+               spin_unlock(&sdev->stat_lock);
+               goto out;
+       }
+       for (i = 0; i < sbio->count; ++i) {
+               page = sbio->bio->bi_io_vec[i].bv_page;
+               buffer = kmap_atomic(page, KM_USER0);
+               flags = sbio->spag[i].flags;
+               logical = sbio->logical + i * PAGE_SIZE;
+               ret = 0;
+               if (flags & BTRFS_EXTENT_FLAG_DATA) {
+                       ret = scrub_checksum_data(sdev, sbio->spag + i, buffer);
+               } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+                       ret = scrub_checksum_tree_block(sdev, sbio->spag + i,
+                                                       logical, buffer);
+               } else if (flags & BTRFS_EXTENT_FLAG_SUPER) {
+                       BUG_ON(i);
+                       (void)scrub_checksum_super(sbio, buffer);
+               } else {
+                       WARN_ON(1);
+               }
+               kunmap_atomic(buffer, KM_USER0);
+               if (ret)
+                       scrub_recheck_error(sbio, i);
+       }
+
+out:
+       spin_lock(&sdev->list_lock);
+       sbio->next_free = sdev->first_free;
+       sdev->first_free = sbio->index;
+       spin_unlock(&sdev->list_lock);
+       atomic_dec(&sdev->in_flight);
+       wake_up(&sdev->list_wait);
+}
+
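+/*
+ * verify the checksum of a single data page. Returns 0 if no csum was
+ * collected for it or the csum matches, non-zero on a mismatch; the
+ * statistics are updated either way.
+ */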
+static int scrub_checksum_data(struct scrub_dev *sdev,
+                              struct scrub_page *spag, void *buffer)
+{
+       u8 csum[BTRFS_CSUM_SIZE];
+       u32 crc = ~(u32)0;
+       int fail = 0;
+       struct btrfs_root *root = sdev->dev->dev_root;
+
+       if (!spag->have_csum)
+               return 0;
+
+       crc = btrfs_csum_data(root, buffer, crc, PAGE_SIZE);
+       btrfs_csum_final(crc, csum);
+       if (memcmp(csum, spag->csum, sdev->csum_size))
+               fail = 1;
+
+       spin_lock(&sdev->stat_lock);
+       ++sdev->stat.data_extents_scrubbed;
+       sdev->stat.data_bytes_scrubbed += PAGE_SIZE;
+       if (fail)
+               ++sdev->stat.csum_errors;
+       spin_unlock(&sdev->stat_lock);
+
+       return fail;
+}
+
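+/*
+ * verify a tree block: bytenr, generation, fsid and chunk tree uuid in
+ * the header are checked in addition to the checksum. Returns non-zero
+ * if any of the checks fail.
+ */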
+static int scrub_checksum_tree_block(struct scrub_dev *sdev,
+                                    struct scrub_page *spag, u64 logical,
+                                    void *buffer)
+{
+       struct btrfs_header *h;
+       struct btrfs_root *root = sdev->dev->dev_root;
+       struct btrfs_fs_info *fs_info = root->fs_info;
+       u8 csum[BTRFS_CSUM_SIZE];
+       u32 crc = ~(u32)0;
+       int fail = 0;
+       int crc_fail = 0;
+
+       /*
+        * we don't use the getter functions here, as we
+        * a) don't have an extent buffer and
+        * b) the page is already kmapped
+        */
+       h = (struct btrfs_header *)buffer;
+
+       if (logical != le64_to_cpu(h->bytenr))
+               ++fail;
+
+       if (spag->generation != le64_to_cpu(h->generation))
+               ++fail;
+
+       if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
+               ++fail;
+
+       if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
+                  BTRFS_UUID_SIZE))
+               ++fail;
+
+       crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc,
+                             PAGE_SIZE - BTRFS_CSUM_SIZE);
+       btrfs_csum_final(crc, csum);
+       if (memcmp(csum, h->csum, sdev->csum_size))
+               ++crc_fail;
+
+       spin_lock(&sdev->stat_lock);
+       ++sdev->stat.tree_extents_scrubbed;
+       sdev->stat.tree_bytes_scrubbed += PAGE_SIZE;
+       if (crc_fail)
+               ++sdev->stat.csum_errors;
+       if (fail)
+               ++sdev->stat.verify_errors;
+       spin_unlock(&sdev->stat_lock);
+
+       return fail || crc_fail;
+}
+
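+/*
+ * verify a superblock copy: bytenr, generation, fsid and the checksum
+ * are checked. Failures only bump the statistics, see below.
+ */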
+static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
+{
+       struct btrfs_super_block *s;
+       u64 logical;
+       struct scrub_dev *sdev = sbio->sdev;
+       struct btrfs_root *root = sdev->dev->dev_root;
+       struct btrfs_fs_info *fs_info = root->fs_info;
+       u8 csum[BTRFS_CSUM_SIZE];
+       u32 crc = ~(u32)0;
+       int fail = 0;
+
+       s = (struct btrfs_super_block *)buffer;
+       logical = sbio->logical;
+
+       if (logical != le64_to_cpu(s->bytenr))
+               ++fail;
+
+       if (sbio->spag[0].generation != le64_to_cpu(s->generation))
+               ++fail;
+
+       if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
+               ++fail;
+
+       crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc,
+                             PAGE_SIZE - BTRFS_CSUM_SIZE);
+       btrfs_csum_final(crc, csum);
+       if (memcmp(csum, s->csum, sbio->sdev->csum_size))
+               ++fail;
+
+       if (fail) {
+               /*
+                * if we find an error in a super block, we just report it.
+                * It will get rewritten with the next transaction commit
+                * anyway.
+                */
+               spin_lock(&sdev->stat_lock);
+               ++sdev->stat.super_errors;
+               spin_unlock(&sdev->stat_lock);
+       }
+
+       return fail;
+}
+
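+/*
+ * submit the read bio that is currently being filled, if any, and clear
+ * sdev->curr so that scrub_page grabs a fresh one next time
+ */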
+static int scrub_submit(struct scrub_dev *sdev)
+{
+       struct scrub_bio *sbio;
+
+       if (sdev->curr == -1)
+               return 0;
+
+       sbio = sdev->bios[sdev->curr];
+
+       sbio->bio->bi_sector = sbio->physical >> 9;
+       sbio->bio->bi_size = sbio->count * PAGE_SIZE;
+       sbio->bio->bi_next = NULL;
+       sbio->bio->bi_flags |= 1 << BIO_UPTODATE;
+       sbio->bio->bi_comp_cpu = -1;
+       sbio->bio->bi_bdev = sdev->dev->bdev;
+       sbio->err = 0;
+       sdev->curr = -1;
+       atomic_inc(&sdev->in_flight);
+
+       submit_bio(0, sbio->bio);
+
+       return 0;
+}
+
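+/*
+ * queue a single page for scrubbing. Pages are collected into the current
+ * bio as long as they are physically and logically contiguous; the bio is
+ * submitted once it is full, contiguity is broken or @force is set.
+ */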
+static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
+                     u64 physical, u64 flags, u64 gen, u64 mirror_num,
+                     u8 *csum, int force)
+{
+       struct scrub_bio *sbio;
+
+again:
+       /*
+        * grab a fresh bio or wait for one to become available
+        */
+       while (sdev->curr == -1) {
+               spin_lock(&sdev->list_lock);
+               sdev->curr = sdev->first_free;
+               if (sdev->curr != -1) {
+                       sdev->first_free = sdev->bios[sdev->curr]->next_free;
+                       sdev->bios[sdev->curr]->next_free = -1;
+                       sdev->bios[sdev->curr]->count = 0;
+                       spin_unlock(&sdev->list_lock);
+               } else {
+                       spin_unlock(&sdev->list_lock);
+                       wait_event(sdev->list_wait, sdev->first_free != -1);
+               }
+       }
+       sbio = sdev->bios[sdev->curr];
+       if (sbio->count == 0) {
+               sbio->physical = physical;
+               sbio->logical = logical;
+       } else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
+                  sbio->logical + sbio->count * PAGE_SIZE != logical) {
+               scrub_submit(sdev);
+               goto again;
+       }
+       sbio->spag[sbio->count].flags = flags;
+       sbio->spag[sbio->count].generation = gen;
+       sbio->spag[sbio->count].have_csum = 0;
+       sbio->spag[sbio->count].mirror_num = mirror_num;
+       if (csum) {
+               sbio->spag[sbio->count].have_csum = 1;
+               memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
+       }
+       ++sbio->count;
+       if (sbio->count == SCRUB_PAGES_PER_BIO || force)
+               scrub_submit(sdev);
+
+       return 0;
+}
+
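+/*
+ * look up the data checksum for @logical in the csums collected on
+ * sdev->csum_list. Entries that lie completely before @logical are
+ * dropped. Returns 1 and copies the csum if one is found, 0 otherwise.
+ */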
+static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len,
+                          u8 *csum)
+{
+       struct btrfs_ordered_sum *sum = NULL;
+       int ret = 0;
+       unsigned long i;
+       unsigned long num_sectors;
+       u32 sectorsize = sdev->dev->dev_root->sectorsize;
+
+       while (!list_empty(&sdev->csum_list)) {
+               sum = list_first_entry(&sdev->csum_list,
+                                      struct btrfs_ordered_sum, list);
+               if (sum->bytenr > logical)
+                       return 0;
+               if (sum->bytenr + sum->len > logical)
+                       break;
+
+               ++sdev->stat.csum_discards;
+               list_del(&sum->list);
+               kfree(sum);
+               sum = NULL;
+       }
+       if (!sum)
+               return 0;
+
+       num_sectors = sum->len / sectorsize;
+       for (i = 0; i < num_sectors; ++i) {
+               if (sum->sums[i].bytenr == logical) {
+                       memcpy(csum, &sum->sums[i].sum, sdev->csum_size);
+                       ret = 1;
+                       break;
+               }
+       }
+       if (ret && i == num_sectors - 1) {
+               list_del(&sum->list);
+               kfree(sum);
+       }
+       return ret;
+}
+
+/* scrub_extent tries to collect up to 64 kB for each bio */
+static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len,
+                       u64 physical, u64 flags, u64 gen, u64 mirror_num)
+{
+       int ret;
+       u8 csum[BTRFS_CSUM_SIZE];
+
+       while (len) {
+               u64 l = min_t(u64, len, PAGE_SIZE);
+               int have_csum = 0;
+
+               if (flags & BTRFS_EXTENT_FLAG_DATA) {
+                       /* push csums to sbio */
+                       have_csum = scrub_find_csum(sdev, logical, l, csum);
+                       if (have_csum == 0)
+                               ++sdev->stat.no_csum;
+               }
+               ret = scrub_page(sdev, logical, l, physical, flags, gen,
+                                mirror_num, have_csum ? csum : NULL, 0);
+               if (ret)
+                       return ret;
+               len -= l;
+               logical += l;
+               physical += l;
+       }
+       return 0;
+}
+
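+/*
+ * scrub all extents of one stripe of the device. The extent tree is
+ * walked once per stripe to prefetch the leaves, the data csums for the
+ * stripe are collected, and then every extent item in the stripe is
+ * scrubbed. Pause and cancel requests are honoured between stripes.
+ */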
+static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
+       struct map_lookup *map, int num, u64 base, u64 length)
+{
+       struct btrfs_path *path;
+       struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
+       struct btrfs_root *root = fs_info->extent_root;
+       struct btrfs_root *csum_root = fs_info->csum_root;
+       struct btrfs_extent_item *extent;
+       u64 flags;
+       int ret;
+       int slot;
+       int i;
+       u64 nstripes;
+       int start_stripe;
+       struct extent_buffer *l;
+       struct btrfs_key key;
+       u64 physical;
+       u64 logical;
+       u64 generation;
+       u64 mirror_num;
+
+       u64 increment = map->stripe_len;
+       u64 offset;
+
+       nstripes = length;
+       offset = 0;
+       do_div(nstripes, map->stripe_len);
+       if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
+               offset = map->stripe_len * num;
+               increment = map->stripe_len * map->num_stripes;
+               mirror_num = 0;
+       } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
+               int factor = map->num_stripes / map->sub_stripes;
+               offset = map->stripe_len * (num / map->sub_stripes);
+               increment = map->stripe_len * factor;
+               mirror_num = num % map->sub_stripes;
+       } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
+               increment = map->stripe_len;
+               mirror_num = num % map->num_stripes;
+       } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
+               increment = map->stripe_len;
+               mirror_num = num % map->num_stripes;
+       } else {
+               increment = map->stripe_len;
+               mirror_num = 0;
+       }
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       path->reada = 2;
+       path->search_commit_root = 1;
+       path->skip_locking = 1;
+
+       /*
+        * find all extents for each stripe and just read them to get
+        * them into the page cache
+        * FIXME: we can do better and build more intelligent prefetching
+        */
+       logical = base + offset;
+       physical = map->stripes[num].physical;
+       ret = 0;
+       for (i = 0; i < nstripes; ++i) {
+               key.objectid = logical;
+               key.type = BTRFS_EXTENT_ITEM_KEY;
+               key.offset = (u64)0;
+
+               ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+               if (ret < 0)
+                       goto out;
+
+               l = path->nodes[0];
+               slot = path->slots[0];
+               btrfs_item_key_to_cpu(l, &key, slot);
+               if (key.objectid != logical) {
+                       ret = btrfs_previous_item(root, path, 0,
+                                                 BTRFS_EXTENT_ITEM_KEY);
+                       if (ret < 0)
+                               goto out;
+               }
+
+               while (1) {
+                       l = path->nodes[0];
+                       slot = path->slots[0];
+                       if (slot >= btrfs_header_nritems(l)) {
+                               ret = btrfs_next_leaf(root, path);
+                               if (ret == 0)
+                                       continue;
+                               if (ret < 0)
+                                       goto out;
+
+                               break;
+                       }
+                       btrfs_item_key_to_cpu(l, &key, slot);
+
+                       if (key.objectid >= logical + map->stripe_len)
+                               break;
+
+                       path->slots[0]++;
+               }
+               btrfs_release_path(path);
+               logical += increment;
+               physical += map->stripe_len;
+               cond_resched();
+       }
+
+       /*
+        * collect all data csums for the stripe to avoid seeking during
+        * the scrub. This might currently (crc32) end up being about 1MB
+        */
+       start_stripe = 0;
+again:
+       logical = base + offset + start_stripe * increment;
+       for (i = start_stripe; i < nstripes; ++i) {
+               ret = btrfs_lookup_csums_range(csum_root, logical,
+                                              logical + map->stripe_len - 1,
+                                              &sdev->csum_list, 1);
+               if (ret)
+                       goto out;
+
+               logical += increment;
+               cond_resched();
+       }
+       /*
+        * now find all extents for each stripe and scrub them
+        */
+       logical = base + offset + start_stripe * increment;
+       physical = map->stripes[num].physical + start_stripe * map->stripe_len;
+       ret = 0;
+       for (i = start_stripe; i < nstripes; ++i) {
+               /*
+                * canceled?
+                */
+               if (atomic_read(&fs_info->scrub_cancel_req) ||
+                   atomic_read(&sdev->cancel_req)) {
+                       ret = -ECANCELED;
+                       goto out;
+               }
+               /*
+                * check to see if we have to pause
+                */
+               if (atomic_read(&fs_info->scrub_pause_req)) {
+                       /* push queued extents */
+                       scrub_submit(sdev);
+                       wait_event(sdev->list_wait,
+                                  atomic_read(&sdev->in_flight) == 0);
+                       atomic_inc(&fs_info->scrubs_paused);
+                       wake_up(&fs_info->scrub_pause_wait);
+                       mutex_lock(&fs_info->scrub_lock);
+                       while (atomic_read(&fs_info->scrub_pause_req)) {
+                               mutex_unlock(&fs_info->scrub_lock);
+                               wait_event(fs_info->scrub_pause_wait,
+                                  atomic_read(&fs_info->scrub_pause_req) == 0);
+                               mutex_lock(&fs_info->scrub_lock);
+                       }
+                       atomic_dec(&fs_info->scrubs_paused);
+                       mutex_unlock(&fs_info->scrub_lock);
+                       wake_up(&fs_info->scrub_pause_wait);
+                       scrub_free_csums(sdev);
+                       start_stripe = i;
+                       goto again;
+               }
+
+               key.objectid = logical;
+               key.type = BTRFS_EXTENT_ITEM_KEY;
+               key.offset = (u64)0;
+
+               ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+               if (ret < 0)
+                       goto out;
+
+               l = path->nodes[0];
+               slot = path->slots[0];
+               btrfs_item_key_to_cpu(l, &key, slot);
+               if (key.objectid != logical) {
+                       ret = btrfs_previous_item(root, path, 0,
+                                                 BTRFS_EXTENT_ITEM_KEY);
+                       if (ret < 0)
+                               goto out;
+               }
+
+               while (1) {
+                       l = path->nodes[0];
+                       slot = path->slots[0];
+                       if (slot >= btrfs_header_nritems(l)) {
+                               ret = btrfs_next_leaf(root, path);
+                               if (ret == 0)
+                                       continue;
+                               if (ret < 0)
+                                       goto out;
+
+                               break;
+                       }
+                       btrfs_item_key_to_cpu(l, &key, slot);
+
+                       if (key.objectid + key.offset <= logical)
+                               goto next;
+
+                       if (key.objectid >= logical + map->stripe_len)
+                               break;
+
+                       if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY)
+                               goto next;
+
+                       extent = btrfs_item_ptr(l, slot,
+                                               struct btrfs_extent_item);
+                       flags = btrfs_extent_flags(l, extent);
+                       generation = btrfs_extent_generation(l, extent);
+
+                       if (key.objectid < logical &&
+                           (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
+                               printk(KERN_ERR
+                                      "btrfs scrub: tree block %llu spanning "
+                                      "stripes, ignored. logical=%llu\n",
+                                      (unsigned long long)key.objectid,
+                                      (unsigned long long)logical);
+                               goto next;
+                       }
+
+                       /*
+                        * trim extent to this stripe
+                        */
+                       if (key.objectid < logical) {
+                               key.offset -= logical - key.objectid;
+                               key.objectid = logical;
+                       }
+                       if (key.objectid + key.offset >
+                           logical + map->stripe_len) {
+                               key.offset = logical + map->stripe_len -
+                                            key.objectid;
+                       }
+
+                       ret = scrub_extent(sdev, key.objectid, key.offset,
+                                          key.objectid - logical + physical,
+                                          flags, generation, mirror_num);
+                       if (ret)
+                               goto out;
+
+next:
+                       path->slots[0]++;
+               }
+               btrfs_release_path(path);
+               logical += increment;
+               physical += map->stripe_len;
+               spin_lock(&sdev->stat_lock);
+               sdev->stat.last_physical = physical;
+               spin_unlock(&sdev->stat_lock);
+       }
+       /* push queued extents */
+       scrub_submit(sdev);
+
+out:
+       btrfs_free_path(path);
+       return ret < 0 ? ret : 0;
+}
+
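+/*
+ * scrub the part of a chunk that resides on sdev->dev by looking up its
+ * mapping and scrubbing every stripe that belongs to this device
+ */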
+static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev,
+       u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length)
+{
+       struct btrfs_mapping_tree *map_tree =
+               &sdev->dev->dev_root->fs_info->mapping_tree;
+       struct map_lookup *map;
+       struct extent_map *em;
+       int i;
+       int ret = -EINVAL;
+
+       read_lock(&map_tree->map_tree.lock);
+       em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
+       read_unlock(&map_tree->map_tree.lock);
+
+       if (!em)
+               return -EINVAL;
+
+       map = (struct map_lookup *)em->bdev;
+       if (em->start != chunk_offset)
+               goto out;
+
+       if (em->len < length)
+               goto out;
+
+       for (i = 0; i < map->num_stripes; ++i) {
+               if (map->stripes[i].dev == sdev->dev) {
+                       ret = scrub_stripe(sdev, map, i, chunk_offset, length);
+                       if (ret)
+                               goto out;
+               }
+       }
+out:
+       free_extent_map(em);
+
+       return ret;
+}
+
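+/*
+ * walk all dev extents of the scrubbed device between @start and @end and
+ * scrub the corresponding chunks. A reference on the block group is held
+ * while its chunk is scrubbed so that it cannot go away under us.
+ */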
+static noinline_for_stack
+int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
+{
+       struct btrfs_dev_extent *dev_extent = NULL;
+       struct btrfs_path *path;
+       struct btrfs_root *root = sdev->dev->dev_root;
+       struct btrfs_fs_info *fs_info = root->fs_info;
+       u64 length;
+       u64 chunk_tree;
+       u64 chunk_objectid;
+       u64 chunk_offset;
+       int ret;
+       int slot;
+       struct extent_buffer *l;
+       struct btrfs_key key;
+       struct btrfs_key found_key;
+       struct btrfs_block_group_cache *cache;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       path->reada = 2;
+       path->search_commit_root = 1;
+       path->skip_locking = 1;
+
+       key.objectid = sdev->dev->devid;
+       key.offset = 0ull;
+       key.type = BTRFS_DEV_EXTENT_KEY;
+
+       while (1) {
+               ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+               if (ret < 0)
+                       goto out;
+               ret = 0;
+
+               l = path->nodes[0];
+               slot = path->slots[0];
+
+               btrfs_item_key_to_cpu(l, &found_key, slot);
+
+               if (found_key.objectid != sdev->dev->devid)
+                       break;
+
+               if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
+                       break;
+
+               if (found_key.offset >= end)
+                       break;
+
+               if (found_key.offset < key.offset)
+                       break;
+
+               dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
+               length = btrfs_dev_extent_length(l, dev_extent);
+
+               if (found_key.offset + length <= start) {
+                       key.offset = found_key.offset + length;
+                       btrfs_release_path(path);
+                       continue;
+               }
+
+               chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
+               chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
+               chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
+
+               /*
+                * get a reference on the corresponding block group to prevent
+                * the chunk from going away while we scrub it
+                */
+               cache = btrfs_lookup_block_group(fs_info, chunk_offset);
+               if (!cache) {
+                       ret = -ENOENT;
+                       goto out;
+               }
+               ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
+                                 chunk_offset, length);
+               btrfs_put_block_group(cache);
+               if (ret)
+                       break;
+
+               key.offset = found_key.offset + length;
+               btrfs_release_path(path);
+       }
+
+out:
+       btrfs_free_path(path);
+       return ret;
+}
+
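+/*
+ * scrub all superblock copies that fit on the device, using the last
+ * committed transaction as the expected generation
+ */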
+static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
+{
+       int     i;
+       u64     bytenr;
+       u64     gen;
+       int     ret;
+       struct btrfs_device *device = sdev->dev;
+       struct btrfs_root *root = device->dev_root;
+
+       gen = root->fs_info->last_trans_committed;
+
+       for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
+               bytenr = btrfs_sb_offset(i);
+               if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
+                       break;
+
+               ret = scrub_page(sdev, bytenr, PAGE_SIZE, bytenr,
+                                BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1);
+               if (ret)
+                       return ret;
+       }
+       wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
+
+       return 0;
+}
+
+/*
+ * get a reference count on fs_info->scrub_workers. start worker if necessary
+ */
+static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
+{
+       struct btrfs_fs_info *fs_info = root->fs_info;
+
+       mutex_lock(&fs_info->scrub_lock);
+       if (fs_info->scrub_workers_refcnt == 0)
+               btrfs_start_workers(&fs_info->scrub_workers, 1);
+       ++fs_info->scrub_workers_refcnt;
+       mutex_unlock(&fs_info->scrub_lock);
+
+       return 0;
+}
+
+static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
+{
+       struct btrfs_fs_info *fs_info = root->fs_info;
+
+       mutex_lock(&fs_info->scrub_lock);
+       if (--fs_info->scrub_workers_refcnt == 0)
+               btrfs_stop_workers(&fs_info->scrub_workers);
+       WARN_ON(fs_info->scrub_workers_refcnt < 0);
+       mutex_unlock(&fs_info->scrub_lock);
+}
+
+
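+/*
+ * scrub the range from @start to @end of the device given by @devid.
+ * Only one scrub may run per device at a time; with @readonly set,
+ * errors are reported but not repaired. The scrub works on the commit
+ * roots and currently requires sectorsize == leafsize == nodesize ==
+ * PAGE_SIZE.
+ */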
+int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
+                   struct btrfs_scrub_progress *progress, int readonly)
+{
+       struct scrub_dev *sdev;
+       struct btrfs_fs_info *fs_info = root->fs_info;
+       int ret;
+       struct btrfs_device *dev;
+
+       if (root->fs_info->closing)
+               return -EINVAL;
+
+       /*
+        * check some assumptions
+        */
+       if (root->sectorsize != PAGE_SIZE ||
+           root->sectorsize != root->leafsize ||
+           root->sectorsize != root->nodesize) {
+               printk(KERN_ERR "btrfs_scrub: size assumptions fail\n");
+               return -EINVAL;
+       }
+
+       ret = scrub_workers_get(root);
+       if (ret)
+               return ret;
+
+       mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+       dev = btrfs_find_device(root, devid, NULL, NULL);
+       if (!dev || dev->missing) {
+               mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+               scrub_workers_put(root);
+               return -ENODEV;
+       }
+       mutex_lock(&fs_info->scrub_lock);
+
+       if (!dev->in_fs_metadata) {
+               mutex_unlock(&fs_info->scrub_lock);
+               mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+               scrub_workers_put(root);
+               return -ENODEV;
+       }
+
+       if (dev->scrub_device) {
+               mutex_unlock(&fs_info->scrub_lock);
+               mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+               scrub_workers_put(root);
+               return -EINPROGRESS;
+       }
+       sdev = scrub_setup_dev(dev);
+       if (IS_ERR(sdev)) {
+               mutex_unlock(&fs_info->scrub_lock);
+               mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+               scrub_workers_put(root);
+               return PTR_ERR(sdev);
+       }
+       sdev->readonly = readonly;
+       dev->scrub_device = sdev;
+
+       atomic_inc(&fs_info->scrubs_running);
+       mutex_unlock(&fs_info->scrub_lock);
+       mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+
+       down_read(&fs_info->scrub_super_lock);
+       ret = scrub_supers(sdev);
+       up_read(&fs_info->scrub_super_lock);
+
+       if (!ret)
+               ret = scrub_enumerate_chunks(sdev, start, end);
+
+       wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0);
+
+       atomic_dec(&fs_info->scrubs_running);
+       wake_up(&fs_info->scrub_pause_wait);
+
+       if (progress)
+               memcpy(progress, &sdev->stat, sizeof(*progress));
+
+       mutex_lock(&fs_info->scrub_lock);
+       dev->scrub_device = NULL;
+       mutex_unlock(&fs_info->scrub_lock);
+
+       scrub_free_dev(sdev);
+       scrub_workers_put(root);
+
+       return ret;
+}
+
+int btrfs_scrub_pause(struct btrfs_root *root)
+{
+       struct btrfs_fs_info *fs_info = root->fs_info;
+
+       mutex_lock(&fs_info->scrub_lock);
+       atomic_inc(&fs_info->scrub_pause_req);
+       while (atomic_read(&fs_info->scrubs_paused) !=
+              atomic_read(&fs_info->scrubs_running)) {
+               mutex_unlock(&fs_info->scrub_lock);
+               wait_event(fs_info->scrub_pause_wait,
+                          atomic_read(&fs_info->scrubs_paused) ==
+                          atomic_read(&fs_info->scrubs_running));
+               mutex_lock(&fs_info->scrub_lock);
+       }
+       mutex_unlock(&fs_info->scrub_lock);
+
+       return 0;
+}
+
+int btrfs_scrub_continue(struct btrfs_root *root)
+{
+       struct btrfs_fs_info *fs_info = root->fs_info;
+
+       atomic_dec(&fs_info->scrub_pause_req);
+       wake_up(&fs_info->scrub_pause_wait);
+       return 0;
+}
+
+int btrfs_scrub_pause_super(struct btrfs_root *root)
+{
+       down_write(&root->fs_info->scrub_super_lock);
+       return 0;
+}
+
+int btrfs_scrub_continue_super(struct btrfs_root *root)
+{
+       up_write(&root->fs_info->scrub_super_lock);
+       return 0;
+}
+
+int btrfs_scrub_cancel(struct btrfs_root *root)
+{
+       struct btrfs_fs_info *fs_info = root->fs_info;
+
+       mutex_lock(&fs_info->scrub_lock);
+       if (!atomic_read(&fs_info->scrubs_running)) {
+               mutex_unlock(&fs_info->scrub_lock);
+               return -ENOTCONN;
+       }
+
+       atomic_inc(&fs_info->scrub_cancel_req);
+       while (atomic_read(&fs_info->scrubs_running)) {
+               mutex_unlock(&fs_info->scrub_lock);
+               wait_event(fs_info->scrub_pause_wait,
+                          atomic_read(&fs_info->scrubs_running) == 0);
+               mutex_lock(&fs_info->scrub_lock);
+       }
+       atomic_dec(&fs_info->scrub_cancel_req);
+       mutex_unlock(&fs_info->scrub_lock);
+
+       return 0;
+}
+
+int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev)
+{
+       struct btrfs_fs_info *fs_info = root->fs_info;
+       struct scrub_dev *sdev;
+
+       mutex_lock(&fs_info->scrub_lock);
+       sdev = dev->scrub_device;
+       if (!sdev) {
+               mutex_unlock(&fs_info->scrub_lock);
+               return -ENOTCONN;
+       }
+       atomic_inc(&sdev->cancel_req);
+       while (dev->scrub_device) {
+               mutex_unlock(&fs_info->scrub_lock);
+               wait_event(fs_info->scrub_pause_wait,
+                          dev->scrub_device == NULL);
+               mutex_lock(&fs_info->scrub_lock);
+       }
+       mutex_unlock(&fs_info->scrub_lock);
+
+       return 0;
+}
+
+int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid)
+{
+       struct btrfs_fs_info *fs_info = root->fs_info;
+       struct btrfs_device *dev;
+       int ret;
+
+       /*
+        * we have to hold the device_list_mutex here so the device
+        * does not go away in cancel_dev. FIXME: find a better solution
+        */
+       mutex_lock(&fs_info->fs_devices->device_list_mutex);
+       dev = btrfs_find_device(root, devid, NULL, NULL);
+       if (!dev) {
+               mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+               return -ENODEV;
+       }
+       ret = btrfs_scrub_cancel_dev(root, dev);
+       mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+
+       return ret;
+}
+
+int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
+                        struct btrfs_scrub_progress *progress)
+{
+       struct btrfs_device *dev;
+       struct scrub_dev *sdev = NULL;
+
+       mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+       dev = btrfs_find_device(root, devid, NULL, NULL);
+       if (dev)
+               sdev = dev->scrub_device;
+       if (sdev)
+               memcpy(progress, &sdev->stat, sizeof(*progress));
+       mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+
+       return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV;
+}
index 0ac712efcdf293ac8e858fef443b57316b039037..9b2e7e5bc3efa40e2779ba450ee99ac7623087d8 100644 (file)
@@ -39,7 +39,9 @@
 #include <linux/miscdevice.h>
 #include <linux/magic.h>
 #include <linux/slab.h>
+#include <linux/cleancache.h>
 #include "compat.h"
+#include "delayed-inode.h"
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -159,7 +161,7 @@ enum {
        Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
        Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
        Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
-       Opt_enospc_debug, Opt_subvolrootid, Opt_err,
+       Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_err,
 };
 
 static match_table_t tokens = {
@@ -190,6 +192,7 @@ static match_table_t tokens = {
        {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
        {Opt_enospc_debug, "enospc_debug"},
        {Opt_subvolrootid, "subvolrootid=%d"},
+       {Opt_defrag, "autodefrag"},
        {Opt_err, NULL},
 };
 
@@ -368,6 +371,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
                case Opt_enospc_debug:
                        btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
                        break;
+               case Opt_defrag:
+                       printk(KERN_INFO "btrfs: enabling auto defrag\n");
+                       btrfs_set_opt(info->mount_opt, AUTO_DEFRAG);
+                       break;
                case Opt_err:
                        printk(KERN_INFO "btrfs: unrecognized mount option "
                               "'%s'\n", p);
@@ -506,8 +513,10 @@ static struct dentry *get_default_root(struct super_block *sb,
         */
        dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
        di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
-       if (IS_ERR(di))
+       if (IS_ERR(di)) {
+               btrfs_free_path(path);
                return ERR_CAST(di);
+       }
        if (!di) {
                /*
                 * Ok the default dir item isn't there.  This is weird since
@@ -624,6 +633,7 @@ static int btrfs_fill_super(struct super_block *sb,
        sb->s_root = root_dentry;
 
        save_mount_options(sb, data);
+       cleancache_init_fs(sb);
        return 0;
 
 fail_close:
@@ -739,7 +749,7 @@ static int btrfs_set_super(struct super_block *s, void *data)
  *       for multiple device setup.  Make sure to keep it in sync.
  */
 static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
-               const char *dev_name, void *data)
+               const char *device_name, void *data)
 {
        struct block_device *bdev = NULL;
        struct super_block *s;
@@ -762,7 +772,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
        if (error)
                return ERR_PTR(error);
 
-       error = btrfs_scan_one_device(dev_name, mode, fs_type, &fs_devices);
+       error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices);
        if (error)
                goto error_free_subvol_name;
 
@@ -913,6 +923,32 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
        return 0;
 }
 
+/* Used to sort the devices by max_avail (descending sort) */
+static int btrfs_cmp_device_free_bytes(const void *dev_info1,
+                                      const void *dev_info2)
+{
+       if (((struct btrfs_device_info *)dev_info1)->max_avail >
+           ((struct btrfs_device_info *)dev_info2)->max_avail)
+               return -1;
+       else if (((struct btrfs_device_info *)dev_info1)->max_avail <
+                ((struct btrfs_device_info *)dev_info2)->max_avail)
+               return 1;
+       else
+               return 0;
+}
+
+/*
+ * sort the devices by max_avail, which stores the max free extent size of
+ * each device (descending sort).
+ */
+static inline void btrfs_descending_sort_devices(
+                                       struct btrfs_device_info *devices,
+                                       size_t nr_devices)
+{
+       sort(devices, nr_devices, sizeof(struct btrfs_device_info),
+            btrfs_cmp_device_free_bytes, NULL);
+}
+
 /*
  * The helper to calc the free space on the devices that can be used to store
  * file data.
@@ -1206,10 +1242,14 @@ static int __init init_btrfs_fs(void)
        if (err)
                goto free_extent_io;
 
-       err = btrfs_interface_init();
+       err = btrfs_delayed_inode_init();
        if (err)
                goto free_extent_map;
 
+       err = btrfs_interface_init();
+       if (err)
+               goto free_delayed_inode;
+
        err = register_filesystem(&btrfs_fs_type);
        if (err)
                goto unregister_ioctl;
@@ -1219,6 +1259,8 @@ static int __init init_btrfs_fs(void)
 
 unregister_ioctl:
        btrfs_interface_exit();
+free_delayed_inode:
+       btrfs_delayed_inode_exit();
 free_extent_map:
        extent_map_exit();
 free_extent_io:
@@ -1235,6 +1277,7 @@ free_sysfs:
 static void __exit exit_btrfs_fs(void)
 {
        btrfs_destroy_cachep();
+       btrfs_delayed_inode_exit();
        extent_map_exit();
        extent_io_exit();
        btrfs_interface_exit();
index 4ce16ef702a3a9e7384ea9e1bc591c51a5f66c14..c3c223ae66918d9e244fc909c4d6b7836490ef95 100644 (file)
@@ -174,86 +174,9 @@ static const struct sysfs_ops btrfs_root_attr_ops = {
        .store  = btrfs_root_attr_store,
 };
 
-static struct kobj_type btrfs_root_ktype = {
-       .default_attrs  = btrfs_root_attrs,
-       .sysfs_ops      = &btrfs_root_attr_ops,
-       .release        = btrfs_root_release,
-};
-
-static struct kobj_type btrfs_super_ktype = {
-       .default_attrs  = btrfs_super_attrs,
-       .sysfs_ops      = &btrfs_super_attr_ops,
-       .release        = btrfs_super_release,
-};
-
 /* /sys/fs/btrfs/ entry */
 static struct kset *btrfs_kset;
 
-int btrfs_sysfs_add_super(struct btrfs_fs_info *fs)
-{
-       int error;
-       char *name;
-       char c;
-       int len = strlen(fs->sb->s_id) + 1;
-       int i;
-
-       name = kmalloc(len, GFP_NOFS);
-       if (!name) {
-               error = -ENOMEM;
-               goto fail;
-       }
-
-       for (i = 0; i < len; i++) {
-               c = fs->sb->s_id[i];
-               if (c == '/' || c == '\\')
-                       c = '!';
-               name[i] = c;
-       }
-       name[len] = '\0';
-
-       fs->super_kobj.kset = btrfs_kset;
-       error = kobject_init_and_add(&fs->super_kobj, &btrfs_super_ktype,
-                                    NULL, "%s", name);
-       kfree(name);
-       if (error)
-               goto fail;
-
-       return 0;
-
-fail:
-       printk(KERN_ERR "btrfs: sysfs creation for super failed\n");
-       return error;
-}
-
-int btrfs_sysfs_add_root(struct btrfs_root *root)
-{
-       int error;
-
-       error = kobject_init_and_add(&root->root_kobj, &btrfs_root_ktype,
-                                    &root->fs_info->super_kobj,
-                                    "%s", root->name);
-       if (error)
-               goto fail;
-
-       return 0;
-
-fail:
-       printk(KERN_ERR "btrfs: sysfs creation for root failed\n");
-       return error;
-}
-
-void btrfs_sysfs_del_root(struct btrfs_root *root)
-{
-       kobject_put(&root->root_kobj);
-       wait_for_completion(&root->kobj_unregister);
-}
-
-void btrfs_sysfs_del_super(struct btrfs_fs_info *fs)
-{
-       kobject_put(&fs->super_kobj);
-       wait_for_completion(&fs->kobj_unregister);
-}
-
 int btrfs_init_sysfs(void)
 {
        btrfs_kset = kset_create_and_add("btrfs", NULL, fs_kobj);
index c571734d5e5a802ea73a0c6458dd41c0b2e56659..dc80f7156923ae120c5496f3fd0093a8616fbeec 100644 (file)
@@ -27,6 +27,7 @@
 #include "transaction.h"
 #include "locking.h"
 #include "tree-log.h"
+#include "inode-map.h"
 
 #define BTRFS_ROOT_TRANS_TAG 0
 
@@ -80,8 +81,7 @@ static noinline int join_transaction(struct btrfs_root *root)
                INIT_LIST_HEAD(&cur_trans->pending_snapshots);
                list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
                extent_io_tree_init(&cur_trans->dirty_pages,
-                                    root->fs_info->btree_inode->i_mapping,
-                                    GFP_NOFS);
+                                    root->fs_info->btree_inode->i_mapping);
                spin_lock(&root->fs_info->new_trans_lock);
                root->fs_info->running_transaction = cur_trans;
                spin_unlock(&root->fs_info->new_trans_lock);
@@ -347,49 +347,6 @@ out_unlock:
        return ret;
 }
 
-#if 0
-/*
- * rate limit against the drop_snapshot code.  This helps to slow down new
- * operations if the drop_snapshot code isn't able to keep up.
- */
-static void throttle_on_drops(struct btrfs_root *root)
-{
-       struct btrfs_fs_info *info = root->fs_info;
-       int harder_count = 0;
-
-harder:
-       if (atomic_read(&info->throttles)) {
-               DEFINE_WAIT(wait);
-               int thr;
-               thr = atomic_read(&info->throttle_gen);
-
-               do {
-                       prepare_to_wait(&info->transaction_throttle,
-                                       &wait, TASK_UNINTERRUPTIBLE);
-                       if (!atomic_read(&info->throttles)) {
-                               finish_wait(&info->transaction_throttle, &wait);
-                               break;
-                       }
-                       schedule();
-                       finish_wait(&info->transaction_throttle, &wait);
-               } while (thr == atomic_read(&info->throttle_gen));
-               harder_count++;
-
-               if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
-                   harder_count < 2)
-                       goto harder;
-
-               if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
-                   harder_count < 10)
-                       goto harder;
-
-               if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
-                   harder_count < 20)
-                       goto harder;
-       }
-}
-#endif
-
 void btrfs_throttle(struct btrfs_root *root)
 {
        mutex_lock(&root->fs_info->trans_mutex);
@@ -487,19 +444,40 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 int btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root)
 {
-       return __btrfs_end_transaction(trans, root, 0, 1);
+       int ret;
+
+       ret = __btrfs_end_transaction(trans, root, 0, 1);
+       if (ret)
+               return ret;
+       return 0;
 }
 
 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
 {
-       return __btrfs_end_transaction(trans, root, 1, 1);
+       int ret;
+
+       ret = __btrfs_end_transaction(trans, root, 1, 1);
+       if (ret)
+               return ret;
+       return 0;
 }
 
 int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root)
 {
-       return __btrfs_end_transaction(trans, root, 0, 0);
+       int ret;
+
+       ret = __btrfs_end_transaction(trans, root, 0, 0);
+       if (ret)
+               return ret;
+       return 0;
+}
+
+int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
+                               struct btrfs_root *root)
+{
+       return __btrfs_end_transaction(trans, root, 1, 1);
 }
 
 /*
@@ -760,8 +738,14 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
                        btrfs_update_reloc_root(trans, root);
                        btrfs_orphan_commit_root(trans, root);
 
+                       btrfs_save_ino_cache(root, trans);
+
                        if (root->commit_root != root->node) {
+                               mutex_lock(&root->fs_commit_mutex);
                                switch_commit_root(root);
+                               btrfs_unpin_free_ino(root);
+                               mutex_unlock(&root->fs_commit_mutex);
+
                                btrfs_set_root_node(&root->root_item,
                                                    root->node);
                        }
@@ -809,97 +793,6 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
        return ret;
 }
 
-#if 0
-/*
- * when dropping snapshots, we generate a ton of delayed refs, and it makes
- * sense not to join the transaction while it is trying to flush the current
- * queue of delayed refs out.
- *
- * This is used by the drop snapshot code only
- */
-static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
-{
-       DEFINE_WAIT(wait);
-
-       mutex_lock(&info->trans_mutex);
-       while (info->running_transaction &&
-              info->running_transaction->delayed_refs.flushing) {
-               prepare_to_wait(&info->transaction_wait, &wait,
-                               TASK_UNINTERRUPTIBLE);
-               mutex_unlock(&info->trans_mutex);
-
-               schedule();
-
-               mutex_lock(&info->trans_mutex);
-               finish_wait(&info->transaction_wait, &wait);
-       }
-       mutex_unlock(&info->trans_mutex);
-       return 0;
-}
-
-/*
- * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
- * all of them
- */
-int btrfs_drop_dead_root(struct btrfs_root *root)
-{
-       struct btrfs_trans_handle *trans;
-       struct btrfs_root *tree_root = root->fs_info->tree_root;
-       unsigned long nr;
-       int ret;
-
-       while (1) {
-               /*
-                * we don't want to jump in and create a bunch of
-                * delayed refs if the transaction is starting to close
-                */
-               wait_transaction_pre_flush(tree_root->fs_info);
-               trans = btrfs_start_transaction(tree_root, 1);
-
-               /*
-                * we've joined a transaction, make sure it isn't
-                * closing right now
-                */
-               if (trans->transaction->delayed_refs.flushing) {
-                       btrfs_end_transaction(trans, tree_root);
-                       continue;
-               }
-
-               ret = btrfs_drop_snapshot(trans, root);
-               if (ret != -EAGAIN)
-                       break;
-
-               ret = btrfs_update_root(trans, tree_root,
-                                       &root->root_key,
-                                       &root->root_item);
-               if (ret)
-                       break;
-
-               nr = trans->blocks_used;
-               ret = btrfs_end_transaction(trans, tree_root);
-               BUG_ON(ret);
-
-               btrfs_btree_balance_dirty(tree_root, nr);
-               cond_resched();
-       }
-       BUG_ON(ret);
-
-       ret = btrfs_del_root(trans, tree_root, &root->root_key);
-       BUG_ON(ret);
-
-       nr = trans->blocks_used;
-       ret = btrfs_end_transaction(trans, tree_root);
-       BUG_ON(ret);
-
-       free_extent_buffer(root->node);
-       free_extent_buffer(root->commit_root);
-       kfree(root);
-
-       btrfs_btree_balance_dirty(tree_root, nr);
-       return ret;
-}
-#endif
-
 /*
  * new snapshots need to be created at a very specific time in the
  * transaction commit.  This does the actual creation
@@ -930,7 +823,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                goto fail;
        }
 
-       ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
+       ret = btrfs_find_free_objectid(tree_root, &objectid);
        if (ret) {
                pending->error = ret;
                goto fail;
@@ -967,7 +860,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        BUG_ON(ret);
        ret = btrfs_insert_dir_item(trans, parent_root,
                                dentry->d_name.name, dentry->d_name.len,
-                               parent_inode->i_ino, &key,
+                               parent_inode, &key,
                                BTRFS_FT_DIR, index);
        BUG_ON(ret);
 
@@ -1009,7 +902,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
         */
        ret = btrfs_add_root_ref(trans, tree_root, objectid,
                                 parent_root->root_key.objectid,
-                                parent_inode->i_ino, index,
+                                btrfs_ino(parent_inode), index,
                                 dentry->d_name.name, dentry->d_name.len);
        BUG_ON(ret);
        dput(parent);
@@ -1037,6 +930,14 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
        int ret;
 
        list_for_each_entry(pending, head, list) {
+               /*
+                * We must deal with the delayed items before creating
+                * snapshots, or we will create a snapshot with inconsistent
+
+                * information.
+                */
+               ret = btrfs_run_delayed_items(trans, fs_info->fs_root);
+               BUG_ON(ret);
+
                ret = create_pending_snapshot(trans, fs_info, pending);
                BUG_ON(ret);
        }
@@ -1290,6 +1191,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                        BUG_ON(ret);
                }
 
+               ret = btrfs_run_delayed_items(trans, root);
+               BUG_ON(ret);
+
                /*
                 * rename don't use btrfs_join_transaction, so, once we
                 * set the transaction to blocked above, we aren't going
@@ -1316,11 +1220,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
        ret = create_pending_snapshots(trans, root->fs_info);
        BUG_ON(ret);
 
+       ret = btrfs_run_delayed_items(trans, root);
+       BUG_ON(ret);
+
        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        BUG_ON(ret);
 
        WARN_ON(cur_trans != trans->transaction);
 
+       btrfs_scrub_pause(root);
        /* btrfs_commit_tree_roots is responsible for getting the
         * various roots consistent with each other.  Every pointer
         * in the tree of tree roots has to point to the most up to date
@@ -1405,6 +1313,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        mutex_unlock(&root->fs_info->trans_mutex);
 
+       btrfs_scrub_continue(root);
+
        if (current->journal_info == trans)
                current->journal_info = NULL;
 
@@ -1432,6 +1342,8 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
                root = list_entry(list.next, struct btrfs_root, root_list);
                list_del(&root->root_list);
 
+               btrfs_kill_all_delayed_nodes(root);
+
                if (btrfs_header_backref_rev(root->node) <
                    BTRFS_MIXED_BACKREF_REV)
                        btrfs_drop_snapshot(root, NULL, 0);
index e441acc6c584a7e03859d3e63cd3e455312ff631..804c88639e5de00a4065267aaf52d050dd88303c 100644 (file)
@@ -101,11 +101,8 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
 int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root);
-int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
-                           struct btrfs_root *root);
 
 int btrfs_add_dead_root(struct btrfs_root *root);
-int btrfs_drop_dead_root(struct btrfs_root *root);
 int btrfs_defrag_root(struct btrfs_root *root, int cacheonly);
 int btrfs_clean_old_snapshots(struct btrfs_root *root);
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
@@ -115,6 +112,8 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
                                   int wait_for_unblock);
 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root);
+int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
+                               struct btrfs_root *root);
 int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root);
 void btrfs_throttle(struct btrfs_root *root);
index 992ab425599d10bfe0b32e866b9c4bd366a1a9b9..3b580ee8ab1ddc01eb06028c8790c815562c977a 100644 (file)
@@ -97,7 +97,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
                ret = 0;
                goto out;
        }
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
        wret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 
        if (wret < 0) {
index f997ec0c1ba4b8efb6b46f88160de43cda3da4f2..592396c6dc47465bccf4422d6259d0dfe2885e0c 100644 (file)
@@ -333,13 +333,13 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
                        goto insert;
 
                if (item_size == 0) {
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        return 0;
                }
                dst_copy = kmalloc(item_size, GFP_NOFS);
                src_copy = kmalloc(item_size, GFP_NOFS);
                if (!dst_copy || !src_copy) {
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        kfree(dst_copy);
                        kfree(src_copy);
                        return -ENOMEM;
@@ -361,13 +361,13 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
                 * sync
                 */
                if (ret == 0) {
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        return 0;
                }
 
        }
 insert:
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
        /* try to insert the key into the destination tree */
        ret = btrfs_insert_empty_item(trans, root, path,
                                      key, item_size);
@@ -382,7 +382,6 @@ insert:
                } else if (found_size < item_size) {
                        ret = btrfs_extend_item(trans, root, path,
                                                item_size - found_size);
-                       BUG_ON(ret);
                }
        } else if (ret) {
                return ret;
@@ -438,7 +437,7 @@ insert:
        }
 no_copy:
        btrfs_mark_buffer_dirty(path->nodes[0]);
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
        return 0;
 }
 
@@ -519,7 +518,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
         * file.  This must be done before the btrfs_drop_extents run
         * so we don't try to drop this extent.
         */
-       ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+       ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
                                       start, 0);
 
        if (ret == 0 &&
@@ -544,11 +543,11 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
                 * we don't have to do anything
                 */
                if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        goto out;
                }
        }
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 
        saved_nbytes = inode_get_bytes(inode);
        /* drop any overlapping extents */
@@ -590,6 +589,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
                                                ins.objectid, ins.offset,
                                                0, root->root_key.objectid,
                                                key->objectid, offset);
+                               BUG_ON(ret);
                        } else {
                                /*
                                 * insert the extent pointer in the extent
@@ -600,7 +600,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
                                                key->objectid, offset, &ins);
                                BUG_ON(ret);
                        }
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
 
                        if (btrfs_file_extent_compression(eb, item)) {
                                csum_start = ins.objectid;
@@ -614,7 +614,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
 
                        ret = btrfs_lookup_csums_range(root->log_root,
                                                csum_start, csum_end - 1,
-                                               &ordered_sums);
+                                               &ordered_sums, 0);
                        BUG_ON(ret);
                        while (!list_empty(&ordered_sums)) {
                                struct btrfs_ordered_sum *sums;
@@ -629,7 +629,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
                                kfree(sums);
                        }
                } else {
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                }
        } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
                /* inline extents are easy, we just overwrite them */
@@ -675,10 +675,13 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
                return -ENOMEM;
 
        read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 
        inode = read_one_inode(root, location.objectid);
-       BUG_ON(!inode);
+       if (!inode) {
+               kfree(name);
+               return -EIO;
+       }
 
        ret = link_to_fixup_dir(trans, root, path, location.objectid);
        BUG_ON(ret);
@@ -713,7 +716,7 @@ static noinline int inode_in_dir(struct btrfs_root *root,
                        goto out;
        } else
                goto out;
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 
        di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
        if (di && !IS_ERR(di)) {
@@ -724,7 +727,7 @@ static noinline int inode_in_dir(struct btrfs_root *root,
                goto out;
        match = 1;
 out:
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
        return match;
 }
 
@@ -817,7 +820,10 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
                return -ENOENT;
 
        inode = read_one_inode(root, key->objectid);
-       BUG_ON(!inode);
+       if (!inode) {
+               iput(dir);
+               return -EIO;
+       }
 
        ref_ptr = btrfs_item_ptr_offset(eb, slot);
        ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
@@ -832,7 +838,7 @@ again:
        read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen);
 
        /* if we already have a perfect match, we're done */
-       if (inode_in_dir(root, path, dir->i_ino, inode->i_ino,
+       if (inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
                         btrfs_inode_ref_index(eb, ref),
                         name, namelen)) {
                goto out;
@@ -884,7 +890,7 @@ again:
                        if (!backref_in_log(log, key, victim_name,
                                            victim_name_len)) {
                                btrfs_inc_nlink(inode);
-                               btrfs_release_path(root, path);
+                               btrfs_release_path(path);
 
                                ret = btrfs_unlink_inode(trans, root, dir,
                                                         inode, victim_name,
@@ -901,7 +907,7 @@ again:
                 */
                search_done = 1;
        }
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 
 insert:
        /* insert our name */
@@ -922,7 +928,7 @@ out:
        BUG_ON(ret);
 
 out_nowrite:
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
        iput(dir);
        iput(inode);
        return 0;
@@ -960,8 +966,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
        unsigned long ptr;
        unsigned long ptr_end;
        int name_len;
+       u64 ino = btrfs_ino(inode);
 
-       key.objectid = inode->i_ino;
+       key.objectid = ino;
        key.type = BTRFS_INODE_REF_KEY;
        key.offset = (u64)-1;
 
@@ -980,7 +987,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
                }
                btrfs_item_key_to_cpu(path->nodes[0], &key,
                                      path->slots[0]);
-               if (key.objectid != inode->i_ino ||
+               if (key.objectid != ino ||
                    key.type != BTRFS_INODE_REF_KEY)
                        break;
                ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
@@ -999,9 +1006,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
                if (key.offset == 0)
                        break;
                key.offset--;
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
        }
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
        if (nlink != inode->i_nlink) {
                inode->i_nlink = nlink;
                btrfs_update_inode(trans, root, inode);
@@ -1011,10 +1018,10 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
        if (inode->i_nlink == 0) {
                if (S_ISDIR(inode->i_mode)) {
                        ret = replay_dir_deletes(trans, root, NULL, path,
-                                                inode->i_ino, 1);
+                                                ino, 1);
                        BUG_ON(ret);
                }
-               ret = insert_orphan_item(trans, root, inode->i_ino);
+               ret = insert_orphan_item(trans, root, ino);
                BUG_ON(ret);
        }
        btrfs_free_path(path);
@@ -1050,11 +1057,13 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
                        break;
 
                ret = btrfs_del_item(trans, root, path);
-               BUG_ON(ret);
+               if (ret)
+                       goto out;
 
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
                inode = read_one_inode(root, key.offset);
-               BUG_ON(!inode);
+               if (!inode)
+                       return -EIO;
 
                ret = fixup_inode_link_count(trans, root, inode);
                BUG_ON(ret);
@@ -1068,8 +1077,10 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
                 */
                key.offset = (u64)-1;
        }
-       btrfs_release_path(root, path);
-       return 0;
+       ret = 0;
+out:
+       btrfs_release_path(path);
+       return ret;
 }
 
 
@@ -1088,7 +1099,8 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
        struct inode *inode;
 
        inode = read_one_inode(root, objectid);
-       BUG_ON(!inode);
+       if (!inode)
+               return -EIO;
 
        key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
        btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
@@ -1096,7 +1108,7 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
 
        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
 
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
        if (ret == 0) {
                btrfs_inc_nlink(inode);
                btrfs_update_inode(trans, root, inode);
@@ -1175,7 +1187,8 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
        int ret;
 
        dir = read_one_inode(root, key->objectid);
-       BUG_ON(!dir);
+       if (!dir)
+               return -EIO;
 
        name_len = btrfs_dir_name_len(eb, di);
        name = kmalloc(name_len, GFP_NOFS);
@@ -1192,7 +1205,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
                exists = 1;
        else
                exists = 0;
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 
        if (key->type == BTRFS_DIR_ITEM_KEY) {
                dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
@@ -1205,7 +1218,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
        } else {
                BUG();
        }
-       if (!dst_di || IS_ERR(dst_di)) {
+       if (IS_ERR_OR_NULL(dst_di)) {
                /* we need a sequence number to insert, so we only
                 * do inserts for the BTRFS_DIR_INDEX_KEY types
                 */
@@ -1236,13 +1249,13 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
        if (key->type == BTRFS_DIR_INDEX_KEY)
                goto insert;
 out:
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
        kfree(name);
        iput(dir);
        return 0;
 
 insert:
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
        ret = insert_one_name(trans, root, path, key->objectid, key->offset,
                              name, name_len, log_type, &log_key);
 
@@ -1363,7 +1376,7 @@ next:
        *end_ret = found_end;
        ret = 0;
 out:
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
        return ret;
 }
 
@@ -1426,12 +1439,15 @@ again:
                                                     dir_key->offset,
                                                     name, name_len, 0);
                }
-               if (!log_di || IS_ERR(log_di)) {
+               if (IS_ERR_OR_NULL(log_di)) {
                        btrfs_dir_item_key_to_cpu(eb, di, &location);
-                       btrfs_release_path(root, path);
-                       btrfs_release_path(log, log_path);
+                       btrfs_release_path(path);
+                       btrfs_release_path(log_path);
                        inode = read_one_inode(root, location.objectid);
-                       BUG_ON(!inode);
+                       if (!inode) {
+                               kfree(name);
+                               return -EIO;
+                       }
 
                        ret = link_to_fixup_dir(trans, root,
                                                path, location.objectid);
@@ -1453,7 +1469,7 @@ again:
                        ret = 0;
                        goto out;
                }
-               btrfs_release_path(log, log_path);
+               btrfs_release_path(log_path);
                kfree(name);
 
                ptr = (unsigned long)(di + 1);
@@ -1461,8 +1477,8 @@ again:
        }
        ret = 0;
 out:
-       btrfs_release_path(root, path);
-       btrfs_release_path(log, log_path);
+       btrfs_release_path(path);
+       btrfs_release_path(log_path);
        return ret;
 }
 
@@ -1550,7 +1566,7 @@ again:
                                break;
                        dir_key.offset = found_key.offset + 1;
                }
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
                if (range_end == (u64)-1)
                        break;
                range_start = range_end + 1;
@@ -1561,11 +1577,11 @@ next_type:
        if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
                key_type = BTRFS_DIR_LOG_INDEX_KEY;
                dir_key.type = BTRFS_DIR_INDEX_KEY;
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
                goto again;
        }
 out:
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
        btrfs_free_path(log_path);
        iput(dir);
        return ret;
@@ -2093,7 +2109,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
         * the running transaction open, so a full commit can't hop
         * in and cause problems either.
         */
+       btrfs_scrub_pause_super(root);
        write_ctree_super(trans, root->fs_info->tree_root, 1);
+       btrfs_scrub_continue_super(root);
        ret = 0;
 
        mutex_lock(&root->log_mutex);
@@ -2197,6 +2215,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
        int ret;
        int err = 0;
        int bytes_del = 0;
+       u64 dir_ino = btrfs_ino(dir);
 
        if (BTRFS_I(dir)->logged_trans < trans->transid)
                return 0;
@@ -2214,7 +2233,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
                goto out_unlock;
        }
 
-       di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino,
+       di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
                                   name, name_len, -1);
        if (IS_ERR(di)) {
                err = PTR_ERR(di);
@@ -2225,8 +2244,8 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
                bytes_del += name_len;
                BUG_ON(ret);
        }
-       btrfs_release_path(log, path);
-       di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino,
+       btrfs_release_path(path);
+       di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
                                         index, name, name_len, -1);
        if (IS_ERR(di)) {
                err = PTR_ERR(di);
@@ -2244,10 +2263,10 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
        if (bytes_del) {
                struct btrfs_key key;
 
-               key.objectid = dir->i_ino;
+               key.objectid = dir_ino;
                key.offset = 0;
                key.type = BTRFS_INODE_ITEM_KEY;
-               btrfs_release_path(log, path);
+               btrfs_release_path(path);
 
                ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
                if (ret < 0) {
@@ -2269,7 +2288,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
                        btrfs_mark_buffer_dirty(path->nodes[0]);
                } else
                        ret = 0;
-               btrfs_release_path(log, path);
+               btrfs_release_path(path);
        }
 fail:
        btrfs_free_path(path);
@@ -2303,7 +2322,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
        log = root->log_root;
        mutex_lock(&BTRFS_I(inode)->log_mutex);
 
-       ret = btrfs_del_inode_ref(trans, log, name, name_len, inode->i_ino,
+       ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
                                  dirid, &index);
        mutex_unlock(&BTRFS_I(inode)->log_mutex);
        if (ret == -ENOSPC) {
@@ -2344,7 +2363,7 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
                              struct btrfs_dir_log_item);
        btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
        btrfs_mark_buffer_dirty(path->nodes[0]);
-       btrfs_release_path(log, path);
+       btrfs_release_path(path);
        return 0;
 }
 
@@ -2369,13 +2388,14 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
        int nritems;
        u64 first_offset = min_offset;
        u64 last_offset = (u64)-1;
+       u64 ino = btrfs_ino(inode);
 
        log = root->log_root;
-       max_key.objectid = inode->i_ino;
+       max_key.objectid = ino;
        max_key.offset = (u64)-1;
        max_key.type = key_type;
 
-       min_key.objectid = inode->i_ino;
+       min_key.objectid = ino;
        min_key.type = key_type;
        min_key.offset = min_offset;
 
@@ -2388,18 +2408,17 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
         * we didn't find anything from this transaction, see if there
         * is anything at all
         */
-       if (ret != 0 || min_key.objectid != inode->i_ino ||
-           min_key.type != key_type) {
-               min_key.objectid = inode->i_ino;
+       if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
+               min_key.objectid = ino;
                min_key.type = key_type;
                min_key.offset = (u64)-1;
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
                ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
                if (ret < 0) {
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        return ret;
                }
-               ret = btrfs_previous_item(root, path, inode->i_ino, key_type);
+               ret = btrfs_previous_item(root, path, ino, key_type);
 
                /* if ret == 0 there are items for this type,
                 * create a range to tell us the last key of this type.
@@ -2417,7 +2436,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
        }
 
        /* go backward to find any previous key */
-       ret = btrfs_previous_item(root, path, inode->i_ino, key_type);
+       ret = btrfs_previous_item(root, path, ino, key_type);
        if (ret == 0) {
                struct btrfs_key tmp;
                btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
@@ -2432,7 +2451,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
                        }
                }
        }
-       btrfs_release_path(root, path);
+       btrfs_release_path(path);
 
        /* find the first key from this transaction again */
        ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
@@ -2452,8 +2471,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
                for (i = path->slots[0]; i < nritems; i++) {
                        btrfs_item_key_to_cpu(src, &min_key, i);
 
-                       if (min_key.objectid != inode->i_ino ||
-                           min_key.type != key_type)
+                       if (min_key.objectid != ino || min_key.type != key_type)
                                goto done;
                        ret = overwrite_item(trans, log, dst_path, src, i,
                                             &min_key);
@@ -2474,7 +2492,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
                        goto done;
                }
                btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
-               if (tmp.objectid != inode->i_ino || tmp.type != key_type) {
+               if (tmp.objectid != ino || tmp.type != key_type) {
                        last_offset = (u64)-1;
                        goto done;
                }
@@ -2490,8 +2508,8 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
                }
        }
 done:
-       btrfs_release_path(root, path);
-       btrfs_release_path(log, dst_path);
+       btrfs_release_path(path);
+       btrfs_release_path(dst_path);
 
        if (err == 0) {
                *last_offset_ret = last_offset;
@@ -2500,8 +2518,7 @@ done:
                 * is valid
                 */
                ret = insert_dir_log_key(trans, log, path, key_type,
-                                        inode->i_ino, first_offset,
-                                        last_offset);
+                                        ino, first_offset, last_offset);
                if (ret)
                        err = ret;
        }
@@ -2587,10 +2604,11 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
                        break;
 
                ret = btrfs_del_item(trans, log, path);
-               BUG_ON(ret);
-               btrfs_release_path(log, path);
+               if (ret)
+                       break;
+               btrfs_release_path(path);
        }
-       btrfs_release_path(log, path);
+       btrfs_release_path(path);
        return ret;
 }
 
@@ -2665,6 +2683,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
                        extent = btrfs_item_ptr(src, start_slot + i,
                                                struct btrfs_file_extent_item);
 
+                       if (btrfs_file_extent_generation(src, extent) < trans->transid)
+                               continue;
+
                        found_type = btrfs_file_extent_type(src, extent);
                        if (found_type == BTRFS_FILE_EXTENT_REG ||
                            found_type == BTRFS_FILE_EXTENT_PREALLOC) {
@@ -2689,14 +2710,14 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
                                ret = btrfs_lookup_csums_range(
                                                log->fs_info->csum_root,
                                                ds + cs, ds + cs + cl - 1,
-                                               &ordered_sums);
+                                               &ordered_sums, 0);
                                BUG_ON(ret);
                        }
                }
        }
 
        btrfs_mark_buffer_dirty(dst_path->nodes[0]);
-       btrfs_release_path(log, dst_path);
+       btrfs_release_path(dst_path);
        kfree(ins_data);
 
        /*
@@ -2745,6 +2766,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        int nritems;
        int ins_start_slot = 0;
        int ins_nr;
+       u64 ino = btrfs_ino(inode);
 
        log = root->log_root;
 
@@ -2757,11 +2779,11 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                return -ENOMEM;
        }
 
-       min_key.objectid = inode->i_ino;
+       min_key.objectid = ino;
        min_key.type = BTRFS_INODE_ITEM_KEY;
        min_key.offset = 0;
 
-       max_key.objectid = inode->i_ino;
+       max_key.objectid = ino;
 
        /* today the code can only do partial logging of directories */
        if (!S_ISDIR(inode->i_mode))
@@ -2773,6 +2795,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                max_key.type = (u8)-1;
        max_key.offset = (u64)-1;
 
+       ret = btrfs_commit_inode_delayed_items(trans, inode);
+       if (ret) {
+               btrfs_free_path(path);
+               btrfs_free_path(dst_path);
+               return ret;
+       }
+
        mutex_lock(&BTRFS_I(inode)->log_mutex);
 
        /*
@@ -2784,8 +2813,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 
                if (inode_only == LOG_INODE_EXISTS)
                        max_key_type = BTRFS_XATTR_ITEM_KEY;
-               ret = drop_objectid_items(trans, log, path,
-                                         inode->i_ino, max_key_type);
+               ret = drop_objectid_items(trans, log, path, ino, max_key_type);
        } else {
                ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0);
        }
@@ -2803,7 +2831,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                        break;
 again:
                /* note, ins_nr might be > 0 here, cleanup outside the loop */
-               if (min_key.objectid != inode->i_ino)
+               if (min_key.objectid != ino)
                        break;
                if (min_key.type > max_key.type)
                        break;
@@ -2845,7 +2873,7 @@ next_slot:
                        }
                        ins_nr = 0;
                }
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
 
                if (min_key.offset < (u64)-1)
                        min_key.offset++;
@@ -2868,8 +2896,8 @@ next_slot:
        }
        WARN_ON(ins_nr);
        if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
-               btrfs_release_path(root, path);
-               btrfs_release_path(log, dst_path);
+               btrfs_release_path(path);
+               btrfs_release_path(dst_path);
                ret = log_directory_changes(trans, root, inode, path, dst_path);
                if (ret) {
                        err = ret;
@@ -3136,7 +3164,7 @@ again:
                }
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
-               btrfs_release_path(log_root_tree, path);
+               btrfs_release_path(path);
                if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
                        break;
 
@@ -3171,7 +3199,7 @@ again:
                if (found_key.offset == 0)
                        break;
        }
-       btrfs_release_path(log_root_tree, path);
+       btrfs_release_path(path);
 
        /* step one is to pin it all, step two is to replay just inodes */
        if (wc.pin) {
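
Several tree-log hunks above turn a BUG_ON(!inode) after read_one_inode() into a
real error path: the name buffer already copied out of the log is freed (and, in
add_inode_ref(), the directory reference that was taken is dropped) and -EIO is
returned, instead of halting the machine during log replay. A compact userspace
sketch of the same cleanup discipline, with invented helpers standing in for the
kernel ones:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct inode { unsigned long ino; };

    /* Toy lookup: pretend only even inode numbers exist. */
    static struct inode *lookup_inode(unsigned long ino)
    {
        if (ino & 1)
            return NULL;
        struct inode *inode = malloc(sizeof(*inode));
        if (inode)
            inode->ino = ino;
        return inode;
    }

    static int replay_dir_item(const char *raw, size_t len, unsigned long ino)
    {
        char *name = malloc(len + 1);
        if (!name)
            return -ENOMEM;
        memcpy(name, raw, len);
        name[len] = '\0';

        struct inode *inode = lookup_inode(ino);
        if (!inode) {
            free(name);           /* release what we already hold ... */
            return -EIO;          /* ... then report, don't crash */
        }

        printf("replaying '%s' for inode %lu\n", name, inode->ino);
        free(inode);
        free(name);
        return 0;
    }

    int main(void)
    {
        replay_dir_item("good", 4, 2);
        if (replay_dir_item("bad", 3, 3) == -EIO)
            printf("lookup failed, cleaned up and returned -EIO\n");
        return 0;
    }
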
index 3dfae84c8cc8bc1fada072bb45a3676dcedb323d..2270ac58d7469849c748de5bf55aa96ece38f7a2 100644 (file)
@@ -38,7 +38,6 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               const char *name, int name_len,
                               struct inode *inode, u64 dirid);
-int btrfs_join_running_log_trans(struct btrfs_root *root);
 int btrfs_end_log_trans(struct btrfs_root *root);
 int btrfs_pin_log_trans(struct btrfs_root *root);
 int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/version.sh b/fs/btrfs/version.sh
deleted file mode 100644 (file)
index 1ca1952..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-#
-# determine-version -- report a useful version for releases
-#
-# Copyright 2008, Aron Griffis <agriffis@n01se.net>
-# Copyright 2008, Oracle
-# Released under the GNU GPLv2
-v="v0.16"
-
-which git &> /dev/null
-if [ $? == 0 ]; then
-    git branch >& /dev/null
-    if [ $? == 0 ]; then
-           if head=`git rev-parse --verify HEAD 2>/dev/null`; then
-               if tag=`git describe --tags 2>/dev/null`; then
-                   v="$tag"
-               fi
-
-               # Are there uncommitted changes?
-               git update-index --refresh --unmerged > /dev/null
-               if git diff-index --name-only HEAD | \
-                   grep -v "^scripts/package" \
-                   | read dummy; then
-                   v="$v"-dirty
-               fi
-           fi
-    fi
-fi
-echo "#ifndef __BUILD_VERSION" > .build-version.h
-echo "#define __BUILD_VERSION" >> .build-version.h
-echo "#define BTRFS_BUILD_VERSION \"Btrfs $v\"" >> .build-version.h
-echo "#endif" >> .build-version.h
-
-diff -q version.h .build-version.h >& /dev/null
-
-if [ $? == 0 ]; then
-    rm .build-version.h
-    exit 0
-fi
-
-mv .build-version.h version.h
index c7367ae5a3e6d9427add05ebf8a594ae92e81d5b..c48214ef5c09611100590c75f1eed8c8f0547e04 100644 (file)
@@ -38,22 +38,9 @@ static int init_first_rw_device(struct btrfs_trans_handle *trans,
                                struct btrfs_device *device);
 static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
 
-#define map_lookup_size(n) (sizeof(struct map_lookup) + \
-                           (sizeof(struct btrfs_bio_stripe) * (n)))
-
 static DEFINE_MUTEX(uuid_mutex);
 static LIST_HEAD(fs_uuids);
 
-void btrfs_lock_volumes(void)
-{
-       mutex_lock(&uuid_mutex);
-}
-
-void btrfs_unlock_volumes(void)
-{
-       mutex_unlock(&uuid_mutex);
-}
-
 static void lock_chunks(struct btrfs_root *root)
 {
        mutex_lock(&root->fs_info->chunk_mutex);
@@ -363,7 +350,7 @@ static noinline int device_list_add(const char *path,
                INIT_LIST_HEAD(&device->dev_alloc_list);
 
                mutex_lock(&fs_devices->device_list_mutex);
-               list_add(&device->dev_list, &fs_devices->devices);
+               list_add_rcu(&device->dev_list, &fs_devices->devices);
                mutex_unlock(&fs_devices->device_list_mutex);
 
                device->fs_devices = fs_devices;
@@ -406,7 +393,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
        fs_devices->latest_trans = orig->latest_trans;
        memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
 
-       mutex_lock(&orig->device_list_mutex);
+       /* We have held the volume lock, it is safe to get the devices. */
        list_for_each_entry(orig_dev, &orig->devices, dev_list) {
                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device)
@@ -429,10 +416,8 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        }
-       mutex_unlock(&orig->device_list_mutex);
        return fs_devices;
 error:
-       mutex_unlock(&orig->device_list_mutex);
        free_fs_devices(fs_devices);
        return ERR_PTR(-ENOMEM);
 }
@@ -443,7 +428,7 @@ int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
 
        mutex_lock(&uuid_mutex);
 again:
-       mutex_lock(&fs_devices->device_list_mutex);
+       /* This is the initialized path, it is safe to release the devices. */
        list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
                if (device->in_fs_metadata)
                        continue;
@@ -463,7 +448,6 @@ again:
                kfree(device->name);
                kfree(device);
        }
-       mutex_unlock(&fs_devices->device_list_mutex);
 
        if (fs_devices->seed) {
                fs_devices = fs_devices->seed;
@@ -474,6 +458,29 @@ again:
        return 0;
 }
 
+static void __free_device(struct work_struct *work)
+{
+       struct btrfs_device *device;
+
+       device = container_of(work, struct btrfs_device, rcu_work);
+
+       if (device->bdev)
+               blkdev_put(device->bdev, device->mode);
+
+       kfree(device->name);
+       kfree(device);
+}
+
+static void free_device(struct rcu_head *head)
+{
+       struct btrfs_device *device;
+
+       device = container_of(head, struct btrfs_device, rcu);
+
+       INIT_WORK(&device->rcu_work, __free_device);
+       schedule_work(&device->rcu_work);
+}
+
 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
 {
        struct btrfs_device *device;
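
Note that free_device() above does not release the btrfs_device directly from the
RCU callback: it schedules __free_device() on a workqueue, presumably because
blkdev_put() can sleep and an RCU callback runs in a context where blocking is
not allowed. A toy userspace analogue of handing the blocking half of a teardown
to a worker, using a detached pthread (names invented for the sketch; build with
cc -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    struct device {
        int fd;            /* closing this may block, like blkdev_put() */
        char *name;
    };

    static void *device_cleanup_worker(void *arg)
    {
        struct device *dev = arg;
        if (dev->fd >= 0)
            close(dev->fd);        /* the potentially blocking part */
        free(dev->name);
        free(dev);
        return NULL;
    }

    /* Called from a path that must not block: defer the real teardown. */
    static void retire_device(struct device *dev)
    {
        pthread_t worker;
        if (pthread_create(&worker, NULL, device_cleanup_worker, dev) == 0)
            pthread_detach(worker);
        else
            device_cleanup_worker(dev);   /* fall back to inline cleanup */
    }

    int main(void)
    {
        struct device *dev = calloc(1, sizeof(*dev));
        dev->fd = -1;
        dev->name = strdup("toy-device");
        retire_device(dev);
        sleep(1);                  /* let the detached worker finish */
        return 0;
    }
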
@@ -481,20 +488,32 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
        if (--fs_devices->opened > 0)
                return 0;
 
+       mutex_lock(&fs_devices->device_list_mutex);
        list_for_each_entry(device, &fs_devices->devices, dev_list) {
-               if (device->bdev) {
-                       blkdev_put(device->bdev, device->mode);
+               struct btrfs_device *new_device;
+
+               if (device->bdev)
                        fs_devices->open_devices--;
-               }
+
                if (device->writeable) {
                        list_del_init(&device->dev_alloc_list);
                        fs_devices->rw_devices--;
                }
 
-               device->bdev = NULL;
-               device->writeable = 0;
-               device->in_fs_metadata = 0;
+               new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
+               BUG_ON(!new_device);
+               memcpy(new_device, device, sizeof(*new_device));
+               new_device->name = kstrdup(device->name, GFP_NOFS);
+               BUG_ON(!new_device->name);
+               new_device->bdev = NULL;
+               new_device->writeable = 0;
+               new_device->in_fs_metadata = 0;
+               list_replace_rcu(&device->dev_list, &new_device->dev_list);
+
+               call_rcu(&device->rcu, free_device);
        }
+       mutex_unlock(&fs_devices->device_list_mutex);
+
        WARN_ON(fs_devices->open_devices);
        WARN_ON(fs_devices->rw_devices);
        fs_devices->opened = 0;
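
The close loop above never edits a published btrfs_device in place: it builds a
patched copy, swaps the copy into the list with list_replace_rcu(), and hands the
original to call_rcu() so it is freed only after every reader that might still
hold a pointer has left its RCU read-side section. A minimal userspace sketch of
that copy-publish-then-reclaim sequence, assuming the liburcu (Userspace RCU)
library is installed (build with cc file.c -lurcu); the struct and field names
are invented, and the sketch blocks in synchronize_rcu() instead of using an
asynchronous call_rcu() callback, which keeps it short:

    #include <urcu.h>            /* rcu_register_thread, rcu_read_lock, synchronize_rcu */
    #include <urcu/rculist.h>    /* cds_list_*_rcu helpers */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct dev {
        struct cds_list_head list;
        char name[16];
        int writeable;
    };

    static CDS_LIST_HEAD(dev_list);

    int main(void)
    {
        rcu_register_thread();

        struct dev *d = calloc(1, sizeof(*d));
        snprintf(d->name, sizeof(d->name), "toy0");
        d->writeable = 1;
        cds_list_add_rcu(&d->list, &dev_list);

        /* Reader side: walk the list inside an RCU read-side section. */
        struct dev *it;
        rcu_read_lock();
        cds_list_for_each_entry_rcu(it, &dev_list, list)
            printf("%s writeable=%d\n", it->name, it->writeable);
        rcu_read_unlock();

        /* Writer side: publish an updated copy, then retire the old
         * object only after a grace period; never patch it in place. */
        struct dev *copy = malloc(sizeof(*copy));
        memcpy(copy, d, sizeof(*copy));
        copy->writeable = 0;
        cds_list_add_rcu(&copy->list, &dev_list);
        cds_list_del_rcu(&d->list);
        synchronize_rcu();        /* wait out readers that may still see 'd' */
        free(d);

        rcu_unregister_thread();
        return 0;
    }
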
@@ -597,6 +616,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                        list_add(&device->dev_alloc_list,
                                 &fs_devices->alloc_list);
                }
+               brelse(bh);
                continue;
 
 error_brelse:
@@ -815,10 +835,7 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans,
        /* we don't want to overwrite the superblock on the drive,
         * so we make sure to start at an offset of at least 1MB
         */
-       search_start = 1024 * 1024;
-
-       if (root->fs_info->alloc_start + num_bytes <= search_end)
-               search_start = max(root->fs_info->alloc_start, search_start);
+       search_start = max(root->fs_info->alloc_start, 1024ull * 1024);
 
        max_hole_start = search_start;
        max_hole_size = 0;
@@ -949,14 +966,14 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid,
                                          BTRFS_DEV_EXTENT_KEY);
-               BUG_ON(ret);
+               if (ret)
+                       goto out;
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_dev_extent);
                BUG_ON(found_key.offset > start || found_key.offset +
                       btrfs_dev_extent_length(leaf, extent) < start);
-               ret = 0;
        } else if (ret == 0) {
                leaf = path->nodes[0];
                extent = btrfs_item_ptr(leaf, path->slots[0],
@@ -967,8 +984,8 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
        if (device->bytes_used > 0)
                device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
        ret = btrfs_del_item(trans, root, path);
-       BUG_ON(ret);
 
+out:
        btrfs_free_path(path);
        return ret;
 }
@@ -1203,11 +1220,13 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
        struct block_device *bdev;
        struct buffer_head *bh = NULL;
        struct btrfs_super_block *disk_super;
+       struct btrfs_fs_devices *cur_devices;
        u64 all_avail;
        u64 devid;
        u64 num_devices;
        u8 *dev_uuid;
        int ret = 0;
+       bool clear_super = false;
 
        mutex_lock(&uuid_mutex);
        mutex_lock(&root->fs_info->volume_mutex);
@@ -1238,14 +1257,16 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 
                device = NULL;
                devices = &root->fs_info->fs_devices->devices;
-               mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+               /*
+                * It is safe to read the devices since the volume_mutex
+                * is held.
+                */
                list_for_each_entry(tmp, devices, dev_list) {
                        if (tmp->in_fs_metadata && !tmp->bdev) {
                                device = tmp;
                                break;
                        }
                }
-               mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
                bdev = NULL;
                bh = NULL;
                disk_super = NULL;
@@ -1287,8 +1308,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
        }
 
        if (device->writeable) {
+               lock_chunks(root);
                list_del_init(&device->dev_alloc_list);
+               unlock_chunks(root);
                root->fs_info->fs_devices->rw_devices--;
+               clear_super = true;
        }
 
        ret = btrfs_shrink_device(device, 0);
@@ -1300,15 +1324,17 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
                goto error_undo;
 
        device->in_fs_metadata = 0;
+       btrfs_scrub_cancel_dev(root, device);
 
        /*
         * the device list mutex makes sure that we don't change
         * the device list while someone else is writing out all
         * the device supers.
         */
+
+       cur_devices = device->fs_devices;
        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
-       list_del_init(&device->dev_list);
-       mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+       list_del_rcu(&device->dev_list);
 
        device->fs_devices->num_devices--;
 
@@ -1322,34 +1348,36 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
        if (device->bdev == root->fs_info->fs_devices->latest_bdev)
                root->fs_info->fs_devices->latest_bdev = next_device->bdev;
 
-       if (device->bdev) {
-               blkdev_put(device->bdev, device->mode);
-               device->bdev = NULL;
+       if (device->bdev)
                device->fs_devices->open_devices--;
-       }
+
+       call_rcu(&device->rcu, free_device);
+       mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
 
        num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
        btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);
 
-       if (device->fs_devices->open_devices == 0) {
+       if (cur_devices->open_devices == 0) {
                struct btrfs_fs_devices *fs_devices;
                fs_devices = root->fs_info->fs_devices;
                while (fs_devices) {
-                       if (fs_devices->seed == device->fs_devices)
+                       if (fs_devices->seed == cur_devices)
                                break;
                        fs_devices = fs_devices->seed;
                }
-               fs_devices->seed = device->fs_devices->seed;
-               device->fs_devices->seed = NULL;
-               __btrfs_close_devices(device->fs_devices);
-               free_fs_devices(device->fs_devices);
+               fs_devices->seed = cur_devices->seed;
+               cur_devices->seed = NULL;
+               lock_chunks(root);
+               __btrfs_close_devices(cur_devices);
+               unlock_chunks(root);
+               free_fs_devices(cur_devices);
        }
 
        /*
         * at this point, the device is zero sized.  We want to
         * remove it from the devices list and zero out the old super
         */
-       if (device->writeable) {
+       if (clear_super) {
                /* make sure this device isn't detected as part of
                 * the FS anymore
                 */
@@ -1358,8 +1386,6 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
                sync_dirty_buffer(bh);
        }
 
-       kfree(device->name);
-       kfree(device);
        ret = 0;
 
 error_brelse:
@@ -1373,8 +1399,10 @@ out:
        return ret;
 error_undo:
        if (device->writeable) {
+               lock_chunks(root);
                list_add(&device->dev_alloc_list,
                         &root->fs_info->fs_devices->alloc_list);
+               unlock_chunks(root);
                root->fs_info->fs_devices->rw_devices++;
        }
        goto error_brelse;
@@ -1414,7 +1442,12 @@ static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
        INIT_LIST_HEAD(&seed_devices->devices);
        INIT_LIST_HEAD(&seed_devices->alloc_list);
        mutex_init(&seed_devices->device_list_mutex);
-       list_splice_init(&fs_devices->devices, &seed_devices->devices);
+
+       mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+       list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
+                             synchronize_rcu);
+       mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+
        list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
        list_for_each_entry(device, &seed_devices->devices, dev_list) {
                device->fs_devices = seed_devices;
@@ -1475,7 +1508,7 @@ next_slot:
                                goto error;
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        continue;
                }
 
@@ -1611,7 +1644,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
         * half setup
         */
        mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
-       list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
+       list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices);
        list_add(&device->dev_alloc_list,
                 &root->fs_info->fs_devices->alloc_list);
        root->fs_info->fs_devices->num_devices++;
@@ -1769,10 +1802,9 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
        BUG_ON(ret);
 
        ret = btrfs_del_item(trans, root, path);
-       BUG_ON(ret);
 
        btrfs_free_path(path);
-       return 0;
+       return ret;
 }
 
 static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
@@ -1947,7 +1979,7 @@ again:
                chunk = btrfs_item_ptr(leaf, path->slots[0],
                                       struct btrfs_chunk);
                chunk_type = btrfs_chunk_type(leaf, chunk);
-               btrfs_release_path(chunk_root, path);
+               btrfs_release_path(path);
 
                if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
                        ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
@@ -2065,7 +2097,7 @@ int btrfs_balance(struct btrfs_root *dev_root)
                if (found_key.offset == 0)
                        break;
 
-               btrfs_release_path(chunk_root, path);
+               btrfs_release_path(path);
                ret = btrfs_relocate_chunk(chunk_root,
                                           chunk_root->root_key.objectid,
                                           found_key.objectid,
@@ -2137,7 +2169,7 @@ again:
                        goto done;
                if (ret) {
                        ret = 0;
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        break;
                }
 
@@ -2146,7 +2178,7 @@ again:
                btrfs_item_key_to_cpu(l, &key, path->slots[0]);
 
                if (key.objectid != device->devid) {
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        break;
                }
 
@@ -2154,14 +2186,14 @@ again:
                length = btrfs_dev_extent_length(l, dev_extent);
 
                if (key.offset + length <= new_size) {
-                       btrfs_release_path(root, path);
+                       btrfs_release_path(path);
                        break;
                }
 
                chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
                chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
                chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
 
                ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
                                           chunk_offset);
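
The large hunk below replaces the old per-RAID-level stripe sizing helpers with a
single allocator that records a btrfs_device_info entry for each writable device
and then sorts the array with btrfs_cmp_device_info, a two-key descending
comparison on max_avail and, on ties, total_avail, so the devices with the
biggest holes come first. A self-contained userspace analogue of that comparator,
using qsort and invented field values:

    #include <stdio.h>
    #include <stdlib.h>

    struct dev_info {
        unsigned long long max_avail;     /* largest free extent on the device */
        unsigned long long total_avail;   /* total unallocated bytes */
        const char *name;
    };

    /* Descending by max_avail, ties broken descending by total_avail. */
    static int cmp_dev_info(const void *a, const void *b)
    {
        const struct dev_info *da = a;
        const struct dev_info *db = b;

        if (da->max_avail > db->max_avail)
            return -1;
        if (da->max_avail < db->max_avail)
            return 1;
        if (da->total_avail > db->total_avail)
            return -1;
        if (da->total_avail < db->total_avail)
            return 1;
        return 0;
    }

    int main(void)
    {
        struct dev_info devs[] = {
            { 1ULL << 30, 4ULL << 30, "sdb" },
            { 2ULL << 30, 3ULL << 30, "sdc" },
            { 1ULL << 30, 8ULL << 30, "sdd" },
        };

        qsort(devs, 3, sizeof(devs[0]), cmp_dev_info);

        for (int i = 0; i < 3; i++)
            printf("%s max=%llu total=%llu\n", devs[i].name,
                   devs[i].max_avail, devs[i].total_avail);
        return 0;
    }
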
@@ -2237,275 +2269,204 @@ static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
        return 0;
 }
 
-static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
-                                       int num_stripes, int sub_stripes)
+/*
+ * sort the devices in descending order by max_avail, total_avail
+ */
+static int btrfs_cmp_device_info(const void *a, const void *b)
 {
-       if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
-               return calc_size;
-       else if (type & BTRFS_BLOCK_GROUP_RAID10)
-               return calc_size * (num_stripes / sub_stripes);
-       else
-               return calc_size * num_stripes;
-}
+       const struct btrfs_device_info *di_a = a;
+       const struct btrfs_device_info *di_b = b;
 
-/* Used to sort the devices by max_avail(descending sort) */
-int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2)
-{
-       if (((struct btrfs_device_info *)dev_info1)->max_avail >
-           ((struct btrfs_device_info *)dev_info2)->max_avail)
+       if (di_a->max_avail > di_b->max_avail)
                return -1;
-       else if (((struct btrfs_device_info *)dev_info1)->max_avail <
-                ((struct btrfs_device_info *)dev_info2)->max_avail)
+       if (di_a->max_avail < di_b->max_avail)
                return 1;
-       else
-               return 0;
+       if (di_a->total_avail > di_b->total_avail)
+               return -1;
+       if (di_a->total_avail < di_b->total_avail)
+               return 1;
+       return 0;
 }
 
-static int __btrfs_calc_nstripes(struct btrfs_fs_devices *fs_devices, u64 type,
-                                int *num_stripes, int *min_stripes,
-                                int *sub_stripes)
+static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+                              struct btrfs_root *extent_root,
+                              struct map_lookup **map_ret,
+                              u64 *num_bytes_out, u64 *stripe_size_out,
+                              u64 start, u64 type)
 {
-       *num_stripes = 1;
-       *min_stripes = 1;
-       *sub_stripes = 0;
+       struct btrfs_fs_info *info = extent_root->fs_info;
+       struct btrfs_fs_devices *fs_devices = info->fs_devices;
+       struct list_head *cur;
+       struct map_lookup *map = NULL;
+       struct extent_map_tree *em_tree;
+       struct extent_map *em;
+       struct btrfs_device_info *devices_info = NULL;
+       u64 total_avail;
+       int num_stripes;        /* total number of stripes to allocate */
+       int sub_stripes;        /* sub_stripes info for map */
+       int dev_stripes;        /* stripes per dev */
+       int devs_max;           /* max devs to use */
+       int devs_min;           /* min devs needed */
+       int devs_increment;     /* ndevs has to be a multiple of this */
+       int ncopies;            /* how many copies to data has */
+       int ret;
+       u64 max_stripe_size;
+       u64 max_chunk_size;
+       u64 stripe_size;
+       u64 num_bytes;
+       int ndevs;
+       int i;
+       int j;
 
-       if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
-               *num_stripes = fs_devices->rw_devices;
-               *min_stripes = 2;
-       }
-       if (type & (BTRFS_BLOCK_GROUP_DUP)) {
-               *num_stripes = 2;
-               *min_stripes = 2;
-       }
-       if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
-               if (fs_devices->rw_devices < 2)
-                       return -ENOSPC;
-               *num_stripes = 2;
-               *min_stripes = 2;
-       }
-       if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
-               *num_stripes = fs_devices->rw_devices;
-               if (*num_stripes < 4)
-                       return -ENOSPC;
-               *num_stripes &= ~(u32)1;
-               *sub_stripes = 2;
-               *min_stripes = 4;
+       if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
+           (type & BTRFS_BLOCK_GROUP_DUP)) {
+               WARN_ON(1);
+               type &= ~BTRFS_BLOCK_GROUP_DUP;
        }
 
-       return 0;
-}
+       if (list_empty(&fs_devices->alloc_list))
+               return -ENOSPC;
 
-static u64 __btrfs_calc_stripe_size(struct btrfs_fs_devices *fs_devices,
-                                   u64 proposed_size, u64 type,
-                                   int num_stripes, int small_stripe)
-{
-       int min_stripe_size = 1 * 1024 * 1024;
-       u64 calc_size = proposed_size;
-       u64 max_chunk_size = calc_size;
-       int ncopies = 1;
+       sub_stripes = 1;
+       dev_stripes = 1;
+       devs_increment = 1;
+       ncopies = 1;
+       devs_max = 0;   /* 0 == as many as possible */
+       devs_min = 1;
 
-       if (type & (BTRFS_BLOCK_GROUP_RAID1 |
-                   BTRFS_BLOCK_GROUP_DUP |
-                   BTRFS_BLOCK_GROUP_RAID10))
+       /*
+        * define the properties of each RAID type.
+        * FIXME: move this to a global table and use it in all RAID
+        * calculation code
+        */
+       if (type & (BTRFS_BLOCK_GROUP_DUP)) {
+               dev_stripes = 2;
+               ncopies = 2;
+               devs_max = 1;
+       } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
+               devs_min = 2;
+       } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
+               devs_increment = 2;
                ncopies = 2;
+               devs_max = 2;
+               devs_min = 2;
+       } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
+               sub_stripes = 2;
+               devs_increment = 2;
+               ncopies = 2;
+               devs_min = 4;
+       } else {
+               devs_max = 1;
+       }
 
        if (type & BTRFS_BLOCK_GROUP_DATA) {
-               max_chunk_size = 10 * calc_size;
-               min_stripe_size = 64 * 1024 * 1024;
+               max_stripe_size = 1024 * 1024 * 1024;
+               max_chunk_size = 10 * max_stripe_size;
        } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
-               max_chunk_size = 256 * 1024 * 1024;
-               min_stripe_size = 32 * 1024 * 1024;
+               max_stripe_size = 256 * 1024 * 1024;
+               max_chunk_size = max_stripe_size;
        } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
-               calc_size = 8 * 1024 * 1024;
-               max_chunk_size = calc_size * 2;
-               min_stripe_size = 1 * 1024 * 1024;
+               max_stripe_size = 8 * 1024 * 1024;
+               max_chunk_size = 2 * max_stripe_size;
+       } else {
+               printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
+                      type);
+               BUG_ON(1);
        }
 
        /* we don't want a chunk larger than 10% of writeable space */
        max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
                             max_chunk_size);
 
-       if (calc_size * num_stripes > max_chunk_size * ncopies) {
-               calc_size = max_chunk_size * ncopies;
-               do_div(calc_size, num_stripes);
-               do_div(calc_size, BTRFS_STRIPE_LEN);
-               calc_size *= BTRFS_STRIPE_LEN;
-       }
+       devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
+                              GFP_NOFS);
+       if (!devices_info)
+               return -ENOMEM;
 
-       /* we don't want tiny stripes */
-       if (!small_stripe)
-               calc_size = max_t(u64, min_stripe_size, calc_size);
+       cur = fs_devices->alloc_list.next;
 
        /*
-        * we're about to do_div by the BTRFS_STRIPE_LEN so lets make sure
-        * we end up with something bigger than a stripe
+        * in the first pass through the devices list, we gather information
+        * about the available holes on each device.
         */
-       calc_size = max_t(u64, calc_size, BTRFS_STRIPE_LEN);
-
-       do_div(calc_size, BTRFS_STRIPE_LEN);
-       calc_size *= BTRFS_STRIPE_LEN;
-
-       return calc_size;
-}
-
-static struct map_lookup *__shrink_map_lookup_stripes(struct map_lookup *map,
-                                                     int num_stripes)
-{
-       struct map_lookup *new;
-       size_t len = map_lookup_size(num_stripes);
-
-       BUG_ON(map->num_stripes < num_stripes);
-
-       if (map->num_stripes == num_stripes)
-               return map;
-
-       new = kmalloc(len, GFP_NOFS);
-       if (!new) {
-               /* just change map->num_stripes */
-               map->num_stripes = num_stripes;
-               return map;
-       }
-
-       memcpy(new, map, len);
-       new->num_stripes = num_stripes;
-       kfree(map);
-       return new;
-}
+       ndevs = 0;
+       while (cur != &fs_devices->alloc_list) {
+               struct btrfs_device *device;
+               u64 max_avail;
+               u64 dev_offset;
 
-/*
- * helper to allocate device space from btrfs_device_info, in which we stored
- * max free space information of every device. It is used when we can not
- * allocate chunks by default size.
- *
- * By this helper, we can allocate a new chunk as larger as possible.
- */
-static int __btrfs_alloc_tiny_space(struct btrfs_trans_handle *trans,
-                                   struct btrfs_fs_devices *fs_devices,
-                                   struct btrfs_device_info *devices,
-                                   int nr_device, u64 type,
-                                   struct map_lookup **map_lookup,
-                                   int min_stripes, u64 *stripe_size)
-{
-       int i, index, sort_again = 0;
-       int min_devices = min_stripes;
-       u64 max_avail, min_free;
-       struct map_lookup *map = *map_lookup;
-       int ret;
+               device = list_entry(cur, struct btrfs_device, dev_alloc_list);
 
-       if (nr_device < min_stripes)
-               return -ENOSPC;
+               cur = cur->next;
 
-       btrfs_descending_sort_devices(devices, nr_device);
+               if (!device->writeable) {
+                       printk(KERN_ERR
+                              "btrfs: read-only device in alloc_list\n");
+                       WARN_ON(1);
+                       continue;
+               }
 
-       max_avail = devices[0].max_avail;
-       if (!max_avail)
-               return -ENOSPC;
+               if (!device->in_fs_metadata)
+                       continue;
 
-       for (i = 0; i < nr_device; i++) {
-               /*
-                * if dev_offset = 0, it means the free space of this device
-                * is less than what we need, and we didn't search max avail
-                * extent on this device, so do it now.
+               if (device->total_bytes > device->bytes_used)
+                       total_avail = device->total_bytes - device->bytes_used;
+               else
+                       total_avail = 0;
+               /* avail is off by max(alloc_start, 1MB), but that is the same
+                * for all devices, so it doesn't hurt the sorting later on
                 */
-               if (!devices[i].dev_offset) {
-                       ret = find_free_dev_extent(trans, devices[i].dev,
-                                                  max_avail,
-                                                  &devices[i].dev_offset,
-                                                  &devices[i].max_avail);
-                       if (ret != 0 && ret != -ENOSPC)
-                               return ret;
-                       sort_again = 1;
-               }
-       }
-
-       /* we update the max avail free extent of each devices, sort again */
-       if (sort_again)
-               btrfs_descending_sort_devices(devices, nr_device);
 
-       if (type & BTRFS_BLOCK_GROUP_DUP)
-               min_devices = 1;
+               ret = find_free_dev_extent(trans, device,
+                                          max_stripe_size * dev_stripes,
+                                          &dev_offset, &max_avail);
+               if (ret && ret != -ENOSPC)
+                       goto error;
 
-       if (!devices[min_devices - 1].max_avail)
-               return -ENOSPC;
+               if (ret == 0)
+                       max_avail = max_stripe_size * dev_stripes;
 
-       max_avail = devices[min_devices - 1].max_avail;
-       if (type & BTRFS_BLOCK_GROUP_DUP)
-               do_div(max_avail, 2);
+               if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
+                       continue;
 
-       max_avail = __btrfs_calc_stripe_size(fs_devices, max_avail, type,
-                                            min_stripes, 1);
-       if (type & BTRFS_BLOCK_GROUP_DUP)
-               min_free = max_avail * 2;
-       else
-               min_free = max_avail;
+               devices_info[ndevs].dev_offset = dev_offset;
+               devices_info[ndevs].max_avail = max_avail;
+               devices_info[ndevs].total_avail = total_avail;
+               devices_info[ndevs].dev = device;
+               ++ndevs;
+       }
 
-       if (min_free > devices[min_devices - 1].max_avail)
-               return -ENOSPC;
+       /*
+        * now sort the devices by hole size / available space
+        */
+       sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
+            btrfs_cmp_device_info, NULL);
 
-       map = __shrink_map_lookup_stripes(map, min_stripes);
-       *stripe_size = max_avail;
+       /* round down to number of usable stripes */
+       ndevs -= ndevs % devs_increment;
 
-       index = 0;
-       for (i = 0; i < min_stripes; i++) {
-               map->stripes[i].dev = devices[index].dev;
-               map->stripes[i].physical = devices[index].dev_offset;
-               if (type & BTRFS_BLOCK_GROUP_DUP) {
-                       i++;
-                       map->stripes[i].dev = devices[index].dev;
-                       map->stripes[i].physical = devices[index].dev_offset +
-                                                  max_avail;
-               }
-               index++;
+       if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) {
+               ret = -ENOSPC;
+               goto error;
        }
-       *map_lookup = map;
 
-       return 0;
-}
-
-static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
-                              struct btrfs_root *extent_root,
-                              struct map_lookup **map_ret,
-                              u64 *num_bytes, u64 *stripe_size,
-                              u64 start, u64 type)
-{
-       struct btrfs_fs_info *info = extent_root->fs_info;
-       struct btrfs_device *device = NULL;
-       struct btrfs_fs_devices *fs_devices = info->fs_devices;
-       struct list_head *cur;
-       struct map_lookup *map;
-       struct extent_map_tree *em_tree;
-       struct extent_map *em;
-       struct btrfs_device_info *devices_info;
-       struct list_head private_devs;
-       u64 calc_size = 1024 * 1024 * 1024;
-       u64 min_free;
-       u64 avail;
-       u64 dev_offset;
-       int num_stripes;
-       int min_stripes;
-       int sub_stripes;
-       int min_devices;        /* the min number of devices we need */
-       int i;
-       int ret;
-       int index;
+       if (devs_max && ndevs > devs_max)
+               ndevs = devs_max;
+       /*
+        * the primary goal is to maximize the number of stripes, so use as many
+        * devices as possible, even if the stripes are not maximum sized.
+        */
+       stripe_size = devices_info[ndevs-1].max_avail;
+       num_stripes = ndevs * dev_stripes;
 
-       if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
-           (type & BTRFS_BLOCK_GROUP_DUP)) {
-               WARN_ON(1);
-               type &= ~BTRFS_BLOCK_GROUP_DUP;
+       if (stripe_size * num_stripes > max_chunk_size * ncopies) {
+               stripe_size = max_chunk_size * ncopies;
+               do_div(stripe_size, num_stripes);
        }
-       if (list_empty(&fs_devices->alloc_list))
-               return -ENOSPC;
-
-       ret = __btrfs_calc_nstripes(fs_devices, type, &num_stripes,
-                                   &min_stripes, &sub_stripes);
-       if (ret)
-               return ret;
 
-       devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices,
-                              GFP_NOFS);
-       if (!devices_info)
-               return -ENOMEM;
+       do_div(stripe_size, dev_stripes);
+       do_div(stripe_size, BTRFS_STRIPE_LEN);
+       stripe_size *= BTRFS_STRIPE_LEN;
 
        map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
        if (!map) {
@@ -2514,85 +2475,12 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
        }
        map->num_stripes = num_stripes;
 
-       cur = fs_devices->alloc_list.next;
-       index = 0;
-       i = 0;
-
-       calc_size = __btrfs_calc_stripe_size(fs_devices, calc_size, type,
-                                            num_stripes, 0);
-
-       if (type & BTRFS_BLOCK_GROUP_DUP) {
-               min_free = calc_size * 2;
-               min_devices = 1;
-       } else {
-               min_free = calc_size;
-               min_devices = min_stripes;
-       }
-
-       INIT_LIST_HEAD(&private_devs);
-       while (index < num_stripes) {
-               device = list_entry(cur, struct btrfs_device, dev_alloc_list);
-               BUG_ON(!device->writeable);
-               if (device->total_bytes > device->bytes_used)
-                       avail = device->total_bytes - device->bytes_used;
-               else
-                       avail = 0;
-               cur = cur->next;
-
-               if (device->in_fs_metadata && avail >= min_free) {
-                       ret = find_free_dev_extent(trans, device, min_free,
-                                                  &devices_info[i].dev_offset,
-                                                  &devices_info[i].max_avail);
-                       if (ret == 0) {
-                               list_move_tail(&device->dev_alloc_list,
-                                              &private_devs);
-                               map->stripes[index].dev = device;
-                               map->stripes[index].physical =
-                                               devices_info[i].dev_offset;
-                               index++;
-                               if (type & BTRFS_BLOCK_GROUP_DUP) {
-                                       map->stripes[index].dev = device;
-                                       map->stripes[index].physical =
-                                               devices_info[i].dev_offset +
-                                               calc_size;
-                                       index++;
-                               }
-                       } else if (ret != -ENOSPC)
-                               goto error;
-
-                       devices_info[i].dev = device;
-                       i++;
-               } else if (device->in_fs_metadata &&
-                          avail >= BTRFS_STRIPE_LEN) {
-                       devices_info[i].dev = device;
-                       devices_info[i].max_avail = avail;
-                       i++;
-               }
-
-               if (cur == &fs_devices->alloc_list)
-                       break;
-       }
-
-       list_splice(&private_devs, &fs_devices->alloc_list);
-       if (index < num_stripes) {
-               if (index >= min_stripes) {
-                       num_stripes = index;
-                       if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
-                               num_stripes /= sub_stripes;
-                               num_stripes *= sub_stripes;
-                       }
-
-                       map = __shrink_map_lookup_stripes(map, num_stripes);
-               } else if (i >= min_devices) {
-                       ret = __btrfs_alloc_tiny_space(trans, fs_devices,
-                                                      devices_info, i, type,
-                                                      &map, min_stripes,
-                                                      &calc_size);
-                       if (ret)
-                               goto error;
-               } else {
-                       ret = -ENOSPC;
-                       goto error;
+       for (i = 0; i < ndevs; ++i) {
+               for (j = 0; j < dev_stripes; ++j) {
+                       int s = i * dev_stripes + j;
+                       map->stripes[s].dev = devices_info[i].dev;
+                       map->stripes[s].physical = devices_info[i].dev_offset +
+                                                  j * stripe_size;
                }
        }
        map->sector_size = extent_root->sectorsize;
@@ -2603,20 +2491,21 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
        map->sub_stripes = sub_stripes;
 
        *map_ret = map;
-       *stripe_size = calc_size;
-       *num_bytes = chunk_bytes_by_type(type, calc_size,
-                                        map->num_stripes, sub_stripes);
+       num_bytes = stripe_size * (num_stripes / ncopies);
 
-       trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes);
+       *stripe_size_out = stripe_size;
+       *num_bytes_out = num_bytes;
 
-       em = alloc_extent_map(GFP_NOFS);
+       trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes);
+
+       em = alloc_extent_map();
        if (!em) {
                ret = -ENOMEM;
                goto error;
        }
        em->bdev = (struct block_device *)map;
        em->start = start;
-       em->len = *num_bytes;
+       em->len = num_bytes;
        em->block_start = 0;
        em->block_len = em->len;
 
@@ -2629,20 +2518,21 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 
        ret = btrfs_make_block_group(trans, extent_root, 0, type,
                                     BTRFS_FIRST_CHUNK_TREE_OBJECTID,
-                                    start, *num_bytes);
+                                    start, num_bytes);
        BUG_ON(ret);
 
-       index = 0;
-       while (index < map->num_stripes) {
-               device = map->stripes[index].dev;
-               dev_offset = map->stripes[index].physical;
+       for (i = 0; i < map->num_stripes; ++i) {
+               struct btrfs_device *device;
+               u64 dev_offset;
+
+               device = map->stripes[i].dev;
+               dev_offset = map->stripes[i].physical;
 
                ret = btrfs_alloc_dev_extent(trans, device,
                                info->chunk_root->root_key.objectid,
                                BTRFS_FIRST_CHUNK_TREE_OBJECTID,
-                               start, dev_offset, calc_size);
+                               start, dev_offset, stripe_size);
                BUG_ON(ret);
-               index++;
        }
 
        kfree(devices_info);
@@ -2849,7 +2739,7 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
 
 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
 {
-       extent_map_tree_init(&tree->map_tree, GFP_NOFS);
+       extent_map_tree_init(&tree->map_tree);
 }
 
 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
@@ -3499,7 +3389,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                free_extent_map(em);
        }
 
-       em = alloc_extent_map(GFP_NOFS);
+       em = alloc_extent_map();
        if (!em)
                return -ENOMEM;
        num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
@@ -3688,15 +3578,6 @@ static int read_one_dev(struct btrfs_root *root,
        return ret;
 }
 
-int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
-{
-       struct btrfs_dev_item *dev_item;
-
-       dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
-                                                    dev_item);
-       return read_one_dev(root, buf, dev_item);
-}
-
 int btrfs_read_sys_array(struct btrfs_root *root)
 {
        struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
@@ -3813,7 +3694,7 @@ again:
        }
        if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
                key.objectid = 0;
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
                goto again;
        }
        ret = 0;
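The rewritten __btrfs_alloc_chunk() above sizes a chunk by collecting per-device free space into devices_info, sorting it, letting the smallest hole among the selected devices bound the stripe, clamping to the per-chunk limit, and rounding down to the stripe length. A small standalone sketch of that arithmetic follows; the device sizes, the comparator and the 64 KiB STRIPE_LEN value are illustrative stand-ins, not the kernel definitions.

#include <stdio.h>
#include <stdlib.h>

#define STRIPE_LEN (64 * 1024ULL)	/* assumed stripe length for this sketch */

struct dev_info { unsigned long long max_avail; };

static int cmp_desc(const void *a, const void *b)
{
	const struct dev_info *da = a, *db = b;

	if (da->max_avail == db->max_avail)
		return 0;
	return da->max_avail < db->max_avail ? 1 : -1;	/* descending */
}

int main(void)
{
	struct dev_info devs[] = { { 9 << 20 }, { 5 << 20 }, { 7 << 20 } };
	int ndevs = 3, dev_stripes = 1, ncopies = 1;
	unsigned long long max_chunk_size = 10ULL << 20;
	unsigned long long stripe_size, num_stripes;

	/* sort by available hole size, largest first */
	qsort(devs, ndevs, sizeof(devs[0]), cmp_desc);

	/* the smallest hole among the chosen devices bounds the stripe */
	stripe_size = devs[ndevs - 1].max_avail;
	num_stripes = (unsigned long long)ndevs * dev_stripes;

	/* respect the per-chunk size limit */
	if (stripe_size * num_stripes > max_chunk_size * ncopies)
		stripe_size = max_chunk_size * ncopies / num_stripes;

	/* round down to whole stripe-length units per device stripe */
	stripe_size /= dev_stripes;
	stripe_size = stripe_size / STRIPE_LEN * STRIPE_LEN;

	printf("stripe_size=%llu num_stripes=%llu\n", stripe_size, num_stripes);
	return 0;
}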
index cc2eadaf7a27b996af24eaea258b14c5c5c1cfe0..7c12d61ae7aed7936d07886d8abfb4a8028dea4b 100644 (file)
@@ -85,7 +85,12 @@ struct btrfs_device {
        /* physical drive uuid (or lvm uuid) */
        u8 uuid[BTRFS_UUID_SIZE];
 
+       /* per-device scrub information */
+       struct scrub_dev *scrub_device;
+
        struct btrfs_work work;
+       struct rcu_head rcu;
+       struct work_struct rcu_work;
 };
 
 struct btrfs_fs_devices {
@@ -144,6 +149,7 @@ struct btrfs_device_info {
        struct btrfs_device *dev;
        u64 dev_offset;
        u64 max_avail;
+       u64 total_avail;
 };
 
 struct map_lookup {
@@ -157,20 +163,8 @@ struct map_lookup {
        struct btrfs_bio_stripe stripes[];
 };
 
-/* Used to sort the devices by max_avail(descending sort) */
-int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2);
-
-/*
- * sort the devices by max_avail, in which max free extent size of each device
- * is stored.(Descending Sort)
- */
-static inline void btrfs_descending_sort_devices(
-                                       struct btrfs_device_info *devices,
-                                       size_t nr_devices)
-{
-       sort(devices, nr_devices, sizeof(struct btrfs_device_info),
-            btrfs_cmp_device_free_bytes, NULL);
-}
+#define map_lookup_size(n) (sizeof(struct map_lookup) + \
+                           (sizeof(struct btrfs_bio_stripe) * (n)))
 
 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
                                   u64 end, u64 *length);
@@ -196,7 +190,6 @@ void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
                  int mirror_num, int async_submit);
-int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf);
 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                       fmode_t flags, void *holder);
 int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
@@ -209,8 +202,6 @@ int btrfs_add_device(struct btrfs_trans_handle *trans,
 int btrfs_rm_device(struct btrfs_root *root, char *device_path);
 int btrfs_cleanup_fs_uuids(void);
 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len);
-int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
-                     u64 logical, struct page *page);
 int btrfs_grow_device(struct btrfs_trans_handle *trans,
                      struct btrfs_device *device, u64 new_size);
 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
@@ -218,8 +209,6 @@ struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
 int btrfs_init_new_device(struct btrfs_root *root, char *path);
 int btrfs_balance(struct btrfs_root *dev_root);
-void btrfs_unlock_volumes(void);
-void btrfs_lock_volumes(void);
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
 int find_free_dev_extent(struct btrfs_trans_handle *trans,
                         struct btrfs_device *device, u64 num_bytes,
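The map_lookup_size() macro that replaces the removed sort helpers above is the usual header-plus-flexible-array sizing pattern for struct map_lookup. A minimal standalone illustration with made-up stand-in types (struct map_stub and struct stripe_stub are not btrfs definitions):

#include <stdio.h>
#include <stdlib.h>

struct stripe_stub { unsigned long long physical; };

struct map_stub {
	int num_stripes;
	struct stripe_stub stripes[];	/* flexible array member */
};

#define map_stub_size(n) (sizeof(struct map_stub) + \
			  (sizeof(struct stripe_stub) * (n)))

int main(void)
{
	int n = 4;
	struct map_stub *map = calloc(1, map_stub_size(n));

	if (!map)
		return 1;
	map->num_stripes = n;
	printf("allocated %zu bytes for %d stripes\n", map_stub_size(n), n);
	free(map);
	return 0;
}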
index cfd660550ded035fd2fad7aadce102aa23fc27fb..f3107e4b4d56a3d4b31a5f889ff321328e881978 100644 (file)
@@ -44,7 +44,7 @@ ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
                return -ENOMEM;
 
        /* lookup the xattr by name */
-       di = btrfs_lookup_xattr(NULL, root, path, inode->i_ino, name,
+       di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name,
                                strlen(name), 0);
        if (!di) {
                ret = -ENODATA;
@@ -103,7 +103,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
                return -ENOMEM;
 
        /* first lets see if we already have this xattr */
-       di = btrfs_lookup_xattr(trans, root, path, inode->i_ino, name,
+       di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name,
                                strlen(name), -1);
        if (IS_ERR(di)) {
                ret = PTR_ERR(di);
@@ -120,13 +120,13 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
 
                ret = btrfs_delete_one_dir_name(trans, root, path, di);
                BUG_ON(ret);
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
 
                /* if we don't have a value then we are removing the xattr */
                if (!value)
                        goto out;
        } else {
-               btrfs_release_path(root, path);
+               btrfs_release_path(path);
 
                if (flags & XATTR_REPLACE) {
                        /* we couldn't find the attr to replace */
@@ -136,7 +136,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
        }
 
        /* ok we have to create a completely new xattr */
-       ret = btrfs_insert_xattr_item(trans, root, path, inode->i_ino,
+       ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
                                      name, name_len, value, size);
        BUG_ON(ret);
 out:
@@ -190,7 +190,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
         * NOTE: we set key.offset = 0; because we want to start with the
         * first xattr that we find and walk forward
         */
-       key.objectid = inode->i_ino;
+       key.objectid = btrfs_ino(inode);
        btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
        key.offset = 0;
 
index a08bb8e61c6fc275376c9a0748226deb34a6b0f0..698c6b2cc462ab067debcab84f77cffcb785a9cd 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/bitops.h>
 #include <linux/mpage.h>
 #include <linux/bit_spinlock.h>
+#include <linux/cleancache.h>
 
 static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
 
@@ -269,6 +270,10 @@ void invalidate_bdev(struct block_device *bdev)
        invalidate_bh_lrus();
        lru_add_drain_all();    /* make sure all lru add caches are flushed */
        invalidate_mapping_pages(mapping, 0, -1);
+       /* 99% of the time, we don't need to flush the cleancache on the bdev.
+        * But, for the strange corners, let's be cautious
+        */
+       cleancache_flush_inode(mapping);
 }
 EXPORT_SYMBOL(invalidate_bdev);
 
@@ -2331,24 +2336,26 @@ EXPORT_SYMBOL(block_commit_write);
  * page lock we can determine safely if the page is beyond EOF. If it is not
  * beyond EOF, then the page is guaranteed safe against truncation until we
  * unlock the page.
+ *
+ * Direct callers of this function should call vfs_check_frozen() so that page
+ * fault does not busyloop until the fs is thawed.
  */
-int
-block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
-                  get_block_t get_block)
+int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+                        get_block_t get_block)
 {
        struct page *page = vmf->page;
        struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
        unsigned long end;
        loff_t size;
-       int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
+       int ret;
 
        lock_page(page);
        size = i_size_read(inode);
        if ((page->mapping != inode->i_mapping) ||
            (page_offset(page) > size)) {
-               /* page got truncated out from underneath us */
-               unlock_page(page);
-               goto out;
+               /* We overload EFAULT to mean page got truncated */
+               ret = -EFAULT;
+               goto out_unlock;
        }
 
        /* page is wholly or partially inside EOF */
@@ -2361,18 +2368,41 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
        if (!ret)
                ret = block_commit_write(page, 0, end);
 
-       if (unlikely(ret)) {
-               unlock_page(page);
-               if (ret == -ENOMEM)
-                       ret = VM_FAULT_OOM;
-               else /* -ENOSPC, -EIO, etc */
-                       ret = VM_FAULT_SIGBUS;
-       } else
-               ret = VM_FAULT_LOCKED;
-
-out:
+       if (unlikely(ret < 0))
+               goto out_unlock;
+       /*
+        * Freezing in progress? We check after the page is marked dirty and
+        * with page lock held so if the test here fails, we are sure freezing
+        * code will wait during syncing until the page fault is done - at that
+        * point page will be dirty and unlocked so freezing code will write it
+        * and writeprotect it again.
+        */
+       set_page_dirty(page);
+       if (inode->i_sb->s_frozen != SB_UNFROZEN) {
+               ret = -EAGAIN;
+               goto out_unlock;
+       }
+       return 0;
+out_unlock:
+       unlock_page(page);
        return ret;
 }
+EXPORT_SYMBOL(__block_page_mkwrite);
+
+int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+                  get_block_t get_block)
+{
+       int ret;
+       struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
+
+       /*
+        * This check is racy but catches the common case. The check in
+        * __block_page_mkwrite() is reliable.
+        */
+       vfs_check_frozen(sb, SB_FREEZE_WRITE);
+       ret = __block_page_mkwrite(vma, vmf, get_block);
+       return block_page_mkwrite_return(ret);
+}
 EXPORT_SYMBOL(block_page_mkwrite);
 
 /*
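The buffer.c change above splits block_page_mkwrite() so that filesystems can call __block_page_mkwrite() directly and handle freezing themselves. A hedged sketch of a fault handler built on that pattern; myfs_page_mkwrite() and myfs_get_block() are hypothetical names, not part of this patch, and the get_block stub does no real mapping.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>

/* placeholder: a real filesystem would map iblock to a disk block here */
static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	return -EIO;
}

static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int ret;

	/* racy check so a fault does not busy-loop while the fs is frozen */
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	ret = __block_page_mkwrite(vma, vmf, myfs_get_block);

	/* 0, -EAGAIN, -EFAULT, -ENOMEM, ... become VM_FAULT_* codes */
	return block_page_mkwrite_return(ret);
}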
index 75c47cd8d086eb0e0466ca21cce0658d5eed8edb..1cd4c3a1862d72491432bd4a017782e78b331dcd 100644 (file)
@@ -153,26 +153,6 @@ config CIFS_ACL
            Allows to fetch CIFS/NTFS ACL from the server.  The DACL blob
            is handed over to the application/caller.
 
-config CIFS_SMB2
-       bool "SMB2 network file system support (EXPERIMENTAL)"
-       depends on EXPERIMENTAL && INET && BROKEN
-       select NLS
-       select KEYS
-       select FSCACHE
-       select DNS_RESOLVER
-
-       help
-         This enables experimental support for the SMB2 (Server Message Block
-         version 2) protocol. The SMB2 protocol is the successor to the
-         popular CIFS and SMB network file sharing protocols. SMB2 is the
-         native file sharing mechanism for recent versions of Windows
-         operating systems (since Vista).  SMB2 enablement will eventually
-         allow users better performance, security and features, than would be
-         possible with cifs. Note that smb2 mount options also are simpler
-         (compared to cifs) due to protocol improvements.
-
-         Unless you are a developer or tester, say N.
-
 config CIFS_NFSD_EXPORT
          bool "Allow nfsd to export CIFS file system (EXPERIMENTAL)"
          depends on CIFS && EXPERIMENTAL
index 4a3ca0e5ca24d3ad6445dfa73a5a76dc73e590c0..c5c2c5e5f0f296f4df15bac07163ef693105947a 100644 (file)
@@ -457,6 +457,9 @@ A partial list of the supported mount options follows:
                otherwise - read from the server. All written data are stored
                in the cache, but if the client doesn't have Exclusive Oplock,
                it writes the data to the server.
+  rwpidforward  Forward the pid of the process that opened a file to any read
+               or write operation on that file. This prevents applications like
+               WINE from failing on read and write if we use mandatory brlock style.
   acl          Allow setfacl and getfacl to manage posix ACLs if server
                supports them.  (default)
   noacl        Do not allow setfacl and getfacl calls on this mount
index 53d57a3fe427c3d94218a7f54b4ab87df3d68031..dd8584d35a14df875e09b4716f041d87f028e70a 100644 (file)
@@ -146,7 +146,7 @@ static char *extract_sharename(const char *treename)
 static uint16_t cifs_super_get_key(const void *cookie_netfs_data, void *buffer,
                                   uint16_t maxbuf)
 {
-       const struct cifsTconInfo *tcon = cookie_netfs_data;
+       const struct cifs_tcon *tcon = cookie_netfs_data;
        char *sharename;
        uint16_t len;
 
@@ -173,7 +173,7 @@ cifs_fscache_super_get_aux(const void *cookie_netfs_data, void *buffer,
                           uint16_t maxbuf)
 {
        struct cifs_fscache_super_auxdata auxdata;
-       const struct cifsTconInfo *tcon = cookie_netfs_data;
+       const struct cifs_tcon *tcon = cookie_netfs_data;
 
        memset(&auxdata, 0, sizeof(auxdata));
        auxdata.resource_id = tcon->resource_id;
@@ -192,7 +192,7 @@ fscache_checkaux cifs_fscache_super_check_aux(void *cookie_netfs_data,
                                              uint16_t datalen)
 {
        struct cifs_fscache_super_auxdata auxdata;
-       const struct cifsTconInfo *tcon = cookie_netfs_data;
+       const struct cifs_tcon *tcon = cookie_netfs_data;
 
        if (datalen != sizeof(auxdata))
                return FSCACHE_CHECKAUX_OBSOLETE;
index 18f4272d9047aa75046c57ac833f908b775df103..2fe3cf13b2e92b968221703f0d462a94c834ca29 100644 (file)
@@ -110,8 +110,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
        struct list_head *tmp1, *tmp2, *tmp3;
        struct mid_q_entry *mid_entry;
        struct TCP_Server_Info *server;
-       struct cifsSesInfo *ses;
-       struct cifsTconInfo *tcon;
+       struct cifs_ses *ses;
+       struct cifs_tcon *tcon;
        int i, j;
        __u32 dev_type;
 
@@ -152,7 +152,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
                                    tcp_ses_list);
                i++;
                list_for_each(tmp2, &server->smb_ses_list) {
-                       ses = list_entry(tmp2, struct cifsSesInfo,
+                       ses = list_entry(tmp2, struct cifs_ses,
                                         smb_ses_list);
                        if ((ses->serverDomain == NULL) ||
                                (ses->serverOS == NULL) ||
@@ -171,7 +171,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
                        seq_printf(m, "TCP status: %d\n\tLocal Users To "
                                   "Server: %d SecMode: 0x%x Req On Wire: %d",
                                   server->tcpStatus, server->srv_count,
-                                  server->secMode,
+                                  server->sec_mode,
                                   atomic_read(&server->inFlight));
 
 #ifdef CONFIG_CIFS_STATS2
@@ -183,7 +183,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
                        seq_puts(m, "\n\tShares:");
                        j = 0;
                        list_for_each(tmp3, &ses->tcon_list) {
-                               tcon = list_entry(tmp3, struct cifsTconInfo,
+                               tcon = list_entry(tmp3, struct cifs_tcon,
                                                  tcon_list);
                                ++j;
                                dev_type = le32_to_cpu(tcon->fsDevInfo.DeviceType);
@@ -256,8 +256,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
        int rc;
        struct list_head *tmp1, *tmp2, *tmp3;
        struct TCP_Server_Info *server;
-       struct cifsSesInfo *ses;
-       struct cifsTconInfo *tcon;
+       struct cifs_ses *ses;
+       struct cifs_tcon *tcon;
 
        rc = get_user(c, buffer);
        if (rc)
@@ -273,11 +273,11 @@ static ssize_t cifs_stats_proc_write(struct file *file,
                        server = list_entry(tmp1, struct TCP_Server_Info,
                                            tcp_ses_list);
                        list_for_each(tmp2, &server->smb_ses_list) {
-                               ses = list_entry(tmp2, struct cifsSesInfo,
+                               ses = list_entry(tmp2, struct cifs_ses,
                                                 smb_ses_list);
                                list_for_each(tmp3, &ses->tcon_list) {
                                        tcon = list_entry(tmp3,
-                                                         struct cifsTconInfo,
+                                                         struct cifs_tcon,
                                                          tcon_list);
                                        atomic_set(&tcon->num_smbs_sent, 0);
                                        atomic_set(&tcon->num_writes, 0);
@@ -312,8 +312,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
        int i;
        struct list_head *tmp1, *tmp2, *tmp3;
        struct TCP_Server_Info *server;
-       struct cifsSesInfo *ses;
-       struct cifsTconInfo *tcon;
+       struct cifs_ses *ses;
+       struct cifs_tcon *tcon;
 
        seq_printf(m,
                        "Resources in use\nCIFS Session: %d\n",
@@ -346,11 +346,11 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
                server = list_entry(tmp1, struct TCP_Server_Info,
                                    tcp_ses_list);
                list_for_each(tmp2, &server->smb_ses_list) {
-                       ses = list_entry(tmp2, struct cifsSesInfo,
+                       ses = list_entry(tmp2, struct cifs_ses,
                                         smb_ses_list);
                        list_for_each(tmp3, &ses->tcon_list) {
                                tcon = list_entry(tmp3,
-                                                 struct cifsTconInfo,
+                                                 struct cifs_tcon,
                                                  tcon_list);
                                i++;
                                seq_printf(m, "\n%d) %s", i, tcon->treeName);
index 2b68ac57d97d3ffdd0bc8bd425cff8ef121138a3..8d8f28c94c0fe608f61684c2b34e5045f01489f9 100644 (file)
@@ -272,7 +272,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
        struct dfs_info3_param *referrals = NULL;
        unsigned int num_referrals = 0;
        struct cifs_sb_info *cifs_sb;
-       struct cifsSesInfo *ses;
+       struct cifs_ses *ses;
        char *full_path;
        int xid, i;
        int rc;
index a9d5692e0c2067783a3a0607da7fc7e3cda06c5d..ffb1459dc6ecf04f2da417a85c734374177f45ce 100644 (file)
@@ -41,6 +41,7 @@
 #define CIFS_MOUNT_MF_SYMLINKS 0x10000 /* Minshall+French Symlinks enabled */
 #define CIFS_MOUNT_MULTIUSER   0x20000 /* multiuser mount */
 #define CIFS_MOUNT_STRICT_IO   0x40000 /* strict cache mode */
+#define CIFS_MOUNT_RWPIDFORWARD        0x80000 /* use pid forwarding for rw */
 
 struct cifs_sb_info {
        struct rb_root tlink_tree;
@@ -56,8 +57,6 @@ struct cifs_sb_info {
        mode_t  mnt_file_mode;
        mode_t  mnt_dir_mode;
        unsigned int mnt_cifs_flags;
-       int     prepathlen;
-       char   *prepath; /* relative path under the share to mount to */
        char   *mountdata; /* options received at mount time or via DFS refs */
        struct backing_dev_info bdi;
        struct delayed_work prune_tlinks;
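The new CIFS_MOUNT_RWPIDFORWARD flag above backs the rwpidforward mount option and is meant to be consulted on the read/write path together with struct cifs_io_parms (added further down in this series). A hedged sketch of that consumption; the helper name and the pid field on the open file are assumptions for illustration, not taken verbatim from the patch.

/* sketch only: pick which pid to send with a read/write request */
static void myfs_fill_io_parms(struct cifs_sb_info *cifs_sb,
			       struct cifsFileInfo *open_file,
			       struct cifs_tcon *tcon,
			       struct cifs_io_parms *io_parms)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		io_parms->pid = open_file->pid;	/* pid recorded at open (assumed field) */
	else
		io_parms->pid = current->tgid;	/* pid of the caller */
	io_parms->tcon = tcon;
}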
index 33d221394acae068807adea4eeca47317a821888..2272fd5fe5b74fcac62d001987ce3980a8a0b3e0 100644 (file)
@@ -95,7 +95,7 @@ struct key_type cifs_spnego_key_type = {
 
 /* get a key struct with a SPNEGO security blob, suitable for session setup */
 struct key *
-cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
+cifs_get_spnego_key(struct cifs_ses *sesInfo)
 {
        struct TCP_Server_Info *server = sesInfo->server;
        struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
index e4041ec4d712943c412e0d97addbea7c4964c77a..31bef9ee078bdebb977ca830c0acb0a33d2565ba 100644 (file)
@@ -41,7 +41,7 @@ struct cifs_spnego_msg {
 
 #ifdef __KERNEL__
 extern struct key_type cifs_spnego_key_type;
-extern struct key *cifs_get_spnego_key(struct cifsSesInfo *sesInfo);
+extern struct key *cifs_get_spnego_key(struct cifs_ses *sesInfo);
 #endif /* KERNEL */
 
 #endif /* _CIFS_SPNEGO_H */
index f3c6fb9942ac9f03651cc1b1f6e3b46f954b7af8..8f1700623b41078a35dda35957a858aa9cf08550 100644 (file)
@@ -38,7 +38,7 @@ static const struct cifs_sid sid_everyone = {
        1, 1, {0, 0, 0, 0, 0, 1}, {0} };
 /* security id for Authenticated Users system group */
 static const struct cifs_sid sid_authusers = {
-       1, 1, {0, 0, 0, 0, 0, 5}, {11} };
+       1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(11)} };
 /* group users */
 static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
 
@@ -458,7 +458,8 @@ int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
        if (num_subauth) {
                for (i = 0; i < num_subauth; ++i) {
                        if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
-                               if (ctsid->sub_auth[i] > cwsid->sub_auth[i])
+                               if (le32_to_cpu(ctsid->sub_auth[i]) >
+                                       le32_to_cpu(cwsid->sub_auth[i]))
                                        return 1;
                                else
                                        return -1;
@@ -945,7 +946,7 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
        int oplock = 0;
        int xid, rc;
        __u16 fid;
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
        struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
 
        if (IS_ERR(tlink))
@@ -1013,7 +1014,7 @@ static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
        int oplock = 0;
        int xid, rc;
        __u16 fid;
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
        struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
 
        if (IS_ERR(tlink))
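The cifsacl.c hunk above makes compare_sids() convert each little-endian sub_auth word to host order before the arithmetic comparison (and stores sid_authusers' sub-authority as an explicit __le32). A tiny standalone illustration of the decode-then-compare idea; le32_decode() is a local stand-in for le32_to_cpu().

#include <stdint.h>
#include <stdio.h>

static uint32_t le32_decode(const uint8_t b[4])
{
	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

int main(void)
{
	/* 258 (0x102) and 513 (0x201), stored little-endian on the wire */
	uint8_t a[4] = { 0x02, 0x01, 0x00, 0x00 };
	uint8_t b[4] = { 0x01, 0x02, 0x00, 0x00 };

	/* compare the decoded values, never the raw byte patterns */
	if (le32_decode(a) > le32_decode(b))
		printf("a > b\n");
	else
		printf("a <= b\n");	/* prints this: 258 <= 513 */
	return 0;
}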
index 45c3f78c8f81374d4cc7c695bb5e9e4b5f9f12d3..dfbd9f1f373daa9b39adeb852595438af0c4177b 100644 (file)
@@ -229,7 +229,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
 }
 
 /* first calculate 24 bytes ntlm response and then 16 byte session key */
-int setup_ntlm_response(struct cifsSesInfo *ses)
+int setup_ntlm_response(struct cifs_ses *ses)
 {
        int rc = 0;
        unsigned int temp_len = CIFS_SESS_KEY_SIZE + CIFS_AUTH_RESP_SIZE;
@@ -312,7 +312,7 @@ int calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt,
  * Allocate domain name which gets freed when session struct is deallocated.
  */
 static int
-build_avpair_blob(struct cifsSesInfo *ses, const struct nls_table *nls_cp)
+build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
 {
        unsigned int dlen;
        unsigned int wlen;
@@ -400,7 +400,7 @@ build_avpair_blob(struct cifsSesInfo *ses, const struct nls_table *nls_cp)
  * about target string i.e. for some, just user name might suffice.
  */
 static int
-find_domain_name(struct cifsSesInfo *ses, const struct nls_table *nls_cp)
+find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
 {
        unsigned int attrsize;
        unsigned int type;
@@ -445,7 +445,7 @@ find_domain_name(struct cifsSesInfo *ses, const struct nls_table *nls_cp)
        return 0;
 }
 
-static int calc_ntlmv2_hash(struct cifsSesInfo *ses, char *ntlmv2_hash,
+static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
                            const struct nls_table *nls_cp)
 {
        int rc = 0;
@@ -527,7 +527,7 @@ calc_exit_2:
 }
 
 static int
-CalcNTLMv2_response(const struct cifsSesInfo *ses, char *ntlmv2_hash)
+CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
 {
        int rc;
        unsigned int offset = CIFS_SESS_KEY_SIZE + 8;
@@ -563,7 +563,7 @@ CalcNTLMv2_response(const struct cifsSesInfo *ses, char *ntlmv2_hash)
 
 
 int
-setup_ntlmv2_rsp(struct cifsSesInfo *ses, const struct nls_table *nls_cp)
+setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
 {
        int rc;
        int baselen;
@@ -649,7 +649,7 @@ setup_ntlmv2_rsp_ret:
 }
 
 int
-calc_seckey(struct cifsSesInfo *ses)
+calc_seckey(struct cifs_ses *ses)
 {
        int rc;
        struct crypto_blkcipher *tfm_arc4;
index 493b74ca5648b69d676dd423ede90a24c5c6e452..989442dcfb45d5af094d619d468b648094820908 100644 (file)
@@ -104,46 +104,25 @@ cifs_sb_deactive(struct super_block *sb)
 }
 
 static int
-cifs_read_super(struct super_block *sb, void *data,
+cifs_read_super(struct super_block *sb, struct smb_vol *volume_info,
                const char *devname, int silent)
 {
        struct inode *inode;
        struct cifs_sb_info *cifs_sb;
        int rc = 0;
 
-       /* BB should we make this contingent on mount parm? */
-       sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
-       sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
        cifs_sb = CIFS_SB(sb);
-       if (cifs_sb == NULL)
-               return -ENOMEM;
 
        spin_lock_init(&cifs_sb->tlink_tree_lock);
        cifs_sb->tlink_tree = RB_ROOT;
 
        rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
-       if (rc) {
-               kfree(cifs_sb);
+       if (rc)
                return rc;
-       }
-       cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
 
-       /*
-        * Copy mount params to sb for use in submounts. Better to do
-        * the copy here and deal with the error before cleanup gets
-        * complicated post-mount.
-        */
-       if (data) {
-               cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
-               if (cifs_sb->mountdata == NULL) {
-                       bdi_destroy(&cifs_sb->bdi);
-                       kfree(sb->s_fs_info);
-                       sb->s_fs_info = NULL;
-                       return -ENOMEM;
-               }
-       }
+       cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
 
-       rc = cifs_mount(sb, cifs_sb, devname);
+       rc = cifs_mount(sb, cifs_sb, volume_info, devname);
 
        if (rc) {
                if (!silent)
@@ -194,15 +173,7 @@ out_no_root:
        cifs_umount(sb, cifs_sb);
 
 out_mount_failed:
-       if (cifs_sb) {
-               if (cifs_sb->mountdata) {
-                       kfree(cifs_sb->mountdata);
-                       cifs_sb->mountdata = NULL;
-               }
-               unload_nls(cifs_sb->local_nls);
-               bdi_destroy(&cifs_sb->bdi);
-               kfree(cifs_sb);
-       }
+       bdi_destroy(&cifs_sb->bdi);
        return rc;
 }
 
@@ -237,7 +208,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
        struct super_block *sb = dentry->d_sb;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
-       struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+       struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
        int rc = -EOPNOTSUPP;
        int xid;
 
@@ -390,7 +361,7 @@ static int
 cifs_show_options(struct seq_file *s, struct vfsmount *m)
 {
        struct cifs_sb_info *cifs_sb = CIFS_SB(m->mnt_sb);
-       struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+       struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
        struct sockaddr *srcaddr;
        srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
 
@@ -444,14 +415,20 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
                seq_printf(s, ",nocase");
        if (tcon->retry)
                seq_printf(s, ",hard");
-       if (cifs_sb->prepath)
-               seq_printf(s, ",prepath=%s", cifs_sb->prepath);
+       if (tcon->unix_ext)
+               seq_printf(s, ",unix");
+       else
+               seq_printf(s, ",nounix");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
                seq_printf(s, ",posixpaths");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
                seq_printf(s, ",setuids");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
                seq_printf(s, ",serverino");
+       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+               seq_printf(s, ",rwpidforward");
+       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
+               seq_printf(s, ",forcemand");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
                seq_printf(s, ",directio");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
@@ -484,7 +461,7 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
 static void cifs_umount_begin(struct super_block *sb)
 {
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
 
        if (cifs_sb == NULL)
                return;
@@ -559,29 +536,189 @@ static const struct super_operations cifs_super_ops = {
 #endif
 };
 
+/*
+ * Get root dentry from superblock according to prefix path mount option.
+ * Return dentry with refcount + 1 on success and NULL otherwise.
+ */
+static struct dentry *
+cifs_get_root(struct smb_vol *vol, struct super_block *sb)
+{
+       int xid, rc;
+       struct inode *inode;
+       struct qstr name;
+       struct dentry *dparent = NULL, *dchild = NULL, *alias;
+       struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+       unsigned int i, full_len, len;
+       char *full_path = NULL, *pstart;
+       char sep;
+
+       full_path = cifs_build_path_to_root(vol, cifs_sb,
+                                           cifs_sb_master_tcon(cifs_sb));
+       if (full_path == NULL)
+               return NULL;
+
+       cFYI(1, "Get root dentry for %s", full_path);
+
+       xid = GetXid();
+       sep = CIFS_DIR_SEP(cifs_sb);
+       dparent = dget(sb->s_root);
+       full_len = strlen(full_path);
+       full_path[full_len] = sep;
+       pstart = full_path + 1;
+
+       for (i = 1, len = 0; i <= full_len; i++) {
+               if (full_path[i] != sep || !len) {
+                       len++;
+                       continue;
+               }
+
+               full_path[i] = 0;
+               cFYI(1, "get dentry for %s", pstart);
+
+               name.name = pstart;
+               name.len = len;
+               name.hash = full_name_hash(pstart, len);
+               dchild = d_lookup(dparent, &name);
+               if (dchild == NULL) {
+                       cFYI(1, "not exists");
+                       dchild = d_alloc(dparent, &name);
+                       if (dchild == NULL) {
+                               dput(dparent);
+                               dparent = NULL;
+                               goto out;
+                       }
+               }
+
+               cFYI(1, "get inode");
+               if (dchild->d_inode == NULL) {
+                       cFYI(1, "not exists");
+                       inode = NULL;
+                       if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
+                               rc = cifs_get_inode_info_unix(&inode, full_path,
+                                                             sb, xid);
+                       else
+                               rc = cifs_get_inode_info(&inode, full_path,
+                                                        NULL, sb, xid, NULL);
+                       if (rc) {
+                               dput(dchild);
+                               dput(dparent);
+                               dparent = NULL;
+                               goto out;
+                       }
+                       alias = d_materialise_unique(dchild, inode);
+                       if (alias != NULL) {
+                               dput(dchild);
+                               if (IS_ERR(alias)) {
+                                       dput(dparent);
+                                       dparent = NULL;
+                                       goto out;
+                               }
+                               dchild = alias;
+                       }
+               }
+               cFYI(1, "parent %p, child %p", dparent, dchild);
+
+               dput(dparent);
+               dparent = dchild;
+               len = 0;
+               pstart = full_path + i + 1;
+               full_path[i] = sep;
+       }
+out:
+       _FreeXid(xid);
+       kfree(full_path);
+       return dparent;
+}
+
 static struct dentry *
 cifs_do_mount(struct file_system_type *fs_type,
-           int flags, const char *dev_name, void *data)
+             int flags, const char *dev_name, void *data)
 {
        int rc;
        struct super_block *sb;
-
-       sb = sget(fs_type, NULL, set_anon_super, NULL);
+       struct cifs_sb_info *cifs_sb;
+       struct smb_vol *volume_info;
+       struct cifs_mnt_data mnt_data;
+       struct dentry *root;
 
        cFYI(1, "Devname: %s flags: %d ", dev_name, flags);
 
-       if (IS_ERR(sb))
-               return ERR_CAST(sb);
+       rc = cifs_setup_volume_info(&volume_info, (char *)data, dev_name);
+       if (rc)
+               return ERR_PTR(rc);
+
+       cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
+       if (cifs_sb == NULL) {
+               root = ERR_PTR(-ENOMEM);
+               goto out;
+       }
+
+       cifs_setup_cifs_sb(volume_info, cifs_sb);
+
+       mnt_data.vol = volume_info;
+       mnt_data.cifs_sb = cifs_sb;
+       mnt_data.flags = flags;
+
+       sb = sget(fs_type, cifs_match_super, set_anon_super, &mnt_data);
+       if (IS_ERR(sb)) {
+               root = ERR_CAST(sb);
+               goto out_cifs_sb;
+       }
+
+       if (sb->s_fs_info) {
+               cFYI(1, "Use existing superblock");
+               goto out_shared;
+       }
+
+       /*
+        * Copy mount params for use in submounts. Better to do
+        * the copy here and deal with the error before cleanup gets
+        * complicated post-mount.
+        */
+       cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL);
+       if (cifs_sb->mountdata == NULL) {
+               root = ERR_PTR(-ENOMEM);
+               goto out_super;
+       }
 
        sb->s_flags = flags;
+       /* BB should we make this contingent on mount parm? */
+       sb->s_flags |= MS_NODIRATIME | MS_NOATIME;
+       sb->s_fs_info = cifs_sb;
 
-       rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0);
+       rc = cifs_read_super(sb, volume_info, dev_name,
+                            flags & MS_SILENT ? 1 : 0);
        if (rc) {
-               deactivate_locked_super(sb);
-               return ERR_PTR(rc);
+               root = ERR_PTR(rc);
+               goto out_super;
        }
+
        sb->s_flags |= MS_ACTIVE;
-       return dget(sb->s_root);
+
+       root = cifs_get_root(volume_info, sb);
+       if (root == NULL)
+               goto out_super;
+
+       cFYI(1, "dentry root is: %p", root);
+       goto out;
+
+out_shared:
+       root = cifs_get_root(volume_info, sb);
+       if (root)
+               cFYI(1, "dentry root is: %p", root);
+       goto out;
+
+out_super:
+       kfree(cifs_sb->mountdata);
+       deactivate_locked_super(sb);
+
+out_cifs_sb:
+       unload_nls(cifs_sb->local_nls);
+       kfree(cifs_sb);
+
+out:
+       cifs_cleanup_volume_info(&volume_info);
+       return root;
 }
 
 static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
index 76b4517e74b07d5ff15865c2f4860ed9062c7780..6255fa812c7a28b2071ced3f7585cb2359e6a3c4 100644 (file)
@@ -155,6 +155,81 @@ struct cifs_cred {
  *****************************************************************
  */
 
+struct smb_vol {
+       char *username;
+       char *password;
+       char *domainname;
+       char *UNC;
+       char *UNCip;
+       char *iocharset;  /* local code page for mapping to and from Unicode */
+       char source_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* clnt nb name */
+       char target_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* srvr nb name */
+       uid_t cred_uid;
+       uid_t linux_uid;
+       gid_t linux_gid;
+       mode_t file_mode;
+       mode_t dir_mode;
+       unsigned secFlg;
+       bool retry:1;
+       bool intr:1;
+       bool setuids:1;
+       bool override_uid:1;
+       bool override_gid:1;
+       bool dynperm:1;
+       bool noperm:1;
+       bool no_psx_acl:1; /* set if posix acl support should be disabled */
+       bool cifs_acl:1;
+       bool no_xattr:1;   /* set if xattr (EA) support should be disabled*/
+       bool server_ino:1; /* use inode numbers from server ie UniqueId */
+       bool direct_io:1;
+       bool strict_io:1; /* strict cache behavior */
+       bool remap:1;      /* set to remap seven reserved chars in filenames */
+       bool posix_paths:1; /* unset to not ask for posix pathnames. */
+       bool no_linux_ext:1;
+       bool sfu_emul:1;
+       bool nullauth:1;   /* attempt to authenticate with null user */
+       bool nocase:1;     /* request case insensitive filenames */
+       bool nobrl:1;      /* disable sending byte range locks to srv */
+       bool mand_lock:1;  /* send mandatory not posix byte range lock reqs */
+       bool seal:1;       /* request transport encryption on share */
+       bool nodfs:1;      /* Do not request DFS, even if available */
+       bool local_lease:1; /* check leases only on local system, not remote */
+       bool noblocksnd:1;
+       bool noautotune:1;
+       bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
+       bool fsc:1;     /* enable fscache */
+       bool mfsymlinks:1; /* use Minshall+French Symlinks */
+       bool multiuser:1;
+       bool rwpidforward:1; /* pid forward for read/write operations */
+       unsigned int rsize;
+       unsigned int wsize;
+       bool sockopt_tcp_nodelay:1;
+       unsigned short int port;
+       unsigned long actimeo; /* attribute cache timeout (jiffies) */
+       char *prepath;
+       struct sockaddr_storage srcaddr; /* allow binding to a local IP */
+       struct nls_table *local_nls;
+};
+
+#define CIFS_MOUNT_MASK (CIFS_MOUNT_NO_PERM | CIFS_MOUNT_SET_UID | \
+                        CIFS_MOUNT_SERVER_INUM | CIFS_MOUNT_DIRECT_IO | \
+                        CIFS_MOUNT_NO_XATTR | CIFS_MOUNT_MAP_SPECIAL_CHR | \
+                        CIFS_MOUNT_UNX_EMUL | CIFS_MOUNT_NO_BRL | \
+                        CIFS_MOUNT_CIFS_ACL | CIFS_MOUNT_OVERR_UID | \
+                        CIFS_MOUNT_OVERR_GID | CIFS_MOUNT_DYNPERM | \
+                        CIFS_MOUNT_NOPOSIXBRL | CIFS_MOUNT_NOSSYNC | \
+                        CIFS_MOUNT_FSCACHE | CIFS_MOUNT_MF_SYMLINKS | \
+                        CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO)
+
+#define CIFS_MS_MASK (MS_RDONLY | MS_MANDLOCK | MS_NOEXEC | MS_NOSUID | \
+                     MS_NODEV | MS_SYNCHRONOUS)
+
+struct cifs_mnt_data {
+       struct cifs_sb_info *cifs_sb;
+       struct smb_vol *vol;
+       int flags;
+};
+
 struct TCP_Server_Info {
        struct list_head tcp_ses_list;
        struct list_head smb_ses_list;
@@ -179,7 +254,7 @@ struct TCP_Server_Info {
        struct mutex srv_mutex;
        struct task_struct *tsk;
        char server_GUID[16];
-       char secMode;
+       char sec_mode;
        bool session_estab; /* mark when very first sess is established */
        u16 dialect; /* dialect index that server chose */
        enum securityEnum secType;
@@ -254,7 +329,7 @@ static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
 /*
  * Session structure.  One of these for each uid session with a particular host
  */
-struct cifsSesInfo {
+struct cifs_ses {
        struct list_head smb_ses_list;
        struct list_head tcon_list;
        struct mutex session_mutex;
@@ -294,11 +369,11 @@ struct cifsSesInfo {
  * there is one of these for each connection to a resource on a particular
  * session
  */
-struct cifsTconInfo {
+struct cifs_tcon {
        struct list_head tcon_list;
        int tc_count;
        struct list_head openFileList;
-       struct cifsSesInfo *ses;        /* pointer to session associated with */
+       struct cifs_ses *ses;   /* pointer to session associated with */
        char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
        char *nativeFileSystem;
        char *password;         /* for share-level security */
@@ -380,12 +455,12 @@ struct tcon_link {
 #define TCON_LINK_IN_TREE      2
        unsigned long           tl_time;
        atomic_t                tl_count;
-       struct cifsTconInfo     *tl_tcon;
+       struct cifs_tcon        *tl_tcon;
 };
 
 extern struct tcon_link *cifs_sb_tlink(struct cifs_sb_info *cifs_sb);
 
-static inline struct cifsTconInfo *
+static inline struct cifs_tcon *
 tlink_tcon(struct tcon_link *tlink)
 {
        return tlink->tl_tcon;
@@ -402,7 +477,7 @@ cifs_get_tlink(struct tcon_link *tlink)
 }
 
 /* This function is always expected to succeed */
-extern struct cifsTconInfo *cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb);
+extern struct cifs_tcon *cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb);
 
 /*
  * This info hangs off the cifsFileInfo structure, pointed to by llist.
@@ -455,6 +530,14 @@ struct cifsFileInfo {
        struct work_struct oplock_break; /* work for oplock breaks */
 };
 
+struct cifs_io_parms {
+       __u16 netfid;
+       __u32 pid;
+       __u64 offset;
+       unsigned int length;
+       struct cifs_tcon *tcon;
+};
+
 /*
  * Take a reference on the file private data. Must be called with
  * cifs_file_list_lock held.
@@ -509,10 +592,30 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
                return '\\';
 }
 
+static inline void
+convert_delimiter(char *path, char delim)
+{
+       int i;
+       char old_delim;
+
+       if (path == NULL)
+               return;
+
+       if (delim == '/')
+               old_delim = '\\';
+       else
+               old_delim = '/';
+
+       for (i = 0; path[i] != '\0'; i++) {
+               if (path[i] == old_delim)
+                       path[i] = delim;
+       }
+}
+
 #ifdef CONFIG_CIFS_STATS
 #define cifs_stats_inc atomic_inc
 
-static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
+static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
                                            unsigned int bytes)
 {
        if (bytes) {
@@ -522,7 +625,7 @@ static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
        }
 }
 
-static inline void cifs_stats_bytes_read(struct cifsTconInfo *tcon,
+static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon,
                                         unsigned int bytes)
 {
        spin_lock(&tcon->stat_lock);
@@ -543,9 +646,8 @@ struct mid_q_entry;
  * This is the prototype for the mid callback function. When creating one,
  * take special care to avoid deadlocks. Things to bear in mind:
  *
- * - it will be called by cifsd
- * - the GlobalMid_Lock will be held
- * - the mid will be removed from the pending_mid_q list
+ * - it will be called by cifsd, with no locks held
+ * - the mid will be removed from any lists
  */
 typedef void (mid_callback_t)(struct mid_q_entry *mid);
 
@@ -573,7 +675,7 @@ struct mid_q_entry {
 struct oplock_q_entry {
        struct list_head qhead;
        struct inode *pinode;
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
        __u16 netfid;
 };
 
@@ -656,6 +758,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
 #define   MID_RESPONSE_RECEIVED 4
 #define   MID_RETRY_NEEDED      8 /* session closed while this request out */
 #define   MID_RESPONSE_MALFORMED 0x10
+#define   MID_SHUTDOWN          0x20
 
 /* Types of response buffer returned from SendReceive2 */
 #define   CIFS_NO_BUFFER        0    /* Response buffer not returned */
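The convert_delimiter() helper added to cifsglob.h above rewrites one path separator into the other in place. A quick standalone check, with the body copied into the example so it builds outside the kernel tree:

#include <stdio.h>

static void convert_delimiter(char *path, char delim)
{
	int i;
	char old_delim;

	if (path == NULL)
		return;

	if (delim == '/')
		old_delim = '\\';
	else
		old_delim = '/';

	for (i = 0; path[i] != '\0'; i++) {
		if (path[i] == old_delim)
			path[i] = delim;
	}
}

int main(void)
{
	char unc[] = "\\\\server\\share\\dir\\file";

	convert_delimiter(unc, '/');
	printf("%s\n", unc);	/* //server/share/dir/file */
	return 0;
}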
index 6e69e06a30b3334687cf07034044a01c6b0beaed..953f84413c771b1cb0e3bb404a4349f7d90b08fa 100644 (file)
@@ -57,8 +57,9 @@ extern int init_cifs_idmap(void);
 extern void exit_cifs_idmap(void);
 extern void cifs_destroy_idmaptrees(void);
 extern char *build_path_from_dentry(struct dentry *);
-extern char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb,
-                                       struct cifsTconInfo *tcon);
+extern char *cifs_build_path_to_root(struct smb_vol *vol,
+                                    struct cifs_sb_info *cifs_sb,
+                                    struct cifs_tcon *tcon);
 extern char *build_wildcard_path_from_dentry(struct dentry *direntry);
 extern char *cifs_compose_mount_options(const char *sb_mountdata,
                const char *fullpath, const struct dfs_info3_param *ref,
@@ -67,20 +68,22 @@ extern char *cifs_compose_mount_options(const char *sb_mountdata,
 extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
                                        struct TCP_Server_Info *server);
 extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
-extern int cifs_call_async(struct TCP_Server_Info *server,
-                          struct smb_hdr *in_buf, mid_callback_t *callback,
-                          void *cbdata);
-extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *,
+extern int cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
+                          unsigned int nvec, mid_callback_t *callback,
+                          void *cbdata, bool ignore_pend);
+extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
                        struct smb_hdr * /* input */ ,
                        struct smb_hdr * /* out */ ,
                        int * /* bytes returned */ , const int long_op);
-extern int SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
+extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                        struct smb_hdr *in_buf, int flags);
-extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *,
+extern int cifs_check_receive(struct mid_q_entry *mid,
+                       struct TCP_Server_Info *server, bool log_error);
+extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
                        struct kvec *, int /* nvec to send */,
                        int * /* type of buf returned */ , const int flags);
 extern int SendReceiveBlockingLock(const unsigned int xid,
-                       struct cifsTconInfo *ptcon,
+                       struct cifs_tcon *ptcon,
                        struct smb_hdr *in_buf ,
                        struct smb_hdr *out_buf,
                        int *bytes_returned);
@@ -99,14 +102,14 @@ extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len);
 extern int cifs_set_port(struct sockaddr *addr, const unsigned short int port);
 extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len,
                                const unsigned short int port);
-extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr);
+extern int map_smb_to_linux_error(struct smb_hdr *smb, bool logErr);
 extern void header_assemble(struct smb_hdr *, char /* command */ ,
-                           const struct cifsTconInfo *, int /* length of
+                           const struct cifs_tcon *, int /* length of
                            fixed section (word count) in two byte units */);
 extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
-                               struct cifsSesInfo *ses,
+                               struct cifs_ses *ses,
                                void **request_buf);
-extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
+extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
                             const struct nls_table *nls_cp);
 extern __u16 GetNextMid(struct TCP_Server_Info *server);
 extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
@@ -148,102 +151,108 @@ extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
 extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
                                const char *);
 
+extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+                              struct cifs_sb_info *cifs_sb);
+extern int cifs_match_super(struct super_block *, void *);
+extern void cifs_cleanup_volume_info(struct smb_vol **pvolume_info);
+extern int cifs_setup_volume_info(struct smb_vol **pvolume_info,
+                                 char *mount_data, const char *devname);
 extern int cifs_mount(struct super_block *, struct cifs_sb_info *,
-                       const char *);
+                     struct smb_vol *, const char *);
 extern int cifs_umount(struct super_block *, struct cifs_sb_info *);
 extern void cifs_dfs_release_automount_timer(void);
 void cifs_proc_init(void);
 void cifs_proc_clean(void);
 
 extern int cifs_negotiate_protocol(unsigned int xid,
-                                 struct cifsSesInfo *ses);
-extern int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
+                                 struct cifs_ses *ses);
+extern int cifs_setup_session(unsigned int xid, struct cifs_ses *ses,
                        struct nls_table *nls_info);
-extern int CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses);
+extern int CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses);
 
-extern int CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
-                       const char *tree, struct cifsTconInfo *tcon,
+extern int CIFSTCon(unsigned int xid, struct cifs_ses *ses,
+                       const char *tree, struct cifs_tcon *tcon,
                        const struct nls_table *);
 
-extern int CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
                const char *searchName, const struct nls_table *nls_codepage,
                __u16 *searchHandle, struct cifs_search_info *psrch_inf,
                int map, const char dirsep);
 
-extern int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
                __u16 searchHandle, struct cifs_search_info *psrch_inf);
 
-extern int CIFSFindClose(const int, struct cifsTconInfo *tcon,
+extern int CIFSFindClose(const int, struct cifs_tcon *tcon,
                        const __u16 search_handle);
 
-extern int CIFSSMBQFileInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBQFileInfo(const int xid, struct cifs_tcon *tcon,
                        u16 netfid, FILE_ALL_INFO *pFindData);
-extern int CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBQPathInfo(const int xid, struct cifs_tcon *tcon,
                        const unsigned char *searchName,
                        FILE_ALL_INFO *findData,
                        int legacy /* whether to use old info level */,
                        const struct nls_table *nls_codepage, int remap);
-extern int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
+extern int SMBQueryInformation(const int xid, struct cifs_tcon *tcon,
                        const unsigned char *searchName,
                        FILE_ALL_INFO *findData,
                        const struct nls_table *nls_codepage, int remap);
 
-extern int CIFSSMBUnixQFileInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBUnixQFileInfo(const int xid, struct cifs_tcon *tcon,
                        u16 netfid, FILE_UNIX_BASIC_INFO *pFindData);
 extern int CIFSSMBUnixQPathInfo(const int xid,
-                       struct cifsTconInfo *tcon,
+                       struct cifs_tcon *tcon,
                        const unsigned char *searchName,
                        FILE_UNIX_BASIC_INFO *pFindData,
                        const struct nls_table *nls_codepage, int remap);
 
-extern int CIFSGetDFSRefer(const int xid, struct cifsSesInfo *ses,
+extern int CIFSGetDFSRefer(const int xid, struct cifs_ses *ses,
                        const unsigned char *searchName,
                        struct dfs_info3_param **target_nodes,
                        unsigned int *number_of_nodes_in_array,
                        const struct nls_table *nls_codepage, int remap);
 
-extern int get_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
+extern int get_dfs_path(int xid, struct cifs_ses *pSesInfo,
                        const char *old_path,
                        const struct nls_table *nls_codepage,
                        unsigned int *pnum_referrals,
                        struct dfs_info3_param **preferrals,
                        int remap);
-extern void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
+extern void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
                                 struct super_block *sb, struct smb_vol *vol);
-extern int CIFSSMBQFSInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon,
                        struct kstatfs *FSData);
-extern int SMBOldQFSInfo(const int xid, struct cifsTconInfo *tcon,
+extern int SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon,
                        struct kstatfs *FSData);
-extern int CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetFSUnixInfo(const int xid, struct cifs_tcon *tcon,
                        __u64 cap);
 
 extern int CIFSSMBQFSAttributeInfo(const int xid,
-                       struct cifsTconInfo *tcon);
-extern int CIFSSMBQFSDeviceInfo(const int xid, struct cifsTconInfo *tcon);
-extern int CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon);
-extern int CIFSSMBQFSPosixInfo(const int xid, struct cifsTconInfo *tcon,
+                       struct cifs_tcon *tcon);
+extern int CIFSSMBQFSDeviceInfo(const int xid, struct cifs_tcon *tcon);
+extern int CIFSSMBQFSUnixInfo(const int xid, struct cifs_tcon *tcon);
+extern int CIFSSMBQFSPosixInfo(const int xid, struct cifs_tcon *tcon,
                        struct kstatfs *FSData);
 
-extern int CIFSSMBSetPathInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetPathInfo(const int xid, struct cifs_tcon *tcon,
                        const char *fileName, const FILE_BASIC_INFO *data,
                        const struct nls_table *nls_codepage,
                        int remap_special_chars);
-extern int CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon,
                        const FILE_BASIC_INFO *data, __u16 fid,
                        __u32 pid_of_opener);
-extern int CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetFileDisposition(const int xid, struct cifs_tcon *tcon,
                        bool delete_file, __u16 fid, __u32 pid_of_opener);
 #if 0
-extern int CIFSSMBSetAttrLegacy(int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetAttrLegacy(int xid, struct cifs_tcon *tcon,
                        char *fileName, __u16 dos_attributes,
                        const struct nls_table *nls_codepage);
 #endif /* possibly unneeded function */
-extern int CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetEOF(const int xid, struct cifs_tcon *tcon,
                        const char *fileName, __u64 size,
                        bool setAllocationSizeFlag,
                        const struct nls_table *nls_codepage,
                        int remap_special_chars);
-extern int CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetFileSize(const int xid, struct cifs_tcon *tcon,
                         __u64 size, __u16 fileHandle, __u32 opener_pid,
                        bool AllocSizeFlag);
 
@@ -257,120 +266,116 @@ struct cifs_unix_set_info_args {
        dev_t   device;
 };
 
-extern int CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon,
                                  const struct cifs_unix_set_info_args *args,
                                  u16 fid, u32 pid_of_opener);
 
-extern int CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *pTcon,
+extern int CIFSSMBUnixSetPathInfo(const int xid, struct cifs_tcon *pTcon,
                        char *fileName,
                        const struct cifs_unix_set_info_args *args,
                        const struct nls_table *nls_codepage,
                        int remap_special_chars);
 
-extern int CIFSSMBMkDir(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBMkDir(const int xid, struct cifs_tcon *tcon,
                        const char *newName,
                        const struct nls_table *nls_codepage,
                        int remap_special_chars);
-extern int CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBRmDir(const int xid, struct cifs_tcon *tcon,
                        const char *name, const struct nls_table *nls_codepage,
                        int remap_special_chars);
-extern int CIFSPOSIXDelFile(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSPOSIXDelFile(const int xid, struct cifs_tcon *tcon,
                        const char *name, __u16 type,
                        const struct nls_table *nls_codepage,
                        int remap_special_chars);
-extern int CIFSSMBDelFile(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBDelFile(const int xid, struct cifs_tcon *tcon,
                        const char *name,
                        const struct nls_table *nls_codepage,
                        int remap_special_chars);
-extern int CIFSSMBRename(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBRename(const int xid, struct cifs_tcon *tcon,
                        const char *fromName, const char *toName,
                        const struct nls_table *nls_codepage,
                        int remap_special_chars);
-extern int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
+extern int CIFSSMBRenameOpenFile(const int xid, struct cifs_tcon *pTcon,
                        int netfid, const char *target_name,
                        const struct nls_table *nls_codepage,
                        int remap_special_chars);
 extern int CIFSCreateHardLink(const int xid,
-                       struct cifsTconInfo *tcon,
+                       struct cifs_tcon *tcon,
                        const char *fromName, const char *toName,
                        const struct nls_table *nls_codepage,
                        int remap_special_chars);
 extern int CIFSUnixCreateHardLink(const int xid,
-                       struct cifsTconInfo *tcon,
+                       struct cifs_tcon *tcon,
                        const char *fromName, const char *toName,
                        const struct nls_table *nls_codepage,
                        int remap_special_chars);
 extern int CIFSUnixCreateSymLink(const int xid,
-                       struct cifsTconInfo *tcon,
+                       struct cifs_tcon *tcon,
                        const char *fromName, const char *toName,
                        const struct nls_table *nls_codepage);
 extern int CIFSSMBUnixQuerySymLink(const int xid,
-                       struct cifsTconInfo *tcon,
+                       struct cifs_tcon *tcon,
                        const unsigned char *searchName, char **syminfo,
                        const struct nls_table *nls_codepage);
 #ifdef CONFIG_CIFS_SYMLINK_EXPERIMENTAL
 extern int CIFSSMBQueryReparseLinkInfo(const int xid,
-                       struct cifsTconInfo *tcon,
+                       struct cifs_tcon *tcon,
                        const unsigned char *searchName,
                        char *symlinkinfo, const int buflen, __u16 fid,
                        const struct nls_table *nls_codepage);
 #endif /* temporarily unused until cifs_symlink fixed */
-extern int CIFSSMBOpen(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBOpen(const int xid, struct cifs_tcon *tcon,
                        const char *fileName, const int disposition,
                        const int access_flags, const int omode,
                        __u16 *netfid, int *pOplock, FILE_ALL_INFO *,
                        const struct nls_table *nls_codepage, int remap);
-extern int SMBLegacyOpen(const int xid, struct cifsTconInfo *tcon,
+extern int SMBLegacyOpen(const int xid, struct cifs_tcon *tcon,
                        const char *fileName, const int disposition,
                        const int access_flags, const int omode,
                        __u16 *netfid, int *pOplock, FILE_ALL_INFO *,
                        const struct nls_table *nls_codepage, int remap);
-extern int CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSPOSIXCreate(const int xid, struct cifs_tcon *tcon,
                        u32 posix_flags, __u64 mode, __u16 *netfid,
                        FILE_UNIX_BASIC_INFO *pRetData,
                        __u32 *pOplock, const char *name,
                        const struct nls_table *nls_codepage, int remap);
-extern int CIFSSMBClose(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBClose(const int xid, struct cifs_tcon *tcon,
                        const int smb_file_id);
 
-extern int CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBFlush(const int xid, struct cifs_tcon *tcon,
                        const int smb_file_id);
 
-extern int CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
-                       const int netfid, unsigned int count,
-                       const __u64 lseek, unsigned int *nbytes, char **buf,
+extern int CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms,
+                       unsigned int *nbytes, char **buf,
                        int *return_buf_type);
-extern int CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
-                       const int netfid, const unsigned int count,
-                       const __u64 lseek, unsigned int *nbytes,
-                       const char *buf, const char __user *ubuf,
+extern int CIFSSMBWrite(const int xid, struct cifs_io_parms *io_parms,
+                       unsigned int *nbytes, const char *buf,
+                       const char __user *ubuf, const int long_op);
+extern int CIFSSMBWrite2(const int xid, struct cifs_io_parms *io_parms,
+                       unsigned int *nbytes, struct kvec *iov, const int nvec,
                        const int long_op);
-extern int CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
-                       const int netfid, const unsigned int count,
-                       const __u64 offset, unsigned int *nbytes,
-                       struct kvec *iov, const int nvec, const int long_op);
-extern int CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSGetSrvInodeNumber(const int xid, struct cifs_tcon *tcon,
                        const unsigned char *searchName, __u64 *inode_number,
                        const struct nls_table *nls_codepage,
                        int remap_special_chars);
 
-extern int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
                        const __u16 netfid, const __u64 len,
                        const __u64 offset, const __u32 numUnlock,
                        const __u32 numLock, const __u8 lockType,
                        const bool waitFlag, const __u8 oplock_level);
-extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
                        const __u16 smb_file_id, const int get_flag,
                        const __u64 len, struct file_lock *,
                        const __u16 lock_type, const bool waitFlag);
-extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon);
+extern int CIFSSMBTDis(const int xid, struct cifs_tcon *tcon);
 extern int CIFSSMBEcho(struct TCP_Server_Info *server);
-extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses);
+extern int CIFSSMBLogoff(const int xid, struct cifs_ses *ses);
 
-extern struct cifsSesInfo *sesInfoAlloc(void);
-extern void sesInfoFree(struct cifsSesInfo *);
-extern struct cifsTconInfo *tconInfoAlloc(void);
-extern void tconInfoFree(struct cifsTconInfo *);
+extern struct cifs_ses *sesInfoAlloc(void);
+extern void sesInfoFree(struct cifs_ses *);
+extern struct cifs_tcon *tconInfoAlloc(void);
+extern void tconInfoFree(struct cifs_tcon *);
 
 extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
 extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
@@ -379,51 +384,51 @@ extern int cifs_verify_signature(struct smb_hdr *,
                                 struct TCP_Server_Info *server,
                                __u32 expected_sequence_number);
 extern int SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *);
-extern int setup_ntlm_response(struct cifsSesInfo *);
-extern int setup_ntlmv2_rsp(struct cifsSesInfo *, const struct nls_table *);
+extern int setup_ntlm_response(struct cifs_ses *);
+extern int setup_ntlmv2_rsp(struct cifs_ses *, const struct nls_table *);
 extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *);
 extern void cifs_crypto_shash_release(struct TCP_Server_Info *);
-extern int calc_seckey(struct cifsSesInfo *);
+extern int calc_seckey(struct cifs_ses *);
 
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
 extern int calc_lanman_hash(const char *password, const char *cryptkey,
                                bool encrypt, char *lnm_session_key);
 #endif /* CIFS_WEAK_PW_HASH */
 #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */
-extern int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBNotify(const int xid, struct cifs_tcon *tcon,
                        const int notify_subdirs, const __u16 netfid,
                        __u32 filter, struct file *file, int multishot,
                        const struct nls_table *nls_codepage);
 #endif /* was needed for dnotify, and will be needed for inotify when VFS fix */
 extern int CIFSSMBCopy(int xid,
-                       struct cifsTconInfo *source_tcon,
+                       struct cifs_tcon *source_tcon,
                        const char *fromName,
                        const __u16 target_tid,
                        const char *toName, const int flags,
                        const struct nls_table *nls_codepage,
                        int remap_special_chars);
-extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
+extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon,
                        const unsigned char *searchName,
                        const unsigned char *ea_name, char *EAData,
                        size_t bufsize, const struct nls_table *nls_codepage,
                        int remap_special_chars);
-extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetEA(const int xid, struct cifs_tcon *tcon,
                const char *fileName, const char *ea_name,
                const void *ea_value, const __u16 ea_value_len,
                const struct nls_table *nls_codepage, int remap_special_chars);
-extern int CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon,
                        __u16 fid, struct cifs_ntsd **acl_inf, __u32 *buflen);
-extern int CIFSSMBSetCIFSACL(const int, struct cifsTconInfo *, __u16,
+extern int CIFSSMBSetCIFSACL(const int, struct cifs_tcon *, __u16,
                        struct cifs_ntsd *, __u32);
-extern int CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBGetPosixACL(const int xid, struct cifs_tcon *tcon,
                const unsigned char *searchName,
                char *acl_inf, const int buflen, const int acl_type,
                const struct nls_table *nls_codepage, int remap_special_chars);
-extern int CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSSMBSetPosixACL(const int xid, struct cifs_tcon *tcon,
                const unsigned char *fileName,
                const char *local_acl, const int buflen, const int acl_type,
                const struct nls_table *nls_codepage, int remap_special_chars);
-extern int CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon,
+extern int CIFSGetExtAttr(const int xid, struct cifs_tcon *tcon,
                        const int netfid, __u64 *pExtAttrBits, __u64 *pMask);
 extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb);
 extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr);
@@ -434,4 +439,22 @@ extern int mdfour(unsigned char *, unsigned char *, int);
 extern int E_md4hash(const unsigned char *passwd, unsigned char *p16);
 extern int SMBencrypt(unsigned char *passwd, const unsigned char *c8,
                        unsigned char *p24);
+
+/* asynchronous write support */
+struct cifs_writedata {
+       struct kref                     refcount;
+       enum writeback_sync_modes       sync_mode;
+       struct work_struct              work;
+       struct cifsFileInfo             *cfile;
+       __u64                           offset;
+       unsigned int                    bytes;
+       int                             result;
+       unsigned int                    nr_pages;
+       struct page                     *pages[1];
+};
+
+int cifs_async_writev(struct cifs_writedata *wdata);
+struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages);
+void cifs_writedata_release(struct kref *refcount);
+
 #endif                 /* _CIFSPROTO_H */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 83df937b814e0b92bd99020a35e5db8988e4a1c4..1a9fe7f816d1b83dd630141412d3dd109b23a385 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/vfs.h>
 #include <linux/slab.h>
 #include <linux/posix_acl_xattr.h>
+#include <linux/pagemap.h>
 #include <asm/uaccess.h>
 #include "cifspdu.h"
 #include "cifsglob.h"
@@ -84,7 +85,7 @@ static struct {
 
 /* Mark as invalid, all open files on tree connections since they
    were closed when session to server was lost */
-static void mark_open_files_invalid(struct cifsTconInfo *pTcon)
+static void mark_open_files_invalid(struct cifs_tcon *pTcon)
 {
        struct cifsFileInfo *open_file = NULL;
        struct list_head *tmp;
@@ -104,10 +105,10 @@ static void mark_open_files_invalid(struct cifsTconInfo *pTcon)
 
 /* reconnect the socket, tcon, and smb session if needed */
 static int
-cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
+cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
 {
        int rc = 0;
-       struct cifsSesInfo *ses;
+       struct cifs_ses *ses;
        struct TCP_Server_Info *server;
        struct nls_table *nls_codepage;
 
@@ -226,7 +227,7 @@ out:
    SMB information in the SMB header.  If the return code is zero, this
    function must have filled in request_buf pointer */
 static int
-small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+small_smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
                void **request_buf)
 {
        int rc;
@@ -252,7 +253,7 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
 
 int
 small_smb_init_no_tc(const int smb_command, const int wct,
-                    struct cifsSesInfo *ses, void **request_buf)
+                    struct cifs_ses *ses, void **request_buf)
 {
        int rc;
        struct smb_hdr *buffer;
@@ -278,7 +279,7 @@ small_smb_init_no_tc(const int smb_command, const int wct,
 
 /* If the return code is zero, this function must fill in request_buf pointer */
 static int
-__smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+__smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
                        void **request_buf, void **response_buf)
 {
        *request_buf = cifs_buf_get();
@@ -304,7 +305,7 @@ __smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
 
 /* If the return code is zero, this function must fill in request_buf pointer */
 static int
-smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
+smb_init(int smb_command, int wct, struct cifs_tcon *tcon,
         void **request_buf, void **response_buf)
 {
        int rc;
@@ -317,7 +318,7 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
 }
 
 static int
-smb_init_no_reconnect(int smb_command, int wct, struct cifsTconInfo *tcon,
+smb_init_no_reconnect(int smb_command, int wct, struct cifs_tcon *tcon,
                        void **request_buf, void **response_buf)
 {
        if (tcon->ses->need_reconnect || tcon->need_reconnect)
@@ -366,7 +367,7 @@ static inline void inc_rfc1001_len(void *pSMB, int count)
 }
 
 int
-CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
+CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
 {
        NEGOTIATE_REQ *pSMB;
        NEGOTIATE_RSP *pSMBr;
@@ -450,7 +451,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
                        rc = -EOPNOTSUPP;
                        goto neg_err_exit;
                }
-               server->secMode = (__u8)le16_to_cpu(rsp->SecurityMode);
+               server->sec_mode = (__u8)le16_to_cpu(rsp->SecurityMode);
                server->maxReq = le16_to_cpu(rsp->MaxMpxCount);
                server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize),
                                (__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
@@ -504,7 +505,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
                                cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) {
                        memcpy(ses->server->cryptkey, rsp->EncryptionKey,
                                CIFS_CRYPTO_KEY_SIZE);
-               } else if (server->secMode & SECMODE_PW_ENCRYPT) {
+               } else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
                        rc = -EIO; /* need cryptkey unless plain text */
                        goto neg_err_exit;
                }
@@ -526,11 +527,11 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
                goto neg_err_exit;
        }
        /* else wct == 17 NTLM */
-       server->secMode = pSMBr->SecurityMode;
-       if ((server->secMode & SECMODE_USER) == 0)
+       server->sec_mode = pSMBr->SecurityMode;
+       if ((server->sec_mode & SECMODE_USER) == 0)
                cFYI(1, "share mode security");
 
-       if ((server->secMode & SECMODE_PW_ENCRYPT) == 0)
+       if ((server->sec_mode & SECMODE_PW_ENCRYPT) == 0)
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
                if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0)
 #endif /* CIFS_WEAK_PW_HASH */
@@ -570,18 +571,10 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
        if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) {
                memcpy(ses->server->cryptkey, pSMBr->u.EncryptionKey,
                       CIFS_CRYPTO_KEY_SIZE);
-       } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC)
-                       && (pSMBr->EncryptionKeyLength == 0)) {
+       } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC ||
+                       server->capabilities & CAP_EXTENDED_SECURITY) &&
+                               (pSMBr->EncryptionKeyLength == 0)) {
                /* decode security blob */
-       } else if (server->secMode & SECMODE_PW_ENCRYPT) {
-               rc = -EIO; /* no crypt key only if plain text pwd */
-               goto neg_err_exit;
-       }
-
-       /* BB might be helpful to save off the domain of server here */
-
-       if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) &&
-               (server->capabilities & CAP_EXTENDED_SECURITY)) {
                count = get_bcc(&pSMBr->hdr);
                if (count < 16) {
                        rc = -EIO;
@@ -624,6 +617,9 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
                        } else
                                        rc = -EOPNOTSUPP;
                }
+       } else if (server->sec_mode & SECMODE_PW_ENCRYPT) {
+               rc = -EIO; /* no crypt key only if plain text pwd */
+               goto neg_err_exit;
        } else
                server->capabilities &= ~CAP_EXTENDED_SECURITY;
 
@@ -634,27 +630,27 @@ signing_check:
                /* MUST_SIGN already includes the MAY_SIGN FLAG
                   so if this is zero it means that signing is disabled */
                cFYI(1, "Signing disabled");
-               if (server->secMode & SECMODE_SIGN_REQUIRED) {
+               if (server->sec_mode & SECMODE_SIGN_REQUIRED) {
                        cERROR(1, "Server requires "
                                   "packet signing to be enabled in "
                                   "/proc/fs/cifs/SecurityFlags.");
                        rc = -EOPNOTSUPP;
                }
-               server->secMode &=
+               server->sec_mode &=
                        ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
        } else if ((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) {
                /* signing required */
                cFYI(1, "Must sign - secFlags 0x%x", secFlags);
-               if ((server->secMode &
+               if ((server->sec_mode &
                        (SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) {
                        cERROR(1, "signing required but server lacks support");
                        rc = -EOPNOTSUPP;
                } else
-                       server->secMode |= SECMODE_SIGN_REQUIRED;
+                       server->sec_mode |= SECMODE_SIGN_REQUIRED;
        } else {
                /* signing optional ie CIFSSEC_MAY_SIGN */
-               if ((server->secMode & SECMODE_SIGN_REQUIRED) == 0)
-                       server->secMode &=
+               if ((server->sec_mode & SECMODE_SIGN_REQUIRED) == 0)
+                       server->sec_mode &=
                                ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
        }
 
@@ -666,7 +662,7 @@ neg_err_exit:
 }
 
 int
-CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon)
+CIFSSMBTDis(const int xid, struct cifs_tcon *tcon)
 {
        struct smb_hdr *smb_buffer;
        int rc = 0;
@@ -725,6 +721,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
 {
        ECHO_REQ *smb;
        int rc = 0;
+       struct kvec iov;
 
        cFYI(1, "In echo request");
 
@@ -739,9 +736,10 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
        put_bcc(1, &smb->hdr);
        smb->Data[0] = 'a';
        inc_rfc1001_len(smb, 3);
+       iov.iov_base = smb;
+       iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;
 
-       rc = cifs_call_async(server, (struct smb_hdr *)smb,
-                               cifs_echo_callback, server);
+       rc = cifs_call_async(server, &iov, 1, cifs_echo_callback, server, true);
        if (rc)
                cFYI(1, "Echo request failed: %d", rc);
 
@@ -751,7 +749,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
 }
 
 int
-CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
+CIFSSMBLogoff(const int xid, struct cifs_ses *ses)
 {
        LOGOFF_ANDX_REQ *pSMB;
        int rc = 0;
@@ -778,7 +776,7 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses)
 
        pSMB->hdr.Mid = GetNextMid(ses->server);
 
-       if (ses->server->secMode &
+       if (ses->server->sec_mode &
                   (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
                        pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 
@@ -798,7 +796,7 @@ session_already_dead:
 }
 
 int
-CIFSPOSIXDelFile(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+CIFSPOSIXDelFile(const int xid, struct cifs_tcon *tcon, const char *fileName,
                 __u16 type, const struct nls_table *nls_codepage, int remap)
 {
        TRANSACTION2_SPI_REQ *pSMB = NULL;
@@ -873,7 +871,7 @@ PsxDelete:
 }
 
 int
-CIFSSMBDelFile(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+CIFSSMBDelFile(const int xid, struct cifs_tcon *tcon, const char *fileName,
               const struct nls_table *nls_codepage, int remap)
 {
        DELETE_FILE_REQ *pSMB = NULL;
@@ -918,7 +916,7 @@ DelFileRetry:
 }
 
 int
-CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon, const char *dirName,
+CIFSSMBRmDir(const int xid, struct cifs_tcon *tcon, const char *dirName,
             const struct nls_table *nls_codepage, int remap)
 {
        DELETE_DIRECTORY_REQ *pSMB = NULL;
@@ -961,7 +959,7 @@ RmDirRetry:
 }
 
 int
-CIFSSMBMkDir(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBMkDir(const int xid, struct cifs_tcon *tcon,
             const char *name, const struct nls_table *nls_codepage, int remap)
 {
        int rc = 0;
@@ -1004,7 +1002,7 @@ MkDirRetry:
 }
 
 int
-CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, __u32 posix_flags,
+CIFSPOSIXCreate(const int xid, struct cifs_tcon *tcon, __u32 posix_flags,
                __u64 mode, __u16 *netfid, FILE_UNIX_BASIC_INFO *pRetData,
                __u32 *pOplock, const char *name,
                const struct nls_table *nls_codepage, int remap)
@@ -1170,7 +1168,7 @@ access_flags_to_smbopen_mode(const int access_flags)
 }
 
 int
-SMBLegacyOpen(const int xid, struct cifsTconInfo *tcon,
+SMBLegacyOpen(const int xid, struct cifs_tcon *tcon,
            const char *fileName, const int openDisposition,
            const int access_flags, const int create_options, __u16 *netfid,
            int *pOplock, FILE_ALL_INFO *pfile_info,
@@ -1277,7 +1275,7 @@ OldOpenRetry:
 }
 
 int
-CIFSSMBOpen(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBOpen(const int xid, struct cifs_tcon *tcon,
            const char *fileName, const int openDisposition,
            const int access_flags, const int create_options, __u16 *netfid,
            int *pOplock, FILE_ALL_INFO *pfile_info,
@@ -1379,8 +1377,7 @@ openRetry:
 }
 
 int
-CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
-           const unsigned int count, const __u64 lseek, unsigned int *nbytes,
+CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes,
            char **buf, int *pbuf_type)
 {
        int rc = -EACCES;
@@ -1390,13 +1387,18 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
        int wct;
        int resp_buf_type = 0;
        struct kvec iov[1];
+       __u32 pid = io_parms->pid;
+       __u16 netfid = io_parms->netfid;
+       __u64 offset = io_parms->offset;
+       struct cifs_tcon *tcon = io_parms->tcon;
+       unsigned int count = io_parms->length;
 
        cFYI(1, "Reading %d bytes on fid %d", count, netfid);
        if (tcon->ses->capabilities & CAP_LARGE_FILES)
                wct = 12;
        else {
                wct = 10; /* old style read */
-               if ((lseek >> 32) > 0)  {
+               if ((offset >> 32) > 0)  {
                        /* can not handle this big offset for old */
                        return -EIO;
                }
@@ -1407,15 +1409,18 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
        if (rc)
                return rc;
 
+       pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
+       pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));
+
        /* tcon and ses pointer are checked in smb_init */
        if (tcon->ses->server == NULL)
                return -ECONNABORTED;
 
        pSMB->AndXCommand = 0xFF;       /* none */
        pSMB->Fid = netfid;
-       pSMB->OffsetLow = cpu_to_le32(lseek & 0xFFFFFFFF);
+       pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF);
        if (wct == 12)
-               pSMB->OffsetHigh = cpu_to_le32(lseek >> 32);
+               pSMB->OffsetHigh = cpu_to_le32(offset >> 32);
 
        pSMB->Remaining = 0;
        pSMB->MaxCount = cpu_to_le16(count & 0xFFFF);
@@ -1484,9 +1489,8 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid,
 
 
 int
-CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
-            const int netfid, const unsigned int count,
-            const __u64 offset, unsigned int *nbytes, const char *buf,
+CIFSSMBWrite(const int xid, struct cifs_io_parms *io_parms,
+            unsigned int *nbytes, const char *buf,
             const char __user *ubuf, const int long_op)
 {
        int rc = -EACCES;
@@ -1495,6 +1499,11 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
        int bytes_returned, wct;
        __u32 bytes_sent;
        __u16 byte_count;
+       __u32 pid = io_parms->pid;
+       __u16 netfid = io_parms->netfid;
+       __u64 offset = io_parms->offset;
+       struct cifs_tcon *tcon = io_parms->tcon;
+       unsigned int count = io_parms->length;
 
        *nbytes = 0;
 
@@ -1516,6 +1525,10 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
                      (void **) &pSMBr);
        if (rc)
                return rc;
+
+       pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
+       pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));
+
        /* tcon and ses pointer are checked in smb_init */
        if (tcon->ses->server == NULL)
                return -ECONNABORTED;
@@ -1602,17 +1615,259 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
        return rc;
 }
 
+void
+cifs_writedata_release(struct kref *refcount)
+{
+       struct cifs_writedata *wdata = container_of(refcount,
+                                       struct cifs_writedata, refcount);
+
+       if (wdata->cfile)
+               cifsFileInfo_put(wdata->cfile);
+
+       kfree(wdata);
+}
+
+/*
+ * Write failed with a retryable error. Resend the write request. It's also
+ * possible that the page was redirtied so re-clean the page.
+ */
+static void
+cifs_writev_requeue(struct cifs_writedata *wdata)
+{
+       int i, rc;
+       struct inode *inode = wdata->cfile->dentry->d_inode;
+
+       for (i = 0; i < wdata->nr_pages; i++) {
+               lock_page(wdata->pages[i]);
+               clear_page_dirty_for_io(wdata->pages[i]);
+       }
+
+       do {
+               rc = cifs_async_writev(wdata);
+       } while (rc == -EAGAIN);
+
+       for (i = 0; i < wdata->nr_pages; i++) {
+               if (rc != 0)
+                       SetPageError(wdata->pages[i]);
+               unlock_page(wdata->pages[i]);
+       }
+
+       mapping_set_error(inode->i_mapping, rc);
+       kref_put(&wdata->refcount, cifs_writedata_release);
+}
+
+static void
+cifs_writev_complete(struct work_struct *work)
+{
+       struct cifs_writedata *wdata = container_of(work,
+                                               struct cifs_writedata, work);
+       struct inode *inode = wdata->cfile->dentry->d_inode;
+       int i = 0;
+
+       if (wdata->result == 0) {
+               cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes);
+               cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink),
+                                        wdata->bytes);
+       } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN)
+               return cifs_writev_requeue(wdata);
+
+       for (i = 0; i < wdata->nr_pages; i++) {
+               struct page *page = wdata->pages[i];
+               if (wdata->result == -EAGAIN)
+                       __set_page_dirty_nobuffers(page);
+               else if (wdata->result < 0)
+                       SetPageError(page);
+               end_page_writeback(page);
+               page_cache_release(page);
+       }
+       if (wdata->result != -EAGAIN)
+               mapping_set_error(inode->i_mapping, wdata->result);
+       kref_put(&wdata->refcount, cifs_writedata_release);
+}
+
+struct cifs_writedata *
+cifs_writedata_alloc(unsigned int nr_pages)
+{
+       struct cifs_writedata *wdata;
+
+       /* this would overflow */
+       if (nr_pages == 0) {
+               cERROR(1, "%s: called with nr_pages == 0!", __func__);
+               return NULL;
+       }
+
+       /* writedata + number of page pointers */
+       wdata = kzalloc(sizeof(*wdata) +
+                       sizeof(struct page *) * (nr_pages - 1), GFP_NOFS);
+       if (wdata != NULL) {
+               INIT_WORK(&wdata->work, cifs_writev_complete);
+               kref_init(&wdata->refcount);
+       }
+       return wdata;
+}
+
+/*
+ * Check the midState and signature on received buffer (if any), and queue the
+ * workqueue completion task.
+ */
+static void
+cifs_writev_callback(struct mid_q_entry *mid)
+{
+       struct cifs_writedata *wdata = mid->callback_data;
+       struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
+       unsigned int written;
+       WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;
+
+       switch (mid->midState) {
+       case MID_RESPONSE_RECEIVED:
+               wdata->result = cifs_check_receive(mid, tcon->ses->server, 0);
+               if (wdata->result != 0)
+                       break;
+
+               written = le16_to_cpu(smb->CountHigh);
+               written <<= 16;
+               written += le16_to_cpu(smb->Count);
+               /*
+                * Mask off high 16 bits when bytes written as returned
+                * by the server is greater than bytes requested by the
+                * client. OS/2 servers are known to set incorrect
+                * CountHigh values.
+                */
+               if (written > wdata->bytes)
+                       written &= 0xFFFF;
+
+               if (written < wdata->bytes)
+                       wdata->result = -ENOSPC;
+               else
+                       wdata->bytes = written;
+               break;
+       case MID_REQUEST_SUBMITTED:
+       case MID_RETRY_NEEDED:
+               wdata->result = -EAGAIN;
+               break;
+       default:
+               wdata->result = -EIO;
+               break;
+       }
+
+       queue_work(system_nrt_wq, &wdata->work);
+       DeleteMidQEntry(mid);
+       atomic_dec(&tcon->ses->server->inFlight);
+       wake_up(&tcon->ses->server->request_q);
+}
+
+/* cifs_async_writev - send an async write, and set up mid to handle result */
+int
+cifs_async_writev(struct cifs_writedata *wdata)
+{
+       int i, rc = -EACCES;
+       WRITE_REQ *smb = NULL;
+       int wct;
+       struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
+       struct inode *inode = wdata->cfile->dentry->d_inode;
+       struct kvec *iov = NULL;
+
+       if (tcon->ses->capabilities & CAP_LARGE_FILES) {
+               wct = 14;
+       } else {
+               wct = 12;
+               if (wdata->offset >> 32 > 0) {
+                       /* can not handle big offset for old srv */
+                       return -EIO;
+               }
+       }
+
+       rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **)&smb);
+       if (rc)
+               goto async_writev_out;
+
+       /* 1 iov per page + 1 for header */
+       iov = kzalloc((wdata->nr_pages + 1) * sizeof(*iov), GFP_NOFS);
+       if (iov == NULL) {
+               rc = -ENOMEM;
+               goto async_writev_out;
+       }
+
+       smb->hdr.Pid = cpu_to_le16((__u16)wdata->cfile->pid);
+       smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->cfile->pid >> 16));
+
+       smb->AndXCommand = 0xFF;        /* none */
+       smb->Fid = wdata->cfile->netfid;
+       smb->OffsetLow = cpu_to_le32(wdata->offset & 0xFFFFFFFF);
+       if (wct == 14)
+               smb->OffsetHigh = cpu_to_le32(wdata->offset >> 32);
+       smb->Reserved = 0xFFFFFFFF;
+       smb->WriteMode = 0;
+       smb->Remaining = 0;
+
+       smb->DataOffset =
+           cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4);
+
+       /* 4 for RFC1001 length + 1 for BCC */
+       iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4 + 1;
+       iov[0].iov_base = smb;
+
+       /* marshal up the pages into iov array */
+       wdata->bytes = 0;
+       for (i = 0; i < wdata->nr_pages; i++) {
+               iov[i + 1].iov_len = min(inode->i_size -
+                                     page_offset(wdata->pages[i]),
+                                       (loff_t)PAGE_CACHE_SIZE);
+               iov[i + 1].iov_base = kmap(wdata->pages[i]);
+               wdata->bytes += iov[i + 1].iov_len;
+       }
+
+       cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes);
+
+       smb->DataLengthLow = cpu_to_le16(wdata->bytes & 0xFFFF);
+       smb->DataLengthHigh = cpu_to_le16(wdata->bytes >> 16);
+
+       if (wct == 14) {
+               inc_rfc1001_len(&smb->hdr, wdata->bytes + 1);
+               put_bcc(wdata->bytes + 1, &smb->hdr);
+       } else {
+               /* wct == 12 */
+               struct smb_com_writex_req *smbw =
+                               (struct smb_com_writex_req *)smb;
+               inc_rfc1001_len(&smbw->hdr, wdata->bytes + 5);
+               put_bcc(wdata->bytes + 5, &smbw->hdr);
+               iov[0].iov_len += 4; /* pad bigger by four bytes */
+       }
+
+       kref_get(&wdata->refcount);
+       rc = cifs_call_async(tcon->ses->server, iov, wdata->nr_pages + 1,
+                            cifs_writev_callback, wdata, false);
+
+       if (rc == 0)
+               cifs_stats_inc(&tcon->num_writes);
+       else
+               kref_put(&wdata->refcount, cifs_writedata_release);
+
+       /* send is done, unmap pages */
+       for (i = 0; i < wdata->nr_pages; i++)
+               kunmap(wdata->pages[i]);
+
+async_writev_out:
+       cifs_small_buf_release(smb);
+       kfree(iov);
+       return rc;
+}
+
 int
-CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
-            const int netfid, const unsigned int count,
-            const __u64 offset, unsigned int *nbytes, struct kvec *iov,
-            int n_vec, const int long_op)
+CIFSSMBWrite2(const int xid, struct cifs_io_parms *io_parms,
+             unsigned int *nbytes, struct kvec *iov, int n_vec,
+             const int long_op)
 {
        int rc = -EACCES;
        WRITE_REQ *pSMB = NULL;
        int wct;
        int smb_hdr_len;
        int resp_buf_type = 0;
+       __u32 pid = io_parms->pid;
+       __u16 netfid = io_parms->netfid;
+       __u64 offset = io_parms->offset;
+       struct cifs_tcon *tcon = io_parms->tcon;
+       unsigned int count = io_parms->length;
 
        *nbytes = 0;
 
@@ -1630,6 +1885,10 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
        rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB);
        if (rc)
                return rc;
+
+       pSMB->hdr.Pid = cpu_to_le16((__u16)pid);
+       pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16));
+
        /* tcon and ses pointer are checked in smb_init */
        if (tcon->ses->server == NULL)
                return -ECONNABORTED;
@@ -1705,7 +1964,7 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
 
 
 int
-CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
            const __u16 smb_file_id, const __u64 len,
            const __u64 offset, const __u32 numUnlock,
            const __u32 numLock, const __u8 lockType,
@@ -1775,7 +2034,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
 }
 
 int
-CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
                const __u16 smb_file_id, const int get_flag, const __u64 len,
                struct file_lock *pLockData, const __u16 lock_type,
                const bool waitFlag)
@@ -1913,7 +2172,7 @@ plk_err_exit:
 
 
 int
-CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
+CIFSSMBClose(const int xid, struct cifs_tcon *tcon, int smb_file_id)
 {
        int rc = 0;
        CLOSE_REQ *pSMB = NULL;
@@ -1946,7 +2205,7 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
 }
 
 int
-CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
+CIFSSMBFlush(const int xid, struct cifs_tcon *tcon, int smb_file_id)
 {
        int rc = 0;
        FLUSH_REQ *pSMB = NULL;
@@ -1967,7 +2226,7 @@ CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
 }
 
 int
-CIFSSMBRename(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBRename(const int xid, struct cifs_tcon *tcon,
              const char *fromName, const char *toName,
              const struct nls_table *nls_codepage, int remap)
 {
@@ -2034,7 +2293,7 @@ renameRetry:
        return rc;
 }
 
-int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
+int CIFSSMBRenameOpenFile(const int xid, struct cifs_tcon *pTcon,
                int netfid, const char *target_name,
                const struct nls_table *nls_codepage, int remap)
 {
@@ -2114,7 +2373,7 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
 }
 
 int
-CIFSSMBCopy(const int xid, struct cifsTconInfo *tcon, const char *fromName,
+CIFSSMBCopy(const int xid, struct cifs_tcon *tcon, const char *fromName,
            const __u16 target_tid, const char *toName, const int flags,
            const struct nls_table *nls_codepage, int remap)
 {
@@ -2182,7 +2441,7 @@ copyRetry:
 }
 
 int
-CIFSUnixCreateSymLink(const int xid, struct cifsTconInfo *tcon,
+CIFSUnixCreateSymLink(const int xid, struct cifs_tcon *tcon,
                      const char *fromName, const char *toName,
                      const struct nls_table *nls_codepage)
 {
@@ -2271,7 +2530,7 @@ createSymLinkRetry:
 }
 
 int
-CIFSUnixCreateHardLink(const int xid, struct cifsTconInfo *tcon,
+CIFSUnixCreateHardLink(const int xid, struct cifs_tcon *tcon,
                       const char *fromName, const char *toName,
                       const struct nls_table *nls_codepage, int remap)
 {
@@ -2356,7 +2615,7 @@ createHardLinkRetry:
 }
 
 int
-CIFSCreateHardLink(const int xid, struct cifsTconInfo *tcon,
+CIFSCreateHardLink(const int xid, struct cifs_tcon *tcon,
                   const char *fromName, const char *toName,
                   const struct nls_table *nls_codepage, int remap)
 {
@@ -2428,7 +2687,7 @@ winCreateHardLinkRetry:
 }
 
 int
-CIFSSMBUnixQuerySymLink(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBUnixQuerySymLink(const int xid, struct cifs_tcon *tcon,
                        const unsigned char *searchName, char **symlinkinfo,
                        const struct nls_table *nls_codepage)
 {
@@ -2533,7 +2792,7 @@ querySymLinkRetry:
  *     it is not compiled in by default until callers fixed up and more tested.
  */
 int
-CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQueryReparseLinkInfo(const int xid, struct cifs_tcon *tcon,
                        const unsigned char *searchName,
                        char *symlinkinfo, const int buflen, __u16 fid,
                        const struct nls_table *nls_codepage)
@@ -2771,7 +3030,7 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
 }
 
 int
-CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBGetPosixACL(const int xid, struct cifs_tcon *tcon,
                   const unsigned char *searchName,
                   char *acl_inf, const int buflen, const int acl_type,
                   const struct nls_table *nls_codepage, int remap)
@@ -2859,7 +3118,7 @@ queryAclRetry:
 }
 
 int
-CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBSetPosixACL(const int xid, struct cifs_tcon *tcon,
                   const unsigned char *fileName,
                   const char *local_acl, const int buflen,
                   const int acl_type,
@@ -2939,7 +3198,7 @@ setACLerrorExit:
 
 /* BB fix tabs in this function FIXME BB */
 int
-CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon,
+CIFSGetExtAttr(const int xid, struct cifs_tcon *tcon,
               const int netfid, __u64 *pExtAttrBits, __u64 *pMask)
 {
        int rc = 0;
@@ -3032,7 +3291,7 @@ GetExtAttrOut:
  */
 static int
 smb_init_nttransact(const __u16 sub_command, const int setup_count,
-                  const int parm_len, struct cifsTconInfo *tcon,
+                  const int parm_len, struct cifs_tcon *tcon,
                   void **ret_buf)
 {
        int rc;
@@ -3115,7 +3374,7 @@ validate_ntransact(char *buf, char **ppparm, char **ppdata,
 
 /* Get Security Descriptor (by handle) from remote server for a file or dir */
 int
-CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
+CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid,
                  struct cifs_ntsd **acl_inf, __u32 *pbuflen)
 {
        int rc = 0;
@@ -3207,7 +3466,7 @@ qsec_out:
 }
 
 int
-CIFSSMBSetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
+CIFSSMBSetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid,
                        struct cifs_ntsd *pntsd, __u32 acllen)
 {
        __u16 byte_count, param_count, data_count, param_offset, data_offset;
@@ -3273,7 +3532,7 @@ setCifsAclRetry:
 
 /* Legacy Query Path Information call for lookup to old servers such
    as Win9x/WinME */
-int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
+int SMBQueryInformation(const int xid, struct cifs_tcon *tcon,
                        const unsigned char *searchName,
                        FILE_ALL_INFO *pFinfo,
                        const struct nls_table *nls_codepage, int remap)
@@ -3341,7 +3600,7 @@ QInfRetry:
 }
 
 int
-CIFSSMBQFileInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQFileInfo(const int xid, struct cifs_tcon *tcon,
                 u16 netfid, FILE_ALL_INFO *pFindData)
 {
        struct smb_t2_qfi_req *pSMB = NULL;
@@ -3408,7 +3667,7 @@ QFileInfoRetry:
 }
 
 int
-CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQPathInfo(const int xid, struct cifs_tcon *tcon,
                 const unsigned char *searchName,
                 FILE_ALL_INFO *pFindData,
                 int legacy /* old style infolevel */,
@@ -3509,7 +3768,7 @@ QPathInfoRetry:
 }
 
 int
-CIFSSMBUnixQFileInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBUnixQFileInfo(const int xid, struct cifs_tcon *tcon,
                 u16 netfid, FILE_UNIX_BASIC_INFO *pFindData)
 {
        struct smb_t2_qfi_req *pSMB = NULL;
@@ -3578,7 +3837,7 @@ UnixQFileInfoRetry:
 }
 
 int
-CIFSSMBUnixQPathInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBUnixQPathInfo(const int xid, struct cifs_tcon *tcon,
                     const unsigned char *searchName,
                     FILE_UNIX_BASIC_INFO *pFindData,
                     const struct nls_table *nls_codepage, int remap)
@@ -3664,7 +3923,7 @@ UnixQPathInfoRetry:
 
 /* xid, tcon, searchName and codepage are input parms, rest are returned */
 int
-CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
+CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
              const char *searchName,
              const struct nls_table *nls_codepage,
              __u16 *pnetfid,
@@ -3812,7 +4071,7 @@ findFirstRetry:
        return rc;
 }
 
-int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
+int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
                 __u16 searchHandle, struct cifs_search_info *psrch_inf)
 {
        TRANSACTION2_FNEXT_REQ *pSMB = NULL;
@@ -3950,7 +4209,7 @@ FNext2_err_exit:
 }
 
 int
-CIFSFindClose(const int xid, struct cifsTconInfo *tcon,
+CIFSFindClose(const int xid, struct cifs_tcon *tcon,
              const __u16 searchHandle)
 {
        int rc = 0;
@@ -3982,7 +4241,7 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon,
 }
 
 int
-CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
+CIFSGetSrvInodeNumber(const int xid, struct cifs_tcon *tcon,
                      const unsigned char *searchName,
                      __u64 *inode_number,
                      const struct nls_table *nls_codepage, int remap)
@@ -4184,7 +4443,7 @@ parse_DFS_referrals_exit:
 }
 
 int
-CIFSGetDFSRefer(const int xid, struct cifsSesInfo *ses,
+CIFSGetDFSRefer(const int xid, struct cifs_ses *ses,
                const unsigned char *searchName,
                struct dfs_info3_param **target_nodes,
                unsigned int *num_of_nodes,
@@ -4233,7 +4492,7 @@ getDFSRetry:
        }
 
        if (ses->server) {
-               if (ses->server->secMode &
+               if (ses->server->sec_mode &
                   (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
                        pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
        }
@@ -4298,7 +4557,7 @@ GetDFSRefExit:
 
 /* Query File System Info such as free space to old servers such as Win 9x */
 int
-SMBOldQFSInfo(const int xid, struct cifsTconInfo *tcon, struct kstatfs *FSData)
+SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon, struct kstatfs *FSData)
 {
 /* level 0x01 SMB_QUERY_FILE_SYSTEM_INFO */
        TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4377,7 +4636,7 @@ oldQFSInfoRetry:
 }
 
 int
-CIFSSMBQFSInfo(const int xid, struct cifsTconInfo *tcon, struct kstatfs *FSData)
+CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon, struct kstatfs *FSData)
 {
 /* level 0x103 SMB_QUERY_FILE_SYSTEM_INFO */
        TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4456,7 +4715,7 @@ QFSInfoRetry:
 }
 
 int
-CIFSSMBQFSAttributeInfo(const int xid, struct cifsTconInfo *tcon)
+CIFSSMBQFSAttributeInfo(const int xid, struct cifs_tcon *tcon)
 {
 /* level 0x105  SMB_QUERY_FILE_SYSTEM_INFO */
        TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4526,7 +4785,7 @@ QFSAttributeRetry:
 }
 
 int
-CIFSSMBQFSDeviceInfo(const int xid, struct cifsTconInfo *tcon)
+CIFSSMBQFSDeviceInfo(const int xid, struct cifs_tcon *tcon)
 {
 /* level 0x104 SMB_QUERY_FILE_SYSTEM_INFO */
        TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4597,7 +4856,7 @@ QFSDeviceRetry:
 }
 
 int
-CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon)
+CIFSSMBQFSUnixInfo(const int xid, struct cifs_tcon *tcon)
 {
 /* level 0x200  SMB_QUERY_CIFS_UNIX_INFO */
        TRANSACTION2_QFSI_REQ *pSMB = NULL;
@@ -4667,7 +4926,7 @@ QFSUnixRetry:
 }
 
 int
-CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon, __u64 cap)
+CIFSSMBSetFSUnixInfo(const int xid, struct cifs_tcon *tcon, __u64 cap)
 {
 /* level 0x200  SMB_SET_CIFS_UNIX_INFO */
        TRANSACTION2_SETFSI_REQ *pSMB = NULL;
@@ -4741,7 +5000,7 @@ SETFSUnixRetry:
 
 
 int
-CIFSSMBQFSPosixInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQFSPosixInfo(const int xid, struct cifs_tcon *tcon,
                   struct kstatfs *FSData)
 {
 /* level 0x201  SMB_QUERY_CIFS_POSIX_INFO */
@@ -4834,7 +5093,7 @@ QFSPosixRetry:
    in Samba which this routine can run into */
 
 int
-CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+CIFSSMBSetEOF(const int xid, struct cifs_tcon *tcon, const char *fileName,
              __u64 size, bool SetAllocation,
              const struct nls_table *nls_codepage, int remap)
 {
@@ -4923,7 +5182,7 @@ SetEOFRetry:
 }
 
 int
-CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
+CIFSSMBSetFileSize(const int xid, struct cifs_tcon *tcon, __u64 size,
                   __u16 fid, __u32 pid_of_opener, bool SetAllocation)
 {
        struct smb_com_transaction2_sfi_req *pSMB  = NULL;
@@ -5005,7 +5264,7 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
    time and resort to the original setpathinfo level which takes the ancient
    DOS time format with 2 second granularity */
 int
-CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon,
                    const FILE_BASIC_INFO *data, __u16 fid, __u32 pid_of_opener)
 {
        struct smb_com_transaction2_sfi_req *pSMB  = NULL;
@@ -5067,7 +5326,7 @@ CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon,
 }
 
 int
-CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBSetFileDisposition(const int xid, struct cifs_tcon *tcon,
                          bool delete_file, __u16 fid, __u32 pid_of_opener)
 {
        struct smb_com_transaction2_sfi_req *pSMB  = NULL;
@@ -5123,7 +5382,7 @@ CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon,
 }
 
 int
-CIFSSMBSetPathInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBSetPathInfo(const int xid, struct cifs_tcon *tcon,
                   const char *fileName, const FILE_BASIC_INFO *data,
                   const struct nls_table *nls_codepage, int remap)
 {
@@ -5207,7 +5466,7 @@ SetTimesRetry:
          handling it anyway and NT4 was what we thought it would be needed for
          Do not delete it until we prove whether needed for Win9x though */
 int
-CIFSSMBSetAttrLegacy(int xid, struct cifsTconInfo *tcon, char *fileName,
+CIFSSMBSetAttrLegacy(int xid, struct cifs_tcon *tcon, char *fileName,
                __u16 dos_attrs, const struct nls_table *nls_codepage)
 {
        SETATTR_REQ *pSMB = NULL;
@@ -5295,7 +5554,7 @@ cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset,
 }
 
 int
-CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon,
                       const struct cifs_unix_set_info_args *args,
                       u16 fid, u32 pid_of_opener)
 {
@@ -5358,7 +5617,7 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon,
 }
 
 int
-CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *tcon, char *fileName,
+CIFSSMBUnixSetPathInfo(const int xid, struct cifs_tcon *tcon, char *fileName,
                       const struct cifs_unix_set_info_args *args,
                       const struct nls_table *nls_codepage, int remap)
 {
@@ -5445,7 +5704,7 @@ setPermsRetry:
  * the data isn't copied to it, but the length is returned.
  */
 ssize_t
-CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon,
+CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon,
                const unsigned char *searchName, const unsigned char *ea_name,
                char *EAData, size_t buf_size,
                const struct nls_table *nls_codepage, int remap)
@@ -5626,7 +5885,7 @@ QAllEAsOut:
 }
 
 int
-CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, const char *fileName,
+CIFSSMBSetEA(const int xid, struct cifs_tcon *tcon, const char *fileName,
             const char *ea_name, const void *ea_value,
             const __u16 ea_value_len, const struct nls_table *nls_codepage,
             int remap)
@@ -5753,7 +6012,7 @@ SetEARetry:
  *     incompatible for network fs clients, we could instead simply
  *     expose this config flag by adding a future cifs (and smb2) notify ioctl.
  */
-int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon,
+int CIFSSMBNotify(const int xid, struct cifs_tcon *tcon,
                  const int notify_subdirs, const __u16 netfid,
                  __u32 filter, struct file *pfile, int multishot,
                  const struct nls_table *nls_codepage)
index da284e3cb6535e206b9d502e45fdd1c5b9adf5d4..6d88b82537c3d4cf899a5741c114bb50c61ad564 100644 (file)
 
 extern mempool_t *cifs_req_poolp;
 
-struct smb_vol {
-       char *username;
-       char *password;
-       char *domainname;
-       char *UNC;
-       char *UNCip;
-       char *iocharset;  /* local code page for mapping to and from Unicode */
-       char source_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* clnt nb name */
-       char target_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* srvr nb name */
-       uid_t cred_uid;
-       uid_t linux_uid;
-       gid_t linux_gid;
-       mode_t file_mode;
-       mode_t dir_mode;
-       unsigned secFlg;
-       bool retry:1;
-       bool intr:1;
-       bool setuids:1;
-       bool override_uid:1;
-       bool override_gid:1;
-       bool dynperm:1;
-       bool noperm:1;
-       bool no_psx_acl:1; /* set if posix acl support should be disabled */
-       bool cifs_acl:1;
-       bool no_xattr:1;   /* set if xattr (EA) support should be disabled*/
-       bool server_ino:1; /* use inode numbers from server ie UniqueId */
-       bool direct_io:1;
-       bool strict_io:1; /* strict cache behavior */
-       bool remap:1;      /* set to remap seven reserved chars in filenames */
-       bool posix_paths:1; /* unset to not ask for posix pathnames. */
-       bool no_linux_ext:1;
-       bool sfu_emul:1;
-       bool nullauth:1;   /* attempt to authenticate with null user */
-       bool nocase:1;     /* request case insensitive filenames */
-       bool nobrl:1;      /* disable sending byte range locks to srv */
-       bool mand_lock:1;  /* send mandatory not posix byte range lock reqs */
-       bool seal:1;       /* request transport encryption on share */
-       bool nodfs:1;      /* Do not request DFS, even if available */
-       bool local_lease:1; /* check leases only on local system, not remote */
-       bool noblocksnd:1;
-       bool noautotune:1;
-       bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
-       bool fsc:1;     /* enable fscache */
-       bool mfsymlinks:1; /* use Minshall+French Symlinks */
-       bool multiuser:1;
-       bool use_smb2:1; /* force smb2 use on mount instead of cifs */
-       unsigned int rsize;
-       unsigned int wsize;
-       bool sockopt_tcp_nodelay:1;
-       unsigned short int port;
-       unsigned long actimeo; /* attribute cache timeout (jiffies) */
-       char *prepath;
-       struct sockaddr_storage srcaddr; /* allow binding to a local IP */
-       struct nls_table *local_nls;
-};
-
 /* FIXME: should these be tunable? */
 #define TLINK_ERROR_EXPIRE     (1 * HZ)
 #define TLINK_IDLE_EXPIRE      (600 * HZ)
@@ -135,9 +79,10 @@ cifs_reconnect(struct TCP_Server_Info *server)
 {
        int rc = 0;
        struct list_head *tmp, *tmp2;
-       struct cifsSesInfo *ses;
-       struct cifsTconInfo *tcon;
+       struct cifs_ses *ses;
+       struct cifs_tcon *tcon;
        struct mid_q_entry *mid_entry;
+       struct list_head retry_list;
 
        spin_lock(&GlobalMid_Lock);
        if (server->tcpStatus == CifsExiting) {
@@ -157,11 +102,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
        cFYI(1, "%s: marking sessions and tcons for reconnect", __func__);
        spin_lock(&cifs_tcp_ses_lock);
        list_for_each(tmp, &server->smb_ses_list) {
-               ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
+               ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
                ses->need_reconnect = true;
                ses->ipc_tid = 0;
                list_for_each(tmp2, &ses->tcon_list) {
-                       tcon = list_entry(tmp2, struct cifsTconInfo, tcon_list);
+                       tcon = list_entry(tmp2, struct cifs_tcon, tcon_list);
                        tcon->need_reconnect = true;
                }
        }
@@ -189,16 +134,23 @@ cifs_reconnect(struct TCP_Server_Info *server)
        mutex_unlock(&server->srv_mutex);
 
        /* mark submitted MIDs for retry and issue callback */
-       cFYI(1, "%s: issuing mid callbacks", __func__);
+       INIT_LIST_HEAD(&retry_list);
+       cFYI(1, "%s: moving mids to private list", __func__);
        spin_lock(&GlobalMid_Lock);
        list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
                mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
                if (mid_entry->midState == MID_REQUEST_SUBMITTED)
                        mid_entry->midState = MID_RETRY_NEEDED;
+               list_move(&mid_entry->qhead, &retry_list);
+       }
+       spin_unlock(&GlobalMid_Lock);
+
+       cFYI(1, "%s: issuing mid callbacks", __func__);
+       list_for_each_safe(tmp, tmp2, &retry_list) {
+               mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
                list_del_init(&mid_entry->qhead);
                mid_entry->callback(mid_entry);
        }
-       spin_unlock(&GlobalMid_Lock);
 
        while (server->tcpStatus == CifsNeedReconnect) {
                try_to_freeze();
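
The hunk above reworks cifs_reconnect() so pending mids are first moved onto a private retry_list while GlobalMid_Lock is held, and the callbacks only run once the lock is dropped; the same shape reappears below in the demultiplex-thread teardown (dispose_list) and in the receive path, which now invokes mid_entry->callback() outside the spinlock. A rough userspace sketch of that pattern, with invented names (pending_q, retry_cb, struct entry) and a pthread mutex standing in for the spinlock; the kernel moves entries one at a time with list_move, while this sketch detaches the whole queue at once, which is the same idea in miniature:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
        int id;
        struct entry *next;
        void (*callback)(struct entry *);
};

static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *pending_q;         /* protected by pending_lock */

static void retry_cb(struct entry *e)
{
        printf("retrying mid %d\n", e->id);
        free(e);
}

static void reconnect_flush(void)
{
        struct entry *retry_list, *e;

        /* detach the whole queue while holding the lock ... */
        pthread_mutex_lock(&pending_lock);
        retry_list = pending_q;
        pending_q = NULL;
        pthread_mutex_unlock(&pending_lock);

        /* ... then issue the callbacks without holding it */
        while ((e = retry_list) != NULL) {
                retry_list = e->next;
                e->callback(e);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct entry *e = malloc(sizeof(*e));
                e->id = i;
                e->callback = retry_cb;
                pthread_mutex_lock(&pending_lock);
                e->next = pending_q;
                pending_q = e;
                pthread_mutex_unlock(&pending_lock);
        }
        reconnect_flush();
        return 0;
}
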
@@ -672,12 +624,12 @@ multi_t2_fnd:
                        mid_entry->when_received = jiffies;
 #endif
                        list_del_init(&mid_entry->qhead);
-                       mid_entry->callback(mid_entry);
                        break;
                }
                spin_unlock(&GlobalMid_Lock);
 
                if (mid_entry != NULL) {
+                       mid_entry->callback(mid_entry);
                        /* Was previous buf put in mpx struct for multi-rsp? */
                        if (!isMultiRsp) {
                                /* smb buffer will be freed by user thread */
@@ -741,15 +693,25 @@ multi_t2_fnd:
                cifs_small_buf_release(smallbuf);
 
        if (!list_empty(&server->pending_mid_q)) {
+               struct list_head dispose_list;
+
+               INIT_LIST_HEAD(&dispose_list);
                spin_lock(&GlobalMid_Lock);
                list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
                        mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
-                       cFYI(1, "Clearing Mid 0x%x - issuing callback",
-                                        mid_entry->mid);
+                       cFYI(1, "Clearing mid 0x%x", mid_entry->mid);
+                       mid_entry->midState = MID_SHUTDOWN;
+                       list_move(&mid_entry->qhead, &dispose_list);
+               }
+               spin_unlock(&GlobalMid_Lock);
+
+               /* now walk dispose list and issue callbacks */
+               list_for_each_safe(tmp, tmp2, &dispose_list) {
+                       mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+                       cFYI(1, "Callback mid 0x%x", mid_entry->mid);
                        list_del_init(&mid_entry->qhead);
                        mid_entry->callback(mid_entry);
                }
-               spin_unlock(&GlobalMid_Lock);
                /* 1/8th of sec is more than enough time for them to exit */
                msleep(125);
        }
@@ -1062,13 +1024,6 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                                   (strnicmp(value, "1", 1) == 0)) {
                                /* this is the default */
                                continue;
-                       } else if ((strnicmp(value, "smb2", 4) == 0) ||
-                                  (strnicmp(value, "2", 1) == 0)) {
-#ifdef CONFIG_CIFS_SMB2
-                               vol->use_smb2 = true;
-#else
-                               cERROR(1, "smb2 support not enabled");
-#endif /* CONFIG_CIFS_SMB2 */
                        }
                } else if ((strnicmp(data, "unc", 3) == 0)
                           || (strnicmp(data, "target", 6) == 0)
@@ -1404,6 +1359,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                        vol->server_ino = 1;
                } else if (strnicmp(data, "noserverino", 9) == 0) {
                        vol->server_ino = 0;
+               } else if (strnicmp(data, "rwpidforward", 4) == 0) {
+                       vol->rwpidforward = 1;
                } else if (strnicmp(data, "cifsacl", 7) == 0) {
                        vol->cifs_acl = 1;
                } else if (strnicmp(data, "nocifsacl", 9) == 0) {
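
For reference, the option handling above follows the usual cifs_parse_mount_options() shape: the mount data is split into comma-separated tokens and each token is matched case-insensitively against a known name, setting a flag in smb_vol (the new rwpidforward token sets vol->rwpidforward). A minimal userspace approximation, with strtok/strncasecmp standing in for the kernel's tokenizer and strnicmp, and a toy opts struct instead of smb_vol:

#include <stdio.h>
#include <string.h>
#include <strings.h>

struct opts { int server_ino; int rwpidforward; int cifs_acl; };

/* Walk comma-separated tokens and set the matching flags. */
static void parse(char *data, struct opts *o)
{
        char *tok;

        for (tok = strtok(data, ","); tok != NULL; tok = strtok(NULL, ",")) {
                if (strncasecmp(tok, "noserverino", 11) == 0)
                        o->server_ino = 0;
                else if (strncasecmp(tok, "serverino", 9) == 0)
                        o->server_ino = 1;
                else if (strncasecmp(tok, "rwpidforward", 12) == 0)
                        o->rwpidforward = 1;
                else if (strncasecmp(tok, "cifsacl", 7) == 0)
                        o->cifs_acl = 1;
        }
}

int main(void)
{
        char line[] = "serverino,rwpidforward,cifsacl";
        struct opts o = { 0, 0, 0 };

        parse(line, &o);
        printf("server_ino=%d rwpidforward=%d cifs_acl=%d\n",
               o.server_ino, o.rwpidforward, o.cifs_acl);
        return 0;
}
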
@@ -1640,16 +1597,35 @@ match_security(struct TCP_Server_Info *server, struct smb_vol *vol)
 
        /* now check if signing mode is acceptable */
        if ((secFlags & CIFSSEC_MAY_SIGN) == 0 &&
-           (server->secMode & SECMODE_SIGN_REQUIRED))
+           (server->sec_mode & SECMODE_SIGN_REQUIRED))
                        return false;
        else if (((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) &&
-                (server->secMode &
+                (server->sec_mode &
                  (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED)) == 0)
                        return false;
 
        return true;
 }
 
+static int match_server(struct TCP_Server_Info *server, struct sockaddr *addr,
+                        struct smb_vol *vol)
+{
+       if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
+               return 0;
+
+       if (!match_address(server, addr,
+                          (struct sockaddr *)&vol->srcaddr))
+               return 0;
+
+       if (!match_port(server, addr))
+               return 0;
+
+       if (!match_security(server, vol))
+               return 0;
+
+       return 1;
+}
+
 static struct TCP_Server_Info *
 cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol)
 {
@@ -1657,17 +1633,7 @@ cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol)
 
        spin_lock(&cifs_tcp_ses_lock);
        list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
-               if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
-                       continue;
-
-               if (!match_address(server, addr,
-                                  (struct sockaddr *)&vol->srcaddr))
-                       continue;
-
-               if (!match_port(server, addr))
-                       continue;
-
-               if (!match_security(server, vol))
+               if (!match_server(server, addr, vol))
                        continue;
 
                ++server->srv_count;
@@ -1861,32 +1827,39 @@ out_err:
        return ERR_PTR(rc);
 }
 
-static struct cifsSesInfo *
+static int match_session(struct cifs_ses *ses, struct smb_vol *vol)
+{
+       switch (ses->server->secType) {
+       case Kerberos:
+               if (vol->cred_uid != ses->cred_uid)
+                       return 0;
+               break;
+       default:
+               /* anything else takes username/password */
+               if (ses->user_name == NULL)
+                       return 0;
+               if (strncmp(ses->user_name, vol->username,
+                           MAX_USERNAME_SIZE))
+                       return 0;
+               if (strlen(vol->username) != 0 &&
+                   ses->password != NULL &&
+                   strncmp(ses->password,
+                           vol->password ? vol->password : "",
+                           MAX_PASSWORD_SIZE))
+                       return 0;
+       }
+       return 1;
+}
+
+static struct cifs_ses *
 cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
 {
-       struct cifsSesInfo *ses;
+       struct cifs_ses *ses;
 
        spin_lock(&cifs_tcp_ses_lock);
        list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
-               switch (server->secType) {
-               case Kerberos:
-                       if (vol->cred_uid != ses->cred_uid)
-                               continue;
-                       break;
-               default:
-                       /* anything else takes username/password */
-                       if (ses->user_name == NULL)
-                               continue;
-                       if (strncmp(ses->user_name, vol->username,
-                                   MAX_USERNAME_SIZE))
-                               continue;
-                       if (strlen(vol->username) != 0 &&
-                           ses->password != NULL &&
-                           strncmp(ses->password,
-                                   vol->password ? vol->password : "",
-                                   MAX_PASSWORD_SIZE))
-                               continue;
-               }
+               if (!match_session(ses, vol))
+                       continue;
                ++ses->ses_count;
                spin_unlock(&cifs_tcp_ses_lock);
                return ses;
@@ -1896,7 +1869,7 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol)
 }
 
 static void
-cifs_put_smb_ses(struct cifsSesInfo *ses)
+cifs_put_smb_ses(struct cifs_ses *ses)
 {
        int xid;
        struct TCP_Server_Info *server = ses->server;
@@ -1922,11 +1895,11 @@ cifs_put_smb_ses(struct cifsSesInfo *ses)
 
 static bool warned_on_ntlm;  /* globals init to false automatically */
 
-static struct cifsSesInfo *
+static struct cifs_ses *
 cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
 {
        int rc = -ENOMEM, xid;
-       struct cifsSesInfo *ses;
+       struct cifs_ses *ses;
        struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr;
        struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr;
 
@@ -2029,20 +2002,26 @@ get_ses_fail:
        return ERR_PTR(rc);
 }
 
-static struct cifsTconInfo *
-cifs_find_tcon(struct cifsSesInfo *ses, const char *unc)
+static int match_tcon(struct cifs_tcon *tcon, const char *unc)
+{
+       if (tcon->tidStatus == CifsExiting)
+               return 0;
+       if (strncmp(tcon->treeName, unc, MAX_TREE_SIZE))
+               return 0;
+       return 1;
+}
+
+static struct cifs_tcon *
+cifs_find_tcon(struct cifs_ses *ses, const char *unc)
 {
        struct list_head *tmp;
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
 
        spin_lock(&cifs_tcp_ses_lock);
        list_for_each(tmp, &ses->tcon_list) {
-               tcon = list_entry(tmp, struct cifsTconInfo, tcon_list);
-               if (tcon->tidStatus == CifsExiting)
-                       continue;
-               if (strncmp(tcon->treeName, unc, MAX_TREE_SIZE))
+               tcon = list_entry(tmp, struct cifs_tcon, tcon_list);
+               if (!match_tcon(tcon, unc))
                        continue;
-
                ++tcon->tc_count;
                spin_unlock(&cifs_tcp_ses_lock);
                return tcon;
@@ -2052,10 +2031,10 @@ cifs_find_tcon(struct cifsSesInfo *ses, const char *unc)
 }
 
 static void
-cifs_put_tcon(struct cifsTconInfo *tcon)
+cifs_put_tcon(struct cifs_tcon *tcon)
 {
        int xid;
-       struct cifsSesInfo *ses = tcon->ses;
+       struct cifs_ses *ses = tcon->ses;
 
        cFYI(1, "%s: tc_count=%d\n", __func__, tcon->tc_count);
        spin_lock(&cifs_tcp_ses_lock);
@@ -2076,11 +2055,11 @@ cifs_put_tcon(struct cifsTconInfo *tcon)
        cifs_put_smb_ses(ses);
 }
 
-static struct cifsTconInfo *
-cifs_get_tcon(struct cifsSesInfo *ses, struct smb_vol *volume_info)
+static struct cifs_tcon *
+cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
 {
        int rc, xid;
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
 
        tcon = cifs_find_tcon(ses, volume_info->UNC);
        if (tcon) {
@@ -2169,8 +2148,102 @@ cifs_put_tlink(struct tcon_link *tlink)
        return;
 }
 
+static inline struct tcon_link *
+cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb);
+
+static int
+compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
+{
+       struct cifs_sb_info *old = CIFS_SB(sb);
+       struct cifs_sb_info *new = mnt_data->cifs_sb;
+
+       if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
+               return 0;
+
+       if ((old->mnt_cifs_flags & CIFS_MOUNT_MASK) !=
+           (new->mnt_cifs_flags & CIFS_MOUNT_MASK))
+               return 0;
+
+       if (old->rsize != new->rsize)
+               return 0;
+
+       /*
+        * We want to share the sb only if no wsize was specified, or the
+        * specified wsize is greater than or equal to the existing one.
+        */
+       if (new->wsize && new->wsize < old->wsize)
+               return 0;
+
+       if (old->mnt_uid != new->mnt_uid || old->mnt_gid != new->mnt_gid)
+               return 0;
+
+       if (old->mnt_file_mode != new->mnt_file_mode ||
+           old->mnt_dir_mode != new->mnt_dir_mode)
+               return 0;
+
+       if (strcmp(old->local_nls->charset, new->local_nls->charset))
+               return 0;
+
+       if (old->actimeo != new->actimeo)
+               return 0;
+
+       return 1;
+}
+
 int
-get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path,
+cifs_match_super(struct super_block *sb, void *data)
+{
+       struct cifs_mnt_data *mnt_data = (struct cifs_mnt_data *)data;
+       struct smb_vol *volume_info;
+       struct cifs_sb_info *cifs_sb;
+       struct TCP_Server_Info *tcp_srv;
+       struct cifs_ses *ses;
+       struct cifs_tcon *tcon;
+       struct tcon_link *tlink;
+       struct sockaddr_storage addr;
+       int rc = 0;
+
+       memset(&addr, 0, sizeof(struct sockaddr_storage));
+
+       spin_lock(&cifs_tcp_ses_lock);
+       cifs_sb = CIFS_SB(sb);
+       tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
+       if (IS_ERR(tlink)) {
+               spin_unlock(&cifs_tcp_ses_lock);
+               return rc;
+       }
+       tcon = tlink_tcon(tlink);
+       ses = tcon->ses;
+       tcp_srv = ses->server;
+
+       volume_info = mnt_data->vol;
+
+       if (!volume_info->UNCip || !volume_info->UNC)
+               goto out;
+
+       rc = cifs_fill_sockaddr((struct sockaddr *)&addr,
+                               volume_info->UNCip,
+                               strlen(volume_info->UNCip),
+                               volume_info->port);
+       if (!rc)
+               goto out;
+
+       if (!match_server(tcp_srv, (struct sockaddr *)&addr, volume_info) ||
+           !match_session(ses, volume_info) ||
+           !match_tcon(tcon, volume_info->UNC)) {
+               rc = 0;
+               goto out;
+       }
+
+       rc = compare_mount_options(sb, mnt_data);
+out:
+       cifs_put_tlink(tlink);
+       spin_unlock(&cifs_tcp_ses_lock);
+       return rc;
+}
+
+int
+get_dfs_path(int xid, struct cifs_ses *pSesInfo, const char *old_path,
             const struct nls_table *nls_codepage, unsigned int *pnum_referrals,
             struct dfs_info3_param **preferrals, int remap)
 {
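
cifs_match_super() above is what makes superblock sharing possible: it reuses the same predicates as the connection-lookup paths (match_server, match_session, match_tcon) and only then compares the mount options via compare_mount_options(). A deliberately simplified, self-contained sketch of that layered check; every struct and value here is invented for illustration and bears no relation to the real TCP_Server_Info/cifs_ses/cifs_tcon layouts:

#include <stdio.h>
#include <string.h>

/* Toy stand-ins for the real server / session / tree-connection objects. */
struct srv  { char addr[32]; unsigned short port; };
struct ses  { struct srv *server; char user[32]; };
struct tcon { struct ses *ses; char tree[64]; };
struct vol  { char addr[32]; unsigned short port; char user[32]; char unc[64]; };

static int match_server(const struct srv *s, const struct vol *v)
{
        return strcmp(s->addr, v->addr) == 0 && s->port == v->port;
}

static int match_session(const struct ses *s, const struct vol *v)
{
        return strcmp(s->user, v->user) == 0;
}

static int match_tcon(const struct tcon *t, const struct vol *v)
{
        return strcmp(t->tree, v->unc) == 0;
}

/* Mirror of the decision order in cifs_match_super: every layer must match. */
static int match_super(const struct tcon *t, const struct vol *v)
{
        return match_server(t->ses->server, v) &&
               match_session(t->ses, v) &&
               match_tcon(t, v);
}

int main(void)
{
        struct srv  s  = { "192.168.1.10", 445 };
        struct ses  se = { &s, "alice" };
        struct tcon tc = { &se, "\\\\srv\\share" };
        struct vol  v  = { "192.168.1.10", 445, "alice", "\\\\srv\\share" };

        printf("existing sb reusable: %s\n", match_super(&tc, &v) ? "yes" : "no");
        return 0;
}

The real function additionally holds cifs_tcp_ses_lock, resolves the target address with cifs_fill_sockaddr(), and only then runs compare_mount_options(), which checks rsize/wsize, uid/gid, file and dir modes, charset and actimeo before agreeing to share the sb.
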
@@ -2469,7 +2542,7 @@ ip_connect(struct TCP_Server_Info *server)
        return generic_ip_connect(server);
 }
 
-void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
+void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
                          struct super_block *sb, struct smb_vol *vol_info)
 {
        /* if we are reconnecting then should we check to see if
@@ -2498,7 +2571,7 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
 
        if (!CIFSSMBQFSUnixInfo(xid, tcon)) {
                __u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
-
+               cFYI(1, "unix caps which server supports %lld", cap);
                /* check for reconnect case in which we do not
                   want to change the mount behavior if we can avoid it */
                if (vol_info == NULL) {
@@ -2516,6 +2589,9 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
                        }
                }
 
+               if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
+                       cERROR(1, "per-share encryption not supported yet");
+
                cap &= CIFS_UNIX_CAP_MASK;
                if (vol_info && vol_info->no_psx_acl)
                        cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
@@ -2534,12 +2610,6 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
                                        CIFS_MOUNT_POSIX_PATHS;
                }
 
-               /* We might be setting the path sep back to a different
-               form if we are reconnecting and the server switched its
-               posix path capability for this share */
-               if (sb && (CIFS_SB(sb)->prepathlen > 0))
-                       CIFS_SB(sb)->prepath[0] = CIFS_DIR_SEP(CIFS_SB(sb));
-
                if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) {
                        if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) {
                                CIFS_SB(sb)->rsize = 127 * 1024;
@@ -2564,6 +2634,10 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
                        cFYI(1, "very large read cap");
                if (cap & CIFS_UNIX_LARGE_WRITE_CAP)
                        cFYI(1, "very large write cap");
+               if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP)
+                       cFYI(1, "transport encryption cap");
+               if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)
+                       cFYI(1, "mandatory transport encryption cap");
 #endif /* CIFS_DEBUG2 */
                if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
                        if (vol_info == NULL) {
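
reset_cifs_unix_caps() above now logs the advertised capability word, reports each interesting bit under CIFS_DEBUG2, and warns when the server insists on mandatory per-share transport encryption, which is not supported yet. The mechanics are plain bit tests against a __u64 mask; a standalone sketch with invented bit values (the real CIFS_UNIX_*_CAP constants are defined in the cifs headers, not here):

#include <stdio.h>

/* Hypothetical bit values for illustration only. */
#define EX_POSIX_ACL_CAP            0x01ULL
#define EX_LARGE_READ_CAP           0x02ULL
#define EX_LARGE_WRITE_CAP          0x04ULL
#define EX_TRANSPORT_ENC_CAP        0x08ULL
#define EX_TRANSPORT_ENC_MANDATORY  0x10ULL

static void report_caps(unsigned long long cap)
{
        if (cap & EX_LARGE_READ_CAP)
                printf("very large read cap\n");
        if (cap & EX_LARGE_WRITE_CAP)
                printf("very large write cap\n");
        if (cap & EX_TRANSPORT_ENC_CAP)
                printf("transport encryption cap\n");
        if (cap & EX_TRANSPORT_ENC_MANDATORY)
                printf("mandatory transport encryption cap (not supported)\n");
}

int main(void)
{
        /* pretend the server advertised these bits */
        unsigned long long cap = EX_LARGE_READ_CAP | EX_TRANSPORT_ENC_CAP;

        report_caps(cap);

        /* masking off unwanted bits, much as reset_cifs_unix_caps does with
           CIFS_UNIX_CAP_MASK and the no_psx_acl mount option */
        cap &= ~EX_POSIX_ACL_CAP;
        printf("caps after masking: 0x%llx\n", cap);
        return 0;
}
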
@@ -2580,28 +2654,8 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
        }
 }
 
-static void
-convert_delimiter(char *path, char delim)
-{
-       int i;
-       char old_delim;
-
-       if (path == NULL)
-               return;
-
-       if (delim == '/')
-               old_delim = '\\';
-       else
-               old_delim = '/';
-
-       for (i = 0; path[i] != '\0'; i++) {
-               if (path[i] == old_delim)
-                       path[i] = delim;
-       }
-}
-
-static void setup_cifs_sb(struct smb_vol *pvolume_info,
-                         struct cifs_sb_info *cifs_sb)
+void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
+                       struct cifs_sb_info *cifs_sb)
 {
        INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
 
@@ -2615,40 +2669,19 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
        else /* default */
                cifs_sb->rsize = CIFSMaxBufSize;
 
-       if (pvolume_info->wsize > PAGEVEC_SIZE * PAGE_CACHE_SIZE) {
-               cERROR(1, "wsize %d too large, using 4096 instead",
-                         pvolume_info->wsize);
-               cifs_sb->wsize = 4096;
-       } else if (pvolume_info->wsize)
-               cifs_sb->wsize = pvolume_info->wsize;
-       else
-               cifs_sb->wsize = min_t(const int,
-                                       PAGEVEC_SIZE * PAGE_CACHE_SIZE,
-                                       127*1024);
-               /* old default of CIFSMaxBufSize was too small now
-                  that SMB Write2 can send multiple pages in kvec.
-                  RFC1001 does not describe what happens when frame
-                  bigger than 128K is sent so use that as max in
-                  conjunction with 52K kvec constraint on arch with 4K
-                  page size  */
-
        if (cifs_sb->rsize < 2048) {
                cifs_sb->rsize = 2048;
                /* Windows ME may prefer this */
                cFYI(1, "readsize set to minimum: 2048");
        }
-       /* calculate prepath */
-       cifs_sb->prepath = pvolume_info->prepath;
-       if (cifs_sb->prepath) {
-               cifs_sb->prepathlen = strlen(cifs_sb->prepath);
-               /* we can not convert the / to \ in the path
-               separators in the prefixpath yet because we do not
-               know (until reset_cifs_unix_caps is called later)
-               whether POSIX PATH CAP is available. We normalize
-               the / to \ after reset_cifs_unix_caps is called */
-               pvolume_info->prepath = NULL;
-       } else
-               cifs_sb->prepathlen = 0;
+
+       /*
+        * Temporarily set wsize for matching superblock. If we end up using
+        * new sb then cifs_negotiate_wsize will later negotiate it downward
+        * if needed.
+        */
+       cifs_sb->wsize = pvolume_info->wsize;
+
        cifs_sb->mnt_uid = pvolume_info->linux_uid;
        cifs_sb->mnt_gid = pvolume_info->linux_gid;
        cifs_sb->mnt_file_mode = pvolume_info->file_mode;
@@ -2657,6 +2690,7 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
                cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
 
        cifs_sb->actimeo = pvolume_info->actimeo;
+       cifs_sb->local_nls = pvolume_info->local_nls;
 
        if (pvolume_info->noperm)
                cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
@@ -2676,6 +2710,8 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
                cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOSSYNC;
        if (pvolume_info->mand_lock)
                cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOPOSIXBRL;
+       if (pvolume_info->rwpidforward)
+               cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD;
        if (pvolume_info->cifs_acl)
                cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL;
        if (pvolume_info->override_uid)
@@ -2709,8 +2745,55 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info,
                           "mount option supported");
 }
 
+/*
+ * When the server supports very large writes via POSIX extensions, we can
+ * allow up to 2^24 - PAGE_CACHE_SIZE.
+ *
+ * Note that this might make for "interesting" allocation problems during
+ * writeback however (as we have to allocate an array of pointers for the
+ * pages). A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
+ */
+#define CIFS_MAX_WSIZE ((1<<24) - PAGE_CACHE_SIZE)
+
+/*
+ * When the server doesn't allow large posix writes, default to a wsize of
+ * 128k - PAGE_CACHE_SIZE -- one page less than the largest frame size
+ * described in RFC1001. This allows space for the header without going over
+ * that by default.
+ */
+#define CIFS_MAX_RFC1001_WSIZE (128 * 1024 - PAGE_CACHE_SIZE)
+
+/*
+ * The default wsize is 1M. find_get_pages seems to return a maximum of 256
+ * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill
+ * a single wsize request with a single call.
+ */
+#define CIFS_DEFAULT_WSIZE (1024 * 1024)
+
+static unsigned int
+cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
+{
+       __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
+       struct TCP_Server_Info *server = tcon->ses->server;
+       unsigned int wsize = pvolume_info->wsize ? pvolume_info->wsize :
+                               CIFS_DEFAULT_WSIZE;
+
+       /* can server support 24-bit write sizes? (via UNIX extensions) */
+       if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
+               wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1001_WSIZE);
+
+       /* no CAP_LARGE_WRITE_X? Limit it to 16 bits */
+       if (!(server->capabilities & CAP_LARGE_WRITE_X))
+               wsize = min_t(unsigned int, wsize, USHRT_MAX);
+
+       /* hard limit of CIFS_MAX_WSIZE */
+       wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE);
+
+       return wsize;
+}
+
 static int
-is_path_accessible(int xid, struct cifsTconInfo *tcon,
+is_path_accessible(int xid, struct cifs_tcon *tcon,
                   struct cifs_sb_info *cifs_sb, const char *full_path)
 {
        int rc;
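
The new cifs_negotiate_wsize() above replaces the old clamping in setup_cifs_sb(): start from the requested wsize or the 1M default, cap it at 128k minus a page when the server lacks the POSIX large-write capability, cap it at 64k-1 without CAP_LARGE_WRITE_X, and never exceed 2^24 minus a page. A standalone restatement of that clamping order, assuming a 4k page size and reducing the two capability checks to booleans:

#include <stdio.h>

#define PAGE_SZ            4096u
#define MAX_WSIZE          ((1u << 24) - PAGE_SZ)   /* 2^24 - page size   */
#define MAX_RFC1001_WSIZE  (128u * 1024 - PAGE_SZ)  /* 128k - page size   */
#define DEFAULT_WSIZE      (1024u * 1024)           /* 1M default         */
#define USHRT_MAX_WSIZE    65535u

static unsigned int negotiate_wsize(unsigned int requested,
                                    int unix_large_write, int cap_large_write_x)
{
        unsigned int wsize = requested ? requested : DEFAULT_WSIZE;

        /* no 24-bit writes via the UNIX extensions? stay within RFC1001 */
        if (!unix_large_write && wsize > MAX_RFC1001_WSIZE)
                wsize = MAX_RFC1001_WSIZE;

        /* no CAP_LARGE_WRITE_X? limit it to 16 bits */
        if (!cap_large_write_x && wsize > USHRT_MAX_WSIZE)
                wsize = USHRT_MAX_WSIZE;

        /* hard ceiling */
        if (wsize > MAX_WSIZE)
                wsize = MAX_WSIZE;

        return wsize;
}

int main(void)
{
        printf("posix large writes:  %u\n", negotiate_wsize(0, 1, 1));
        printf("large write_x only:  %u\n", negotiate_wsize(0, 0, 1));
        printf("neither capability:  %u\n", negotiate_wsize(0, 0, 0));
        return 0;
}
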
@@ -2733,8 +2816,8 @@ is_path_accessible(int xid, struct cifsTconInfo *tcon,
        return rc;
 }
 
-static void
-cleanup_volume_info(struct smb_vol **pvolume_info)
+void
+cifs_cleanup_volume_info(struct smb_vol **pvolume_info)
 {
        struct smb_vol *volume_info;
 
@@ -2764,24 +2847,13 @@ build_unc_path_to_root(const struct smb_vol *volume_info,
        char *full_path;
 
        int unc_len = strnlen(volume_info->UNC, MAX_TREE_SIZE + 1);
-       full_path = kmalloc(unc_len + cifs_sb->prepathlen + 1, GFP_KERNEL);
+       full_path = kmalloc(unc_len + 1, GFP_KERNEL);
        if (full_path == NULL)
                return ERR_PTR(-ENOMEM);
 
        strncpy(full_path, volume_info->UNC, unc_len);
-       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
-               int i;
-               for (i = 0; i < unc_len; i++) {
-                       if (full_path[i] == '\\')
-                               full_path[i] = '/';
-               }
-       }
-
-       if (cifs_sb->prepathlen)
-               strncpy(full_path + unc_len, cifs_sb->prepath,
-                               cifs_sb->prepathlen);
-
-       full_path[unc_len + cifs_sb->prepathlen] = 0; /* add trailing null */
+       full_path[unc_len] = 0; /* add trailing null */
+       convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
        return full_path;
 }
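
build_unc_path_to_root() above no longer appends the prefix path; it copies the UNC and normalizes the separators with convert_delimiter() (whose static copy was removed from connect.c earlier in this diff, so the helper presumably lives elsewhere in the cifs code now). A userspace rendering of that helper, following the removed body, with a small demo:

#include <stdio.h>

/* Same behaviour as the convert_delimiter() removed above: rewrite every
   occurrence of the "other" separator to the requested one. */
static void convert_delimiter(char *path, char delim)
{
        char old_delim;

        if (path == NULL)
                return;

        old_delim = (delim == '/') ? '\\' : '/';

        for (int i = 0; path[i] != '\0'; i++) {
                if (path[i] == old_delim)
                        path[i] = delim;
        }
}

int main(void)
{
        char unc[] = "\\\\server\\share\\dir";

        convert_delimiter(unc, '/');
        printf("%s\n", unc);   /* prints //server/share/dir */
        return 0;
}
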
 
@@ -2796,7 +2868,7 @@ build_unc_path_to_root(const struct smb_vol *volume_info,
  * determine whether there were referrals.
  */
 static int
-expand_dfs_referral(int xid, struct cifsSesInfo *pSesInfo,
+expand_dfs_referral(int xid, struct cifs_ses *pSesInfo,
                    struct smb_vol *volume_info, struct cifs_sb_info *cifs_sb,
                    int check_prefix)
 {
@@ -2840,40 +2912,13 @@ expand_dfs_referral(int xid, struct cifsSesInfo *pSesInfo,
 }
 #endif
 
-int
-cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
-               const char *devname)
+int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data,
+                          const char *devname)
 {
-       int rc;
-       int xid;
        struct smb_vol *volume_info;
-       struct cifsSesInfo *pSesInfo;
-       struct cifsTconInfo *tcon;
-       struct TCP_Server_Info *srvTcp;
-       char   *full_path;
-       struct tcon_link *tlink;
-#ifdef CONFIG_CIFS_DFS_UPCALL
-       int referral_walks_count = 0;
-try_mount_again:
-       /* cleanup activities if we're chasing a referral */
-       if (referral_walks_count) {
-               if (tcon)
-                       cifs_put_tcon(tcon);
-               else if (pSesInfo)
-                       cifs_put_smb_ses(pSesInfo);
-
-               cleanup_volume_info(&volume_info);
-               FreeXid(xid);
-       }
-#endif
-       rc = 0;
-       tcon = NULL;
-       pSesInfo = NULL;
-       srvTcp = NULL;
-       full_path = NULL;
-       tlink = NULL;
+       int rc = 0;
 
-       xid = GetXid();
+       *pvolume_info = NULL;
 
        volume_info = kzalloc(sizeof(struct smb_vol), GFP_KERNEL);
        if (!volume_info) {
@@ -2881,7 +2926,7 @@ try_mount_again:
                goto out;
        }
 
-       if (cifs_parse_mount_options(cifs_sb->mountdata, devname,
+       if (cifs_parse_mount_options(mount_data, devname,
                                     volume_info)) {
                rc = -EINVAL;
                goto out;
@@ -2914,7 +2959,46 @@ try_mount_again:
                        goto out;
                }
        }
-       cifs_sb->local_nls = volume_info->local_nls;
+
+       *pvolume_info = volume_info;
+       return rc;
+out:
+       cifs_cleanup_volume_info(&volume_info);
+       return rc;
+}
+
+int
+cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
+          struct smb_vol *volume_info, const char *devname)
+{
+       int rc = 0;
+       int xid;
+       struct cifs_ses *pSesInfo;
+       struct cifs_tcon *tcon;
+       struct TCP_Server_Info *srvTcp;
+       char   *full_path;
+       struct tcon_link *tlink;
+#ifdef CONFIG_CIFS_DFS_UPCALL
+       int referral_walks_count = 0;
+try_mount_again:
+       /* cleanup activities if we're chasing a referral */
+       if (referral_walks_count) {
+               if (tcon)
+                       cifs_put_tcon(tcon);
+               else if (pSesInfo)
+                       cifs_put_smb_ses(pSesInfo);
+
+               cifs_cleanup_volume_info(&volume_info);
+               FreeXid(xid);
+       }
+#endif
+       tcon = NULL;
+       pSesInfo = NULL;
+       srvTcp = NULL;
+       full_path = NULL;
+       tlink = NULL;
+
+       xid = GetXid();
 
        /* get a reference to a tcp session */
        srvTcp = cifs_get_tcp_session(volume_info);
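
The mount path is split here: cifs_setup_volume_info() takes over the option parsing that used to sit at the top of cifs_mount(), and cifs_mount() now receives the already-parsed smb_vol, with cifs_cleanup_volume_info() left to the caller (note the cleanup_volume_info() call removed from the out: label further down). A toy sketch of that parse-then-mount-then-cleanup ordering; the types and functions below are stand-ins for illustration, not the real cifs interfaces:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-ins; the real smb_vol / cifs_sb_info are far richer. */
struct vol { char *unc; };
struct sb  { int mounted; };

/* parse the mount data first ... */
static int setup_volume_info(struct vol **pvol, const char *mount_data)
{
        struct vol *v = calloc(1, sizeof(*v));

        if (v == NULL)
                return -1;
        v->unc = strdup(mount_data);    /* stands in for option parsing */
        *pvol = v;
        return 0;
}

/* ... then mount with the already-parsed volume ... */
static int do_mount(struct sb *sb, const struct vol *v)
{
        printf("mounting %s\n", v->unc);
        sb->mounted = 1;
        return 0;
}

/* ... and let the caller release the parsed options either way. */
static void cleanup_volume_info(struct vol **pvol)
{
        if (*pvol == NULL)
                return;
        free((*pvol)->unc);
        free(*pvol);
        *pvol = NULL;
}

int main(void)
{
        struct vol *v = NULL;
        struct sb sb = { 0 };

        if (setup_volume_info(&v, "//server/share") == 0) {
                do_mount(&sb, v);
                cleanup_volume_info(&v);
        }
        return 0;
}
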
@@ -2931,7 +3015,6 @@ try_mount_again:
                goto mount_fail_check;
        }
 
-       setup_cifs_sb(volume_info, cifs_sb);
        if (pSesInfo->capabilities & CAP_LARGE_FILES)
                sb->s_maxbytes = MAX_LFS_FILESIZE;
        else
@@ -2948,35 +3031,36 @@ try_mount_again:
                goto remote_path_check;
        }
 
-       /* do not care if following two calls succeed - informational */
-       if (!tcon->ipc) {
-               CIFSSMBQFSDeviceInfo(xid, tcon);
-               CIFSSMBQFSAttributeInfo(xid, tcon);
-       }
-
        /* tell server which Unix caps we support */
-       if (tcon->ses->capabilities & CAP_UNIX)
+       if (tcon->ses->capabilities & CAP_UNIX) {
                /* reset of caps checks mount to see if unix extensions
                   disabled for just this mount */
                reset_cifs_unix_caps(xid, tcon, sb, volume_info);
-       else
+               if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
+                   (le64_to_cpu(tcon->fsUnixInfo.Capability) &
+                    CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
+                       rc = -EACCES;
+                       goto mount_fail_check;
+               }
+       } else
                tcon->unix_ext = 0; /* server does not support them */
 
-       /* convert forward to back slashes in prepath here if needed */
-       if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0)
-               convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
+       /* do not care if following two calls succeed - informational */
+       if (!tcon->ipc) {
+               CIFSSMBQFSDeviceInfo(xid, tcon);
+               CIFSSMBQFSAttributeInfo(xid, tcon);
+       }
 
        if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) {
                cifs_sb->rsize = 1024 * 127;
                cFYI(DBG2, "no very large read support, rsize now 127K");
        }
-       if (!(tcon->ses->capabilities & CAP_LARGE_WRITE_X))
-               cifs_sb->wsize = min(cifs_sb->wsize,
-                              (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE));
        if (!(tcon->ses->capabilities & CAP_LARGE_READ_X))
                cifs_sb->rsize = min(cifs_sb->rsize,
                               (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE));
 
+       cifs_sb->wsize = cifs_negotiate_wsize(tcon, volume_info);
+
 remote_path_check:
 #ifdef CONFIG_CIFS_DFS_UPCALL
        /*
@@ -2996,10 +3080,10 @@ remote_path_check:
        }
 #endif
 
-       /* check if a whole path (including prepath) is not remote */
+       /* check if a whole path is not remote */
        if (!rc && tcon) {
                /* build_path_to_root works only when we have a valid tcon */
-               full_path = cifs_build_path_to_root(cifs_sb, tcon);
+               full_path = cifs_build_path_to_root(volume_info, cifs_sb, tcon);
                if (full_path == NULL) {
                        rc = -ENOMEM;
                        goto mount_fail_check;
@@ -3025,10 +3109,6 @@ remote_path_check:
                        rc = -ELOOP;
                        goto mount_fail_check;
                }
-               /* convert forward to back slashes in prepath here if needed */
-               if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0)
-                       convert_delimiter(cifs_sb->prepath,
-                                       CIFS_DIR_SEP(cifs_sb));
 
                rc = expand_dfs_referral(xid, pSesInfo, volume_info, cifs_sb,
                                         true);
@@ -3087,14 +3167,13 @@ mount_fail_check:
        password will be freed at unmount time) */
 out:
        /* zero out password before freeing */
-       cleanup_volume_info(&volume_info);
        FreeXid(xid);
        return rc;
 }
 
 int
-CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
-        const char *tree, struct cifsTconInfo *tcon,
+CIFSTCon(unsigned int xid, struct cifs_ses *ses,
+        const char *tree, struct cifs_tcon *tcon,
         const struct nls_table *nls_codepage)
 {
        struct smb_hdr *smb_buffer;
@@ -3126,7 +3205,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
        pSMB->AndXCommand = 0xFF;
        pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
        bcc_ptr = &pSMB->Password[0];
-       if ((ses->server->secMode) & SECMODE_USER) {
+       if ((ses->server->sec_mode) & SECMODE_USER) {
                pSMB->PasswordLength = cpu_to_le16(1);  /* minimum */
                *bcc_ptr = 0; /* password is null byte */
                bcc_ptr++;              /* skip password */
@@ -3143,7 +3222,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
                if ((global_secflags & CIFSSEC_MAY_LANMAN) &&
                    (ses->server->secType == LANMAN))
                        calc_lanman_hash(tcon->password, ses->server->cryptkey,
-                                        ses->server->secMode &
+                                        ses->server->sec_mode &
                                            SECMODE_PW_ENCRYPT ? true : false,
                                         bcc_ptr);
                else
@@ -3159,7 +3238,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
                }
        }
 
-       if (ses->server->secMode &
+       if (ses->server->sec_mode &
                        (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
                smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 
@@ -3255,7 +3334,6 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
        struct rb_root *root = &cifs_sb->tlink_tree;
        struct rb_node *node;
        struct tcon_link *tlink;
-       char *tmp;
 
        cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
 
@@ -3272,15 +3350,10 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
        }
        spin_unlock(&cifs_sb->tlink_tree_lock);
 
-       tmp = cifs_sb->prepath;
-       cifs_sb->prepathlen = 0;
-       cifs_sb->prepath = NULL;
-       kfree(tmp);
-
        return 0;
 }
 
-int cifs_negotiate_protocol(unsigned int xid, struct cifsSesInfo *ses)
+int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
 {
        int rc = 0;
        struct TCP_Server_Info *server = ses->server;
@@ -3310,7 +3383,7 @@ int cifs_negotiate_protocol(unsigned int xid, struct cifsSesInfo *ses)
 }
 
 
-int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
+int cifs_setup_session(unsigned int xid, struct cifs_ses *ses,
                        struct nls_table *nls_info)
 {
        int rc = 0;
@@ -3322,7 +3395,7 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
                ses->capabilities &= (~CAP_UNIX);
 
        cFYI(1, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d",
-                server->secMode, server->capabilities, server->timeAdj);
+                server->sec_mode, server->capabilities, server->timeAdj);
 
        rc = CIFS_SessSetup(xid, ses, nls_info);
        if (rc) {
@@ -3354,12 +3427,12 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses,
        return rc;
 }
 
-static struct cifsTconInfo *
+static struct cifs_tcon *
 cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
 {
-       struct cifsTconInfo *master_tcon = cifs_sb_master_tcon(cifs_sb);
-       struct cifsSesInfo *ses;
-       struct cifsTconInfo *tcon = NULL;
+       struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
+       struct cifs_ses *ses;
+       struct cifs_tcon *tcon = NULL;
        struct smb_vol *vol_info;
        char username[28]; /* big enough for "krb50x" + hex of ULONG_MAX 6+16 */
                           /* We used to have this as MAX_USERNAME which is   */
@@ -3392,7 +3465,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
 
        ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info);
        if (IS_ERR(ses)) {
-               tcon = (struct cifsTconInfo *)ses;
+               tcon = (struct cifs_tcon *)ses;
                cifs_put_tcp_session(master_tcon->ses->server);
                goto out;
        }
@@ -3417,7 +3490,7 @@ cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
        return cifs_sb->master_tlink;
 }
 
-struct cifsTconInfo *
+struct cifs_tcon *
 cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
 {
        return tlink_tcon(cifs_sb_master_tlink(cifs_sb));
index 9ea65cf367146905cc0dababe262397752a15894..81914df47ef1612c1ab742d228503bbf5e598046 100644 (file)
@@ -50,12 +50,11 @@ build_path_from_dentry(struct dentry *direntry)
 {
        struct dentry *temp;
        int namelen;
-       int pplen;
        int dfsplen;
        char *full_path;
        char dirsep;
        struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
-       struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+       struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 
        if (direntry == NULL)
                return NULL;  /* not much we can do if dentry is freed and
@@ -63,13 +62,12 @@ build_path_from_dentry(struct dentry *direntry)
                when the server crashed */
 
        dirsep = CIFS_DIR_SEP(cifs_sb);
-       pplen = cifs_sb->prepathlen;
        if (tcon->Flags & SMB_SHARE_IS_IN_DFS)
                dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
        else
                dfsplen = 0;
 cifs_bp_rename_retry:
-       namelen = pplen + dfsplen;
+       namelen = dfsplen;
        for (temp = direntry; !IS_ROOT(temp);) {
                namelen += (1 + temp->d_name.len);
                temp = temp->d_parent;
@@ -100,7 +98,7 @@ cifs_bp_rename_retry:
                        return NULL;
                }
        }
-       if (namelen != pplen + dfsplen) {
+       if (namelen != dfsplen) {
                cERROR(1, "did not end path lookup where expected namelen is %d",
                        namelen);
                /* presumably this is only possible if racing with a rename
@@ -126,7 +124,6 @@ cifs_bp_rename_retry:
                        }
                }
        }
-       strncpy(full_path + dfsplen, CIFS_SB(direntry->d_sb)->prepath, pplen);
        return full_path;
 }
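
With the prefix path gone, build_path_from_dentry() above sizes the buffer by walking d_parent, then fills the name components in from the end so they come out in root-to-leaf order (the rename-race retry at cifs_bp_rename_retry and the DFS prefix handling are still present but only partly visible in this hunk). A userspace sketch of that backward-fill technique, using a toy dentry struct and omitting the retry and the DFS prefix:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dentry {
        const char *name;
        struct dentry *parent;      /* NULL for the root */
};

static char *build_path(const struct dentry *d, char dirsep)
{
        const struct dentry *t;
        size_t namelen = 0;
        size_t pos;
        char *path;

        /* first pass: size the buffer (separator + component per level) */
        for (t = d; t->parent != NULL; t = t->parent)
                namelen += 1 + strlen(t->name);

        path = malloc(namelen + 1);
        if (path == NULL)
                return NULL;
        path[namelen] = '\0';

        /* second pass: fill from the end, leaf first */
        pos = namelen;
        for (t = d; t->parent != NULL; t = t->parent) {
                size_t len = strlen(t->name);

                pos -= len;
                memcpy(path + pos, t->name, len);
                path[--pos] = dirsep;
        }
        return path;
}

int main(void)
{
        struct dentry root = { "/", NULL };
        struct dentry dir  = { "dir", &root };
        struct dentry file = { "file.txt", &dir };
        char *p = build_path(&file, '\\');

        printf("%s\n", p);        /* prints \dir\file.txt */
        free(p);
        return 0;
}
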
 
@@ -152,7 +149,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
        __u16 fileHandle;
        struct cifs_sb_info *cifs_sb;
        struct tcon_link *tlink;
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
        char *full_path = NULL;
        FILE_ALL_INFO *buf = NULL;
        struct inode *newinode = NULL;
@@ -356,7 +353,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
        int xid;
        struct cifs_sb_info *cifs_sb;
        struct tcon_link *tlink;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
+       struct cifs_io_parms io_parms;
        char *full_path = NULL;
        struct inode *newinode = NULL;
        int oplock = 0;
@@ -439,16 +437,19 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
         * timestamps in, but we can reuse it safely */
 
        pdev = (struct win_dev *)buf;
+       io_parms.netfid = fileHandle;
+       io_parms.pid = current->tgid;
+       io_parms.tcon = pTcon;
+       io_parms.offset = 0;
+       io_parms.length = sizeof(struct win_dev);
        if (S_ISCHR(mode)) {
                memcpy(pdev->type, "IntxCHR", 8);
                pdev->major =
                      cpu_to_le64(MAJOR(device_number));
                pdev->minor =
                      cpu_to_le64(MINOR(device_number));
-               rc = CIFSSMBWrite(xid, pTcon,
-                       fileHandle,
-                       sizeof(struct win_dev),
-                       0, &bytes_written, (char *)pdev,
+               rc = CIFSSMBWrite(xid, &io_parms,
+                       &bytes_written, (char *)pdev,
                        NULL, 0);
        } else if (S_ISBLK(mode)) {
                memcpy(pdev->type, "IntxBLK", 8);
@@ -456,10 +457,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
                      cpu_to_le64(MAJOR(device_number));
                pdev->minor =
                      cpu_to_le64(MINOR(device_number));
-               rc = CIFSSMBWrite(xid, pTcon,
-                       fileHandle,
-                       sizeof(struct win_dev),
-                       0, &bytes_written, (char *)pdev,
+               rc = CIFSSMBWrite(xid, &io_parms,
+                       &bytes_written, (char *)pdev,
                        NULL, 0);
        } /* else if (S_ISFIFO) */
        CIFSSMBClose(xid, pTcon, fileHandle);
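
Here, and again in cifs_write() further down, the write calls now take a struct cifs_io_parms bundling netfid, pid, tcon, offset and length instead of passing them positionally to CIFSSMBWrite/CIFSSMBWrite2. A minimal illustration of that style of parameter block; the struct fields and function below are invented for the sketch, not the real cifs definitions:

#include <stdio.h>

/* Parameter block in the spirit of struct cifs_io_parms (fields invented
   here; the real definition lives in the cifs headers). */
struct io_params {
        unsigned short netfid;
        unsigned int   pid;
        long long      offset;
        unsigned int   length;
};

/* One struct argument instead of several positional ones. */
static int do_write(const struct io_params *p, const char *data,
                    unsigned int *written)
{
        (void)data;                  /* payload not inspected in this sketch */
        printf("write fid=%u pid=%u off=%lld len=%u\n",
               p->netfid, p->pid, p->offset, p->length);
        *written = p->length;        /* pretend everything was written */
        return 0;
}

int main(void)
{
        const char payload[] = "IntxCHR";
        struct io_params io = {
                .netfid = 42,
                .pid    = 1234,
                .offset = 0,
                .length = (unsigned int)sizeof(payload),
        };
        unsigned int written = 0;

        do_write(&io, payload, &written);
        printf("wrote %u bytes\n", written);
        return 0;
}
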
@@ -486,7 +485,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
        bool posix_open = false;
        struct cifs_sb_info *cifs_sb;
        struct tcon_link *tlink;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
        struct cifsFileInfo *cfile;
        struct inode *newInode = NULL;
        char *full_path = NULL;
index c672afef0c096b2361bcfbd777a0f09c23b37d06..bb71471a4d9d68516269550d6b5eeb5603d5a46b 100644 (file)
@@ -114,7 +114,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct cifs_fattr fattr;
        struct tcon_link *tlink;
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
 
        cFYI(1, "posix open %s", full_path);
 
@@ -168,7 +168,7 @@ posix_open_ret:
 
 static int
 cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
-            struct cifsTconInfo *tcon, unsigned int f_flags, __u32 *poplock,
+            struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock,
             __u16 *pnetfid, int xid)
 {
        int rc;
@@ -285,7 +285,7 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 {
        struct inode *inode = cifs_file->dentry->d_inode;
-       struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
+       struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct cifsLockInfo *li, *tmp;
@@ -343,7 +343,7 @@ int cifs_open(struct inode *inode, struct file *file)
        int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
        struct tcon_link *tlink;
        struct cifsFileInfo *pCifsFile = NULL;
        char *full_path = NULL;
@@ -457,7 +457,7 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
        int xid;
        __u32 oplock;
        struct cifs_sb_info *cifs_sb;
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
        struct cifsInodeInfo *pCifsInode;
        struct inode *inode;
        char *full_path = NULL;
@@ -596,7 +596,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
        xid = GetXid();
 
        if (pCFileStruct) {
-               struct cifsTconInfo *pTcon = tlink_tcon(pCFileStruct->tlink);
+               struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink);
 
                cFYI(1, "Freeing private data in close dir");
                spin_lock(&cifs_file_list_lock);
@@ -653,7 +653,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
        __u64 length;
        bool wait_flag = false;
        struct cifs_sb_info *cifs_sb;
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
        __u16 netfid;
        __u8 lockType = LOCKING_ANDX_LARGE_FILES;
        bool posix_locking = 0;
@@ -725,8 +725,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
                        else
                                posix_lock_type = CIFS_WRLCK;
                        rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */,
-                                       length, pfLock,
-                                       posix_lock_type, wait_flag);
+                                       length, pfLock, posix_lock_type,
+                                       wait_flag);
                        FreeXid(xid);
                        return rc;
                }
@@ -797,8 +797,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
                        posix_lock_type = CIFS_UNLCK;
 
                rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */,
-                                     length, pfLock,
-                                     posix_lock_type, wait_flag);
+                                     length, pfLock, posix_lock_type,
+                                     wait_flag);
        } else {
                struct cifsFileInfo *fid = file->private_data;
 
@@ -857,7 +857,7 @@ cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
                cifsi->server_eof = end_of_write;
 }
 
-static ssize_t cifs_write(struct cifsFileInfo *open_file,
+static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid,
                          const char *write_data, size_t write_size,
                          loff_t *poffset)
 {
@@ -865,10 +865,11 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
        unsigned int bytes_written = 0;
        unsigned int total_written;
        struct cifs_sb_info *cifs_sb;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
        int xid;
        struct dentry *dentry = open_file->dentry;
        struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode);
+       struct cifs_io_parms io_parms;
 
        cifs_sb = CIFS_SB(dentry->d_sb);
 
@@ -901,8 +902,13 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file,
                        /* iov[0] is reserved for smb header */
                        iov[1].iov_base = (char *)write_data + total_written;
                        iov[1].iov_len = len;
-                       rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, len,
-                                          *poffset, &bytes_written, iov, 1, 0);
+                       io_parms.netfid = open_file->netfid;
+                       io_parms.pid = pid;
+                       io_parms.tcon = pTcon;
+                       io_parms.offset = *poffset;
+                       io_parms.length = len;
+                       rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov,
+                                          1, 0);
                }
                if (rc || (bytes_written == 0)) {
                        if (total_written)
@@ -1071,8 +1077,8 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
 
        open_file = find_writable_file(CIFS_I(mapping->host), false);
        if (open_file) {
-               bytes_written = cifs_write(open_file, write_data,
-                                          to - from, &offset);
+               bytes_written = cifs_write(open_file, open_file->pid,
+                                          write_data, to - from, &offset);
                cifsFileInfo_put(open_file);
                /* Does mm or vfs already set times? */
                inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb);
@@ -1092,58 +1098,20 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
 static int cifs_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
 {
-       unsigned int bytes_to_write;
-       unsigned int bytes_written;
-       struct cifs_sb_info *cifs_sb;
-       int done = 0;
-       pgoff_t end;
-       pgoff_t index;
-       int range_whole = 0;
-       struct kvec *iov;
-       int len;
-       int n_iov = 0;
-       pgoff_t next;
-       int nr_pages;
-       __u64 offset = 0;
-       struct cifsFileInfo *open_file;
-       struct cifsTconInfo *tcon;
-       struct cifsInodeInfo *cifsi = CIFS_I(mapping->host);
+       struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
+       bool done = false, scanned = false, range_whole = false;
+       pgoff_t end, index;
+       struct cifs_writedata *wdata;
        struct page *page;
-       struct pagevec pvec;
        int rc = 0;
-       int scanned = 0;
-       int xid;
-
-       cifs_sb = CIFS_SB(mapping->host->i_sb);
 
        /*
-        * If wsize is smaller that the page cache size, default to writing
+        * If wsize is smaller than the page cache size, default to writing
         * one page at a time via cifs_writepage
         */
        if (cifs_sb->wsize < PAGE_CACHE_SIZE)
                return generic_writepages(mapping, wbc);
 
-       iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL);
-       if (iov == NULL)
-               return generic_writepages(mapping, wbc);
-
-       /*
-        * if there's no open file, then this is likely to fail too,
-        * but it'll at least handle the return. Maybe it should be
-        * a BUG() instead?
-        */
-       open_file = find_writable_file(CIFS_I(mapping->host), false);
-       if (!open_file) {
-               kfree(iov);
-               return generic_writepages(mapping, wbc);
-       }
-
-       tcon = tlink_tcon(open_file->tlink);
-       cifsFileInfo_put(open_file);
-
-       xid = GetXid();
-
-       pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
                index = mapping->writeback_index; /* Start from prev offset */
                end = -1;
@@ -1151,24 +1119,49 @@ static int cifs_writepages(struct address_space *mapping,
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
-                       range_whole = 1;
-               scanned = 1;
+                       range_whole = true;
+               scanned = true;
        }
 retry:
-       while (!done && (index <= end) &&
-              (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-                       PAGECACHE_TAG_DIRTY,
-                       min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) {
-               int first;
-               unsigned int i;
-
-               first = -1;
-               next = 0;
-               n_iov = 0;
-               bytes_to_write = 0;
-
-               for (i = 0; i < nr_pages; i++) {
-                       page = pvec.pages[i];
+       while (!done && index <= end) {
+               unsigned int i, nr_pages, found_pages;
+               pgoff_t next = 0, tofind;
+               struct page **pages;
+
+               tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1,
+                               end - index) + 1;
+
+               wdata = cifs_writedata_alloc((unsigned int)tofind);
+               if (!wdata) {
+                       rc = -ENOMEM;
+                       break;
+               }
+
+               /*
+                * find_get_pages_tag seems to return a max of 256 on each
+                * iteration, so we must call it several times in order to
+                * fill the array or the wsize is effectively limited to
+                * 256 * PAGE_CACHE_SIZE.
+                */
+               found_pages = 0;
+               pages = wdata->pages;
+               do {
+                       nr_pages = find_get_pages_tag(mapping, &index,
+                                                       PAGECACHE_TAG_DIRTY,
+                                                       tofind, pages);
+                       found_pages += nr_pages;
+                       tofind -= nr_pages;
+                       pages += nr_pages;
+               } while (nr_pages && tofind && index <= end);
+
+               if (found_pages == 0) {
+                       kref_put(&wdata->refcount, cifs_writedata_release);
+                       break;
+               }
+
+               nr_pages = 0;
+               for (i = 0; i < found_pages; i++) {
+                       page = wdata->pages[i];
                        /*
                         * At this point we hold neither mapping->tree_lock nor
                         * lock on the page itself: the page may be truncated or
@@ -1177,7 +1170,7 @@ retry:
                         * mapping
                         */
 
-                       if (first < 0)
+                       if (nr_pages == 0)
                                lock_page(page);
                        else if (!trylock_page(page))
                                break;
@@ -1188,7 +1181,7 @@ retry:
                        }
 
                        if (!wbc->range_cyclic && page->index > end) {
-                               done = 1;
+                               done = true;
                                unlock_page(page);
                                break;
                        }
@@ -1215,119 +1208,89 @@ retry:
                        set_page_writeback(page);
 
                        if (page_offset(page) >= mapping->host->i_size) {
-                               done = 1;
+                               done = true;
                                unlock_page(page);
                                end_page_writeback(page);
                                break;
                        }
 
-                       /*
-                        * BB can we get rid of this?  pages are held by pvec
-                        */
-                       page_cache_get(page);
+                       wdata->pages[i] = page;
+                       next = page->index + 1;
+                       ++nr_pages;
+               }
 
-                       len = min(mapping->host->i_size - page_offset(page),
-                                 (loff_t)PAGE_CACHE_SIZE);
+               /* reset index to refind any pages skipped */
+               if (nr_pages == 0)
+                       index = wdata->pages[0]->index + 1;
 
-                       /* reserve iov[0] for the smb header */
-                       n_iov++;
-                       iov[n_iov].iov_base = kmap(page);
-                       iov[n_iov].iov_len = len;
-                       bytes_to_write += len;
+               /* put any pages we aren't going to use */
+               for (i = nr_pages; i < found_pages; i++) {
+                       page_cache_release(wdata->pages[i]);
+                       wdata->pages[i] = NULL;
+               }
 
-                       if (first < 0) {
-                               first = i;
-                               offset = page_offset(page);
-                       }
-                       next = page->index + 1;
-                       if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize)
-                               break;
+               /* nothing to write? */
+               if (nr_pages == 0) {
+                       kref_put(&wdata->refcount, cifs_writedata_release);
+                       continue;
                }
-               if (n_iov) {
-retry_write:
-                       open_file = find_writable_file(CIFS_I(mapping->host),
-                                                       false);
-                       if (!open_file) {
-                               cERROR(1, "No writable handles for inode");
-                               rc = -EBADF;
-                       } else {
-                               rc = CIFSSMBWrite2(xid, tcon, open_file->netfid,
-                                                  bytes_to_write, offset,
-                                                  &bytes_written, iov, n_iov,
-                                                  0);
-                               cifsFileInfo_put(open_file);
-                       }
 
-                       cFYI(1, "Write2 rc=%d, wrote=%u", rc, bytes_written);
+               wdata->sync_mode = wbc->sync_mode;
+               wdata->nr_pages = nr_pages;
+               wdata->offset = page_offset(wdata->pages[0]);
 
-                       /*
-                        * For now, treat a short write as if nothing got
-                        * written. A zero length write however indicates
-                        * ENOSPC or EFBIG. We have no way to know which
-                        * though, so call it ENOSPC for now. EFBIG would
-                        * get translated to AS_EIO anyway.
-                        *
-                        * FIXME: make it take into account the data that did
-                        *        get written
-                        */
-                       if (rc == 0) {
-                               if (bytes_written == 0)
-                                       rc = -ENOSPC;
-                               else if (bytes_written < bytes_to_write)
-                                       rc = -EAGAIN;
+               do {
+                       if (wdata->cfile != NULL)
+                               cifsFileInfo_put(wdata->cfile);
+                       wdata->cfile = find_writable_file(CIFS_I(mapping->host),
+                                                         false);
+                       if (!wdata->cfile) {
+                               cERROR(1, "No writable handles for inode");
+                               rc = -EBADF;
+                               break;
                        }
+                       rc = cifs_async_writev(wdata);
+               } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
 
-                       /* retry on data-integrity flush */
-                       if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
-                               goto retry_write;
-
-                       /* fix the stats and EOF */
-                       if (bytes_written > 0) {
-                               cifs_stats_bytes_written(tcon, bytes_written);
-                               cifs_update_eof(cifsi, offset, bytes_written);
-                       }
+               for (i = 0; i < nr_pages; ++i)
+                       unlock_page(wdata->pages[i]);
 
-                       for (i = 0; i < n_iov; i++) {
-                               page = pvec.pages[first + i];
-                               /* on retryable write error, redirty page */
+               /* send failure -- clean up the mess */
+               if (rc != 0) {
+                       for (i = 0; i < nr_pages; ++i) {
                                if (rc == -EAGAIN)
-                                       redirty_page_for_writepage(wbc, page);
-                               else if (rc != 0)
-                                       SetPageError(page);
-                               kunmap(page);
-                               unlock_page(page);
-                               end_page_writeback(page);
-                               page_cache_release(page);
+                                       redirty_page_for_writepage(wbc,
+                                                          wdata->pages[i]);
+                               else
+                                       SetPageError(wdata->pages[i]);
+                               end_page_writeback(wdata->pages[i]);
+                               page_cache_release(wdata->pages[i]);
                        }
-
                        if (rc != -EAGAIN)
                                mapping_set_error(mapping, rc);
-                       else
-                               rc = 0;
+               }
+               kref_put(&wdata->refcount, cifs_writedata_release);
 
-                       if ((wbc->nr_to_write -= n_iov) <= 0)
-                               done = 1;
-                       index = next;
-               } else
-                       /* Need to re-find the pages we skipped */
-                       index = pvec.pages[0]->index + 1;
+               wbc->nr_to_write -= nr_pages;
+               if (wbc->nr_to_write <= 0)
+                       done = true;
 
-               pagevec_release(&pvec);
+               index = next;
        }
+
        if (!scanned && !done) {
                /*
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
-               scanned = 1;
+               scanned = true;
                index = 0;
                goto retry;
        }
+
        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = index;
 
-       FreeXid(xid);
-       kfree(iov);
        return rc;
 }
 
@@ -1383,6 +1346,14 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
 {
        int rc;
        struct inode *inode = mapping->host;
+       struct cifsFileInfo *cfile = file->private_data;
+       struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
+       __u32 pid;
+
+       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+               pid = cfile->pid;
+       else
+               pid = current->tgid;
 
        cFYI(1, "write_end for page %p from pos %lld with %d bytes",
                 page, pos, copied);
@@ -1406,8 +1377,7 @@ static int cifs_write_end(struct file *file, struct address_space *mapping,
                /* BB check if anything else missing out of ppw
                   such as updating last write time */
                page_data = kmap(page);
-               rc = cifs_write(file->private_data, page_data + offset,
-                               copied, &pos);
+               rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
                /* if (rc < 0) should we set writebehind rc? */
                kunmap(page);
 
@@ -1435,7 +1405,7 @@ int cifs_strict_fsync(struct file *file, int datasync)
 {
        int xid;
        int rc = 0;
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
        struct cifsFileInfo *smbfile = file->private_data;
        struct inode *inode = file->f_path.dentry->d_inode;
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
@@ -1465,7 +1435,7 @@ int cifs_fsync(struct file *file, int datasync)
 {
        int xid;
        int rc = 0;
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
        struct cifsFileInfo *smbfile = file->private_data;
        struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 
@@ -1556,9 +1526,11 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
        struct iov_iter it;
        struct inode *inode;
        struct cifsFileInfo *open_file;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
        struct cifs_sb_info *cifs_sb;
+       struct cifs_io_parms io_parms;
        int xid, rc;
+       __u32 pid;
 
        len = iov_length(iov, nr_segs);
        if (!len)
@@ -1590,6 +1562,12 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
 
        xid = GetXid();
        open_file = file->private_data;
+
+       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+               pid = open_file->pid;
+       else
+               pid = current->tgid;
+
        pTcon = tlink_tcon(open_file->tlink);
        inode = file->f_path.dentry->d_inode;
 
@@ -1616,9 +1594,13 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
                                if (rc != 0)
                                        break;
                        }
-                       rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid,
-                                          cur_len, *poffset, &written,
-                                          to_send, npages, 0);
+                       io_parms.netfid = open_file->netfid;
+                       io_parms.pid = pid;
+                       io_parms.tcon = pTcon;
+                       io_parms.offset = *poffset;
+                       io_parms.length = cur_len;
+                       rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
+                                          npages, 0);
                } while (rc == -EAGAIN);
 
                for (i = 0; i < npages; i++)
@@ -1711,10 +1693,12 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
        size_t len, cur_len;
        int iov_offset = 0;
        struct cifs_sb_info *cifs_sb;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
        struct cifsFileInfo *open_file;
        struct smb_com_read_rsp *pSMBr;
+       struct cifs_io_parms io_parms;
        char *read_data;
+       __u32 pid;
 
        if (!nr_segs)
                return 0;
@@ -1729,6 +1713,11 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
        open_file = file->private_data;
        pTcon = tlink_tcon(open_file->tlink);
 
+       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+               pid = open_file->pid;
+       else
+               pid = current->tgid;
+
        if ((file->f_flags & O_ACCMODE) == O_WRONLY)
                cFYI(1, "attempting read on write only file instance");
 
@@ -1744,8 +1733,12 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
                                if (rc != 0)
                                        break;
                        }
-                       rc = CIFSSMBRead(xid, pTcon, open_file->netfid,
-                                        cur_len, *poffset, &bytes_read,
+                       io_parms.netfid = open_file->netfid;
+                       io_parms.pid = pid;
+                       io_parms.tcon = pTcon;
+                       io_parms.offset = *poffset;
+                       io_parms.length = len;
+                       rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
                                         &read_data, &buf_type);
                        pSMBr = (struct smb_com_read_rsp *)read_data;
                        if (read_data) {
@@ -1822,11 +1815,13 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
        unsigned int total_read;
        unsigned int current_read_size;
        struct cifs_sb_info *cifs_sb;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
        int xid;
        char *current_offset;
        struct cifsFileInfo *open_file;
+       struct cifs_io_parms io_parms;
        int buf_type = CIFS_NO_BUFFER;
+       __u32 pid;
 
        xid = GetXid();
        cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
@@ -1839,6 +1834,11 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
        open_file = file->private_data;
        pTcon = tlink_tcon(open_file->tlink);
 
+       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+               pid = open_file->pid;
+       else
+               pid = current->tgid;
+
        if ((file->f_flags & O_ACCMODE) == O_WRONLY)
                cFYI(1, "attempting read on write only file instance");
 
@@ -1861,11 +1861,13 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
                                if (rc != 0)
                                        break;
                        }
-                       rc = CIFSSMBRead(xid, pTcon,
-                                        open_file->netfid,
-                                        current_read_size, *poffset,
-                                        &bytes_read, &current_offset,
-                                        &buf_type);
+                       io_parms.netfid = open_file->netfid;
+                       io_parms.pid = pid;
+                       io_parms.tcon = pTcon;
+                       io_parms.offset = *poffset;
+                       io_parms.length = current_read_size;
+                       rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
+                                        &current_offset, &buf_type);
                }
                if (rc || (bytes_read == 0)) {
                        if (total_read) {
@@ -1996,13 +1998,15 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
        loff_t offset;
        struct page *page;
        struct cifs_sb_info *cifs_sb;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
        unsigned int bytes_read = 0;
        unsigned int read_size, i;
        char *smb_read_data = NULL;
        struct smb_com_read_rsp *pSMBr;
        struct cifsFileInfo *open_file;
+       struct cifs_io_parms io_parms;
        int buf_type = CIFS_NO_BUFFER;
+       __u32 pid;
 
        xid = GetXid();
        if (file->private_data == NULL) {
@@ -2024,6 +2028,11 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
                goto read_complete;
 
        cFYI(DBG2, "rpages: num pages %d", num_pages);
+       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
+               pid = open_file->pid;
+       else
+               pid = current->tgid;
+
        for (i = 0; i < num_pages; ) {
                unsigned contig_pages;
                struct page *tmp_page;
@@ -2065,12 +2074,13 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
                                if (rc != 0)
                                        break;
                        }
-
-                       rc = CIFSSMBRead(xid, pTcon,
-                                        open_file->netfid,
-                                        read_size, offset,
-                                        &bytes_read, &smb_read_data,
-                                        &buf_type);
+                       io_parms.netfid = open_file->netfid;
+                       io_parms.pid = pid;
+                       io_parms.tcon = pTcon;
+                       io_parms.offset = offset;
+                       io_parms.length = read_size;
+                       rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
+                                        &smb_read_data, &buf_type);
                        /* BB more RC checks ? */
                        if (rc == -EAGAIN) {
                                if (smb_read_data) {
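
The rewritten cifs_writepages() above now gathers its own batch of dirty pages because, as its new comment notes, find_get_pages_tag() returns at most a bounded number of pages per call, which would otherwise cap the effective wsize. A compact sketch of that gather loop, using placeholder types and a stand-in lookup routine rather than the real page-cache API, looks like this:

struct page;                            /* opaque placeholder */

/*
 * Stand-in for find_get_pages_tag(): returns up to 'want' dirty pages
 * starting at *index and advances *index past the pages it returned.
 */
extern unsigned int lookup_dirty_batch(unsigned long *index, unsigned int want,
                                       struct page **pages);

/* Keep calling the bounded lookup until the array is full or pages run out. */
static unsigned int gather_dirty_pages(unsigned long *index, unsigned long end,
                                       unsigned int tofind, struct page **pages)
{
        unsigned int found = 0, nr;

        do {
                nr = lookup_dirty_batch(index, tofind, pages);
                found += nr;
                tofind -= nr;
                pages += nr;            /* append behind what is already gathered */
        } while (nr && tofind && *index <= end);

        return found;
}

The gathered batch is then trimmed to the pages that could actually be locked and handed to cifs_async_writev() as one cifs_writedata request, replacing the old per-pagevec CIFSSMBWrite2() path.
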
index 297a43d0ff7f5a4716d13eccd595286b44b3af65..d368a47ba5ebf317b377a0c34b0aeb2938a6c871 100644 (file)
@@ -40,7 +40,7 @@ void cifs_fscache_release_client_cookie(struct TCP_Server_Info *server)
        server->fscache = NULL;
 }
 
-void cifs_fscache_get_super_cookie(struct cifsTconInfo *tcon)
+void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
 {
        struct TCP_Server_Info *server = tcon->ses->server;
 
@@ -51,7 +51,7 @@ void cifs_fscache_get_super_cookie(struct cifsTconInfo *tcon)
                                server->fscache, tcon->fscache);
 }
 
-void cifs_fscache_release_super_cookie(struct cifsTconInfo *tcon)
+void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon)
 {
        cFYI(1, "CIFS: releasing superblock cookie (0x%p)", tcon->fscache);
        fscache_relinquish_cookie(tcon->fscache, 0);
@@ -62,7 +62,7 @@ static void cifs_fscache_enable_inode_cookie(struct inode *inode)
 {
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
-       struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+       struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 
        if (cifsi->fscache)
                return;
index 31b88ec2341e2b36fcf10ffdfc9effb02c5b4e36..63539323e0b960f3ecf9788eca6e67ba0ec8ce67 100644 (file)
@@ -40,8 +40,8 @@ extern void cifs_fscache_unregister(void);
  */
 extern void cifs_fscache_get_client_cookie(struct TCP_Server_Info *);
 extern void cifs_fscache_release_client_cookie(struct TCP_Server_Info *);
-extern void cifs_fscache_get_super_cookie(struct cifsTconInfo *);
-extern void cifs_fscache_release_super_cookie(struct cifsTconInfo *);
+extern void cifs_fscache_get_super_cookie(struct cifs_tcon *);
+extern void cifs_fscache_release_super_cookie(struct cifs_tcon *);
 
 extern void cifs_fscache_release_inode_cookie(struct inode *);
 extern void cifs_fscache_set_inode_cookie(struct inode *, struct file *);
@@ -99,9 +99,9 @@ static inline void
 cifs_fscache_get_client_cookie(struct TCP_Server_Info *server) {}
 static inline void
 cifs_fscache_release_client_cookie(struct TCP_Server_Info *server) {}
-static inline void cifs_fscache_get_super_cookie(struct cifsTconInfo *tcon) {}
+static inline void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) {}
 static inline void
-cifs_fscache_release_super_cookie(struct cifsTconInfo *tcon) {}
+cifs_fscache_release_super_cookie(struct cifs_tcon *tcon) {}
 
 static inline void cifs_fscache_release_inode_cookie(struct inode *inode) {}
 static inline void cifs_fscache_set_inode_cookie(struct inode *inode,
index de02ed5e25c2ad09919cf84c23c69b2383e55360..9b018c8334fa1d67bf090db12f8821700341c1df 100644 (file)
@@ -295,7 +295,7 @@ int cifs_get_file_info_unix(struct file *filp)
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct cifsFileInfo *cfile = filp->private_data;
-       struct cifsTconInfo *tcon = tlink_tcon(cfile->tlink);
+       struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
 
        xid = GetXid();
        rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->netfid, &find_data);
@@ -318,7 +318,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
        int rc;
        FILE_UNIX_BASIC_INFO find_data;
        struct cifs_fattr fattr;
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
        struct tcon_link *tlink;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
 
@@ -373,7 +373,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
        int oplock = 0;
        __u16 netfid;
        struct tcon_link *tlink;
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
+       struct cifs_io_parms io_parms;
        char buf[24];
        unsigned int bytes_read;
        char *pbuf;
@@ -405,9 +406,13 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path,
        if (rc == 0) {
                int buf_type = CIFS_NO_BUFFER;
                        /* Read header */
-               rc = CIFSSMBRead(xid, tcon, netfid,
-                                24 /* length */, 0 /* offset */,
-                                &bytes_read, &pbuf, &buf_type);
+               io_parms.netfid = netfid;
+               io_parms.pid = current->tgid;
+               io_parms.tcon = tcon;
+               io_parms.offset = 0;
+               io_parms.length = 24;
+               rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf,
+                                &buf_type);
                if ((rc == 0) && (bytes_read >= 8)) {
                        if (memcmp("IntxBLK", pbuf, 8) == 0) {
                                cFYI(1, "Block device");
@@ -468,7 +473,7 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
        char ea_value[4];
        __u32 mode;
        struct tcon_link *tlink;
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
 
        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink))
@@ -502,7 +507,7 @@ static void
 cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
                       struct cifs_sb_info *cifs_sb, bool adjust_tz)
 {
-       struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+       struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 
        memset(fattr, 0, sizeof(*fattr));
        fattr->cf_cifsattrs = le32_to_cpu(info->Attributes);
@@ -553,7 +558,7 @@ int cifs_get_file_info(struct file *filp)
        struct inode *inode = filp->f_path.dentry->d_inode;
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct cifsFileInfo *cfile = filp->private_data;
-       struct cifsTconInfo *tcon = tlink_tcon(cfile->tlink);
+       struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
 
        xid = GetXid();
        rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data);
@@ -590,7 +595,7 @@ int cifs_get_inode_info(struct inode **pinode,
        struct super_block *sb, int xid, const __u16 *pfid)
 {
        int rc = 0, tmprc;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
        struct tcon_link *tlink;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        char *buf = NULL;
@@ -735,10 +740,10 @@ static const struct inode_operations cifs_ipc_inode_ops = {
        .lookup = cifs_lookup,
 };
 
-char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb,
-                               struct cifsTconInfo *tcon)
+char *cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
+                             struct cifs_tcon *tcon)
 {
-       int pplen = cifs_sb->prepathlen;
+       int pplen = vol->prepath ? strlen(vol->prepath) : 0;
        int dfsplen;
        char *full_path = NULL;
 
@@ -772,7 +777,7 @@ char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb,
                        }
                }
        }
-       strncpy(full_path + dfsplen, cifs_sb->prepath, pplen);
+       strncpy(full_path + dfsplen, vol->prepath, pplen);
        full_path[dfsplen + pplen] = 0; /* add trailing null */
        return full_path;
 }
@@ -884,19 +889,13 @@ struct inode *cifs_root_iget(struct super_block *sb)
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct inode *inode = NULL;
        long rc;
-       char *full_path;
-       struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
-
-       full_path = cifs_build_path_to_root(cifs_sb, tcon);
-       if (full_path == NULL)
-               return ERR_PTR(-ENOMEM);
+       struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 
        xid = GetXid();
        if (tcon->unix_ext)
-               rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
+               rc = cifs_get_inode_info_unix(&inode, "", sb, xid);
        else
-               rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
-                                               xid, NULL);
+               rc = cifs_get_inode_info(&inode, "", NULL, sb, xid, NULL);
 
        if (!inode) {
                inode = ERR_PTR(rc);
@@ -922,7 +921,6 @@ struct inode *cifs_root_iget(struct super_block *sb)
        }
 
 out:
-       kfree(full_path);
        /* can not call macro FreeXid here since in a void func
         * TODO: This is no longer true
         */
@@ -943,7 +941,7 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, int xid,
        struct cifsInodeInfo *cifsInode = CIFS_I(inode);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct tcon_link *tlink = NULL;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
        FILE_BASIC_INFO info_buf;
 
        if (attrs == NULL)
@@ -1061,7 +1059,7 @@ cifs_rename_pending_delete(char *full_path, struct dentry *dentry, int xid)
        struct cifsInodeInfo *cifsInode = CIFS_I(inode);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct tcon_link *tlink;
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
        __u32 dosattr, origattr;
        FILE_BASIC_INFO *info_buf = NULL;
 
@@ -1179,7 +1177,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
        struct super_block *sb = dir->i_sb;
        struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
        struct tcon_link *tlink;
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
        struct iattr *attrs = NULL;
        __u32 dosattr = 0, origattr = 0;
 
@@ -1277,7 +1275,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
        int xid;
        struct cifs_sb_info *cifs_sb;
        struct tcon_link *tlink;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
        char *full_path = NULL;
        struct inode *newinode = NULL;
        struct cifs_fattr fattr;
@@ -1455,7 +1453,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
        int xid;
        struct cifs_sb_info *cifs_sb;
        struct tcon_link *tlink;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
        char *full_path = NULL;
        struct cifsInodeInfo *cifsInode;
 
@@ -1512,7 +1510,7 @@ cifs_do_rename(int xid, struct dentry *from_dentry, const char *fromPath,
 {
        struct cifs_sb_info *cifs_sb = CIFS_SB(from_dentry->d_sb);
        struct tcon_link *tlink;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
        __u16 srcfid;
        int oplock, rc;
 
@@ -1564,7 +1562,7 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry,
        char *toName = NULL;
        struct cifs_sb_info *cifs_sb;
        struct tcon_link *tlink;
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
        FILE_UNIX_BASIC_INFO *info_buf_source = NULL;
        FILE_UNIX_BASIC_INFO *info_buf_target;
        int xid, rc, tmprc;
@@ -1794,7 +1792,7 @@ int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
                 struct kstat *stat)
 {
        struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb);
-       struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb);
+       struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
        struct inode *inode = dentry->d_inode;
        int rc;
 
@@ -1872,7 +1870,8 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
        struct cifsInodeInfo *cifsInode = CIFS_I(inode);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct tcon_link *tlink = NULL;
-       struct cifsTconInfo *pTcon = NULL;
+       struct cifs_tcon *pTcon = NULL;
+       struct cifs_io_parms io_parms;
 
        /*
         * To avoid spurious oplock breaks from server, in the case of
@@ -1894,8 +1893,14 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
                cFYI(1, "SetFSize for attrs rc = %d", rc);
                if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
                        unsigned int bytes_written;
-                       rc = CIFSSMBWrite(xid, pTcon, nfid, 0, attrs->ia_size,
-                                         &bytes_written, NULL, NULL, 1);
+
+                       io_parms.netfid = nfid;
+                       io_parms.pid = npid;
+                       io_parms.tcon = pTcon;
+                       io_parms.offset = 0;
+                       io_parms.length = attrs->ia_size;
+                       rc = CIFSSMBWrite(xid, &io_parms, &bytes_written,
+                                         NULL, NULL, 1);
                        cFYI(1, "Wrt seteof rc %d", rc);
                }
        } else
@@ -1930,10 +1935,15 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
                                        CIFS_MOUNT_MAP_SPECIAL_CHR);
                        if (rc == 0) {
                                unsigned int bytes_written;
-                               rc = CIFSSMBWrite(xid, pTcon, netfid, 0,
-                                                 attrs->ia_size,
-                                                 &bytes_written, NULL,
-                                                 NULL, 1);
+
+                               io_parms.netfid = netfid;
+                               io_parms.pid = current->tgid;
+                               io_parms.tcon = pTcon;
+                               io_parms.offset = 0;
+                               io_parms.length = attrs->ia_size;
+                               rc = CIFSSMBWrite(xid, &io_parms,
+                                                 &bytes_written,
+                                                 NULL, NULL,  1);
                                cFYI(1, "wrt seteof rc %d", rc);
                                CIFSSMBClose(xid, pTcon, netfid);
                        }
@@ -1961,7 +1971,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
        struct cifsInodeInfo *cifsInode = CIFS_I(inode);
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct tcon_link *tlink;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
        struct cifs_unix_set_info_args *args = NULL;
        struct cifsFileInfo *open_file;
 
@@ -2247,7 +2257,7 @@ cifs_setattr(struct dentry *direntry, struct iattr *attrs)
 {
        struct inode *inode = direntry->d_inode;
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
-       struct cifsTconInfo *pTcon = cifs_sb_master_tcon(cifs_sb);
+       struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb);
 
        if (pTcon->unix_ext)
                return cifs_setattr_unix(direntry, attrs);
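
The cifs_sfu_type() hunk above switches the 24-byte header read to the io_parms form; the surviving context then classifies the file by an 8-byte magic prefix such as "IntxBLK". A minimal sketch of that prefix check, assuming only the one magic visible in this excerpt (the full function recognizes further prefixes not shown here):

#include <string.h>

enum sfu_type_sketch { SFU_UNKNOWN, SFU_BLOCK_DEV };

/*
 * Classify a Services-for-Unix special file from the first bytes of its
 * 24-byte header.  Only the "IntxBLK" (block device) magic that appears in
 * this excerpt is handled; anything else is reported as unknown.
 */
static enum sfu_type_sketch classify_sfu_header(const char *pbuf,
                                                unsigned int bytes_read)
{
        if (bytes_read >= 8 && memcmp("IntxBLK", pbuf, 8) == 0)
                return SFU_BLOCK_DEV;   /* the "Block device" branch in the diff */
        return SFU_UNKNOWN;
}
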
index 0c98672d01225ccadb67bb09705e7c21922332c7..4221b5e48a426af74b540105ec255026291e8c05 100644 (file)
@@ -38,7 +38,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
        struct cifs_sb_info *cifs_sb;
 #ifdef CONFIG_CIFS_POSIX
        struct cifsFileInfo *pSMBFile = filep->private_data;
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
        __u64   ExtAttrBits = 0;
        __u64   ExtAttrMask = 0;
        __u64   caps;
index ce417a9764a3fe5c3963e1e93fe5e5b17bf7390c..556b1a0b54de95159aede9e710d47d66a6bc97d6 100644 (file)
@@ -175,7 +175,7 @@ CIFSFormatMFSymlink(u8 *buf, unsigned int buf_len, const char *link_str)
 }
 
 static int
-CIFSCreateMFSymLink(const int xid, struct cifsTconInfo *tcon,
+CIFSCreateMFSymLink(const int xid, struct cifs_tcon *tcon,
                    const char *fromName, const char *toName,
                    const struct nls_table *nls_codepage, int remap)
 {
@@ -184,6 +184,7 @@ CIFSCreateMFSymLink(const int xid, struct cifsTconInfo *tcon,
        __u16 netfid = 0;
        u8 *buf;
        unsigned int bytes_written = 0;
+       struct cifs_io_parms io_parms;
 
        buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
        if (!buf)
@@ -203,10 +204,13 @@ CIFSCreateMFSymLink(const int xid, struct cifsTconInfo *tcon,
                return rc;
        }
 
-       rc = CIFSSMBWrite(xid, tcon, netfid,
-                         CIFS_MF_SYMLINK_FILE_SIZE /* length */,
-                         0 /* offset */,
-                         &bytes_written, buf, NULL, 0);
+       io_parms.netfid = netfid;
+       io_parms.pid = current->tgid;
+       io_parms.tcon = tcon;
+       io_parms.offset = 0;
+       io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
+
+       rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, buf, NULL, 0);
        CIFSSMBClose(xid, tcon, netfid);
        kfree(buf);
        if (rc != 0)
@@ -219,7 +223,7 @@ CIFSCreateMFSymLink(const int xid, struct cifsTconInfo *tcon,
 }
 
 static int
-CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon,
+CIFSQueryMFSymLink(const int xid, struct cifs_tcon *tcon,
                   const unsigned char *searchName, char **symlinkinfo,
                   const struct nls_table *nls_codepage, int remap)
 {
@@ -231,6 +235,7 @@ CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon,
        unsigned int bytes_read = 0;
        int buf_type = CIFS_NO_BUFFER;
        unsigned int link_len = 0;
+       struct cifs_io_parms io_parms;
        FILE_ALL_INFO file_info;
 
        rc = CIFSSMBOpen(xid, tcon, searchName, FILE_OPEN, GENERIC_READ,
@@ -249,11 +254,13 @@ CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon,
        if (!buf)
                return -ENOMEM;
        pbuf = buf;
+       io_parms.netfid = netfid;
+       io_parms.pid = current->tgid;
+       io_parms.tcon = tcon;
+       io_parms.offset = 0;
+       io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
 
-       rc = CIFSSMBRead(xid, tcon, netfid,
-                        CIFS_MF_SYMLINK_FILE_SIZE /* length */,
-                        0 /* offset */,
-                        &bytes_read, &pbuf, &buf_type);
+       rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type);
        CIFSSMBClose(xid, tcon, netfid);
        if (rc != 0) {
                kfree(buf);
@@ -291,7 +298,8 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr,
        int oplock = 0;
        __u16 netfid = 0;
        struct tcon_link *tlink;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
+       struct cifs_io_parms io_parms;
        u8 *buf;
        char *pbuf;
        unsigned int bytes_read = 0;
@@ -328,11 +336,13 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr,
                goto out;
        }
        pbuf = buf;
+       io_parms.netfid = netfid;
+       io_parms.pid = current->tgid;
+       io_parms.tcon = pTcon;
+       io_parms.offset = 0;
+       io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
 
-       rc = CIFSSMBRead(xid, pTcon, netfid,
-                        CIFS_MF_SYMLINK_FILE_SIZE /* length */,
-                        0 /* offset */,
-                        &bytes_read, &pbuf, &buf_type);
+       rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type);
        CIFSSMBClose(xid, pTcon, netfid);
        if (rc != 0) {
                kfree(buf);
@@ -370,7 +380,7 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
        char *toName = NULL;
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct tcon_link *tlink;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
        struct cifsInodeInfo *cifsInode;
 
        tlink = cifs_sb_tlink(cifs_sb);
@@ -445,7 +455,7 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
        char *target_path = NULL;
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct tcon_link *tlink = NULL;
-       struct cifsTconInfo *tcon;
+       struct cifs_tcon *tcon;
 
        xid = GetXid();
 
@@ -518,7 +528,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
        int xid;
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct tcon_link *tlink;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
        char *full_path = NULL;
        struct inode *newinode = NULL;
 
index 907531ac58886dda635f9f9a739c4c223ec4835d..03a1f491d39b494632ecf09845c84ac839f6a6eb 100644 (file)
@@ -67,12 +67,12 @@ _FreeXid(unsigned int xid)
        spin_unlock(&GlobalMid_Lock);
 }
 
-struct cifsSesInfo *
+struct cifs_ses *
 sesInfoAlloc(void)
 {
-       struct cifsSesInfo *ret_buf;
+       struct cifs_ses *ret_buf;
 
-       ret_buf = kzalloc(sizeof(struct cifsSesInfo), GFP_KERNEL);
+       ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
        if (ret_buf) {
                atomic_inc(&sesInfoAllocCount);
                ret_buf->status = CifsNew;
@@ -85,7 +85,7 @@ sesInfoAlloc(void)
 }
 
 void
-sesInfoFree(struct cifsSesInfo *buf_to_free)
+sesInfoFree(struct cifs_ses *buf_to_free)
 {
        if (buf_to_free == NULL) {
                cFYI(1, "Null buffer passed to sesInfoFree");
@@ -105,11 +105,11 @@ sesInfoFree(struct cifsSesInfo *buf_to_free)
        kfree(buf_to_free);
 }
 
-struct cifsTconInfo *
+struct cifs_tcon *
 tconInfoAlloc(void)
 {
-       struct cifsTconInfo *ret_buf;
-       ret_buf = kzalloc(sizeof(struct cifsTconInfo), GFP_KERNEL);
+       struct cifs_tcon *ret_buf;
+       ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
        if (ret_buf) {
                atomic_inc(&tconInfoAllocCount);
                ret_buf->tidStatus = CifsNew;
@@ -124,7 +124,7 @@ tconInfoAlloc(void)
 }
 
 void
-tconInfoFree(struct cifsTconInfo *buf_to_free)
+tconInfoFree(struct cifs_tcon *buf_to_free)
 {
        if (buf_to_free == NULL) {
                cFYI(1, "Null buffer passed to tconInfoFree");
@@ -295,11 +295,11 @@ __u16 GetNextMid(struct TCP_Server_Info *server)
   case it is responsibility of caller to set the mid */
 void
 header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
-               const struct cifsTconInfo *treeCon, int word_count
+               const struct cifs_tcon *treeCon, int word_count
                /* length of fixed section (word count) in two byte units  */)
 {
        struct list_head *temp_item;
-       struct cifsSesInfo *ses;
+       struct cifs_ses *ses;
        char *temp = (char *) buffer;
 
        memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
@@ -359,7 +359,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
                                                 "did not match tcon uid");
                                        spin_lock(&cifs_tcp_ses_lock);
                                        list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
-                                               ses = list_entry(temp_item, struct cifsSesInfo, smb_ses_list);
+                                               ses = list_entry(temp_item, struct cifs_ses, smb_ses_list);
                                                if (ses->linux_uid == current_fsuid()) {
                                                        if (ses->server == treeCon->ses->server) {
                                                                cFYI(1, "found matching uid substitute right smb_uid");
@@ -380,7 +380,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
                if (treeCon->nocase)
                        buffer->Flags  |= SMBFLG_CASELESS;
                if ((treeCon->ses) && (treeCon->ses->server))
-                       if (treeCon->ses->server->secMode &
+                       if (treeCon->ses->server->sec_mode &
                          (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
                                buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
        }
@@ -507,8 +507,8 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
 {
        struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
        struct list_head *tmp, *tmp1, *tmp2;
-       struct cifsSesInfo *ses;
-       struct cifsTconInfo *tcon;
+       struct cifs_ses *ses;
+       struct cifs_tcon *tcon;
        struct cifsInodeInfo *pCifsInode;
        struct cifsFileInfo *netfile;
 
@@ -566,9 +566,9 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
        /* look up tcon based on tid & uid */
        spin_lock(&cifs_tcp_ses_lock);
        list_for_each(tmp, &srv->smb_ses_list) {
-               ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list);
+               ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
                list_for_each(tmp1, &ses->tcon_list) {
-                       tcon = list_entry(tmp1, struct cifsTconInfo, tcon_list);
+                       tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
                        if (tcon->tid != buf->Tid)
                                continue;
 
index 79b71c2c7c9dd2f4b5f9857e8cf32de73c02fd7b..73e47e84b61a4827fa13ed69a41e205be510234e 100644 (file)
@@ -836,7 +836,7 @@ ntstatus_to_dos(__u32 ntstatus, __u8 *eclass, __u16 *ecode)
 }
 
 int
-map_smb_to_linux_error(struct smb_hdr *smb, int logErr)
+map_smb_to_linux_error(struct smb_hdr *smb, bool logErr)
 {
        unsigned int i;
        int rc = -EIO;  /* if transport error smb error may not be set */
index f8e4cd2a79127a855b8c319b4d6c2d9fbfa5f2c1..6751e745bbc6a5611081a462c36c5159cbf9ee05 100644 (file)
@@ -195,7 +195,7 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
        int len;
        int oplock = 0;
        int rc;
-       struct cifsTconInfo *ptcon = cifs_sb_tcon(cifs_sb);
+       struct cifs_tcon *ptcon = cifs_sb_tcon(cifs_sb);
        char *tmpbuffer;
 
        rc = CIFSSMBOpen(xid, ptcon, full_path, FILE_OPEN, GENERIC_READ,
@@ -223,7 +223,7 @@ static int initiate_cifs_search(const int xid, struct file *file)
        struct cifsFileInfo *cifsFile;
        struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
        struct tcon_link *tlink = NULL;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
 
        if (file->private_data == NULL) {
                tlink = cifs_sb_tlink(cifs_sb);
@@ -496,7 +496,7 @@ static int cifs_save_resume_key(const char *current_entry,
    assume that they are located in the findfirst return buffer.*/
 /* We start counting in the buffer with entry 2 and increment for every
    entry (do not increment for . or .. entry) */
-static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
+static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
        struct file *file, char **ppCurrentEntry, int *num_to_ret)
 {
        int rc = 0;
@@ -764,7 +764,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
 {
        int rc = 0;
        int xid, i;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
        struct cifsFileInfo *cifsFile = NULL;
        char *current_entry;
        int num_to_fill = 0;
index 7dd46210037807aa17f65bb3d2076dbae03bfe4c..3892ab817a36407975d6738a1b20d7c90c846574 100644 (file)
  * the socket has been reestablished (so we know whether to use vc 0).
  * Called while holding the cifs_tcp_ses_lock, so do not block
  */
-static bool is_first_ses_reconnect(struct cifsSesInfo *ses)
+static bool is_first_ses_reconnect(struct cifs_ses *ses)
 {
        struct list_head *tmp;
-       struct cifsSesInfo *tmp_ses;
+       struct cifs_ses *tmp_ses;
 
        list_for_each(tmp, &ses->server->smb_ses_list) {
-               tmp_ses = list_entry(tmp, struct cifsSesInfo,
+               tmp_ses = list_entry(tmp, struct cifs_ses,
                                     smb_ses_list);
                if (tmp_ses->need_reconnect == false)
                        return false;
@@ -61,11 +61,11 @@ static bool is_first_ses_reconnect(struct cifsSesInfo *ses)
  *     any vc but zero (some servers reset the connection on vcnum zero)
  *
  */
-static __le16 get_next_vcnum(struct cifsSesInfo *ses)
+static __le16 get_next_vcnum(struct cifs_ses *ses)
 {
        __u16 vcnum = 0;
        struct list_head *tmp;
-       struct cifsSesInfo *tmp_ses;
+       struct cifs_ses *tmp_ses;
        __u16 max_vcs = ses->server->max_vcs;
        __u16 i;
        int free_vc_found = 0;
@@ -87,7 +87,7 @@ static __le16 get_next_vcnum(struct cifsSesInfo *ses)
                free_vc_found = 1;
 
                list_for_each(tmp, &ses->server->smb_ses_list) {
-                       tmp_ses = list_entry(tmp, struct cifsSesInfo,
+                       tmp_ses = list_entry(tmp, struct cifs_ses,
                                             smb_ses_list);
                        if (tmp_ses->vcnum == i) {
                                free_vc_found = 0;
@@ -114,7 +114,7 @@ get_vc_num_exit:
        return cpu_to_le16(vcnum);
 }
 
-static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
+static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
 {
        __u32 capabilities = 0;
 
@@ -136,7 +136,7 @@ static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
        capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
                        CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
 
-       if (ses->server->secMode &
+       if (ses->server->sec_mode &
            (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
                pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 
@@ -181,7 +181,7 @@ unicode_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp)
        *pbcc_area = bcc_ptr;
 }
 
-static void unicode_domain_string(char **pbcc_area, struct cifsSesInfo *ses,
+static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
                                   const struct nls_table *nls_cp)
 {
        char *bcc_ptr = *pbcc_area;
@@ -204,7 +204,7 @@ static void unicode_domain_string(char **pbcc_area, struct cifsSesInfo *ses,
 }
 
 
-static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
+static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
                                   const struct nls_table *nls_cp)
 {
        char *bcc_ptr = *pbcc_area;
@@ -236,7 +236,7 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
        *pbcc_area = bcc_ptr;
 }
 
-static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
+static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
                                 const struct nls_table *nls_cp)
 {
        char *bcc_ptr = *pbcc_area;
@@ -276,7 +276,7 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
 }
 
 static void
-decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
+decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifs_ses *ses,
                      const struct nls_table *nls_cp)
 {
        int len;
@@ -310,7 +310,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
 }
 
 static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
-                              struct cifsSesInfo *ses,
+                              struct cifs_ses *ses,
                               const struct nls_table *nls_cp)
 {
        int rc = 0;
@@ -364,7 +364,7 @@ static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
 }
 
 static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
-                                   struct cifsSesInfo *ses)
+                                   struct cifs_ses *ses)
 {
        unsigned int tioffset; /* challenge message target info area */
        unsigned int tilen; /* challenge message target info area length  */
@@ -411,7 +411,7 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
 /* We do not malloc the blob, it is passed in pbuffer, because
    it is fixed size, and small, making this approach cleaner */
 static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
-                                        struct cifsSesInfo *ses)
+                                        struct cifs_ses *ses)
 {
        NEGOTIATE_MESSAGE *sec_blob = (NEGOTIATE_MESSAGE *)pbuffer;
        __u32 flags;
@@ -424,7 +424,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
        flags = NTLMSSP_NEGOTIATE_56 |  NTLMSSP_REQUEST_TARGET |
                NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
                NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC;
-       if (ses->server->secMode &
+       if (ses->server->sec_mode &
                        (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
                flags |= NTLMSSP_NEGOTIATE_SIGN;
                if (!ses->server->session_estab)
@@ -449,7 +449,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
    This function returns the length of the data in the blob */
 static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                                        u16 *buflen,
-                                  struct cifsSesInfo *ses,
+                                  struct cifs_ses *ses,
                                   const struct nls_table *nls_cp)
 {
        int rc;
@@ -464,10 +464,10 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO |
                NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
                NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC;
-       if (ses->server->secMode &
+       if (ses->server->sec_mode &
           (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
                flags |= NTLMSSP_NEGOTIATE_SIGN;
-       if (ses->server->secMode & SECMODE_SIGN_REQUIRED)
+       if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
                flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN;
 
        tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
@@ -551,7 +551,7 @@ setup_ntlmv2_ret:
 }
 
 int
-CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
+CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
               const struct nls_table *nls_cp)
 {
        int rc = 0;
@@ -657,7 +657,7 @@ ssetup_ntlmssp_authenticate:
                 */
 
                rc = calc_lanman_hash(ses->password, ses->server->cryptkey,
-                                ses->server->secMode & SECMODE_PW_ENCRYPT ?
+                                ses->server->sec_mode & SECMODE_PW_ENCRYPT ?
                                        true : false, lnm_session_key);
 
                ses->flags |= CIFS_SES_LANMAN;
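
Aside: the session-setup hunks above repeatedly test the renamed sec_mode field against SECMODE_SIGN_REQUIRED / SECMODE_SIGN_ENABLED to decide whether to advertise signing (SMBFLG2_SECURITY_SIGNATURE, NTLMSSP_NEGOTIATE_SIGN, NTLMSSP_NEGOTIATE_ALWAYS_SIGN). A minimal standalone sketch of that flag-negotiation pattern follows; the SEC_* / NEG_* names and values are illustrative stand-ins, not the kernel's definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative security-mode bits, mirroring the SECMODE_* idea. */
    #define SEC_SIGN_ENABLED   0x01
    #define SEC_SIGN_REQUIRED  0x02

    /* Illustrative negotiate flags, mirroring the NTLMSSP_NEGOTIATE_* idea. */
    #define NEG_SIGN         0x0010
    #define NEG_ALWAYS_SIGN  0x0020

    /* Decide which signing-related flags to advertise for a given server mode. */
    static uint32_t signing_flags(uint8_t sec_mode)
    {
            uint32_t flags = 0;

            if (sec_mode & (SEC_SIGN_ENABLED | SEC_SIGN_REQUIRED))
                    flags |= NEG_SIGN;        /* server supports signing */
            if (sec_mode & SEC_SIGN_REQUIRED)
                    flags |= NEG_ALWAYS_SIGN; /* server insists on it */
            return flags;
    }

    int main(void)
    {
            printf("signing enabled : 0x%x\n", (unsigned)signing_flags(SEC_SIGN_ENABLED));
            printf("signing required: 0x%x\n", (unsigned)signing_flags(SEC_SIGN_REQUIRED));
            return 0;
    }
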
index f2513fb8c391d5bdd39eaeef08987165e1ac2006..147aa22c3c3a4b0f2b647b6978ec31340ff1a5da 100644 (file)
@@ -295,7 +295,7 @@ static int wait_for_free_request(struct TCP_Server_Info *server,
        return 0;
 }
 
-static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf,
+static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
                        struct mid_q_entry **ppmidQ)
 {
        if (ses->server->tcpStatus == CifsExiting) {
@@ -342,22 +342,24 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
  * the result. Caller is responsible for dealing with timeouts.
  */
 int
-cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
-               mid_callback_t *callback, void *cbdata)
+cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
+               unsigned int nvec, mid_callback_t *callback, void *cbdata,
+               bool ignore_pend)
 {
        int rc;
        struct mid_q_entry *mid;
+       struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
 
-       rc = wait_for_free_request(server, CIFS_ASYNC_OP);
+       rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0);
        if (rc)
                return rc;
 
        /* enable signing if server requires it */
-       if (server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
-               in_buf->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+       if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+               hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 
        mutex_lock(&server->srv_mutex);
-       mid = AllocMidQEntry(in_buf, server);
+       mid = AllocMidQEntry(hdr, server);
        if (mid == NULL) {
                mutex_unlock(&server->srv_mutex);
                return -ENOMEM;
@@ -368,7 +370,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&GlobalMid_Lock);
 
-       rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
+       rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number);
        if (rc) {
                mutex_unlock(&server->srv_mutex);
                goto out_err;
@@ -380,7 +382,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
 #ifdef CONFIG_CIFS_STATS2
        atomic_inc(&server->inSend);
 #endif
-       rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
+       rc = smb_sendv(server, iov, nvec);
 #ifdef CONFIG_CIFS_STATS2
        atomic_dec(&server->inSend);
        mid->when_sent = jiffies;
@@ -407,7 +409,7 @@ out_err:
  *
  */
 int
-SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
+SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
                struct smb_hdr *in_buf, int flags)
 {
        int rc;
@@ -424,7 +426,7 @@ SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
 }
 
 static int
-sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
+cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
 {
        int rc = 0;
 
@@ -432,28 +434,21 @@ sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
                mid->mid, mid->midState);
 
        spin_lock(&GlobalMid_Lock);
-       /* ensure that it's no longer on the pending_mid_q */
-       list_del_init(&mid->qhead);
-
        switch (mid->midState) {
        case MID_RESPONSE_RECEIVED:
                spin_unlock(&GlobalMid_Lock);
                return rc;
-       case MID_REQUEST_SUBMITTED:
-               /* socket is going down, reject all calls */
-               if (server->tcpStatus == CifsExiting) {
-                       cERROR(1, "%s: canceling mid=%d cmd=0x%x state=%d",
-                              __func__, mid->mid, mid->command, mid->midState);
-                       rc = -EHOSTDOWN;
-                       break;
-               }
        case MID_RETRY_NEEDED:
                rc = -EAGAIN;
                break;
        case MID_RESPONSE_MALFORMED:
                rc = -EIO;
                break;
+       case MID_SHUTDOWN:
+               rc = -EHOSTDOWN;
+               break;
        default:
+               list_del_init(&mid->qhead);
                cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__,
                        mid->mid, mid->midState);
                rc = -EIO;
@@ -502,13 +497,31 @@ send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
 }
 
 int
-SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
+cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
+                  bool log_error)
+{
+       dump_smb(mid->resp_buf,
+                min_t(u32, 92, be32_to_cpu(mid->resp_buf->smb_buf_length)));
+
+       /* convert the length into a more usable form */
+       if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
+               /* FIXME: add code to kill session */
+               if (cifs_verify_signature(mid->resp_buf, server,
+                                         mid->sequence_number + 1) != 0)
+                       cERROR(1, "Unexpected SMB signature");
+       }
+
+       /* BB special case reconnect tid and uid here? */
+       return map_smb_to_linux_error(mid->resp_buf, log_error);
+}
+
+int
+SendReceive2(const unsigned int xid, struct cifs_ses *ses,
             struct kvec *iov, int n_vec, int *pRespBufType /* ret */,
             const int flags)
 {
        int rc = 0;
        int long_op;
-       unsigned int receive_len;
        struct mid_q_entry *midQ;
        struct smb_hdr *in_buf = iov[0].iov_base;
 
@@ -598,61 +611,31 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
 
        cifs_small_buf_release(in_buf);
 
-       rc = sync_mid_result(midQ, ses->server);
+       rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0) {
                atomic_dec(&ses->server->inFlight);
                wake_up(&ses->server->request_q);
                return rc;
        }
 
-       receive_len = be32_to_cpu(midQ->resp_buf->smb_buf_length);
-
-       if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
-               cERROR(1, "Frame too large received.  Length: %d  Xid: %d",
-                       receive_len, xid);
+       if (!midQ->resp_buf || midQ->midState != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
+               cFYI(1, "Bad MID state?");
                goto out;
        }
 
-       /* rcvd frame is ok */
-
-       if (midQ->resp_buf &&
-           (midQ->midState == MID_RESPONSE_RECEIVED)) {
-
-               iov[0].iov_base = (char *)midQ->resp_buf;
-               if (midQ->largeBuf)
-                       *pRespBufType = CIFS_LARGE_BUFFER;
-               else
-                       *pRespBufType = CIFS_SMALL_BUFFER;
-               iov[0].iov_len = receive_len + 4;
-
-               dump_smb(midQ->resp_buf, 80);
-               /* convert the length into a more usable form */
-               if ((receive_len > 24) &&
-                   (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
-                                            SECMODE_SIGN_ENABLED))) {
-                       rc = cifs_verify_signature(midQ->resp_buf,
-                                               ses->server,
-                                               midQ->sequence_number+1);
-                       if (rc) {
-                               cERROR(1, "Unexpected SMB signature");
-                               /* BB FIXME add code to kill session */
-                       }
-               }
-
-               /* BB special case reconnect tid and uid here? */
-               rc = map_smb_to_linux_error(midQ->resp_buf,
-                                           flags & CIFS_LOG_ERROR);
+       iov[0].iov_base = (char *)midQ->resp_buf;
+       iov[0].iov_len = be32_to_cpu(midQ->resp_buf->smb_buf_length) + 4;
+       if (midQ->largeBuf)
+               *pRespBufType = CIFS_LARGE_BUFFER;
+       else
+               *pRespBufType = CIFS_SMALL_BUFFER;
 
-               if ((flags & CIFS_NO_RESP) == 0)
-                       midQ->resp_buf = NULL;  /* mark it so buf will
-                                                  not be freed by
-                                                  delete_mid */
-       } else {
-               rc = -EIO;
-               cFYI(1, "Bad MID state?");
-       }
+       rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR);
 
+       /* mark it so buf will not be freed by delete_mid */
+       if ((flags & CIFS_NO_RESP) == 0)
+               midQ->resp_buf = NULL;
 out:
        delete_mid(midQ);
        atomic_dec(&ses->server->inFlight);
@@ -662,12 +645,11 @@ out:
 }
 
 int
-SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
+SendReceive(const unsigned int xid, struct cifs_ses *ses,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned, const int long_op)
 {
        int rc = 0;
-       unsigned int receive_len;
        struct mid_q_entry *midQ;
 
        if (ses == NULL) {
@@ -750,54 +732,23 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
                spin_unlock(&GlobalMid_Lock);
        }
 
-       rc = sync_mid_result(midQ, ses->server);
+       rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0) {
                atomic_dec(&ses->server->inFlight);
                wake_up(&ses->server->request_q);
                return rc;
        }
 
-       receive_len = be32_to_cpu(midQ->resp_buf->smb_buf_length);
-
-       if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
-               cERROR(1, "Frame too large received.  Length: %d  Xid: %d",
-                       receive_len, xid);
-               rc = -EIO;
-               goto out;
-       }
-
-       /* rcvd frame is ok */
-
-       if (midQ->resp_buf && out_buf
-           && (midQ->midState == MID_RESPONSE_RECEIVED)) {
-               out_buf->smb_buf_length = cpu_to_be32(receive_len);
-               memcpy((char *)out_buf + 4,
-                      (char *)midQ->resp_buf + 4,
-                      receive_len);
-
-               dump_smb(out_buf, 92);
-               /* convert the length into a more usable form */
-               if ((receive_len > 24) &&
-                   (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
-                                            SECMODE_SIGN_ENABLED))) {
-                       rc = cifs_verify_signature(out_buf,
-                                               ses->server,
-                                               midQ->sequence_number+1);
-                       if (rc) {
-                               cERROR(1, "Unexpected SMB signature");
-                               /* BB FIXME add code to kill session */
-                       }
-               }
-
-               *pbytes_returned = be32_to_cpu(out_buf->smb_buf_length);
-
-               /* BB special case reconnect tid and uid here? */
-               rc = map_smb_to_linux_error(out_buf, 0 /* no log */ );
-       } else {
+       if (!midQ->resp_buf || !out_buf ||
+           midQ->midState != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cERROR(1, "Bad MID state?");
+               goto out;
        }
 
+       *pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
+       memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
+       rc = cifs_check_receive(midQ, ses->server, 0);
 out:
        delete_mid(midQ);
        atomic_dec(&ses->server->inFlight);
@@ -810,12 +761,12 @@ out:
    blocking lock to return. */
 
 static int
-send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon,
+send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
                        struct smb_hdr *in_buf,
                        struct smb_hdr *out_buf)
 {
        int bytes_returned;
-       struct cifsSesInfo *ses = tcon->ses;
+       struct cifs_ses *ses = tcon->ses;
        LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
 
        /* We just modify the current in_buf to change
@@ -832,15 +783,14 @@ send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon,
 }
 
 int
-SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
+SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
            struct smb_hdr *in_buf, struct smb_hdr *out_buf,
            int *pbytes_returned)
 {
        int rc = 0;
        int rstart = 0;
-       unsigned int receive_len;
        struct mid_q_entry *midQ;
-       struct cifsSesInfo *ses;
+       struct cifs_ses *ses;
 
        if (tcon == NULL || tcon->ses == NULL) {
                cERROR(1, "Null smb session");
@@ -957,50 +907,20 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
                rstart = 1;
        }
 
-       rc = sync_mid_result(midQ, ses->server);
+       rc = cifs_sync_mid_result(midQ, ses->server);
        if (rc != 0)
                return rc;
 
-       receive_len = be32_to_cpu(midQ->resp_buf->smb_buf_length);
-       if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) {
-               cERROR(1, "Frame too large received.  Length: %d  Xid: %d",
-                       receive_len, xid);
-               rc = -EIO;
-               goto out;
-       }
-
        /* rcvd frame is ok */
-
-       if ((out_buf == NULL) || (midQ->midState != MID_RESPONSE_RECEIVED)) {
+       if (out_buf == NULL || midQ->midState != MID_RESPONSE_RECEIVED) {
                rc = -EIO;
                cERROR(1, "Bad MID state?");
                goto out;
        }
 
-       out_buf->smb_buf_length = cpu_to_be32(receive_len);
-       memcpy((char *)out_buf + 4,
-              (char *)midQ->resp_buf + 4,
-              receive_len);
-
-       dump_smb(out_buf, 92);
-       /* convert the length into a more usable form */
-       if ((receive_len > 24) &&
-           (ses->server->secMode & (SECMODE_SIGN_REQUIRED |
-                                    SECMODE_SIGN_ENABLED))) {
-               rc = cifs_verify_signature(out_buf,
-                                          ses->server,
-                                          midQ->sequence_number+1);
-               if (rc) {
-                       cERROR(1, "Unexpected SMB signature");
-                       /* BB FIXME add code to kill session */
-               }
-       }
-
-       *pbytes_returned = be32_to_cpu(out_buf->smb_buf_length);
-
-       /* BB special case reconnect tid and uid here? */
-       rc = map_smb_to_linux_error(out_buf, 0 /* no log */ );
-
+       *pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length);
+       memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
+       rc = cifs_check_receive(midQ, ses->server, 0);
 out:
        delete_mid(midQ);
        if (rstart && rc == -EACCES)
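
Aside: the CIFS transport hunks above collapse the duplicated post-receive steps of SendReceive(), SendReceive2() and SendReceiveBlockingLock() (dump the frame, optionally verify the signature, map the SMB status to an errno) into the new cifs_check_receive() helper. A standalone sketch of that "extract the common tail into one helper" refactor, using toy types and checks rather than the real SMB structures:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy response; the real code works on struct mid_q_entry / smb_hdr. */
    struct response {
            unsigned int len;
            bool signature_ok;
            int status;          /* protocol status to be mapped to an errno */
    };

    /* Common tail shared by every synchronous receive path. */
    static int check_receive(const struct response *r, bool signing, bool log_error)
    {
            if (signing && !r->signature_ok) {
                    if (log_error)
                            fprintf(stderr, "unexpected signature\n");
                    return -1;   /* the kernel would return -EIO here */
            }
            return r->status ? -1 : 0;   /* stand-in for map_smb_to_linux_error() */
    }

    /* Two callers that used to open-code the checks now share them. */
    static int recv_small(const struct response *r)    { return check_receive(r, true, true); }
    static int recv_blocking(const struct response *r) { return check_receive(r, true, false); }

    int main(void)
    {
            struct response ok = { .len = 5, .signature_ok = true, .status = 0 };

            printf("small=%d blocking=%d\n", recv_small(&ok), recv_blocking(&ok));
            return 0;
    }
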
index 912995e013ecb8414b3058543237bc19f8b5e903..2a22fb2989e4780fd977c683b27ddf23e78e7d8e 100644 (file)
@@ -49,7 +49,7 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name)
        int xid;
        struct cifs_sb_info *cifs_sb;
        struct tcon_link *tlink;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
        struct super_block *sb;
        char *full_path = NULL;
 
@@ -109,7 +109,7 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
        int xid;
        struct cifs_sb_info *cifs_sb;
        struct tcon_link *tlink;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
        struct super_block *sb;
        char *full_path;
        struct cifs_ntsd *pacl;
@@ -240,7 +240,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
        int xid;
        struct cifs_sb_info *cifs_sb;
        struct tcon_link *tlink;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
        struct super_block *sb;
        char *full_path;
 
@@ -372,7 +372,7 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size)
        int xid;
        struct cifs_sb_info *cifs_sb;
        struct tcon_link *tlink;
-       struct cifsTconInfo *pTcon;
+       struct cifs_tcon *pTcon;
        struct super_block *sb;
        char *full_path;
 
index 2b8dae4d121ee5ad7d42ff879ce1a7c496d295f0..a46126fd57355642a832a43231b222ad4218b155 100644 (file)
@@ -336,6 +336,8 @@ static int coda_rmdir(struct inode *dir, struct dentry *de)
        int len = de->d_name.len;
        int error;
 
+       dentry_unhash(de);
+
        error = venus_rmdir(dir->i_sb, coda_i2f(dir), name, len);
        if (!error) {
                /* VFS may delete the child */
@@ -359,6 +361,9 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
        int new_length = new_dentry->d_name.len;
        int error;
 
+       if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
+               dentry_unhash(new_dentry);
+
        error = venus_rename(old_dir->i_sb, coda_i2f(old_dir),
                             coda_i2f(new_dir), old_length, new_length,
                             (const char *) old_name, (const char *)new_name);
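
Aside: this hunk (and the configfs and ecryptfs hunks below) adds dentry_unhash() calls so that a directory being removed, or renamed over, is dropped from the dentry hash before the filesystem-specific operation runs; concurrent lookups then cannot return an entry whose backing object is about to go away. Reduced to a toy name cache, the idea looks like the sketch below; the types and helpers are illustrative, not VFS API.

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Toy entry in a name cache, standing in for a dentry. */
    struct entry {
            char name[32];
            bool hashed;   /* visible to lookups? */
            bool exists;   /* backing object still present? */
    };

    static void unhash(struct entry *e) { e->hashed = false; }

    /* Lookup only sees hashed entries, so it cannot race with removal. */
    static struct entry *lookup(struct entry *e, const char *name)
    {
            return (e->hashed && strcmp(e->name, name) == 0) ? e : NULL;
    }

    static void remove_dir(struct entry *e)
    {
            unhash(e);          /* mirrors calling dentry_unhash() first */
            e->exists = false;  /* then tear down the backing object */
    }

    int main(void)
    {
            struct entry d = { "dir", true, true };

            remove_dir(&d);
            printf("lookup after rmdir: %p\n", (void *)lookup(&d, "dir"));
            return 0;
    }
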
index 9a37a9b6de3a23f026f6dc0d9c2906e07b1f8bf0..9d17d350abc55ec3dfffca5282ba12f2d418facd 100644 (file)
@@ -1359,6 +1359,8 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
        struct module *subsys_owner = NULL, *dead_item_owner = NULL;
        int ret;
 
+       dentry_unhash(dentry);
+
        if (dentry->d_parent == configfs_sb->s_root)
                return -EPERM;
 
index b80e0aa3cfa534e6ef130dc09bb36454fbbe260d..5a59efa0bb469ebb9991be5d29bdb98c93233cfd 100644 (file)
@@ -50,7 +50,7 @@ static int __init init_dlm(void)
        if (error)
                goto out_netlink;
 
-       printk("DLM (built %s %s) installed\n", __DATE__, __TIME__);
+       printk("DLM installed\n");
 
        return 0;
 
index 4d4cc6a90cd57f18b330674f8e5320205cc2a5e6..bc116b9ffcf2264c19fe9d725ebeea2a1954e530 100644 (file)
@@ -521,12 +521,16 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
        struct dentry *lower_dir_dentry;
        int rc;
 
+       dentry_unhash(dentry);
+
        lower_dentry = ecryptfs_dentry_to_lower(dentry);
        dget(dentry);
        lower_dir_dentry = lock_parent(lower_dentry);
        dget(lower_dentry);
        rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry);
        dput(lower_dentry);
+       if (!rc && dentry->d_inode)
+               clear_nlink(dentry->d_inode);
        fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode);
        dir->i_nlink = lower_dir_dentry->d_inode->i_nlink;
        unlock_dir(lower_dir_dentry);
@@ -571,6 +575,9 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct dentry *lower_new_dir_dentry;
        struct dentry *trap = NULL;
 
+       if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
+               dentry_unhash(new_dentry);
+
        lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
        lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
        dget(lower_old_dentry);
index 03e609c450120674673a5a6e07460b2014751ed0..27a7fefb83eb07f04d092416654b3b3f4142e55d 100644 (file)
@@ -599,8 +599,8 @@ struct ecryptfs_write_tag_70_packet_silly_stack {
        struct mutex *tfm_mutex;
        char *block_aligned_filename;
        struct ecryptfs_auth_tok *auth_tok;
-       struct scatterlist src_sg;
-       struct scatterlist dst_sg;
+       struct scatterlist src_sg[2];
+       struct scatterlist dst_sg[2];
        struct blkcipher_desc desc;
        char iv[ECRYPTFS_MAX_IV_BYTES];
        char hash[ECRYPTFS_TAG_70_DIGEST_SIZE];
@@ -816,23 +816,21 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
        memcpy(&s->block_aligned_filename[s->num_rand_bytes], filename,
               filename_size);
        rc = virt_to_scatterlist(s->block_aligned_filename,
-                                s->block_aligned_filename_size, &s->src_sg, 1);
-       if (rc != 1) {
+                                s->block_aligned_filename_size, s->src_sg, 2);
+       if (rc < 1) {
                printk(KERN_ERR "%s: Internal error whilst attempting to "
-                      "convert filename memory to scatterlist; "
-                      "expected rc = 1; got rc = [%d]. "
+                      "convert filename memory to scatterlist; rc = [%d]. "
                       "block_aligned_filename_size = [%zd]\n", __func__, rc,
                       s->block_aligned_filename_size);
                goto out_release_free_unlock;
        }
        rc = virt_to_scatterlist(&dest[s->i], s->block_aligned_filename_size,
-                                &s->dst_sg, 1);
-       if (rc != 1) {
+                                s->dst_sg, 2);
+       if (rc < 1) {
                printk(KERN_ERR "%s: Internal error whilst attempting to "
                       "convert encrypted filename memory to scatterlist; "
-                      "expected rc = 1; got rc = [%d]. "
-                      "block_aligned_filename_size = [%zd]\n", __func__, rc,
-                      s->block_aligned_filename_size);
+                      "rc = [%d]. block_aligned_filename_size = [%zd]\n",
+                      __func__, rc, s->block_aligned_filename_size);
                goto out_release_free_unlock;
        }
        /* The characters in the first block effectively do the job
@@ -855,7 +853,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
                       mount_crypt_stat->global_default_fn_cipher_key_bytes);
                goto out_release_free_unlock;
        }
-       rc = crypto_blkcipher_encrypt_iv(&s->desc, &s->dst_sg, &s->src_sg,
+       rc = crypto_blkcipher_encrypt_iv(&s->desc, s->dst_sg, s->src_sg,
                                         s->block_aligned_filename_size);
        if (rc) {
                printk(KERN_ERR "%s: Error attempting to encrypt filename; "
@@ -891,8 +889,8 @@ struct ecryptfs_parse_tag_70_packet_silly_stack {
        struct mutex *tfm_mutex;
        char *decrypted_filename;
        struct ecryptfs_auth_tok *auth_tok;
-       struct scatterlist src_sg;
-       struct scatterlist dst_sg;
+       struct scatterlist src_sg[2];
+       struct scatterlist dst_sg[2];
        struct blkcipher_desc desc;
        char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1];
        char iv[ECRYPTFS_MAX_IV_BYTES];
@@ -1008,13 +1006,12 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
        }
        mutex_lock(s->tfm_mutex);
        rc = virt_to_scatterlist(&data[(*packet_size)],
-                                s->block_aligned_filename_size, &s->src_sg, 1);
-       if (rc != 1) {
+                                s->block_aligned_filename_size, s->src_sg, 2);
+       if (rc < 1) {
                printk(KERN_ERR "%s: Internal error whilst attempting to "
                       "convert encrypted filename memory to scatterlist; "
-                      "expected rc = 1; got rc = [%d]. "
-                      "block_aligned_filename_size = [%zd]\n", __func__, rc,
-                      s->block_aligned_filename_size);
+                      "rc = [%d]. block_aligned_filename_size = [%zd]\n",
+                      __func__, rc, s->block_aligned_filename_size);
                goto out_unlock;
        }
        (*packet_size) += s->block_aligned_filename_size;
@@ -1028,13 +1025,12 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
                goto out_unlock;
        }
        rc = virt_to_scatterlist(s->decrypted_filename,
-                                s->block_aligned_filename_size, &s->dst_sg, 1);
-       if (rc != 1) {
+                                s->block_aligned_filename_size, s->dst_sg, 2);
+       if (rc < 1) {
                printk(KERN_ERR "%s: Internal error whilst attempting to "
                       "convert decrypted filename memory to scatterlist; "
-                      "expected rc = 1; got rc = [%d]. "
-                      "block_aligned_filename_size = [%zd]\n", __func__, rc,
-                      s->block_aligned_filename_size);
+                      "rc = [%d]. block_aligned_filename_size = [%zd]\n",
+                      __func__, rc, s->block_aligned_filename_size);
                goto out_free_unlock;
        }
        /* The characters in the first block effectively do the job of
@@ -1065,7 +1061,7 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
                       mount_crypt_stat->global_default_fn_cipher_key_bytes);
                goto out_free_unlock;
        }
-       rc = crypto_blkcipher_decrypt_iv(&s->desc, &s->dst_sg, &s->src_sg,
+       rc = crypto_blkcipher_decrypt_iv(&s->desc, s->dst_sg, s->src_sg,
                                         s->block_aligned_filename_size);
        if (rc) {
                printk(KERN_ERR "%s: Error attempting to decrypt filename; "
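
Aside: the keystore hunks above widen src_sg/dst_sg to two scatterlist entries and accept any positive return from virt_to_scatterlist(), presumably because the block-aligned filename buffer can straddle a page boundary and then needs two segments. A standalone sketch of that split at a page boundary, with a toy segment type standing in for struct scatterlist:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SZ 4096u

    struct segment { uintptr_t addr; size_t len; };

    /*
     * Split [addr, addr + len) into at most two segments at the first page
     * boundary, the way a buffer crossing a page needs two scatterlist
     * entries.  Returns the number of segments used.
     */
    static int split_at_page(uintptr_t addr, size_t len, struct segment seg[2])
    {
            size_t first = PAGE_SZ - (addr % PAGE_SZ);

            if (len <= first) {
                    seg[0] = (struct segment){ addr, len };
                    return 1;
            }
            seg[0] = (struct segment){ addr, first };
            seg[1] = (struct segment){ addr + first, len - first };
            return 2;
    }

    int main(void)
    {
            struct segment seg[2];
            int n = split_at_page(4096u - 100, 256, seg);  /* crosses a boundary */

            printf("%d segment(s): %zu + %zu bytes\n", n,
                   seg[0].len, n > 1 ? seg[1].len : (size_t)0);
            return 0;
    }
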
index 936f5776655c906c25a40400ace80bc33b643c87..ea5f748906a83218eae60cbb241aaab53cec8ac8 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -42,7 +42,6 @@
 #include <linux/pid_namespace.h>
 #include <linux/module.h>
 #include <linux/namei.h>
-#include <linux/proc_fs.h>
 #include <linux/mount.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
@@ -1624,6 +1623,41 @@ expand_fail:
        return ret;
 }
 
+static int cn_print_exe_file(struct core_name *cn)
+{
+       struct file *exe_file;
+       char *pathbuf, *path, *p;
+       int ret;
+
+       exe_file = get_mm_exe_file(current->mm);
+       if (!exe_file)
+               return cn_printf(cn, "(unknown)");
+
+       pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY);
+       if (!pathbuf) {
+               ret = -ENOMEM;
+               goto put_exe_file;
+       }
+
+       path = d_path(&exe_file->f_path, pathbuf, PATH_MAX);
+       if (IS_ERR(path)) {
+               ret = PTR_ERR(path);
+               goto free_buf;
+       }
+
+       for (p = path; *p; p++)
+               if (*p == '/')
+                       *p = '!';
+
+       ret = cn_printf(cn, "%s", path);
+
+free_buf:
+       kfree(pathbuf);
+put_exe_file:
+       fput(exe_file);
+       return ret;
+}
+
 /* format_corename will inspect the pattern parameter, and output a
  * name into corename, which must have space for at least
  * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
@@ -1695,6 +1729,9 @@ static int format_corename(struct core_name *cn, long signr)
                        case 'e':
                                err = cn_printf(cn, "%s", current->comm);
                                break;
+                       case 'E':
+                               err = cn_print_exe_file(cn);
+                               break;
                        /* core limit size */
                        case 'c':
                                err = cn_printf(cn, "%lu",
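
Aside: the fs/exec.c hunks add a '%E' core_pattern specifier backed by cn_print_exe_file(), which looks up the executable's path and rewrites every '/' as '!' so the whole path fits in one file-name component. A standalone sketch of just that flattening step (the core_pattern plumbing is omitted):

    #include <stdio.h>

    /* Replace '/' with '!' in place, as the new cn_print_exe_file() does for %E. */
    static void flatten_path(char *path)
    {
            char *p;

            for (p = path; *p; p++)
                    if (*p == '/')
                            *p = '!';
    }

    int main(void)
    {
            char path[] = "/usr/bin/sleep";

            flatten_path(path);
            printf("%s\n", path);   /* prints !usr!bin!sleep */
            return 0;
    }
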
index 3c6a9e0eadc18214b45686e175f0a474f638b23e..aad153ef6b783a6820e9f38e60d61d6cbea2c8eb 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/quotaops.h>
 #include <linux/seq_file.h>
 #include <linux/log2.h>
+#include <linux/cleancache.h>
 
 #include <asm/uaccess.h>
 
@@ -1367,6 +1368,7 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
        } else {
                ext3_msg(sb, KERN_INFO, "using internal journal");
        }
+       cleancache_init_fs(sb);
        return res;
 }
 
index c947e36eda6c9d568b0c00b28c778ac2216996d5..04109460ba9e3f22d0115f8cc8b16d4e9b13d3f6 100644 (file)
@@ -6,7 +6,8 @@ obj-$(CONFIG_EXT4_FS) += ext4.o
 
 ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
                ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \
-               ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o
+               ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \
+               mmp.o
 
 ext4-$(CONFIG_EXT4_FS_XATTR)           += xattr.o xattr_user.o xattr_trusted.o
 ext4-$(CONFIG_EXT4_FS_POSIX_ACL)       += acl.o
index 1c67139ad4b422119c76d10f384dad9eabdf9158..264f6949511ef842169438571049ccb736955531 100644 (file)
@@ -361,130 +361,6 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
        return bh;
 }
 
-/**
- * ext4_add_groupblocks() -- Add given blocks to an existing group
- * @handle:                    handle to this transaction
- * @sb:                                super block
- * @block:                     start physcial block to add to the block group
- * @count:                     number of blocks to free
- *
- * This marks the blocks as free in the bitmap. We ask the
- * mballoc to reload the buddy after this by setting group
- * EXT4_GROUP_INFO_NEED_INIT_BIT flag
- */
-void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
-                        ext4_fsblk_t block, unsigned long count)
-{
-       struct buffer_head *bitmap_bh = NULL;
-       struct buffer_head *gd_bh;
-       ext4_group_t block_group;
-       ext4_grpblk_t bit;
-       unsigned int i;
-       struct ext4_group_desc *desc;
-       struct ext4_sb_info *sbi = EXT4_SB(sb);
-       int err = 0, ret, blk_free_count;
-       ext4_grpblk_t blocks_freed;
-       struct ext4_group_info *grp;
-
-       ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
-
-       ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
-       grp = ext4_get_group_info(sb, block_group);
-       /*
-        * Check to see if we are freeing blocks across a group
-        * boundary.
-        */
-       if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
-               goto error_return;
-       }
-       bitmap_bh = ext4_read_block_bitmap(sb, block_group);
-       if (!bitmap_bh)
-               goto error_return;
-       desc = ext4_get_group_desc(sb, block_group, &gd_bh);
-       if (!desc)
-               goto error_return;
-
-       if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
-           in_range(ext4_inode_bitmap(sb, desc), block, count) ||
-           in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
-           in_range(block + count - 1, ext4_inode_table(sb, desc),
-                    sbi->s_itb_per_group)) {
-               ext4_error(sb, "Adding blocks in system zones - "
-                          "Block = %llu, count = %lu",
-                          block, count);
-               goto error_return;
-       }
-
-       /*
-        * We are about to add blocks to the bitmap,
-        * so we need undo access.
-        */
-       BUFFER_TRACE(bitmap_bh, "getting undo access");
-       err = ext4_journal_get_undo_access(handle, bitmap_bh);
-       if (err)
-               goto error_return;
-
-       /*
-        * We are about to modify some metadata.  Call the journal APIs
-        * to unshare ->b_data if a currently-committing transaction is
-        * using it
-        */
-       BUFFER_TRACE(gd_bh, "get_write_access");
-       err = ext4_journal_get_write_access(handle, gd_bh);
-       if (err)
-               goto error_return;
-       /*
-        * make sure we don't allow a parallel init on other groups in the
-        * same buddy cache
-        */
-       down_write(&grp->alloc_sem);
-       for (i = 0, blocks_freed = 0; i < count; i++) {
-               BUFFER_TRACE(bitmap_bh, "clear bit");
-               if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
-                                               bit + i, bitmap_bh->b_data)) {
-                       ext4_error(sb, "bit already cleared for block %llu",
-                                  (ext4_fsblk_t)(block + i));
-                       BUFFER_TRACE(bitmap_bh, "bit already cleared");
-               } else {
-                       blocks_freed++;
-               }
-       }
-       ext4_lock_group(sb, block_group);
-       blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
-       ext4_free_blks_set(sb, desc, blk_free_count);
-       desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
-       ext4_unlock_group(sb, block_group);
-       percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);
-
-       if (sbi->s_log_groups_per_flex) {
-               ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
-               atomic_add(blocks_freed,
-                          &sbi->s_flex_groups[flex_group].free_blocks);
-       }
-       /*
-        * request to reload the buddy with the
-        * new bitmap information
-        */
-       set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
-       grp->bb_free += blocks_freed;
-       up_write(&grp->alloc_sem);
-
-       /* We dirtied the bitmap block */
-       BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
-       err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
-
-       /* And the group descriptor block */
-       BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
-       ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
-       if (!err)
-               err = ret;
-
-error_return:
-       brelse(bitmap_bh);
-       ext4_std_error(sb, err);
-       return;
-}
-
 /**
  * ext4_has_free_blocks()
  * @sbi:       in-core super block structure.
@@ -493,7 +369,8 @@ error_return:
  * Check if filesystem has nblocks free & available for allocation.
  * On success return 1, return 0 on failure.
  */
-static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
+static int ext4_has_free_blocks(struct ext4_sb_info *sbi,
+                               s64 nblocks, unsigned int flags)
 {
        s64 free_blocks, dirty_blocks, root_blocks;
        struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
@@ -507,11 +384,6 @@ static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
                                                EXT4_FREEBLOCKS_WATERMARK) {
                free_blocks  = percpu_counter_sum_positive(fbc);
                dirty_blocks = percpu_counter_sum_positive(dbc);
-               if (dirty_blocks < 0) {
-                       printk(KERN_CRIT "Dirty block accounting "
-                                       "went wrong %lld\n",
-                                       (long long)dirty_blocks);
-               }
        }
        /* Check whether we have space after
         * accounting for current dirty blocks & root reserved blocks.
@@ -522,7 +394,9 @@ static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
        /* Hm, nope.  Are (enough) root reserved blocks available? */
        if (sbi->s_resuid == current_fsuid() ||
            ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
-           capable(CAP_SYS_RESOURCE)) {
+           capable(CAP_SYS_RESOURCE) ||
+               (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
+
                if (free_blocks >= (nblocks + dirty_blocks))
                        return 1;
        }
@@ -531,9 +405,9 @@ static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
 }
 
 int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
-                                               s64 nblocks)
+                          s64 nblocks, unsigned int flags)
 {
-       if (ext4_has_free_blocks(sbi, nblocks)) {
+       if (ext4_has_free_blocks(sbi, nblocks, flags)) {
                percpu_counter_add(&sbi->s_dirtyblocks_counter, nblocks);
                return 0;
        } else
@@ -554,7 +428,7 @@ int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
  */
 int ext4_should_retry_alloc(struct super_block *sb, int *retries)
 {
-       if (!ext4_has_free_blocks(EXT4_SB(sb), 1) ||
+       if (!ext4_has_free_blocks(EXT4_SB(sb), 1, 0) ||
            (*retries)++ > 3 ||
            !EXT4_SB(sb)->s_journal)
                return 0;
@@ -577,7 +451,8 @@ int ext4_should_retry_alloc(struct super_block *sb, int *retries)
  * error stores in errp pointer
  */
 ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
-               ext4_fsblk_t goal, unsigned long *count, int *errp)
+                                 ext4_fsblk_t goal, unsigned int flags,
+                                 unsigned long *count, int *errp)
 {
        struct ext4_allocation_request ar;
        ext4_fsblk_t ret;
@@ -587,6 +462,7 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
        ar.inode = inode;
        ar.goal = goal;
        ar.len = count ? *count : 1;
+       ar.flags = flags;
 
        ret = ext4_mb_new_blocks(handle, &ar, errp);
        if (count)
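
Aside: these allocator hunks thread a flags argument through ext4_has_free_blocks() and ext4_claim_free_blocks() so that a caller passing EXT4_MB_USE_ROOT_BLOCKS (or a privileged caller) may dip into the root-reserved blocks. A standalone sketch of that two-tier check; the numbers are made up and a plain boolean stands in for the resuid/resgid/capability tests:

    #include <stdbool.h>
    #include <stdio.h>

    #define USE_ROOT_BLOCKS 0x1000   /* mirrors EXT4_MB_USE_ROOT_BLOCKS */

    struct counters {
            long long free_blocks;
            long long dirty_blocks;  /* delalloc blocks already promised */
            long long root_blocks;   /* reserve kept back for privileged users */
    };

    /* May 'nblocks' be claimed?  Ordinary callers must leave the reserve alone. */
    static bool has_free_blocks(const struct counters *c, long long nblocks,
                                unsigned int flags, bool privileged)
    {
            long long needed = nblocks + c->dirty_blocks;

            if (c->free_blocks - c->root_blocks >= needed)
                    return true;                      /* fits without the reserve */
            if (privileged || (flags & USE_ROOT_BLOCKS))
                    return c->free_blocks >= needed;  /* may eat into the reserve */
            return false;
    }

    int main(void)
    {
            struct counters c = { .free_blocks = 100, .dirty_blocks = 10, .root_blocks = 50 };

            printf("plain: %d  with reserve: %d\n",
                   has_free_blocks(&c, 60, 0, false),
                   has_free_blocks(&c, 60, USE_ROOT_BLOCKS, false));
            return 0;
    }
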
index 4daaf2b753f4fa037fd783ac34e47e15d1438b55..a74b89c09f90f1dcc587b9e5b81b0dfc222257db 100644 (file)
@@ -108,7 +108,8 @@ typedef unsigned int ext4_group_t;
 #define EXT4_MB_DELALLOC_RESERVED      0x0400
 /* We are doing stream allocation */
 #define EXT4_MB_STREAM_ALLOC           0x0800
-
+/* Use reserved root blocks if needed */
+#define EXT4_MB_USE_ROOT_BLOCKS                0x1000
 
 struct ext4_allocation_request {
        /* target inode for block we're allocating */
@@ -209,6 +210,8 @@ struct ext4_io_submit {
  */
 #define        EXT4_BAD_INO             1      /* Bad blocks inode */
 #define EXT4_ROOT_INO           2      /* Root inode */
+#define EXT4_USR_QUOTA_INO      3      /* User quota inode */
+#define EXT4_GRP_QUOTA_INO      4      /* Group quota inode */
 #define EXT4_BOOT_LOADER_INO    5      /* Boot loader inode */
 #define EXT4_UNDEL_DIR_INO      6      /* Undelete directory inode */
 #define EXT4_RESIZE_INO                 7      /* Reserved group descriptors inode */
@@ -512,6 +515,10 @@ struct ext4_new_group_data {
        /* Convert extent to initialized after IO complete */
 #define EXT4_GET_BLOCKS_IO_CONVERT_EXT         (EXT4_GET_BLOCKS_CONVERT|\
                                         EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
+       /* Punch out blocks of an extent */
+#define EXT4_GET_BLOCKS_PUNCH_OUT_EXT          0x0020
+       /* Don't normalize allocation size (used for fallocate) */
+#define EXT4_GET_BLOCKS_NO_NORMALIZE           0x0040
 
 /*
  * Flags used by ext4_free_blocks
@@ -1028,7 +1035,7 @@ struct ext4_super_block {
        __le16  s_want_extra_isize;     /* New inodes should reserve # bytes */
        __le32  s_flags;                /* Miscellaneous flags */
        __le16  s_raid_stride;          /* RAID stride */
-       __le16  s_mmp_interval;         /* # seconds to wait in MMP checking */
+       __le16  s_mmp_update_interval;  /* # seconds to wait in MMP checking */
        __le64  s_mmp_block;            /* Block for multi-mount protection */
        __le32  s_raid_stripe_width;    /* blocks on all data disks (N*stride)*/
        __u8    s_log_groups_per_flex;  /* FLEX_BG group size */
@@ -1144,6 +1151,9 @@ struct ext4_sb_info {
        unsigned long s_ext_blocks;
        unsigned long s_ext_extents;
 #endif
+       /* ext4 extent cache stats */
+       unsigned long extent_cache_hits;
+       unsigned long extent_cache_misses;
 
        /* for buddy allocator */
        struct ext4_group_info ***s_group_info;
@@ -1201,6 +1211,9 @@ struct ext4_sb_info {
        struct ext4_li_request *s_li_request;
        /* Wait multiplier for lazy initialization thread */
        unsigned int s_li_wait_mult;
+
+       /* Kernel thread for multiple mount protection */
+       struct task_struct *s_mmp_tsk;
 };
 
 static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1338,6 +1351,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
 #define EXT4_FEATURE_RO_COMPAT_GDT_CSUM                0x0010
 #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK       0x0020
 #define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE     0x0040
+#define EXT4_FEATURE_RO_COMPAT_QUOTA           0x0100
 
 #define EXT4_FEATURE_INCOMPAT_COMPRESSION      0x0001
 #define EXT4_FEATURE_INCOMPAT_FILETYPE         0x0002
@@ -1351,13 +1365,29 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
 #define EXT4_FEATURE_INCOMPAT_EA_INODE         0x0400 /* EA in inode */
 #define EXT4_FEATURE_INCOMPAT_DIRDATA          0x1000 /* data in dirent */
 
+#define EXT2_FEATURE_COMPAT_SUPP       EXT4_FEATURE_COMPAT_EXT_ATTR
+#define EXT2_FEATURE_INCOMPAT_SUPP     (EXT4_FEATURE_INCOMPAT_FILETYPE| \
+                                        EXT4_FEATURE_INCOMPAT_META_BG)
+#define EXT2_FEATURE_RO_COMPAT_SUPP    (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+                                        EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
+                                        EXT4_FEATURE_RO_COMPAT_BTREE_DIR)
+
+#define EXT3_FEATURE_COMPAT_SUPP       EXT4_FEATURE_COMPAT_EXT_ATTR
+#define EXT3_FEATURE_INCOMPAT_SUPP     (EXT4_FEATURE_INCOMPAT_FILETYPE| \
+                                        EXT4_FEATURE_INCOMPAT_RECOVER| \
+                                        EXT4_FEATURE_INCOMPAT_META_BG)
+#define EXT3_FEATURE_RO_COMPAT_SUPP    (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
+                                        EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
+                                        EXT4_FEATURE_RO_COMPAT_BTREE_DIR)
+
 #define EXT4_FEATURE_COMPAT_SUPP       EXT2_FEATURE_COMPAT_EXT_ATTR
 #define EXT4_FEATURE_INCOMPAT_SUPP     (EXT4_FEATURE_INCOMPAT_FILETYPE| \
                                         EXT4_FEATURE_INCOMPAT_RECOVER| \
                                         EXT4_FEATURE_INCOMPAT_META_BG| \
                                         EXT4_FEATURE_INCOMPAT_EXTENTS| \
                                         EXT4_FEATURE_INCOMPAT_64BIT| \
-                                        EXT4_FEATURE_INCOMPAT_FLEX_BG)
+                                        EXT4_FEATURE_INCOMPAT_FLEX_BG| \
+                                        EXT4_FEATURE_INCOMPAT_MMP)
 #define EXT4_FEATURE_RO_COMPAT_SUPP    (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \
                                         EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \
                                         EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \
@@ -1590,12 +1620,6 @@ void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
  */
 struct ext4_lazy_init {
        unsigned long           li_state;
-
-       wait_queue_head_t       li_wait_daemon;
-       wait_queue_head_t       li_wait_task;
-       struct timer_list       li_timer;
-       struct task_struct      *li_task;
-
        struct list_head        li_request_list;
        struct mutex            li_list_mtx;
 };
@@ -1614,6 +1638,67 @@ struct ext4_features {
        struct completion f_kobj_unregister;
 };
 
+/*
+ * This structure will be used for multiple mount protection. It will be
+ * written into the block number saved in the s_mmp_block field in the
+ * superblock. Programs that check MMP should assume that if
+ * SEQ_FSCK (or any unknown code above SEQ_MAX) is present then it is NOT safe
+ * to use the filesystem, regardless of how old the timestamp is.
+ */
+#define EXT4_MMP_MAGIC     0x004D4D50U /* ASCII for MMP */
+#define EXT4_MMP_SEQ_CLEAN 0xFF4D4D50U /* mmp_seq value for clean unmount */
+#define EXT4_MMP_SEQ_FSCK  0xE24D4D50U /* mmp_seq value when being fscked */
+#define EXT4_MMP_SEQ_MAX   0xE24D4D4FU /* maximum valid mmp_seq value */
+
+struct mmp_struct {
+       __le32  mmp_magic;              /* Magic number for MMP */
+       __le32  mmp_seq;                /* Sequence no. updated periodically */
+
+       /*
+        * mmp_time, mmp_nodename & mmp_bdevname are only used for information
+        * purposes and do not affect the correctness of the algorithm
+        */
+       __le64  mmp_time;               /* Time last updated */
+       char    mmp_nodename[64];       /* Node which last updated MMP block */
+       char    mmp_bdevname[32];       /* Bdev which last updated MMP block */
+
+       /*
+        * mmp_check_interval is used to verify if the MMP block has been
+        * updated on the block device. The value is updated based on the
+        * maximum time to write the MMP block during an update cycle.
+        */
+       __le16  mmp_check_interval;
+
+       __le16  mmp_pad1;
+       __le32  mmp_pad2[227];
+};
+
+/* arguments passed to the mmp thread */
+struct mmpd_data {
+       struct buffer_head *bh; /* bh from initial read_mmp_block() */
+       struct super_block *sb;  /* super block of the fs */
+};
+
+/*
+ * Check interval multiplier
+ * The MMP block is written every update interval and initially checked every
+ * update interval x the multiplier (the value is then adapted based on the
+ * write latency). The reason is that writes can be delayed under load and we
+ * don't want readers to incorrectly assume that the filesystem is no longer
+ * in use.
+ */
+#define EXT4_MMP_CHECK_MULT            2UL
+
+/*
+ * Minimum interval for MMP checking in seconds.
+ */
+#define EXT4_MMP_MIN_CHECK_INTERVAL    5UL
+
+/*
+ * Maximum interval for MMP checking in seconds.
+ */
+#define EXT4_MMP_MAX_CHECK_INTERVAL    300UL
+
 /*
  * Function prototypes
  */
@@ -1638,10 +1723,12 @@ extern int ext4_bg_has_super(struct super_block *sb, ext4_group_t group);
 extern unsigned long ext4_bg_num_gdb(struct super_block *sb,
                        ext4_group_t group);
 extern ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
-                       ext4_fsblk_t goal, unsigned long *count, int *errp);
-extern int ext4_claim_free_blocks(struct ext4_sb_info *sbi, s64 nblocks);
-extern void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
-                               ext4_fsblk_t block, unsigned long count);
+                                        ext4_fsblk_t goal,
+                                        unsigned int flags,
+                                        unsigned long *count,
+                                        int *errp);
+extern int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
+                                 s64 nblocks, unsigned int flags);
 extern ext4_fsblk_t ext4_count_free_blocks(struct super_block *);
 extern void ext4_check_blocks_bitmap(struct super_block *);
 extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
@@ -1706,6 +1793,8 @@ extern void ext4_free_blocks(handle_t *handle, struct inode *inode,
                             unsigned long count, int flags);
 extern int ext4_mb_add_groupinfo(struct super_block *sb,
                ext4_group_t i, struct ext4_group_desc *desc);
+extern void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
+                               ext4_fsblk_t block, unsigned long count);
 extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
 
 /* inode.c */
@@ -1729,6 +1818,7 @@ extern int ext4_change_inode_journal_flag(struct inode *, int);
 extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
 extern int ext4_can_truncate(struct inode *inode);
 extern void ext4_truncate(struct inode *);
+extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length);
 extern int ext4_truncate_restart_trans(handle_t *, struct inode *, int nblocks);
 extern void ext4_set_inode_flags(struct inode *);
 extern void ext4_get_inode_flags(struct ext4_inode_info *);
@@ -1738,6 +1828,8 @@ extern int ext4_writepage_trans_blocks(struct inode *);
 extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
 extern int ext4_block_truncate_page(handle_t *handle,
                struct address_space *mapping, loff_t from);
+extern int ext4_block_zero_page_range(handle_t *handle,
+               struct address_space *mapping, loff_t from, loff_t length);
 extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 extern qsize_t *ext4_get_reserved_space(struct inode *inode);
 extern void ext4_da_update_reserve_space(struct inode *inode,
@@ -1788,6 +1880,10 @@ extern void __ext4_warning(struct super_block *, const char *, unsigned int,
                                                       __LINE__, ## message)
 extern void ext4_msg(struct super_block *, const char *, const char *, ...)
        __attribute__ ((format (printf, 3, 4)));
+extern void __dump_mmp_msg(struct super_block *, struct mmp_struct *mmp,
+                          const char *, unsigned int, const char *);
+#define dump_mmp_msg(sb, mmp, msg)     __dump_mmp_msg(sb, mmp, __func__, \
+                                                      __LINE__, msg)
 extern void __ext4_grp_locked_error(const char *, unsigned int, \
                                    struct super_block *, ext4_group_t, \
                                    unsigned long, ext4_fsblk_t, \
@@ -2064,6 +2160,8 @@ extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks,
 extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
                               struct ext4_map_blocks *map, int flags);
 extern void ext4_ext_truncate(struct inode *);
+extern int ext4_ext_punch_hole(struct file *file, loff_t offset,
+                               loff_t length);
 extern void ext4_ext_init(struct super_block *);
 extern void ext4_ext_release(struct super_block *);
 extern long ext4_fallocate(struct file *file, int mode, loff_t offset,
@@ -2092,6 +2190,9 @@ extern int ext4_bio_write_page(struct ext4_io_submit *io,
                               int len,
                               struct writeback_control *wbc);
 
+/* mmp.c */
+extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
+
 /* BH_Uninit flag: blocks are allocated but uninitialized on disk */
 enum ext4_state_bits {
        BH_Uninit       /* blocks are allocated but uninitialized on disk */
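
Aside: the new MMP definitions above spell out the protocol: the MMP block is rewritten every update interval, and a prospective mounter re-checks it at the update interval times EXT4_MMP_CHECK_MULT, adapted to observed write latency and clamped between the minimum and maximum check intervals. The exact adaptation lives in the new mmp.c (not shown here); the sketch below is one plausible clamped-interval calculation under those constraints, not the kernel's code.

    #include <stdio.h>

    #define MMP_CHECK_MULT          2UL    /* mirrors EXT4_MMP_CHECK_MULT */
    #define MMP_MIN_CHECK_INTERVAL  5UL    /* mirrors EXT4_MMP_MIN_CHECK_INTERVAL */
    #define MMP_MAX_CHECK_INTERVAL  300UL  /* mirrors EXT4_MMP_MAX_CHECK_INTERVAL */

    /*
     * Interval (seconds) at which a prospective mounter re-reads the MMP
     * block: the update interval times the multiplier, bumped up if writes
     * were observed to be slow, and clamped to the documented bounds.
     */
    static unsigned long mmp_check_interval(unsigned long update_interval,
                                            unsigned long write_latency)
    {
            unsigned long interval = MMP_CHECK_MULT * update_interval;

            if (interval < MMP_CHECK_MULT * write_latency)
                    interval = MMP_CHECK_MULT * write_latency;
            if (interval < MMP_MIN_CHECK_INTERVAL)
                    interval = MMP_MIN_CHECK_INTERVAL;
            if (interval > MMP_MAX_CHECK_INTERVAL)
                    interval = MMP_MAX_CHECK_INTERVAL;
            return interval;
    }

    int main(void)
    {
            printf("fast device: %lus\n", mmp_check_interval(5, 1));    /* 10s  */
            printf("slow device: %lus\n", mmp_check_interval(5, 200));  /* 300s */
            return 0;
    }
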
index 6e272ef6ba96c4938cc357ef498c760bde67d39e..f5240aa15601a6b34c40b339b0153e6f8bc6867d 100644 (file)
@@ -6,20 +6,6 @@
 
 #include <trace/events/ext4.h>
 
-int __ext4_journal_get_undo_access(const char *where, unsigned int line,
-                                  handle_t *handle, struct buffer_head *bh)
-{
-       int err = 0;
-
-       if (ext4_handle_valid(handle)) {
-               err = jbd2_journal_get_undo_access(handle, bh);
-               if (err)
-                       ext4_journal_abort_handle(where, line, __func__, bh,
-                                                 handle, err);
-       }
-       return err;
-}
-
 int __ext4_journal_get_write_access(const char *where, unsigned int line,
                                    handle_t *handle, struct buffer_head *bh)
 {
index d0f53538a57fd8663b03443898e4d12492ff399b..bb85757689b6af513dd556559a407049a1fc9782 100644 (file)
@@ -126,9 +126,6 @@ void ext4_journal_abort_handle(const char *caller, unsigned int line,
                               const char *err_fn,
                struct buffer_head *bh, handle_t *handle, int err);
 
-int __ext4_journal_get_undo_access(const char *where, unsigned int line,
-                                  handle_t *handle, struct buffer_head *bh);
-
 int __ext4_journal_get_write_access(const char *where, unsigned int line,
                                    handle_t *handle, struct buffer_head *bh);
 
@@ -146,8 +143,6 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
 int __ext4_handle_dirty_super(const char *where, unsigned int line,
                              handle_t *handle, struct super_block *sb);
 
-#define ext4_journal_get_undo_access(handle, bh) \
-       __ext4_journal_get_undo_access(__func__, __LINE__, (handle), (bh))
 #define ext4_journal_get_write_access(handle, bh) \
        __ext4_journal_get_write_access(__func__, __LINE__, (handle), (bh))
 #define ext4_forget(handle, is_metadata, inode, bh, block_nr) \
index 4890d6f3ad15a976417ae542296a0051fe97bc27..5199bac7fc625d5a19ad5934417f724fb699fe22 100644 (file)
 
 #include <trace/events/ext4.h>
 
+static int ext4_split_extent(handle_t *handle,
+                               struct inode *inode,
+                               struct ext4_ext_path *path,
+                               struct ext4_map_blocks *map,
+                               int split_flag,
+                               int flags);
+
 static int ext4_ext_truncate_extend_restart(handle_t *handle,
                                            struct inode *inode,
                                            int needed)
@@ -192,12 +199,13 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
 static ext4_fsblk_t
 ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
                        struct ext4_ext_path *path,
-                       struct ext4_extent *ex, int *err)
+                       struct ext4_extent *ex, int *err, unsigned int flags)
 {
        ext4_fsblk_t goal, newblock;
 
        goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
-       newblock = ext4_new_meta_blocks(handle, inode, goal, NULL, err);
+       newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
+                                       NULL, err);
        return newblock;
 }
 
@@ -474,9 +482,43 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
        }
        ext_debug("\n");
 }
+
+static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
+                       ext4_fsblk_t newblock, int level)
+{
+       int depth = ext_depth(inode);
+       struct ext4_extent *ex;
+
+       if (depth != level) {
+               struct ext4_extent_idx *idx;
+               idx = path[level].p_idx;
+               while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
+                       ext_debug("%d: move %d:%llu in new index %llu\n", level,
+                                       le32_to_cpu(idx->ei_block),
+                                       ext4_idx_pblock(idx),
+                                       newblock);
+                       idx++;
+               }
+
+               return;
+       }
+
+       ex = path[depth].p_ext;
+       while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
+               ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
+                               le32_to_cpu(ex->ee_block),
+                               ext4_ext_pblock(ex),
+                               ext4_ext_is_uninitialized(ex),
+                               ext4_ext_get_actual_len(ex),
+                               newblock);
+               ex++;
+       }
+}
+
 #else
 #define ext4_ext_show_path(inode, path)
 #define ext4_ext_show_leaf(inode, path)
+#define ext4_ext_show_move(inode, path, newblock, level)
 #endif
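
The ext4_ext_show_move() helper added above follows the usual EXT_DEBUG pattern: a real function when the debug switch is on, an empty macro otherwise, so call sites need no #ifdefs. A standalone, illustrative sketch of that pattern with toy names (not kernel code):

#include <stdio.h>

#define TOY_DEBUG 1                          /* stand-in for EXT_DEBUG */

#ifdef TOY_DEBUG
static void toy_show_move(int idx, unsigned long long newblock)
{
        printf("move entry %d into new block %llu\n", idx, newblock);
}
#else
#define toy_show_move(idx, newblock)         /* compiles to nothing */
#endif

int main(void)
{
        toy_show_move(3, 12345ULL);          /* vanishes when TOY_DEBUG is unset */
        return 0;
}
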
 
 void ext4_ext_drop_refs(struct ext4_ext_path *path)
@@ -792,14 +834,14 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
  * - initializes subtree
  */
 static int ext4_ext_split(handle_t *handle, struct inode *inode,
-                               struct ext4_ext_path *path,
-                               struct ext4_extent *newext, int at)
+                         unsigned int flags,
+                         struct ext4_ext_path *path,
+                         struct ext4_extent *newext, int at)
 {
        struct buffer_head *bh = NULL;
        int depth = ext_depth(inode);
        struct ext4_extent_header *neh;
        struct ext4_extent_idx *fidx;
-       struct ext4_extent *ex;
        int i = at, k, m, a;
        ext4_fsblk_t newblock, oldblock;
        __le32 border;
@@ -847,7 +889,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
        ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
        for (a = 0; a < depth - at; a++) {
                newblock = ext4_ext_new_meta_block(handle, inode, path,
-                                                  newext, &err);
+                                                  newext, &err, flags);
                if (newblock == 0)
                        goto cleanup;
                ablocks[a] = newblock;
@@ -876,7 +918,6 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
        neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
        neh->eh_magic = EXT4_EXT_MAGIC;
        neh->eh_depth = 0;
-       ex = EXT_FIRST_EXTENT(neh);
 
        /* move remainder of path[depth] to the new leaf */
        if (unlikely(path[depth].p_hdr->eh_entries !=
@@ -888,25 +929,12 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                goto cleanup;
        }
        /* start copy from next extent */
-       /* TODO: we could do it by single memmove */
-       m = 0;
-       path[depth].p_ext++;
-       while (path[depth].p_ext <=
-                       EXT_MAX_EXTENT(path[depth].p_hdr)) {
-               ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
-                               le32_to_cpu(path[depth].p_ext->ee_block),
-                               ext4_ext_pblock(path[depth].p_ext),
-                               ext4_ext_is_uninitialized(path[depth].p_ext),
-                               ext4_ext_get_actual_len(path[depth].p_ext),
-                               newblock);
-               /*memmove(ex++, path[depth].p_ext++,
-                               sizeof(struct ext4_extent));
-               neh->eh_entries++;*/
-               path[depth].p_ext++;
-               m++;
-       }
+       m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
+       ext4_ext_show_move(inode, path, newblock, depth);
        if (m) {
-               memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
+               struct ext4_extent *ex;
+               ex = EXT_FIRST_EXTENT(neh);
+               memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
                le16_add_cpu(&neh->eh_entries, m);
        }
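
For readers following the hunk above: the old copy loop is replaced by a pointer subtraction, m = EXT_MAX_EXTENT(hdr) - p_ext++, followed by one memmove() of m entries. A standalone sketch of the same idiom using toy types (these structures and values are illustrative only, not the kernel's):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct toy_extent { unsigned block, len; };

int main(void)
{
        struct toy_extent leaf[6] = {
                {0, 8}, {8, 8}, {16, 8}, {24, 8}, {32, 8}, {40, 8}
        };
        struct toy_extent newleaf[6];
        struct toy_extent *cur = &leaf[2];      /* split point        */
        struct toy_extent *max = &leaf[5];      /* last valid entry   */

        /* number of entries strictly after *cur; cur then points at the
         * first entry to move, just like path[depth].p_ext++ above */
        ptrdiff_t m = max - cur++;

        if (m)
                memmove(newleaf, cur, sizeof(struct toy_extent) * m);

        for (ptrdiff_t i = 0; i < m; i++)
                printf("moved %u:%u into the new leaf\n",
                       newleaf[i].block, newleaf[i].len);
        return 0;
}
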
 
@@ -968,12 +996,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
 
                ext_debug("int.index at %d (block %llu): %u -> %llu\n",
                                i, newblock, le32_to_cpu(border), oldblock);
-               /* copy indexes */
-               m = 0;
-               path[i].p_idx++;
 
-               ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
-                               EXT_MAX_INDEX(path[i].p_hdr));
+               /* move remainder of path[i] to the new index block */
                if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
                                        EXT_LAST_INDEX(path[i].p_hdr))) {
                        EXT4_ERROR_INODE(inode,
@@ -982,20 +1006,13 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                        err = -EIO;
                        goto cleanup;
                }
-               while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
-                       ext_debug("%d: move %d:%llu in new index %llu\n", i,
-                                       le32_to_cpu(path[i].p_idx->ei_block),
-                                       ext4_idx_pblock(path[i].p_idx),
-                                       newblock);
-                       /*memmove(++fidx, path[i].p_idx++,
-                                       sizeof(struct ext4_extent_idx));
-                       neh->eh_entries++;
-                       BUG_ON(neh->eh_entries > neh->eh_max);*/
-                       path[i].p_idx++;
-                       m++;
-               }
+               /* start copy indexes */
+               m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
+               ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
+                               EXT_MAX_INDEX(path[i].p_hdr));
+               ext4_ext_show_move(inode, path, newblock, i);
                if (m) {
-                       memmove(++fidx, path[i].p_idx - m,
+                       memmove(++fidx, path[i].p_idx,
                                sizeof(struct ext4_extent_idx) * m);
                        le16_add_cpu(&neh->eh_entries, m);
                }
@@ -1056,8 +1073,9 @@ cleanup:
  *   just created block
  */
 static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
-                                       struct ext4_ext_path *path,
-                                       struct ext4_extent *newext)
+                                unsigned int flags,
+                                struct ext4_ext_path *path,
+                                struct ext4_extent *newext)
 {
        struct ext4_ext_path *curp = path;
        struct ext4_extent_header *neh;
@@ -1065,7 +1083,8 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
        ext4_fsblk_t newblock;
        int err = 0;
 
-       newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err);
+       newblock = ext4_ext_new_meta_block(handle, inode, path,
+               newext, &err, flags);
        if (newblock == 0)
                return err;
 
@@ -1140,8 +1159,9 @@ out:
  * if no free index is found, then it requests in-depth growing.
  */
 static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
-                                       struct ext4_ext_path *path,
-                                       struct ext4_extent *newext)
+                                   unsigned int flags,
+                                   struct ext4_ext_path *path,
+                                   struct ext4_extent *newext)
 {
        struct ext4_ext_path *curp;
        int depth, i, err = 0;
@@ -1161,7 +1181,7 @@ repeat:
        if (EXT_HAS_FREE_INDEX(curp)) {
                /* if we found index with free entry, then use that
                 * entry: create all needed subtree and add new leaf */
-               err = ext4_ext_split(handle, inode, path, newext, i);
+               err = ext4_ext_split(handle, inode, flags, path, newext, i);
                if (err)
                        goto out;
 
@@ -1174,7 +1194,8 @@ repeat:
                        err = PTR_ERR(path);
        } else {
                /* tree is full, time to grow in depth */
-               err = ext4_ext_grow_indepth(handle, inode, path, newext);
+               err = ext4_ext_grow_indepth(handle, inode, flags,
+                                           path, newext);
                if (err)
                        goto out;
 
@@ -1563,7 +1584,7 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
  * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
  * 1 if they got merged.
  */
-static int ext4_ext_try_to_merge(struct inode *inode,
+static int ext4_ext_try_to_merge_right(struct inode *inode,
                                 struct ext4_ext_path *path,
                                 struct ext4_extent *ex)
 {
@@ -1602,6 +1623,31 @@ static int ext4_ext_try_to_merge(struct inode *inode,
        return merge_done;
 }
 
+/*
+ * Try to merge the @ex extent with its neighbours in the tree (left
+ * neighbour first).  Returns 1 if a merge to the right was done, else 0.
+ */
+static int ext4_ext_try_to_merge(struct inode *inode,
+                                 struct ext4_ext_path *path,
+                                 struct ext4_extent *ex) {
+       struct ext4_extent_header *eh;
+       unsigned int depth;
+       int merge_done = 0;
+       int ret = 0;
+
+       depth = ext_depth(inode);
+       BUG_ON(path[depth].p_hdr == NULL);
+       eh = path[depth].p_hdr;
+
+       if (ex > EXT_FIRST_EXTENT(eh))
+               merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);
+
+       if (!merge_done)
+               ret = ext4_ext_try_to_merge_right(inode, path, ex);
+
+       return ret;
+}
+
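
The ext4_ext_try_to_merge() wrapper added above reuses the right-only primitive for a left merge by calling it on the previous entry (ex - 1), falling back to a plain right merge if that fails. A loose, standalone sketch of the same reuse over a toy array of runs (nothing here is the kernel's extent code):

#include <stdio.h>

struct run { int start, len; };

/* merge r[i] with r[i + 1] if they are contiguous; returns 1 on success */
static int merge_right(struct run *r, int *n, int i)
{
        if (i + 1 >= *n || r[i].start + r[i].len != r[i + 1].start)
                return 0;
        r[i].len += r[i + 1].len;
        for (int j = i + 1; j < *n - 1; j++)
                r[j] = r[j + 1];
        (*n)--;
        return 1;
}

static int try_to_merge(struct run *r, int *n, int i)
{
        int merged = 0;

        if (i > 0)                      /* merge left == merge_right(i - 1) */
                merged = merge_right(r, n, i - 1);
        if (!merged)
                merged = merge_right(r, n, i);
        return merged;
}

int main(void)
{
        struct run r[3] = { {0, 4}, {4, 4}, {10, 2} };
        int n = 3;

        try_to_merge(r, &n, 1);         /* entry 1 merges into entry 0 */
        for (int i = 0; i < n; i++)
                printf("run %d: %d+%d\n", i, r[i].start, r[i].len);
        return 0;
}
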
 /*
  * check if a portion of the "newext" extent overlaps with an
  * existing extent.
@@ -1668,6 +1714,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
        int depth, len, err;
        ext4_lblk_t next;
        unsigned uninitialized = 0;
+       int flags = 0;
 
        if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
                EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
@@ -1742,7 +1789,9 @@ repeat:
         * There is no free space in the found leaf.
         * We're gonna add a new leaf in the tree.
         */
-       err = ext4_ext_create_new_leaf(handle, inode, path, newext);
+       if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT)
+               flags = EXT4_MB_USE_ROOT_BLOCKS;
+       err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
        if (err)
                goto cleanup;
        depth = ext_depth(inode);
@@ -2003,13 +2052,25 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
 }
 
 /*
+ * ext4_ext_check_cache()
+ * Checks to see if the given block is in the cache.
+ * If it is, the cached extent is stored in the given
+ * cache extent pointer.  If the cached extent is a hole,
+ * this routine should be used instead of
+ * ext4_ext_in_cache if the calling function needs to
+ * know the size of the hole.
+ *
+ * @inode: The file's inode
+ * @block: The block to look for in the cache
+ * @ex:    Pointer where the cached extent will be stored
+ *         if it contains block
+ *
  * Return 0 if cache is invalid; 1 if the cache is valid
  */
-static int
-ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
-                       struct ext4_extent *ex)
-{
+static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block,
+       struct ext4_ext_cache *ex){
        struct ext4_ext_cache *cex;
+       struct ext4_sb_info *sbi;
        int ret = 0;
 
        /*
@@ -2017,25 +2078,59 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
         */
        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
        cex = &EXT4_I(inode)->i_cached_extent;
+       sbi = EXT4_SB(inode->i_sb);
 
        /* has cache valid data? */
        if (cex->ec_len == 0)
                goto errout;
 
        if (in_range(block, cex->ec_block, cex->ec_len)) {
-               ex->ee_block = cpu_to_le32(cex->ec_block);
-               ext4_ext_store_pblock(ex, cex->ec_start);
-               ex->ee_len = cpu_to_le16(cex->ec_len);
+               memcpy(ex, cex, sizeof(struct ext4_ext_cache));
                ext_debug("%u cached by %u:%u:%llu\n",
                                block,
                                cex->ec_block, cex->ec_len, cex->ec_start);
                ret = 1;
        }
 errout:
+       if (!ret)
+               sbi->extent_cache_misses++;
+       else
+               sbi->extent_cache_hits++;
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
        return ret;
 }
 
+/*
+ * ext4_ext_in_cache()
+ * Checks to see if the given block is in the cache.
+ * If it is, the cached extent is stored in the given
+ * extent pointer.
+ *
+ * @inode: The file's inode
+ * @block: The block to look for in the cache
+ * @ex:    Pointer where the cached extent will be stored
+ *         if it contains block
+ *
+ * Return 0 if cache is invalid; 1 if the cache is valid
+ */
+static int
+ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
+                       struct ext4_extent *ex)
+{
+       struct ext4_ext_cache cex;
+       int ret = 0;
+
+       if (ext4_ext_check_cache(inode, block, &cex)) {
+               ex->ee_block = cpu_to_le32(cex.ec_block);
+               ext4_ext_store_pblock(ex, cex.ec_start);
+               ex->ee_len = cpu_to_le16(cex.ec_len);
+               ret = 1;
+       }
+
+       return ret;
+}
+
+
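
As the two comment blocks above describe, ext4_ext_in_cache() is now a thin wrapper over ext4_ext_check_cache(): the latter hands back the raw cache record (which may describe a hole) and updates the new hit/miss counters, the former converts a located record to on-disk extent form. A standalone sketch of that layering with toy types (nothing below is the kernel API):

#include <stdio.h>
#include <string.h>

struct toy_cache  { unsigned block, len; unsigned long long start; };
struct toy_extent { unsigned block; unsigned short len; unsigned long long pblk; };

static struct toy_cache cached = { 100, 8, 5000 };   /* pretend per-inode cache */
static unsigned long hits, misses;

static int toy_check_cache(unsigned block, struct toy_cache *out)
{
        int ret = 0;

        if (cached.len &&
            block >= cached.block && block < cached.block + cached.len) {
                memcpy(out, &cached, sizeof(*out));
                ret = 1;
        }
        if (ret)
                hits++;
        else
                misses++;
        return ret;
}

static int toy_in_cache(unsigned block, struct toy_extent *ex)
{
        struct toy_cache c;

        if (!toy_check_cache(block, &c))
                return 0;
        ex->block = c.block;
        ex->len = (unsigned short)c.len;
        ex->pblk = c.start;
        return 1;
}

int main(void)
{
        struct toy_extent ex;

        if (toy_in_cache(103, &ex))
                printf("hit %u+%u at %llu (hits=%lu, misses=%lu)\n",
                       ex.block, (unsigned)ex.len, ex.pblk, hits, misses);
        return 0;
}
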
 /*
  * ext4_ext_rm_idx:
  * removes index from the index block.
@@ -2163,8 +2258,16 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
                ext4_free_blocks(handle, inode, NULL, start, num, flags);
        } else if (from == le32_to_cpu(ex->ee_block)
                   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
-               printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n",
-                       from, to, le32_to_cpu(ex->ee_block), ee_len);
+               /* head removal */
+               ext4_lblk_t num;
+               ext4_fsblk_t start;
+
+               num = to - from;
+               start = ext4_ext_pblock(ex);
+
+               ext_debug("free first %u blocks starting %llu\n", num, start);
+               ext4_free_blocks(handle, inode, 0, start, num, flags);
+
        } else {
                printk(KERN_INFO "strange request: removal(2) "
                                "%u-%u from %u:%u\n",
@@ -2173,9 +2276,22 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
        return 0;
 }
 
+
+/*
+ * ext4_ext_rm_leaf() removes the extents associated with the
+ * blocks lying between "start" and "end", and splits an extent
+ * if "start" and "end" fall within the same extent.
+ *
+ * @handle: The journal handle
+ * @inode:  The file's inode
+ * @path:   The path to the leaf
+ * @start:  The first block to remove
+ * @end:    The last block to remove
+ */
 static int
 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
-               struct ext4_ext_path *path, ext4_lblk_t start)
+               struct ext4_ext_path *path, ext4_lblk_t start,
+               ext4_lblk_t end)
 {
        int err = 0, correct_index = 0;
        int depth = ext_depth(inode), credits;
@@ -2186,6 +2302,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
        unsigned short ex_ee_len;
        unsigned uninitialized = 0;
        struct ext4_extent *ex;
+       struct ext4_map_blocks map;
 
        /* the header must be checked already in ext4_ext_remove_space() */
        ext_debug("truncate since %u in leaf\n", start);
@@ -2215,31 +2332,95 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
                path[depth].p_ext = ex;
 
                a = ex_ee_block > start ? ex_ee_block : start;
-               b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
-                       ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;
+               b = ex_ee_block+ex_ee_len - 1 < end ?
+                       ex_ee_block+ex_ee_len - 1 : end;
 
                ext_debug("  border %u:%u\n", a, b);
 
-               if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
-                       block = 0;
-                       num = 0;
-                       BUG();
+               /* If this extent is beyond the end of the hole, skip it */
+               if (end <= ex_ee_block) {
+                       ex--;
+                       ex_ee_block = le32_to_cpu(ex->ee_block);
+                       ex_ee_len = ext4_ext_get_actual_len(ex);
+                       continue;
+               } else if (a != ex_ee_block &&
+                       b != ex_ee_block + ex_ee_len - 1) {
+                       /*
+                        * If this is a truncate, then this condition should
+                        * never happen because at least one of the end points
+                        * needs to be on the edge of the extent.
+                        */
+                       if (end == EXT_MAX_BLOCK) {
+                               ext_debug("  bad truncate %u:%u\n",
+                                               start, end);
+                               block = 0;
+                               num = 0;
+                               err = -EIO;
+                               goto out;
+                       }
+                       /*
+                        * else this is a hole punch, so the extent needs to
+                        * be split since neither edge of the hole is on the
+                        * extent edge
+                        */
+                       else{
+                               map.m_pblk = ext4_ext_pblock(ex);
+                               map.m_lblk = ex_ee_block;
+                               map.m_len = b - ex_ee_block;
+
+                               err = ext4_split_extent(handle,
+                                       inode, path, &map, 0,
+                                       EXT4_GET_BLOCKS_PUNCH_OUT_EXT |
+                                       EXT4_GET_BLOCKS_PRE_IO);
+
+                               if (err < 0)
+                                       goto out;
+
+                               ex_ee_len = ext4_ext_get_actual_len(ex);
+
+                               b = ex_ee_block+ex_ee_len - 1 < end ?
+                                       ex_ee_block+ex_ee_len - 1 : end;
+
+                               /* Then remove tail of this extent */
+                               block = ex_ee_block;
+                               num = a - block;
+                       }
                } else if (a != ex_ee_block) {
                        /* remove tail of the extent */
                        block = ex_ee_block;
                        num = a - block;
                } else if (b != ex_ee_block + ex_ee_len - 1) {
                        /* remove head of the extent */
-                       block = a;
-                       num = b - a;
-                       /* there is no "make a hole" API yet */
-                       BUG();
+                       block = b;
+                       num =  ex_ee_block + ex_ee_len - b;
+
+                       /*
+                        * If this is a truncate, this condition
+                        * should never happen
+                        */
+                       if (end == EXT_MAX_BLOCK) {
+                               ext_debug("  bad truncate %u:%u\n",
+                                       start, end);
+                               err = -EIO;
+                               goto out;
+                       }
                } else {
                        /* remove whole extent: excellent! */
                        block = ex_ee_block;
                        num = 0;
-                       BUG_ON(a != ex_ee_block);
-                       BUG_ON(b != ex_ee_block + ex_ee_len - 1);
+                       if (a != ex_ee_block) {
+                               ext_debug("  bad truncate %u:%u\n",
+                                       start, end);
+                               err = -EIO;
+                               goto out;
+                       }
+
+                       if (b != ex_ee_block + ex_ee_len - 1) {
+                               ext_debug("  bad truncate %u:%u\n",
+                                       start, end);
+                               err = -EIO;
+                               goto out;
+                       }
                }
 
                /*
@@ -2270,7 +2451,13 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
                if (num == 0) {
                        /* this extent is removed; mark slot entirely unused */
                        ext4_ext_store_pblock(ex, 0);
-                       le16_add_cpu(&eh->eh_entries, -1);
+               } else if (block != ex_ee_block) {
+                       /*
+                        * If this was a head removal, then we need to update
+                        * the physical block since it is now at a different
+                        * location
+                        */
+                       ext4_ext_store_pblock(ex, ext4_ext_pblock(ex) + (b-a));
                }
 
                ex->ee_block = cpu_to_le32(block);
@@ -2286,6 +2473,27 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
                if (err)
                        goto out;
 
+               /*
+                * If the extent was completely released,
+                * we need to remove it from the leaf
+                */
+               if (num == 0) {
+                       if (end != EXT_MAX_BLOCK) {
+                               /*
+                                * For hole punching, we need to scoot all the
+                                * extents up when an extent is removed so that
+                                * we don't have blank extents in the middle
+                                */
+                               memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
+                                       sizeof(struct ext4_extent));
+
+                               /* Now get rid of the one at the end */
+                               memset(EXT_LAST_EXTENT(eh), 0,
+                                       sizeof(struct ext4_extent));
+                       }
+                       le16_add_cpu(&eh->eh_entries, -1);
+               }
+
                ext_debug("new extent: %u:%u:%llu\n", block, num,
                                ext4_ext_pblock(ex));
                ex--;
@@ -2326,7 +2534,8 @@ ext4_ext_more_to_rm(struct ext4_ext_path *path)
        return 1;
 }
 
-static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
+static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
+                               ext4_lblk_t end)
 {
        struct super_block *sb = inode->i_sb;
        int depth = ext_depth(inode);
@@ -2365,7 +2574,8 @@ again:
        while (i >= 0 && err == 0) {
                if (i == depth) {
                        /* this is leaf block */
-                       err = ext4_ext_rm_leaf(handle, inode, path, start);
+                       err = ext4_ext_rm_leaf(handle, inode, path,
+                                       start, end);
                        /* root level has p_bh == NULL, brelse() eats this */
                        brelse(path[i].p_bh);
                        path[i].p_bh = NULL;
@@ -2529,6 +2739,195 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
        return ret;
 }
 
+/*
+ * used by extent splitting.
+ */
+#define EXT4_EXT_MAY_ZEROOUT   0x1  /* safe to zeroout if split fails \
+                                       due to ENOSPC */
+#define EXT4_EXT_MARK_UNINIT1  0x2  /* mark first half uninitialized */
+#define EXT4_EXT_MARK_UNINIT2  0x4  /* mark second half uninitialized */
+
+/*
+ * ext4_split_extent_at() splits an extent at the given block.
+ *
+ * @handle: the journal handle
+ * @inode: the file inode
+ * @path: the path to the extent
+ * @split: the logical block where the extent is split.
+ * @split_flag: indicates whether the extent may be zeroed out if the split
+ *              fails, and the state (init or uninit) of the new extents.
+ * @flags: flags used to insert the new extent into the extent tree.
+ *
+ *
+ * Splits extent [a, b] into two extents [a, @split) and [@split, b], whose
+ * states are determined by split_flag.
+ *
+ * There are two cases:
+ *  a> the extent is split into two extents.
+ *  b> no split is needed; just mark the extent.
+ *
+ * return 0 on success.
+ */
+static int ext4_split_extent_at(handle_t *handle,
+                            struct inode *inode,
+                            struct ext4_ext_path *path,
+                            ext4_lblk_t split,
+                            int split_flag,
+                            int flags)
+{
+       ext4_fsblk_t newblock;
+       ext4_lblk_t ee_block;
+       struct ext4_extent *ex, newex, orig_ex;
+       struct ext4_extent *ex2 = NULL;
+       unsigned int ee_len, depth;
+       int err = 0;
+
+       ext_debug("ext4_split_extent_at: inode %lu, logical"
+               "block %llu\n", inode->i_ino, (unsigned long long)split);
+
+       ext4_ext_show_leaf(inode, path);
+
+       depth = ext_depth(inode);
+       ex = path[depth].p_ext;
+       ee_block = le32_to_cpu(ex->ee_block);
+       ee_len = ext4_ext_get_actual_len(ex);
+       newblock = split - ee_block + ext4_ext_pblock(ex);
+
+       BUG_ON(split < ee_block || split >= (ee_block + ee_len));
+
+       err = ext4_ext_get_access(handle, inode, path + depth);
+       if (err)
+               goto out;
+
+       if (split == ee_block) {
+               /*
+                * case b: block @split is the block that the extent begins with
+                * then we just change the state of the extent, and splitting
+                * is not needed.
+                */
+               if (split_flag & EXT4_EXT_MARK_UNINIT2)
+                       ext4_ext_mark_uninitialized(ex);
+               else
+                       ext4_ext_mark_initialized(ex);
+
+               if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
+                       ext4_ext_try_to_merge(inode, path, ex);
+
+               err = ext4_ext_dirty(handle, inode, path + depth);
+               goto out;
+       }
+
+       /* case a */
+       memcpy(&orig_ex, ex, sizeof(orig_ex));
+       ex->ee_len = cpu_to_le16(split - ee_block);
+       if (split_flag & EXT4_EXT_MARK_UNINIT1)
+               ext4_ext_mark_uninitialized(ex);
+
+       /*
+        * path may lead to new leaf, not to original leaf any more
+        * after ext4_ext_insert_extent() returns,
+        */
+       err = ext4_ext_dirty(handle, inode, path + depth);
+       if (err)
+               goto fix_extent_len;
+
+       ex2 = &newex;
+       ex2->ee_block = cpu_to_le32(split);
+       ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
+       ext4_ext_store_pblock(ex2, newblock);
+       if (split_flag & EXT4_EXT_MARK_UNINIT2)
+               ext4_ext_mark_uninitialized(ex2);
+
+       err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
+       if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
+               err = ext4_ext_zeroout(inode, &orig_ex);
+               if (err)
+                       goto fix_extent_len;
+               /* update the extent length and mark as initialized */
+               ex->ee_len = cpu_to_le32(ee_len);
+               ext4_ext_try_to_merge(inode, path, ex);
+               err = ext4_ext_dirty(handle, inode, path + depth);
+               goto out;
+       } else if (err)
+               goto fix_extent_len;
+
+out:
+       ext4_ext_show_leaf(inode, path);
+       return err;
+
+fix_extent_len:
+       ex->ee_len = orig_ex.ee_len;
+       ext4_ext_dirty(handle, inode, path + depth);
+       return err;
+}
+
+/*
+ * ext4_split_extent() splits an extent and marks the extent covered
+ * by @map as split_flag indicates.
+ *
+ * It may result in splitting the extent into multiple extents (up to three).
+ * There are three possibilities:
+ *   a> There is no split required
+ *   b> Splits into two extents: the split happens at either end of the extent
+ *   c> Splits into three extents: someone is splitting in the middle of the extent
+ *
+ */
+static int ext4_split_extent(handle_t *handle,
+                             struct inode *inode,
+                             struct ext4_ext_path *path,
+                             struct ext4_map_blocks *map,
+                             int split_flag,
+                             int flags)
+{
+       ext4_lblk_t ee_block;
+       struct ext4_extent *ex;
+       unsigned int ee_len, depth;
+       int err = 0;
+       int uninitialized;
+       int split_flag1, flags1;
+
+       depth = ext_depth(inode);
+       ex = path[depth].p_ext;
+       ee_block = le32_to_cpu(ex->ee_block);
+       ee_len = ext4_ext_get_actual_len(ex);
+       uninitialized = ext4_ext_is_uninitialized(ex);
+
+       if (map->m_lblk + map->m_len < ee_block + ee_len) {
+               split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
+                             EXT4_EXT_MAY_ZEROOUT : 0;
+               flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
+               if (uninitialized)
+                       split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
+                                      EXT4_EXT_MARK_UNINIT2;
+               err = ext4_split_extent_at(handle, inode, path,
+                               map->m_lblk + map->m_len, split_flag1, flags1);
+               if (err)
+                       goto out;
+       }
+
+       ext4_ext_drop_refs(path);
+       path = ext4_ext_find_extent(inode, map->m_lblk, path);
+       if (IS_ERR(path))
+               return PTR_ERR(path);
+
+       if (map->m_lblk >= ee_block) {
+               split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ?
+                             EXT4_EXT_MAY_ZEROOUT : 0;
+               if (uninitialized)
+                       split_flag1 |= EXT4_EXT_MARK_UNINIT1;
+               if (split_flag & EXT4_EXT_MARK_UNINIT2)
+                       split_flag1 |= EXT4_EXT_MARK_UNINIT2;
+               err = ext4_split_extent_at(handle, inode, path,
+                               map->m_lblk, split_flag1, flags);
+               if (err)
+                       goto out;
+       }
+
+       ext4_ext_show_leaf(inode, path);
+out:
+       return err ? err : map->m_len;
+}
+
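
ext4_split_extent() above carves the mapped range out of a single extent by splitting at the right boundary first (m_lblk + m_len) and then at the left one (m_lblk), so up to three pieces result; the new physical start of each piece is just its offset into the original extent added to the extent's physical block. A standalone sketch of that arithmetic with toy numbers (no kernel structures):

#include <stdio.h>

struct piece { unsigned lblk, len; unsigned long long pblk; };

int main(void)
{
        unsigned ee_block = 100, ee_len = 50;        /* original extent        */
        unsigned long long ee_pblk = 9000;
        unsigned m_lblk = 110, m_len = 20;           /* range being carved out */

        struct piece p[3];
        int n = 0;

        if (m_lblk > ee_block)                       /* left remainder  */
                p[n++] = (struct piece){ ee_block, m_lblk - ee_block, ee_pblk };
        p[n++] = (struct piece){ m_lblk, m_len,      /* the mapped part */
                                 ee_pblk + (m_lblk - ee_block) };
        if (m_lblk + m_len < ee_block + ee_len)      /* right remainder */
                p[n++] = (struct piece){ m_lblk + m_len,
                                         ee_block + ee_len - (m_lblk + m_len),
                                         ee_pblk + (m_lblk + m_len - ee_block) };

        for (int i = 0; i < n; i++)
                printf("piece %d: logical %u+%u -> physical %llu\n",
                       i, p[i].lblk, p[i].len, p[i].pblk);
        return 0;
}
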
 #define EXT4_EXT_ZERO_LEN 7
 /*
  * This function is called by ext4_ext_map_blocks() if someone tries to write
@@ -2545,17 +2944,13 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                                           struct ext4_map_blocks *map,
                                           struct ext4_ext_path *path)
 {
-       struct ext4_extent *ex, newex, orig_ex;
-       struct ext4_extent *ex1 = NULL;
-       struct ext4_extent *ex2 = NULL;
-       struct ext4_extent *ex3 = NULL;
-       struct ext4_extent_header *eh;
+       struct ext4_map_blocks split_map;
+       struct ext4_extent zero_ex;
+       struct ext4_extent *ex;
        ext4_lblk_t ee_block, eof_block;
        unsigned int allocated, ee_len, depth;
-       ext4_fsblk_t newblock;
        int err = 0;
-       int ret = 0;
-       int may_zeroout;
+       int split_flag = 0;
 
        ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
                "block %llu, max_blocks %u\n", inode->i_ino,
@@ -2567,280 +2962,86 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                eof_block = map->m_lblk + map->m_len;
 
        depth = ext_depth(inode);
-       eh = path[depth].p_hdr;
        ex = path[depth].p_ext;
        ee_block = le32_to_cpu(ex->ee_block);
        ee_len = ext4_ext_get_actual_len(ex);
        allocated = ee_len - (map->m_lblk - ee_block);
-       newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);
-
-       ex2 = ex;
-       orig_ex.ee_block = ex->ee_block;
-       orig_ex.ee_len   = cpu_to_le16(ee_len);
-       ext4_ext_store_pblock(&orig_ex, ext4_ext_pblock(ex));
 
+       WARN_ON(map->m_lblk < ee_block);
        /*
         * It is safe to convert extent to initialized via explicit
         * zeroout only if extent is fully insde i_size or new_size.
         */
-       may_zeroout = ee_block + ee_len <= eof_block;
+       split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
 
-       err = ext4_ext_get_access(handle, inode, path + depth);
-       if (err)
-               goto out;
        /* If extent has less than 2*EXT4_EXT_ZERO_LEN zerout directly */
-       if (ee_len <= 2*EXT4_EXT_ZERO_LEN && may_zeroout) {
-               err =  ext4_ext_zeroout(inode, &orig_ex);
+       if (ee_len <= 2*EXT4_EXT_ZERO_LEN &&
+           (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
+               err = ext4_ext_zeroout(inode, ex);
                if (err)
-                       goto fix_extent_len;
-               /* update the extent length and mark as initialized */
-               ex->ee_block = orig_ex.ee_block;
-               ex->ee_len   = orig_ex.ee_len;
-               ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
-               ext4_ext_dirty(handle, inode, path + depth);
-               /* zeroed the full extent */
-               return allocated;
-       }
+                       goto out;
 
-       /* ex1: ee_block to map->m_lblk - 1 : uninitialized */
-       if (map->m_lblk > ee_block) {
-               ex1 = ex;
-               ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
-               ext4_ext_mark_uninitialized(ex1);
-               ex2 = &newex;
+               err = ext4_ext_get_access(handle, inode, path + depth);
+               if (err)
+                       goto out;
+               ext4_ext_mark_initialized(ex);
+               ext4_ext_try_to_merge(inode, path, ex);
+               err = ext4_ext_dirty(handle, inode, path + depth);
+               goto out;
        }
+
        /*
-        * for sanity, update the length of the ex2 extent before
-        * we insert ex3, if ex1 is NULL. This is to avoid temporary
-        * overlap of blocks.
+        * four cases:
+        * 1. split the extent into three extents.
+        * 2. split the extent into two extents, zeroout the first half.
+        * 3. split the extent into two extents, zeroout the second half.
+        * 4. split the extent into two extents without zeroout.
         */
-       if (!ex1 && allocated > map->m_len)
-               ex2->ee_len = cpu_to_le16(map->m_len);
-       /* ex3: to ee_block + ee_len : uninitialised */
-       if (allocated > map->m_len) {
-               unsigned int newdepth;
-               /* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */
-               if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) {
-                       /*
-                        * map->m_lblk == ee_block is handled by the zerouout
-                        * at the beginning.
-                        * Mark first half uninitialized.
-                        * Mark second half initialized and zero out the
-                        * initialized extent
-                        */
-                       ex->ee_block = orig_ex.ee_block;
-                       ex->ee_len   = cpu_to_le16(ee_len - allocated);
-                       ext4_ext_mark_uninitialized(ex);
-                       ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
-                       ext4_ext_dirty(handle, inode, path + depth);
-
-                       ex3 = &newex;
-                       ex3->ee_block = cpu_to_le32(map->m_lblk);
-                       ext4_ext_store_pblock(ex3, newblock);
-                       ex3->ee_len = cpu_to_le16(allocated);
-                       err = ext4_ext_insert_extent(handle, inode, path,
-                                                       ex3, 0);
-                       if (err == -ENOSPC) {
-                               err =  ext4_ext_zeroout(inode, &orig_ex);
-                               if (err)
-                                       goto fix_extent_len;
-                               ex->ee_block = orig_ex.ee_block;
-                               ex->ee_len   = orig_ex.ee_len;
-                               ext4_ext_store_pblock(ex,
-                                       ext4_ext_pblock(&orig_ex));
-                               ext4_ext_dirty(handle, inode, path + depth);
-                               /* blocks available from map->m_lblk */
-                               return allocated;
-
-                       } else if (err)
-                               goto fix_extent_len;
+       split_map.m_lblk = map->m_lblk;
+       split_map.m_len = map->m_len;
 
-                       /*
-                        * We need to zero out the second half because
-                        * an fallocate request can update file size and
-                        * converting the second half to initialized extent
-                        * implies that we can leak some junk data to user
-                        * space.
-                        */
-                       err =  ext4_ext_zeroout(inode, ex3);
-                       if (err) {
-                               /*
-                                * We should actually mark the
-                                * second half as uninit and return error
-                                * Insert would have changed the extent
-                                */
-                               depth = ext_depth(inode);
-                               ext4_ext_drop_refs(path);
-                               path = ext4_ext_find_extent(inode, map->m_lblk,
-                                                           path);
-                               if (IS_ERR(path)) {
-                                       err = PTR_ERR(path);
-                                       return err;
-                               }
-                               /* get the second half extent details */
-                               ex = path[depth].p_ext;
-                               err = ext4_ext_get_access(handle, inode,
-                                                               path + depth);
+       if (allocated > map->m_len) {
+               if (allocated <= EXT4_EXT_ZERO_LEN &&
+                   (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
+                       /* case 3 */
+                       zero_ex.ee_block =
+                                        cpu_to_le32(map->m_lblk);
+                       zero_ex.ee_len = cpu_to_le16(allocated);
+                       ext4_ext_store_pblock(&zero_ex,
+                               ext4_ext_pblock(ex) + map->m_lblk - ee_block);
+                       err = ext4_ext_zeroout(inode, &zero_ex);
+                       if (err)
+                               goto out;
+                       split_map.m_lblk = map->m_lblk;
+                       split_map.m_len = allocated;
+               } else if ((map->m_lblk - ee_block + map->m_len <
+                          EXT4_EXT_ZERO_LEN) &&
+                          (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
+                       /* case 2 */
+                       if (map->m_lblk != ee_block) {
+                               zero_ex.ee_block = ex->ee_block;
+                               zero_ex.ee_len = cpu_to_le16(map->m_lblk -
+                                                       ee_block);
+                               ext4_ext_store_pblock(&zero_ex,
+                                                     ext4_ext_pblock(ex));
+                               err = ext4_ext_zeroout(inode, &zero_ex);
                                if (err)
-                                       return err;
-                               ext4_ext_mark_uninitialized(ex);
-                               ext4_ext_dirty(handle, inode, path + depth);
-                               return err;
+                                       goto out;
                        }
 
-                       /* zeroed the second half */
-                       return allocated;
-               }
-               ex3 = &newex;
-               ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
-               ext4_ext_store_pblock(ex3, newblock + map->m_len);
-               ex3->ee_len = cpu_to_le16(allocated - map->m_len);
-               ext4_ext_mark_uninitialized(ex3);
-               err = ext4_ext_insert_extent(handle, inode, path, ex3, 0);
-               if (err == -ENOSPC && may_zeroout) {
-                       err =  ext4_ext_zeroout(inode, &orig_ex);
-                       if (err)
-                               goto fix_extent_len;
-                       /* update the extent length and mark as initialized */
-                       ex->ee_block = orig_ex.ee_block;
-                       ex->ee_len   = orig_ex.ee_len;
-                       ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
-                       ext4_ext_dirty(handle, inode, path + depth);
-                       /* zeroed the full extent */
-                       /* blocks available from map->m_lblk */
-                       return allocated;
-
-               } else if (err)
-                       goto fix_extent_len;
-               /*
-                * The depth, and hence eh & ex might change
-                * as part of the insert above.
-                */
-               newdepth = ext_depth(inode);
-               /*
-                * update the extent length after successful insert of the
-                * split extent
-                */
-               ee_len -= ext4_ext_get_actual_len(ex3);
-               orig_ex.ee_len = cpu_to_le16(ee_len);
-               may_zeroout = ee_block + ee_len <= eof_block;
-
-               depth = newdepth;
-               ext4_ext_drop_refs(path);
-               path = ext4_ext_find_extent(inode, map->m_lblk, path);
-               if (IS_ERR(path)) {
-                       err = PTR_ERR(path);
-                       goto out;
+                       split_map.m_lblk = ee_block;
+                       split_map.m_len = map->m_lblk - ee_block + map->m_len;
+                       allocated = map->m_len;
                }
-               eh = path[depth].p_hdr;
-               ex = path[depth].p_ext;
-               if (ex2 != &newex)
-                       ex2 = ex;
-
-               err = ext4_ext_get_access(handle, inode, path + depth);
-               if (err)
-                       goto out;
+       }
 
-               allocated = map->m_len;
+       allocated = ext4_split_extent(handle, inode, path,
+                                      &split_map, split_flag, 0);
+       if (allocated < 0)
+               err = allocated;
 
-               /* If extent has less than EXT4_EXT_ZERO_LEN and we are trying
-                * to insert a extent in the middle zerout directly
-                * otherwise give the extent a chance to merge to left
-                */
-               if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
-                       map->m_lblk != ee_block && may_zeroout) {
-                       err =  ext4_ext_zeroout(inode, &orig_ex);
-                       if (err)
-                               goto fix_extent_len;
-                       /* update the extent length and mark as initialized */
-                       ex->ee_block = orig_ex.ee_block;
-                       ex->ee_len   = orig_ex.ee_len;
-                       ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
-                       ext4_ext_dirty(handle, inode, path + depth);
-                       /* zero out the first half */
-                       /* blocks available from map->m_lblk */
-                       return allocated;
-               }
-       }
-       /*
-        * If there was a change of depth as part of the
-        * insertion of ex3 above, we need to update the length
-        * of the ex1 extent again here
-        */
-       if (ex1 && ex1 != ex) {
-               ex1 = ex;
-               ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
-               ext4_ext_mark_uninitialized(ex1);
-               ex2 = &newex;
-       }
-       /* ex2: map->m_lblk to map->m_lblk + maxblocks-1 : initialised */
-       ex2->ee_block = cpu_to_le32(map->m_lblk);
-       ext4_ext_store_pblock(ex2, newblock);
-       ex2->ee_len = cpu_to_le16(allocated);
-       if (ex2 != ex)
-               goto insert;
-       /*
-        * New (initialized) extent starts from the first block
-        * in the current extent. i.e., ex2 == ex
-        * We have to see if it can be merged with the extent
-        * on the left.
-        */
-       if (ex2 > EXT_FIRST_EXTENT(eh)) {
-               /*
-                * To merge left, pass "ex2 - 1" to try_to_merge(),
-                * since it merges towards right _only_.
-                */
-               ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
-               if (ret) {
-                       err = ext4_ext_correct_indexes(handle, inode, path);
-                       if (err)
-                               goto out;
-                       depth = ext_depth(inode);
-                       ex2--;
-               }
-       }
-       /*
-        * Try to Merge towards right. This might be required
-        * only when the whole extent is being written to.
-        * i.e. ex2 == ex and ex3 == NULL.
-        */
-       if (!ex3) {
-               ret = ext4_ext_try_to_merge(inode, path, ex2);
-               if (ret) {
-                       err = ext4_ext_correct_indexes(handle, inode, path);
-                       if (err)
-                               goto out;
-               }
-       }
-       /* Mark modified extent as dirty */
-       err = ext4_ext_dirty(handle, inode, path + depth);
-       goto out;
-insert:
-       err = ext4_ext_insert_extent(handle, inode, path, &newex, 0);
-       if (err == -ENOSPC && may_zeroout) {
-               err =  ext4_ext_zeroout(inode, &orig_ex);
-               if (err)
-                       goto fix_extent_len;
-               /* update the extent length and mark as initialized */
-               ex->ee_block = orig_ex.ee_block;
-               ex->ee_len   = orig_ex.ee_len;
-               ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
-               ext4_ext_dirty(handle, inode, path + depth);
-               /* zero out the first half */
-               return allocated;
-       } else if (err)
-               goto fix_extent_len;
 out:
-       ext4_ext_show_leaf(inode, path);
        return err ? err : allocated;
-
-fix_extent_len:
-       ex->ee_block = orig_ex.ee_block;
-       ex->ee_len   = orig_ex.ee_len;
-       ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
-       ext4_ext_mark_uninitialized(ex);
-       ext4_ext_dirty(handle, inode, path + depth);
-       return err;
 }
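
The rewritten ext4_ext_convert_to_initialized() above boils down to the zeroout-or-split decision listed in its "four cases" comment: zero out one side when the uninitialized extent is short enough, otherwise split without zeroing. A standalone sketch of that decision with toy thresholds and numbers (EXT4_EXT_ZERO_LEN is mimicked by TOY_ZERO_LEN; nothing below is the kernel's code):

#include <stdio.h>

#define TOY_ZERO_LEN 7

int main(void)
{
        unsigned ee_block = 0, ee_len = 32;      /* uninitialized extent       */
        unsigned m_lblk = 4, m_len = 8;          /* range being written        */
        unsigned allocated = ee_len - (m_lblk - ee_block);
        int may_zeroout = 1;                     /* extent fully inside i_size */

        if (ee_len <= 2 * TOY_ZERO_LEN && may_zeroout)
                puts("zero out the whole extent, mark it initialized");
        else if (allocated > m_len && allocated <= TOY_ZERO_LEN && may_zeroout)
                puts("case 3: zero out the second half, split once");
        else if (allocated > m_len &&
                 m_lblk - ee_block + m_len < TOY_ZERO_LEN && may_zeroout)
                puts("case 2: zero out the first half, split once");
        else
                puts("case 1/4: split into two or three extents, no zeroing");
        return 0;
}
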
 
 /*
@@ -2871,15 +3072,11 @@ static int ext4_split_unwritten_extents(handle_t *handle,
                                        struct ext4_ext_path *path,
                                        int flags)
 {
-       struct ext4_extent *ex, newex, orig_ex;
-       struct ext4_extent *ex1 = NULL;
-       struct ext4_extent *ex2 = NULL;
-       struct ext4_extent *ex3 = NULL;
-       ext4_lblk_t ee_block, eof_block;
-       unsigned int allocated, ee_len, depth;
-       ext4_fsblk_t newblock;
-       int err = 0;
-       int may_zeroout;
+       ext4_lblk_t eof_block;
+       ext4_lblk_t ee_block;
+       struct ext4_extent *ex;
+       unsigned int ee_len;
+       int split_flag = 0, depth;
 
        ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
                "block %llu, max_blocks %u\n", inode->i_ino,
@@ -2889,156 +3086,22 @@ static int ext4_split_unwritten_extents(handle_t *handle,
                inode->i_sb->s_blocksize_bits;
        if (eof_block < map->m_lblk + map->m_len)
                eof_block = map->m_lblk + map->m_len;
-
-       depth = ext_depth(inode);
-       ex = path[depth].p_ext;
-       ee_block = le32_to_cpu(ex->ee_block);
-       ee_len = ext4_ext_get_actual_len(ex);
-       allocated = ee_len - (map->m_lblk - ee_block);
-       newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);
-
-       ex2 = ex;
-       orig_ex.ee_block = ex->ee_block;
-       orig_ex.ee_len   = cpu_to_le16(ee_len);
-       ext4_ext_store_pblock(&orig_ex, ext4_ext_pblock(ex));
-
        /*
         * It is safe to convert extent to initialized via explicit
         * zeroout only if extent is fully insde i_size or new_size.
         */
-       may_zeroout = ee_block + ee_len <= eof_block;
-
-       /*
-        * If the uninitialized extent begins at the same logical
-        * block where the write begins, and the write completely
-        * covers the extent, then we don't need to split it.
-        */
-       if ((map->m_lblk == ee_block) && (allocated <= map->m_len))
-               return allocated;
-
-       err = ext4_ext_get_access(handle, inode, path + depth);
-       if (err)
-               goto out;
-       /* ex1: ee_block to map->m_lblk - 1 : uninitialized */
-       if (map->m_lblk > ee_block) {
-               ex1 = ex;
-               ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
-               ext4_ext_mark_uninitialized(ex1);
-               ex2 = &newex;
-       }
-       /*
-        * for sanity, update the length of the ex2 extent before
-        * we insert ex3, if ex1 is NULL. This is to avoid temporary
-        * overlap of blocks.
-        */
-       if (!ex1 && allocated > map->m_len)
-               ex2->ee_len = cpu_to_le16(map->m_len);
-       /* ex3: to ee_block + ee_len : uninitialised */
-       if (allocated > map->m_len) {
-               unsigned int newdepth;
-               ex3 = &newex;
-               ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
-               ext4_ext_store_pblock(ex3, newblock + map->m_len);
-               ex3->ee_len = cpu_to_le16(allocated - map->m_len);
-               ext4_ext_mark_uninitialized(ex3);
-               err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
-               if (err == -ENOSPC && may_zeroout) {
-                       err =  ext4_ext_zeroout(inode, &orig_ex);
-                       if (err)
-                               goto fix_extent_len;
-                       /* update the extent length and mark as initialized */
-                       ex->ee_block = orig_ex.ee_block;
-                       ex->ee_len   = orig_ex.ee_len;
-                       ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
-                       ext4_ext_dirty(handle, inode, path + depth);
-                       /* zeroed the full extent */
-                       /* blocks available from map->m_lblk */
-                       return allocated;
-
-               } else if (err)
-                       goto fix_extent_len;
-               /*
-                * The depth, and hence eh & ex might change
-                * as part of the insert above.
-                */
-               newdepth = ext_depth(inode);
-               /*
-                * update the extent length after successful insert of the
-                * split extent
-                */
-               ee_len -= ext4_ext_get_actual_len(ex3);
-               orig_ex.ee_len = cpu_to_le16(ee_len);
-               may_zeroout = ee_block + ee_len <= eof_block;
-
-               depth = newdepth;
-               ext4_ext_drop_refs(path);
-               path = ext4_ext_find_extent(inode, map->m_lblk, path);
-               if (IS_ERR(path)) {
-                       err = PTR_ERR(path);
-                       goto out;
-               }
-               ex = path[depth].p_ext;
-               if (ex2 != &newex)
-                       ex2 = ex;
-
-               err = ext4_ext_get_access(handle, inode, path + depth);
-               if (err)
-                       goto out;
+       depth = ext_depth(inode);
+       ex = path[depth].p_ext;
+       ee_block = le32_to_cpu(ex->ee_block);
+       ee_len = ext4_ext_get_actual_len(ex);
 
-               allocated = map->m_len;
-       }
-       /*
-        * If there was a change of depth as part of the
-        * insertion of ex3 above, we need to update the length
-        * of the ex1 extent again here
-        */
-       if (ex1 && ex1 != ex) {
-               ex1 = ex;
-               ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
-               ext4_ext_mark_uninitialized(ex1);
-               ex2 = &newex;
-       }
-       /*
-        * ex2: map->m_lblk to map->m_lblk + map->m_len-1 : to be written
-        * using direct I/O, uninitialised still.
-        */
-       ex2->ee_block = cpu_to_le32(map->m_lblk);
-       ext4_ext_store_pblock(ex2, newblock);
-       ex2->ee_len = cpu_to_le16(allocated);
-       ext4_ext_mark_uninitialized(ex2);
-       if (ex2 != ex)
-               goto insert;
-       /* Mark modified extent as dirty */
-       err = ext4_ext_dirty(handle, inode, path + depth);
-       ext_debug("out here\n");
-       goto out;
-insert:
-       err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
-       if (err == -ENOSPC && may_zeroout) {
-               err =  ext4_ext_zeroout(inode, &orig_ex);
-               if (err)
-                       goto fix_extent_len;
-               /* update the extent length and mark as initialized */
-               ex->ee_block = orig_ex.ee_block;
-               ex->ee_len   = orig_ex.ee_len;
-               ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
-               ext4_ext_dirty(handle, inode, path + depth);
-               /* zero out the first half */
-               return allocated;
-       } else if (err)
-               goto fix_extent_len;
-out:
-       ext4_ext_show_leaf(inode, path);
-       return err ? err : allocated;
+       split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
+       split_flag |= EXT4_EXT_MARK_UNINIT2;
 
-fix_extent_len:
-       ex->ee_block = orig_ex.ee_block;
-       ex->ee_len   = orig_ex.ee_len;
-       ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
-       ext4_ext_mark_uninitialized(ex);
-       ext4_ext_dirty(handle, inode, path + depth);
-       return err;
+       flags |= EXT4_GET_BLOCKS_PRE_IO;
+       return ext4_split_extent(handle, inode, path, map, split_flag, flags);
 }
+
 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
                                              struct inode *inode,
                                              struct ext4_ext_path *path)
@@ -3047,46 +3110,27 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
        struct ext4_extent_header *eh;
        int depth;
        int err = 0;
-       int ret = 0;
 
        depth = ext_depth(inode);
        eh = path[depth].p_hdr;
        ex = path[depth].p_ext;
 
+       ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
+               "block %llu, max_blocks %u\n", inode->i_ino,
+               (unsigned long long)le32_to_cpu(ex->ee_block),
+               ext4_ext_get_actual_len(ex));
+
        err = ext4_ext_get_access(handle, inode, path + depth);
        if (err)
                goto out;
        /* first mark the extent as initialized */
        ext4_ext_mark_initialized(ex);
 
-       /*
-        * We have to see if it can be merged with the extent
-        * on the left.
-        */
-       if (ex > EXT_FIRST_EXTENT(eh)) {
-               /*
-                * To merge left, pass "ex - 1" to try_to_merge(),
-                * since it merges towards right _only_.
-                */
-               ret = ext4_ext_try_to_merge(inode, path, ex - 1);
-               if (ret) {
-                       err = ext4_ext_correct_indexes(handle, inode, path);
-                       if (err)
-                               goto out;
-                       depth = ext_depth(inode);
-                       ex--;
-               }
-       }
-       /*
-        * Try to Merge towards right.
+       /* note: ext4_ext_correct_indexes() isn't needed here because
+        * borders are not changed
         */
-       ret = ext4_ext_try_to_merge(inode, path, ex);
-       if (ret) {
-               err = ext4_ext_correct_indexes(handle, inode, path);
-               if (err)
-                       goto out;
-               depth = ext_depth(inode);
-       }
+       ext4_ext_try_to_merge(inode, path, ex);
+
        /* Mark modified extent as dirty */
        err = ext4_ext_dirty(handle, inode, path + depth);
 out:
@@ -3302,15 +3346,19 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
        ext4_fsblk_t newblock = 0;
        int err = 0, depth, ret;
        unsigned int allocated = 0;
+       unsigned int punched_out = 0;
+       unsigned int result = 0;
        struct ext4_allocation_request ar;
        ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
+       struct ext4_map_blocks punch_map;
 
        ext_debug("blocks %u/%u requested for inode %lu\n",
                  map->m_lblk, map->m_len, inode->i_ino);
        trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
 
        /* check in cache */
-       if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
+       if (ext4_ext_in_cache(inode, map->m_lblk, &newex) &&
+               ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0)) {
                if (!newex.ee_start_lo && !newex.ee_start_hi) {
                        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
                                /*
@@ -3375,16 +3423,84 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
                        ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
                                  ee_block, ee_len, newblock);
 
-                       /* Do not put uninitialized extent in the cache */
-                       if (!ext4_ext_is_uninitialized(ex)) {
-                               ext4_ext_put_in_cache(inode, ee_block,
-                                                       ee_len, ee_start);
-                               goto out;
+                       if ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0) {
+                               /*
+                                * Do not put uninitialized extent
+                                * in the cache
+                                */
+                               if (!ext4_ext_is_uninitialized(ex)) {
+                                       ext4_ext_put_in_cache(inode, ee_block,
+                                               ee_len, ee_start);
+                                       goto out;
+                               }
+                               ret = ext4_ext_handle_uninitialized_extents(
+                                       handle, inode, map, path, flags,
+                                       allocated, newblock);
+                               return ret;
                        }
-                       ret = ext4_ext_handle_uninitialized_extents(handle,
-                                       inode, map, path, flags, allocated,
-                                       newblock);
-                       return ret;
+
+                       /*
+                        * Punch out the map length, but only to the
+                        * end of the extent
+                        */
+                       punched_out = allocated < map->m_len ?
+                               allocated : map->m_len;
+
+                       /*
+                        * Since extents need to be converted to
+                        * uninitialized, they must fit in an
+                        * uninitialized extent
+                        */
+                       if (punched_out > EXT_UNINIT_MAX_LEN)
+                               punched_out = EXT_UNINIT_MAX_LEN;
+
+                       punch_map.m_lblk = map->m_lblk;
+                       punch_map.m_pblk = newblock;
+                       punch_map.m_len = punched_out;
+                       punch_map.m_flags = 0;
+
+                       /* Check to see if the extent needs to be split */
+                       if (punch_map.m_len != ee_len ||
+                               punch_map.m_lblk != ee_block) {
+
+                               ret = ext4_split_extent(handle, inode,
+                               path, &punch_map, 0,
+                               EXT4_GET_BLOCKS_PUNCH_OUT_EXT |
+                               EXT4_GET_BLOCKS_PRE_IO);
+
+                               if (ret < 0) {
+                                       err = ret;
+                                       goto out2;
+                               }
+                               /*
+                                * find extent for the block at
+                                * the start of the hole
+                                */
+                               ext4_ext_drop_refs(path);
+                               kfree(path);
+
+                               path = ext4_ext_find_extent(inode,
+                               map->m_lblk, NULL);
+                               if (IS_ERR(path)) {
+                                       err = PTR_ERR(path);
+                                       path = NULL;
+                                       goto out2;
+                               }
+
+                               depth = ext_depth(inode);
+                               ex = path[depth].p_ext;
+                               ee_len = ext4_ext_get_actual_len(ex);
+                               ee_block = le32_to_cpu(ex->ee_block);
+                               ee_start = ext4_ext_pblock(ex);
+
+                       }
+
+                       ext4_ext_mark_uninitialized(ex);
+
+                       err = ext4_ext_remove_space(inode, map->m_lblk,
+                               map->m_lblk + punched_out);
+
+                       goto out2;
                }
        }
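
The clamping above amounts to: punch no more than was requested, no more than the extent still covers from map->m_lblk, and never more than fits in one uninitialized extent. A stand-alone sketch of that arithmetic (function and variable names are illustrative, not the kernel's, and 32768 is only an example limit):

#include <stdio.h>

/* Illustrative only: mirrors the clamping above.
 * 'allocated'  - blocks left in the extent from map->m_lblk onward
 * 'requested'  - map->m_len
 * 'max_uninit' - stands in for EXT_UNINIT_MAX_LEN, the largest length an
 *                uninitialized extent may have
 */
static unsigned int punch_len(unsigned int allocated, unsigned int requested,
			      unsigned int max_uninit)
{
	unsigned int punched = allocated < requested ? allocated : requested;

	return punched > max_uninit ? max_uninit : punched;
}

int main(void)
{
	/* e.g. 40000 blocks left in the extent, 50000 requested,
	 * 32768 used purely as an example limit: prints 32768 */
	printf("%u\n", punch_len(40000, 50000, 32768));
	return 0;
}
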
 
@@ -3446,6 +3562,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
        else
                /* disable in-core preallocation for non-regular files */
                ar.flags = 0;
+       if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
+               ar.flags |= EXT4_MB_HINT_NOPREALLOC;
        newblock = ext4_mb_new_blocks(handle, &ar, &err);
        if (!newblock)
                goto out2;
@@ -3529,7 +3647,11 @@ out2:
        }
        trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
                newblock, map->m_len, err ? err : allocated);
-       return err ? err : allocated;
+
+       result = (flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) ?
+                       punched_out : allocated;
+
+       return err ? err : result;
 }
 
 void ext4_ext_truncate(struct inode *inode)
@@ -3577,7 +3699,7 @@ void ext4_ext_truncate(struct inode *inode)
 
        last_block = (inode->i_size + sb->s_blocksize - 1)
                        >> EXT4_BLOCK_SIZE_BITS(sb);
-       err = ext4_ext_remove_space(inode, last_block);
+       err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCK);
 
        /* In a multi-transaction truncate, we only make the final
         * transaction synchronous.
@@ -3585,8 +3707,9 @@ void ext4_ext_truncate(struct inode *inode)
        if (IS_SYNC(inode))
                ext4_handle_sync(handle);
 
-out_stop:
        up_write(&EXT4_I(inode)->i_data_sem);
+
+out_stop:
        /*
         * If this was a simple ftruncate() and the file will remain alive,
         * then we need to clear up the orphan record which we created above.
@@ -3651,10 +3774,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
        struct ext4_map_blocks map;
        unsigned int credits, blkbits = inode->i_blkbits;
 
-       /* We only support the FALLOC_FL_KEEP_SIZE mode */
-       if (mode & ~FALLOC_FL_KEEP_SIZE)
-               return -EOPNOTSUPP;
-
        /*
         * currently supporting (pre)allocate mode for extent-based
         * files _only_
@@ -3662,6 +3781,13 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                return -EOPNOTSUPP;
 
+       /* Return error if mode is not supported */
+       if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+               return -EOPNOTSUPP;
+
+       if (mode & FALLOC_FL_PUNCH_HOLE)
+               return ext4_punch_hole(file, offset, len);
+
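
For reference, this new branch is what a user-space fallocate(2) call with FALLOC_FL_PUNCH_HOLE ends up in. A minimal sketch (path, offset and length are made up; FALLOC_FL_KEEP_SIZE is included because the VFS generally requires it together with PUNCH_HOLE):

#define _GNU_SOURCE
#include <fcntl.h>           /* fallocate() */
#include <linux/falloc.h>    /* FALLOC_FL_PUNCH_HOLE, FALLOC_FL_KEEP_SIZE */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path, offset and length are arbitrary examples. */
	int fd = open("/tmp/testfile", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Deallocate 1 MiB starting at 4 MiB; KEEP_SIZE leaves i_size alone. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4 << 20, 1 << 20) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}
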
        trace_ext4_fallocate_enter(inode, offset, len, mode);
        map.m_lblk = offset >> blkbits;
        /*
@@ -3691,7 +3817,8 @@ retry:
                        break;
                }
                ret = ext4_map_blocks(handle, inode, &map,
-                                     EXT4_GET_BLOCKS_CREATE_UNINIT_EXT);
+                                     EXT4_GET_BLOCKS_CREATE_UNINIT_EXT |
+                                     EXT4_GET_BLOCKS_NO_NORMALIZE);
                if (ret <= 0) {
 #ifdef EXT4FS_DEBUG
                        WARN_ON(ret <= 0);
@@ -3822,6 +3949,7 @@ static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
                pgoff_t         last_offset;
                pgoff_t         offset;
                pgoff_t         index;
+               pgoff_t         start_index = 0;
                struct page     **pages = NULL;
                struct buffer_head *bh = NULL;
                struct buffer_head *head = NULL;
@@ -3848,39 +3976,57 @@ out:
                                kfree(pages);
                                return EXT_CONTINUE;
                        }
+                       index = 0;
 
+next_page:
                        /* Try to find the 1st mapped buffer. */
-                       end = ((__u64)pages[0]->index << PAGE_SHIFT) >>
+                       end = ((__u64)pages[index]->index << PAGE_SHIFT) >>
                                  blksize_bits;
-                       if (!page_has_buffers(pages[0]))
+                       if (!page_has_buffers(pages[index]))
                                goto out;
-                       head = page_buffers(pages[0]);
+                       head = page_buffers(pages[index]);
                        if (!head)
                                goto out;
 
+                       index++;
                        bh = head;
                        do {
-                               if (buffer_mapped(bh)) {
+                               if (end >= newex->ec_block +
+                                       newex->ec_len)
+                                       /* The buffer is out of
+                                        * the request range.
+                                        */
+                                       goto out;
+
+                               if (buffer_mapped(bh) &&
+                                   end >= newex->ec_block) {
+                                       start_index = index - 1;
                                        /* get the 1st mapped buffer. */
-                                       if (end > newex->ec_block +
-                                               newex->ec_len)
-                                               /* The buffer is out of
-                                                * the request range.
-                                                */
-                                               goto out;
                                        goto found_mapped_buffer;
                                }
+
                                bh = bh->b_this_page;
                                end++;
                        } while (bh != head);
 
-                       /* No mapped buffer found. */
-                       goto out;
+                       /* No mapped buffer in the range found in this page;
+                        * we need to look up the next page.
+                        */
+                       if (index >= ret) {
+                               /* There is no page left, but we need to limit
+                                * newex->ec_len.
+                                */
+                               newex->ec_len = end - newex->ec_block;
+                               goto out;
+                       }
+                       goto next_page;
                } else {
                        /*Find contiguous delayed buffers. */
                        if (ret > 0 && pages[0]->index == last_offset)
                                head = page_buffers(pages[0]);
                        bh = head;
+                       index = 1;
+                       start_index = 0;
                }
 
 found_mapped_buffer:
@@ -3903,7 +4049,7 @@ found_mapped_buffer:
                                end++;
                        } while (bh != head);
 
-                       for (index = 1; index < ret; index++) {
+                       for (; index < ret; index++) {
                                if (!page_has_buffers(pages[index])) {
                                        bh = NULL;
                                        break;
@@ -3913,8 +4059,10 @@ found_mapped_buffer:
                                        bh = NULL;
                                        break;
                                }
+
                                if (pages[index]->index !=
-                                       pages[0]->index + index) {
+                                   pages[start_index]->index + index
+                                   - start_index) {
                                        /* Blocks are not contiguous. */
                                        bh = NULL;
                                        break;
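
ext4_ext_fiemap_cb() is driven by the FIEMAP ioctl; a minimal user-space query that asks for the first extent of a file could look roughly like this (single-extent buffer, error handling trimmed):

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>       /* FS_IOC_FIEMAP */
#include <linux/fiemap.h>   /* struct fiemap, struct fiemap_extent */

int main(int argc, char **argv)
{
	union {
		struct fiemap fm;
		char buf[sizeof(struct fiemap) + sizeof(struct fiemap_extent)];
	} req;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;
	memset(&req, 0, sizeof(req));
	req.fm.fm_start = 0;
	req.fm.fm_length = ~0ULL;            /* map the whole file */
	req.fm.fm_flags = FIEMAP_FLAG_SYNC;  /* flush delayed allocation first */
	req.fm.fm_extent_count = 1;          /* room for one extent record */
	if (ioctl(fd, FS_IOC_FIEMAP, &req.fm) == 0 && req.fm.fm_mapped_extents)
		printf("logical %llu physical %llu len %llu flags 0x%x\n",
		       (unsigned long long)req.fm.fm_extents[0].fe_logical,
		       (unsigned long long)req.fm.fm_extents[0].fe_physical,
		       (unsigned long long)req.fm.fm_extents[0].fe_length,
		       req.fm.fm_extents[0].fe_flags);
	close(fd);
	return 0;
}
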
@@ -4006,6 +4154,177 @@ static int ext4_xattr_fiemap(struct inode *inode,
        return (error < 0 ? error : 0);
 }
 
+/*
+ * ext4_ext_punch_hole
+ *
+ * Punches a hole of "length" bytes in a file starting
+ * at byte "offset"
+ *
+ * @inode:  The inode of the file to punch a hole in
+ * @offset: The starting byte offset of the hole
+ * @length: The length of the hole
+ *
+ * Returns the number of blocks removed or a negative value on error
+ */
+int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
+{
+       struct inode *inode = file->f_path.dentry->d_inode;
+       struct super_block *sb = inode->i_sb;
+       struct ext4_ext_cache cache_ex;
+       ext4_lblk_t first_block, last_block, num_blocks, iblock, max_blocks;
+       struct address_space *mapping = inode->i_mapping;
+       struct ext4_map_blocks map;
+       handle_t *handle;
+       loff_t first_block_offset, last_block_offset, block_len;
+       loff_t first_page, last_page, first_page_offset, last_page_offset;
+       int ret, credits, blocks_released, err = 0;
+
+       first_block = (offset + sb->s_blocksize - 1) >>
+               EXT4_BLOCK_SIZE_BITS(sb);
+       last_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
+
+       first_block_offset = first_block << EXT4_BLOCK_SIZE_BITS(sb);
+       last_block_offset = last_block << EXT4_BLOCK_SIZE_BITS(sb);
+
+       first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+       last_page = (offset + length) >> PAGE_CACHE_SHIFT;
+
+       first_page_offset = first_page << PAGE_CACHE_SHIFT;
+       last_page_offset = last_page << PAGE_CACHE_SHIFT;
+
+       /*
+        * Write out all dirty pages to avoid race conditions,
+        * then release them.
+        */
+       if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+               err = filemap_write_and_wait_range(mapping,
+                       first_page_offset == 0 ? 0 : first_page_offset-1,
+                       last_page_offset);
+
+               if (err)
+                       return err;
+       }
+
+       /* Now release the pages */
+       if (last_page_offset > first_page_offset) {
+               truncate_inode_pages_range(mapping, first_page_offset,
+                                          last_page_offset-1);
+       }
+
+       /* finish any pending end_io work */
+       ext4_flush_completed_IO(inode);
+
+       credits = ext4_writepage_trans_blocks(inode);
+       handle = ext4_journal_start(inode, credits);
+       if (IS_ERR(handle))
+               return PTR_ERR(handle);
+
+       err = ext4_orphan_add(handle, inode);
+       if (err)
+               goto out;
+
+       /*
+        * Now we need to zero out the non-block-aligned data.
+        * If the hole lies entirely within one block, just
+        * zero out the middle of that block
+        */
+       if (first_block > last_block)
+               ext4_block_zero_page_range(handle, mapping, offset, length);
+       else {
+               /* zero out the head of the hole before the first block */
+               block_len  = first_block_offset - offset;
+               if (block_len > 0)
+                       ext4_block_zero_page_range(handle, mapping,
+                                                  offset, block_len);
+
+               /* zero out the tail of the hole after the last block */
+               block_len = offset + length - last_block_offset;
+               if (block_len > 0) {
+                       ext4_block_zero_page_range(handle, mapping,
+                                       last_block_offset, block_len);
+               }
+       }
+
+       /* If there are no blocks to remove, return now */
+       if (first_block >= last_block)
+               goto out;
+
+       down_write(&EXT4_I(inode)->i_data_sem);
+       ext4_ext_invalidate_cache(inode);
+       ext4_discard_preallocations(inode);
+
+       /*
+        * Loop over all the blocks and identify blocks
+        * that need to be punched out
+        */
+       iblock = first_block;
+       blocks_released = 0;
+       while (iblock < last_block) {
+               max_blocks = last_block - iblock;
+               num_blocks = 1;
+               memset(&map, 0, sizeof(map));
+               map.m_lblk = iblock;
+               map.m_len = max_blocks;
+               ret = ext4_ext_map_blocks(handle, inode, &map,
+                       EXT4_GET_BLOCKS_PUNCH_OUT_EXT);
+
+               if (ret > 0) {
+                       blocks_released += ret;
+                       num_blocks = ret;
+               } else if (ret == 0) {
+                       /*
+                        * If map blocks could not find the block,
+                        * then it is in a hole.  If the hole was
+                        * not already cached, then map blocks should
+                        * put it in the cache.  So we can get the hole
+                        * out of the cache
+                        */
+                       memset(&cache_ex, 0, sizeof(cache_ex));
+                       if ((ext4_ext_check_cache(inode, iblock, &cache_ex)) &&
+                               !cache_ex.ec_start) {
+
+                               /* The hole is cached */
+                               num_blocks = cache_ex.ec_block +
+                               cache_ex.ec_len - iblock;
+
+                       } else {
+                               /* The block could not be identified */
+                               err = -EIO;
+                               break;
+                       }
+               } else {
+                       /* Map blocks error */
+                       err = ret;
+                       break;
+               }
+
+               if (num_blocks == 0) {
+                       /* This condition should never happen */
+                       ext_debug("Block lookup failed");
+                       err = -EIO;
+                       break;
+               }
+
+               iblock += num_blocks;
+       }
+
+       if (blocks_released > 0) {
+               ext4_ext_invalidate_cache(inode);
+               ext4_discard_preallocations(inode);
+       }
+
+       if (IS_SYNC(inode))
+               ext4_handle_sync(handle);
+
+       up_write(&EXT4_I(inode)->i_data_sem);
+
+out:
+       ext4_orphan_del(handle, inode);
+       inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+       ext4_mark_inode_dirty(handle, inode);
+       ext4_journal_stop(handle);
+       return err;
+}
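
The first_block/last_block arithmetic above rounds the start of the hole up and its end down to filesystem block boundaries, so only whole blocks inside the range are deallocated while the partial head and tail are zeroed in place. A hedged, user-space sketch of that rounding (names and numbers are examples):

#include <stdio.h>

/* Illustrative only: 'blkbits' plays the role of EXT4_BLOCK_SIZE_BITS(sb). */
static void hole_block_range(unsigned long long offset,
			     unsigned long long length,
			     unsigned int blkbits,
			     unsigned long long *first_blk,
			     unsigned long long *last_blk)
{
	unsigned long long blocksize = 1ULL << blkbits;

	/* first block lying fully inside the hole: round the start up */
	*first_blk = (offset + blocksize - 1) >> blkbits;
	/* first block past the hole: round the end down */
	*last_blk = (offset + length) >> blkbits;
}

int main(void)
{
	unsigned long long first, last;

	/* punch 10000 bytes at offset 5000 with 4 KiB blocks */
	hole_block_range(5000, 10000, 12, &first, &last);
	/* first = 2, last = 3: only block 2 can be deallocated,
	 * the partial head and tail must be zeroed in place */
	printf("first %llu last %llu\n", first, last);
	return 0;
}
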
 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                __u64 start, __u64 len)
 {
@@ -4042,4 +4361,3 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 
        return error;
 }
-
index 7b80d543b89e71c41764b54a5d6eee59992c712c..2c09723220091f7520b5a9c04622e1d788b9b1c4 100644 (file)
@@ -272,7 +272,6 @@ const struct file_operations ext4_file_operations = {
 };
 
 const struct inode_operations ext4_file_inode_operations = {
-       .truncate       = ext4_truncate,
        .setattr        = ext4_setattr,
        .getattr        = ext4_getattr,
 #ifdef CONFIG_EXT4_FS_XATTR
index e9473cbe80dfd00a7245e13f4de57229011a7bb2..ce66d2fe826cbdf99389dea50aa3f72240e911d9 100644 (file)
@@ -36,7 +36,7 @@
 
 static void dump_completed_IO(struct inode * inode)
 {
-#ifdef EXT4_DEBUG
+#ifdef EXT4FS_DEBUG
        struct list_head *cur, *before, *after;
        ext4_io_end_t *io, *io0, *io1;
        unsigned long flags;
@@ -172,6 +172,7 @@ int ext4_sync_file(struct file *file, int datasync)
        journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
        int ret;
        tid_t commit_tid;
+       bool needs_barrier = false;
 
        J_ASSERT(ext4_journal_current_handle() == NULL);
 
@@ -211,22 +212,12 @@ int ext4_sync_file(struct file *file, int datasync)
        }
 
        commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
-       if (jbd2_log_start_commit(journal, commit_tid)) {
-               /*
-                * When the journal is on a different device than the
-                * fs data disk, we need to issue the barrier in
-                * writeback mode.  (In ordered mode, the jbd2 layer
-                * will take care of issuing the barrier.  In
-                * data=journal, all of the data blocks are written to
-                * the journal device.)
-                */
-               if (ext4_should_writeback_data(inode) &&
-                   (journal->j_fs_dev != journal->j_dev) &&
-                   (journal->j_flags & JBD2_BARRIER))
-                       blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
-                                       NULL);
-               ret = jbd2_log_wait_commit(journal, commit_tid);
-       } else if (journal->j_flags & JBD2_BARRIER)
+       if (journal->j_flags & JBD2_BARRIER &&
+           !jbd2_trans_will_send_data_barrier(journal, commit_tid))
+               needs_barrier = true;
+       jbd2_log_start_commit(journal, commit_tid);
+       ret = jbd2_log_wait_commit(journal, commit_tid);
+       if (needs_barrier)
                blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
  out:
        trace_ext4_sync_file_exit(inode, ret);
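
The commit_tid selection kept above is what separates fsync(2) from fdatasync(2) on ext4: a data-only sync may wait on an older transaction when no inode metadata changed. A trivial user-space illustration (the path is an example):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char msg[] = "hello\n";
	int fd = open("/tmp/example.log", O_WRONLY | O_CREAT | O_APPEND, 0644);

	if (fd < 0)
		return 1;
	if (write(fd, msg, strlen(msg)) < 0)
		perror("write");
	/* fdatasync() flushes the data (and any metadata needed to read it
	 * back); fsync() would also force out timestamps and other metadata. */
	if (fdatasync(fd) < 0)
		perror("fdatasync");
	close(fd);
	return 0;
}
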
index f2fa5e8a582caf92dba32df3cfad714920a85721..50d0e9c645845a4001e4da390318fa0c9e6b45b9 100644 (file)
@@ -639,8 +639,8 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
        while (target > 0) {
                count = target;
                /* allocating blocks for indirect blocks and direct blocks */
-               current_block = ext4_new_meta_blocks(handle, inode,
-                                                       goal, &count, err);
+               current_block = ext4_new_meta_blocks(handle, inode, goal,
+                                                    0, &count, err);
                if (*err)
                        goto failed_out;
 
@@ -1930,7 +1930,7 @@ repeat:
         * We do still charge estimated metadata to the sb though;
         * we cannot afford to run out of free blocks.
         */
-       if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
+       if (ext4_claim_free_blocks(sbi, md_needed + 1, 0)) {
                dquot_release_reservation_block(inode, 1);
                if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
                        yield();
@@ -2796,9 +2796,7 @@ static int write_cache_pages_da(struct address_space *mapping,
                                continue;
                        }
 
-                       if (PageWriteback(page))
-                               wait_on_page_writeback(page);
-
+                       wait_on_page_writeback(page);
                        BUG_ON(PageWriteback(page));
 
                        if (mpd->next_page != page->index)
@@ -3513,7 +3511,7 @@ retry:
                        loff_t end = offset + iov_length(iov, nr_segs);
 
                        if (end > isize)
-                               vmtruncate(inode, isize);
+                               ext4_truncate_failed_write(inode);
                }
        }
        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -3915,10 +3913,31 @@ void ext4_set_aops(struct inode *inode)
  */
 int ext4_block_truncate_page(handle_t *handle,
                struct address_space *mapping, loff_t from)
+{
+       unsigned offset = from & (PAGE_CACHE_SIZE-1);
+       unsigned length;
+       unsigned blocksize;
+       struct inode *inode = mapping->host;
+
+       blocksize = inode->i_sb->s_blocksize;
+       length = blocksize - (offset & (blocksize - 1));
+
+       return ext4_block_zero_page_range(handle, mapping, from, length);
+}
+
+/*
+ * ext4_block_zero_page_range() zeros out a mapping of length 'length'
+ * starting from file offset 'from'.  The range to be zeroed must
+ * be contained within one block.  If the specified range exceeds
+ * the end of the block, it will be shortened to the end of the block
+ * that corresponds to 'from'.
+ */
+int ext4_block_zero_page_range(handle_t *handle,
+               struct address_space *mapping, loff_t from, loff_t length)
 {
        ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
        unsigned offset = from & (PAGE_CACHE_SIZE-1);
-       unsigned blocksize, length, pos;
+       unsigned blocksize, max, pos;
        ext4_lblk_t iblock;
        struct inode *inode = mapping->host;
        struct buffer_head *bh;
@@ -3931,7 +3950,15 @@ int ext4_block_truncate_page(handle_t *handle,
                return -EINVAL;
 
        blocksize = inode->i_sb->s_blocksize;
-       length = blocksize - (offset & (blocksize - 1));
+       max = blocksize - (offset & (blocksize - 1));
+
+       /*
+        * correct length if it does not fall between
+        * 'from' and the end of the block
+        */
+       if (length > max || length < 0)
+               length = max;
+
        iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
 
        if (!page_has_buffers(page))
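
With this split, ext4_block_truncate_page() is simply the 'from to end of block' case of ext4_block_zero_page_range(), and an over-long length is clamped so it never crosses the block containing 'from'. Roughly (illustrative names and numbers):

#include <stdio.h>

/* Illustrative only: clamp a zeroing request so it never crosses the
 * end of the block that contains 'from', as the length check above does. */
static unsigned int clamp_zero_len(unsigned long long from,
				   unsigned int length,
				   unsigned int blocksize)
{
	unsigned int max = blocksize - (unsigned int)(from & (blocksize - 1));

	return length > max ? max : length;
}

int main(void)
{
	/* from = 5000 in a 4 KiB block: only 3192 bytes remain in that block */
	printf("%u\n", clamp_zero_len(5000, 10000, 4096));
	return 0;
}
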
@@ -4380,8 +4407,6 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
 
 int ext4_can_truncate(struct inode *inode)
 {
-       if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
-               return 0;
        if (S_ISREG(inode->i_mode))
                return 1;
        if (S_ISDIR(inode->i_mode))
@@ -4391,6 +4416,31 @@ int ext4_can_truncate(struct inode *inode)
        return 0;
 }
 
+/*
+ * ext4_punch_hole: punches a hole in a file by releasing the blocks
+ * associated with the given offset and length
+ *
+ * @inode:  File inode
+ * @offset: The offset where the hole will begin
+ * @len:    The length of the hole
+ *
+ * Returns: 0 on success or a negative error on failure
+ */
+
+int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+{
+       struct inode *inode = file->f_path.dentry->d_inode;
+       if (!S_ISREG(inode->i_mode))
+               return -ENOTSUPP;
+
+       if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+               /* TODO: Add support for non-extent hole punching */
+               return -ENOTSUPP;
+       }
+
+       return ext4_ext_punch_hole(file, offset, length);
+}
+
 /*
  * ext4_truncate()
  *
@@ -4617,7 +4667,7 @@ static int __ext4_get_inode_loc(struct inode *inode,
        /*
         * Figure out the offset within the block group inode table
         */
-       inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb));
+       inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
        inode_offset = ((inode->i_ino - 1) %
                        EXT4_INODES_PER_GROUP(sb));
        block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
@@ -5311,8 +5361,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 
        if (S_ISREG(inode->i_mode) &&
            attr->ia_valid & ATTR_SIZE &&
-           (attr->ia_size < inode->i_size ||
-            (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))) {
+           (attr->ia_size < inode->i_size)) {
                handle_t *handle;
 
                handle = ext4_journal_start(inode, 3);
@@ -5346,14 +5395,15 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
                                goto err_out;
                        }
                }
-               /* ext4_truncate will clear the flag */
-               if ((ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))
-                       ext4_truncate(inode);
        }
 
-       if ((attr->ia_valid & ATTR_SIZE) &&
-           attr->ia_size != i_size_read(inode))
-               rc = vmtruncate(inode, attr->ia_size);
+       if (attr->ia_valid & ATTR_SIZE) {
+               if (attr->ia_size != i_size_read(inode)) {
+                       truncate_setsize(inode, attr->ia_size);
+                       ext4_truncate(inode);
+               } else if (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
+                       ext4_truncate(inode);
+       }
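
For comparison, this is the path a user-space ftruncate(2) ends up driving: ->setattr with ATTR_SIZE, which now calls truncate_setsize() and then ext4_truncate(). A trivial illustration (path and size are examples):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/example.dat", O_WRONLY | O_CREAT, 0644);

	if (fd < 0)
		return 1;
	/* Shrinking (or growing) the file goes through ->setattr with
	 * ATTR_SIZE, which on ext4 now means truncate_setsize() followed
	 * by ext4_truncate(). */
	if (ftruncate(fd, 4096) < 0)
		perror("ftruncate");
	close(fd);
	return 0;
}
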
 
        if (!rc) {
                setattr_copy(inode, attr);
@@ -5811,15 +5861,19 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
                goto out_unlock;
        }
        ret = 0;
-       if (PageMappedToDisk(page))
-               goto out_unlock;
+
+       lock_page(page);
+       wait_on_page_writeback(page);
+       if (PageMappedToDisk(page)) {
+               up_read(&inode->i_alloc_sem);
+               return VM_FAULT_LOCKED;
+       }
 
        if (page->index == size >> PAGE_CACHE_SHIFT)
                len = size & ~PAGE_CACHE_MASK;
        else
                len = PAGE_CACHE_SIZE;
 
-       lock_page(page);
        /*
         * return if we have all the buffers mapped. This avoid
         * the need to call write_begin/write_end which does a
@@ -5829,8 +5883,8 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (page_has_buffers(page)) {
                if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
                                        ext4_bh_unmapped)) {
-                       unlock_page(page);
-                       goto out_unlock;
+                       up_read(&inode->i_alloc_sem);
+                       return VM_FAULT_LOCKED;
                }
        }
        unlock_page(page);
@@ -5850,6 +5904,16 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (ret < 0)
                goto out_unlock;
        ret = 0;
+
+       /*
+        * write_begin/end might have created a dirty page and someone
+        * could wander in and start the IO.  Make sure that hasn't
+        * happened.
+        */
+       lock_page(page);
+       wait_on_page_writeback(page);
+       up_read(&inode->i_alloc_sem);
+       return VM_FAULT_LOCKED;
 out_unlock:
        if (ret)
                ret = VM_FAULT_SIGBUS;
index d8a16eecf1d55748f59c6b66efe8cdf46b589803..859f2ae8864e6af2b85dc62135f7f89cb0258d4f 100644 (file)
@@ -787,6 +787,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
        struct inode *inode;
        char *data;
        char *bitmap;
+       struct ext4_group_info *grinfo;
 
        mb_debug(1, "init page %lu\n", page->index);
 
@@ -819,6 +820,18 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
                if (first_group + i >= ngroups)
                        break;
 
+               grinfo = ext4_get_group_info(sb, first_group + i);
+               /*
+                * If page is uptodate then we came here after online resize
+                * which added some new uninitialized group info structs, so
+                * we must skip all initialized uptodate buddies on the page,
+                * which may be currently in use by an allocating task.
+                */
+               if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
+                       bh[i] = NULL;
+                       continue;
+               }
+
                err = -EIO;
                desc = ext4_get_group_desc(sb, first_group + i, NULL);
                if (desc == NULL)
@@ -871,26 +884,28 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
        }
 
        /* wait for I/O completion */
-       for (i = 0; i < groups_per_page && bh[i]; i++)
-               wait_on_buffer(bh[i]);
+       for (i = 0; i < groups_per_page; i++)
+               if (bh[i])
+                       wait_on_buffer(bh[i]);
 
        err = -EIO;
-       for (i = 0; i < groups_per_page && bh[i]; i++)
-               if (!buffer_uptodate(bh[i]))
+       for (i = 0; i < groups_per_page; i++)
+               if (bh[i] && !buffer_uptodate(bh[i]))
                        goto out;
 
        err = 0;
        first_block = page->index * blocks_per_page;
-       /* init the page  */
-       memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
        for (i = 0; i < blocks_per_page; i++) {
                int group;
-               struct ext4_group_info *grinfo;
 
                group = (first_block + i) >> 1;
                if (group >= ngroups)
                        break;
 
+               if (!bh[group - first_group])
+                       /* skip initialized uptodate buddy */
+                       continue;
+
                /*
                 * data carry information regarding this
                 * particular group in the format specified
@@ -919,6 +934,8 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
                         * incore got set to the group block bitmap below
                         */
                        ext4_lock_group(sb, group);
+                       /* init the buddy */
+                       memset(data, 0xff, blocksize);
                        ext4_mb_generate_buddy(sb, data, incore, group);
                        ext4_unlock_group(sb, group);
                        incore = NULL;
@@ -948,7 +965,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
 
 out:
        if (bh) {
-               for (i = 0; i < groups_per_page && bh[i]; i++)
+               for (i = 0; i < groups_per_page; i++)
                        brelse(bh[i]);
                if (bh != &bhs)
                        kfree(bh);
@@ -957,22 +974,21 @@ out:
 }
 
 /*
- * lock the group_info alloc_sem of all the groups
- * belonging to the same buddy cache page. This
- * make sure other parallel operation on the buddy
- * cache doesn't happen  whild holding the buddy cache
- * lock
+ * Lock the buddy and bitmap pages. This makes sure other parallel init_group
+ * on the same buddy page doesn't happen while holding the buddy page lock.
+ * Return the locked buddy and bitmap pages in the e4b struct. If the buddy and
+ * bitmap are on the same page, e4b->bd_buddy_page is NULL and the return value
+ * is 0.
  */
-static int ext4_mb_get_buddy_cache_lock(struct super_block *sb,
-                                       ext4_group_t group)
+static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
+               ext4_group_t group, struct ext4_buddy *e4b)
 {
-       int i;
-       int block, pnum;
+       struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
+       int block, pnum, poff;
        int blocks_per_page;
-       int groups_per_page;
-       ext4_group_t ngroups = ext4_get_groups_count(sb);
-       ext4_group_t first_group;
-       struct ext4_group_info *grp;
+       struct page *page;
+
+       e4b->bd_buddy_page = NULL;
+       e4b->bd_bitmap_page = NULL;
 
        blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
        /*
@@ -982,57 +998,40 @@ static int ext4_mb_get_buddy_cache_lock(struct super_block *sb,
         */
        block = group * 2;
        pnum = block / blocks_per_page;
-       first_group = pnum * blocks_per_page / 2;
-
-       groups_per_page = blocks_per_page >> 1;
-       if (groups_per_page == 0)
-               groups_per_page = 1;
-       /* read all groups the page covers into the cache */
-       for (i = 0; i < groups_per_page; i++) {
+       poff = block % blocks_per_page;
+       page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
+       if (!page)
+               return -EIO;
+       BUG_ON(page->mapping != inode->i_mapping);
+       e4b->bd_bitmap_page = page;
+       e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
 
-               if ((first_group + i) >= ngroups)
-                       break;
-               grp = ext4_get_group_info(sb, first_group + i);
-               /* take all groups write allocation
-                * semaphore. This make sure there is
-                * no block allocation going on in any
-                * of that groups
-                */
-               down_write_nested(&grp->alloc_sem, i);
+       if (blocks_per_page >= 2) {
+               /* buddy and bitmap are on the same page */
+               return 0;
        }
-       return i;
+
+       block++;
+       pnum = block / blocks_per_page;
+       poff = block % blocks_per_page;
+       page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
+       if (!page)
+               return -EIO;
+       BUG_ON(page->mapping != inode->i_mapping);
+       e4b->bd_buddy_page = page;
+       return 0;
 }
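
Since the buddy-cache inode keeps group N's block bitmap in block 2*N and its buddy data in block 2*N + 1, the page number and in-page offset used above follow directly from the blocks-per-page ratio. A small sketch of that mapping (names and numbers are illustrative):

#include <stdio.h>

/* Illustrative only: locate group 'group''s bitmap block (want_buddy = 0)
 * or buddy block (want_buddy = 1) inside the buddy-cache address space. */
static void buddy_cache_locate(unsigned int group, int want_buddy,
			       unsigned int blocks_per_page,
			       unsigned long *pnum, unsigned int *poff)
{
	unsigned long block = (unsigned long)group * 2 + (want_buddy ? 1 : 0);

	*pnum = block / blocks_per_page;   /* page index in the buddy cache */
	*poff = block % blocks_per_page;   /* block offset within that page */
}

int main(void)
{
	unsigned long pnum;
	unsigned int poff;

	/* 4 KiB pages, 1 KiB blocks => 4 blocks per page */
	buddy_cache_locate(5, 1, 4, &pnum, &poff);
	printf("buddy of group 5: page %lu offset %u\n", pnum, poff); /* 2, 3 */
	return 0;
}
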
 
-static void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
-                                        ext4_group_t group, int locked_group)
+static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
 {
-       int i;
-       int block, pnum;
-       int blocks_per_page;
-       ext4_group_t first_group;
-       struct ext4_group_info *grp;
-
-       blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
-       /*
-        * the buddy cache inode stores the block bitmap
-        * and buddy information in consecutive blocks.
-        * So for each group we need two blocks.
-        */
-       block = group * 2;
-       pnum = block / blocks_per_page;
-       first_group = pnum * blocks_per_page / 2;
-       /* release locks on all the groups */
-       for (i = 0; i < locked_group; i++) {
-
-               grp = ext4_get_group_info(sb, first_group + i);
-               /* take all groups write allocation
-                * semaphore. This make sure there is
-                * no block allocation going on in any
-                * of that groups
-                */
-               up_write(&grp->alloc_sem);
+       if (e4b->bd_bitmap_page) {
+               unlock_page(e4b->bd_bitmap_page);
+               page_cache_release(e4b->bd_bitmap_page);
+       }
+       if (e4b->bd_buddy_page) {
+               unlock_page(e4b->bd_buddy_page);
+               page_cache_release(e4b->bd_buddy_page);
        }
-
 }
 
 /*
@@ -1044,93 +1043,60 @@ static noinline_for_stack
 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
 {
 
-       int ret = 0;
-       void *bitmap;
-       int blocks_per_page;
-       int block, pnum, poff;
-       int num_grp_locked = 0;
        struct ext4_group_info *this_grp;
-       struct ext4_sb_info *sbi = EXT4_SB(sb);
-       struct inode *inode = sbi->s_buddy_cache;
-       struct page *page = NULL, *bitmap_page = NULL;
+       struct ext4_buddy e4b;
+       struct page *page;
+       int ret = 0;
 
        mb_debug(1, "init group %u\n", group);
-       blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
        this_grp = ext4_get_group_info(sb, group);
        /*
         * This ensures that we don't reinit the buddy cache
         * page which map to the group from which we are already
         * allocating. If we are looking at the buddy cache we would
         * have taken a reference using ext4_mb_load_buddy and that
-        * would have taken the alloc_sem lock.
+        * would have pinned buddy page to page cache.
         */
-       num_grp_locked =  ext4_mb_get_buddy_cache_lock(sb, group);
-       if (!EXT4_MB_GRP_NEED_INIT(this_grp)) {
+       ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b);
+       if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
                /*
                 * somebody initialized the group
                 * return without doing anything
                 */
-               ret = 0;
                goto err;
        }
-       /*
-        * the buddy cache inode stores the block bitmap
-        * and buddy information in consecutive blocks.
-        * So for each group we need two blocks.
-        */
-       block = group * 2;
-       pnum = block / blocks_per_page;
-       poff = block % blocks_per_page;
-       page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
-       if (page) {
-               BUG_ON(page->mapping != inode->i_mapping);
-               ret = ext4_mb_init_cache(page, NULL);
-               if (ret) {
-                       unlock_page(page);
-                       goto err;
-               }
-               unlock_page(page);
-       }
-       if (page == NULL || !PageUptodate(page)) {
+
+       page = e4b.bd_bitmap_page;
+       ret = ext4_mb_init_cache(page, NULL);
+       if (ret)
+               goto err;
+       if (!PageUptodate(page)) {
                ret = -EIO;
                goto err;
        }
        mark_page_accessed(page);
-       bitmap_page = page;
-       bitmap = page_address(page) + (poff * sb->s_blocksize);
 
-       /* init buddy cache */
-       block++;
-       pnum = block / blocks_per_page;
-       poff = block % blocks_per_page;
-       page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
-       if (page == bitmap_page) {
+       if (e4b.bd_buddy_page == NULL) {
                /*
                 * If both the bitmap and buddy are in
                 * the same page we don't need to force
                 * init the buddy
                 */
-               unlock_page(page);
-       } else if (page) {
-               BUG_ON(page->mapping != inode->i_mapping);
-               ret = ext4_mb_init_cache(page, bitmap);
-               if (ret) {
-                       unlock_page(page);
-                       goto err;
-               }
-               unlock_page(page);
+               ret = 0;
+               goto err;
        }
-       if (page == NULL || !PageUptodate(page)) {
+       /* init buddy cache */
+       page = e4b.bd_buddy_page;
+       ret = ext4_mb_init_cache(page, e4b.bd_bitmap);
+       if (ret)
+               goto err;
+       if (!PageUptodate(page)) {
                ret = -EIO;
                goto err;
        }
        mark_page_accessed(page);
 err:
-       ext4_mb_put_buddy_cache_lock(sb, group, num_grp_locked);
-       if (bitmap_page)
-               page_cache_release(bitmap_page);
-       if (page)
-               page_cache_release(page);
+       ext4_mb_put_buddy_page_lock(&e4b);
        return ret;
 }
 
@@ -1164,24 +1130,8 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
        e4b->bd_group = group;
        e4b->bd_buddy_page = NULL;
        e4b->bd_bitmap_page = NULL;
-       e4b->alloc_semp = &grp->alloc_sem;
-
-       /* Take the read lock on the group alloc
-        * sem. This would make sure a parallel
-        * ext4_mb_init_group happening on other
-        * groups mapped by the page is blocked
-        * till we are done with allocation
-        */
-repeat_load_buddy:
-       down_read(e4b->alloc_semp);
 
        if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
-               /* we need to check for group need init flag
-                * with alloc_semp held so that we can be sure
-                * that new blocks didn't get added to the group
-                * when we are loading the buddy cache
-                */
-               up_read(e4b->alloc_semp);
                /*
                 * we need full data about the group
                 * to make a good selection
@@ -1189,7 +1139,6 @@ repeat_load_buddy:
                ret = ext4_mb_init_group(sb, group);
                if (ret)
                        return ret;
-               goto repeat_load_buddy;
        }
 
        /*
@@ -1273,15 +1222,14 @@ repeat_load_buddy:
        return 0;
 
 err:
+       if (page)
+               page_cache_release(page);
        if (e4b->bd_bitmap_page)
                page_cache_release(e4b->bd_bitmap_page);
        if (e4b->bd_buddy_page)
                page_cache_release(e4b->bd_buddy_page);
        e4b->bd_buddy = NULL;
        e4b->bd_bitmap = NULL;
-
-       /* Done with the buddy cache */
-       up_read(e4b->alloc_semp);
        return ret;
 }
 
@@ -1291,9 +1239,6 @@ static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
                page_cache_release(e4b->bd_bitmap_page);
        if (e4b->bd_buddy_page)
                page_cache_release(e4b->bd_buddy_page);
-       /* Done with the buddy cache */
-       if (e4b->alloc_semp)
-               up_read(e4b->alloc_semp);
 }
 
 
@@ -1606,9 +1551,6 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
        get_page(ac->ac_bitmap_page);
        ac->ac_buddy_page = e4b->bd_buddy_page;
        get_page(ac->ac_buddy_page);
-       /* on allocation we use ac to track the held semaphore */
-       ac->alloc_semp =  e4b->alloc_semp;
-       e4b->alloc_semp = NULL;
        /* store last allocated for subsequent stream allocation */
        if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
                spin_lock(&sbi->s_md_lock);
@@ -2659,7 +2601,7 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
        struct super_block *sb = journal->j_private;
        struct ext4_buddy e4b;
        struct ext4_group_info *db;
-       int err, ret, count = 0, count2 = 0;
+       int err, count = 0, count2 = 0;
        struct ext4_free_data *entry;
        struct list_head *l, *ltmp;
 
@@ -2669,15 +2611,9 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
                mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
                         entry->count, entry->group, entry);
 
-               if (test_opt(sb, DISCARD)) {
-                       ret = ext4_issue_discard(sb, entry->group,
-                                       entry->start_blk, entry->count);
-                       if (unlikely(ret == -EOPNOTSUPP)) {
-                               ext4_warning(sb, "discard not supported, "
-                                                "disabling");
-                               clear_opt(sb, DISCARD);
-                       }
-               }
+               if (test_opt(sb, DISCARD))
+                       ext4_issue_discard(sb, entry->group,
+                                          entry->start_blk, entry->count);
 
                err = ext4_mb_load_buddy(sb, entry->group, &e4b);
                /* we expect to find existing buddy because it's pinned */
@@ -4226,15 +4162,12 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac)
                        spin_unlock(&pa->pa_lock);
                }
        }
-       if (ac->alloc_semp)
-               up_read(ac->alloc_semp);
        if (pa) {
                /*
                 * We want to add the pa to the right bucket.
                 * Remove it from the list and while adding
                 * make sure the list to which we are adding
-                * doesn't grow big.  We need to release
-                * alloc_semp before calling ext4_mb_add_n_trim()
+                * doesn't grow big.
                 */
                if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) {
                        spin_lock(pa->pa_obj_lock);
@@ -4303,7 +4236,9 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
                 * there is enough free blocks to do block allocation
                 * and verify allocation doesn't exceed the quota limits.
                 */
-               while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) {
+               while (ar->len &&
+                       ext4_claim_free_blocks(sbi, ar->len, ar->flags)) {
+
                        /* let others to free the space */
                        yield();
                        ar->len = ar->len >> 1;
@@ -4313,9 +4248,15 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
                        return 0;
                }
                reserv_blks = ar->len;
-               while (ar->len && dquot_alloc_block(ar->inode, ar->len)) {
-                       ar->flags |= EXT4_MB_HINT_NOPREALLOC;
-                       ar->len--;
+               if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) {
+                       dquot_alloc_block_nofail(ar->inode, ar->len);
+               } else {
+                       while (ar->len &&
+                               dquot_alloc_block(ar->inode, ar->len)) {
+
+                               ar->flags |= EXT4_MB_HINT_NOPREALLOC;
+                               ar->len--;
+                       }
                }
                inquota = ar->len;
                if (ar->len == 0) {
@@ -4703,6 +4644,127 @@ error_return:
        return;
 }
 
+/**
+ * ext4_add_groupblocks() -- Add given blocks to an existing group
+ * @handle:                    handle to this transaction
+ * @sb:                                super block
+ * @block:                     start physical block to add to the block group
+ * @count:                     number of blocks to free
+ *
+ * This marks the blocks as free in the bitmap and buddy.
+ */
+void ext4_add_groupblocks(handle_t *handle, struct super_block *sb,
+                        ext4_fsblk_t block, unsigned long count)
+{
+       struct buffer_head *bitmap_bh = NULL;
+       struct buffer_head *gd_bh;
+       ext4_group_t block_group;
+       ext4_grpblk_t bit;
+       unsigned int i;
+       struct ext4_group_desc *desc;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_buddy e4b;
+       int err = 0, ret, blk_free_count;
+       ext4_grpblk_t blocks_freed;
+       struct ext4_group_info *grp;
+
+       ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1);
+
+       ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
+       grp = ext4_get_group_info(sb, block_group);
+       /*
+        * Check to see if we are freeing blocks across a group
+        * boundary.
+        */
+       if (bit + count > EXT4_BLOCKS_PER_GROUP(sb))
+               goto error_return;
+
+       bitmap_bh = ext4_read_block_bitmap(sb, block_group);
+       if (!bitmap_bh)
+               goto error_return;
+       desc = ext4_get_group_desc(sb, block_group, &gd_bh);
+       if (!desc)
+               goto error_return;
+
+       if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
+           in_range(ext4_inode_bitmap(sb, desc), block, count) ||
+           in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
+           in_range(block + count - 1, ext4_inode_table(sb, desc),
+                    sbi->s_itb_per_group)) {
+               ext4_error(sb, "Adding blocks in system zones - "
+                          "Block = %llu, count = %lu",
+                          block, count);
+               goto error_return;
+       }
+
+       BUFFER_TRACE(bitmap_bh, "getting write access");
+       err = ext4_journal_get_write_access(handle, bitmap_bh);
+       if (err)
+               goto error_return;
+
+       /*
+        * We are about to modify some metadata.  Call the journal APIs
+        * to unshare ->b_data if a currently-committing transaction is
+        * using it
+        */
+       BUFFER_TRACE(gd_bh, "get_write_access");
+       err = ext4_journal_get_write_access(handle, gd_bh);
+       if (err)
+               goto error_return;
+
+       for (i = 0, blocks_freed = 0; i < count; i++) {
+               BUFFER_TRACE(bitmap_bh, "clear bit");
+               if (!mb_test_bit(bit + i, bitmap_bh->b_data)) {
+                       ext4_error(sb, "bit already cleared for block %llu",
+                                  (ext4_fsblk_t)(block + i));
+                       BUFFER_TRACE(bitmap_bh, "bit already cleared");
+               } else {
+                       blocks_freed++;
+               }
+       }
+
+       err = ext4_mb_load_buddy(sb, block_group, &e4b);
+       if (err)
+               goto error_return;
+
+       /*
+        * need to update group_info->bb_free and bitmap
+        * with group lock held. generate_buddy look at
+        * them with group lock_held
+        */
+       ext4_lock_group(sb, block_group);
+       mb_clear_bits(bitmap_bh->b_data, bit, count);
+       mb_free_blocks(NULL, &e4b, bit, count);
+       blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
+       ext4_free_blks_set(sb, desc, blk_free_count);
+       desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
+       ext4_unlock_group(sb, block_group);
+       percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);
+
+       if (sbi->s_log_groups_per_flex) {
+               ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+               atomic_add(blocks_freed,
+                          &sbi->s_flex_groups[flex_group].free_blocks);
+       }
+
+       ext4_mb_unload_buddy(&e4b);
+
+       /* We dirtied the bitmap block */
+       BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
+       err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
+
+       /* And the group descriptor block */
+       BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
+       ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh);
+       if (!err)
+               err = ret;
+
+error_return:
+       brelse(bitmap_bh);
+       ext4_std_error(sb, err);
+       return;
+}
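
ext4_get_group_no_and_offset(), used at the top of this function, maps an absolute block number to its block group and to the bit inside that group's bitmap. Conceptually (a sketch only; first_data_block and blocks_per_group come from the superblock, the numbers are examples):

#include <stdio.h>

/* Illustrative only: how a physical block number maps to a
 * (block group, bit-in-bitmap) pair, in the spirit of
 * ext4_get_group_no_and_offset(). */
static void block_to_group_and_bit(unsigned long long block,
				   unsigned long long first_data_block,
				   unsigned long blocks_per_group,
				   unsigned long *group, unsigned long *bit)
{
	unsigned long long rel = block - first_data_block;

	*group = rel / blocks_per_group;
	*bit = rel % blocks_per_group;
}

int main(void)
{
	unsigned long group, bit;

	block_to_group_and_bit(100000, 0, 32768, &group, &bit);
	printf("block 100000 -> group %lu, bit %lu\n", group, bit); /* 3, 1696 */
	return 0;
}
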
+
 /**
  * ext4_trim_extent -- function to TRIM one single free extent in the group
  * @sb:                super block for the file system
@@ -4715,11 +4777,10 @@ error_return:
  * one will allocate those blocks, mark it as used in buddy bitmap. This must
  * be called with under the group lock.
  */
-static int ext4_trim_extent(struct super_block *sb, int start, int count,
-               ext4_group_t group, struct ext4_buddy *e4b)
+static void ext4_trim_extent(struct super_block *sb, int start, int count,
+                            ext4_group_t group, struct ext4_buddy *e4b)
 {
        struct ext4_free_extent ex;
-       int ret = 0;
 
        assert_spin_locked(ext4_group_lock_ptr(sb, group));
 
@@ -4733,12 +4794,9 @@ static int ext4_trim_extent(struct super_block *sb, int start, int count,
         */
        mb_mark_used(e4b, &ex);
        ext4_unlock_group(sb, group);
-
-       ret = ext4_issue_discard(sb, group, start, count);
-
+       ext4_issue_discard(sb, group, start, count);
        ext4_lock_group(sb, group);
        mb_free_blocks(NULL, e4b, start, ex.fe_len);
-       return ret;
 }
 
 /**
@@ -4760,21 +4818,26 @@ static int ext4_trim_extent(struct super_block *sb, int start, int count,
  * the group buddy bitmap. This is done until whole group is scanned.
  */
 static ext4_grpblk_t
-ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b,
-               ext4_grpblk_t start, ext4_grpblk_t max, ext4_grpblk_t minblocks)
+ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
+                  ext4_grpblk_t start, ext4_grpblk_t max,
+                  ext4_grpblk_t minblocks)
 {
        void *bitmap;
        ext4_grpblk_t next, count = 0;
-       ext4_group_t group;
-       int ret = 0;
+       struct ext4_buddy e4b;
+       int ret;
 
-       BUG_ON(e4b == NULL);
+       ret = ext4_mb_load_buddy(sb, group, &e4b);
+       if (ret) {
+               ext4_error(sb, "Error in loading buddy "
+                               "information for %u", group);
+               return ret;
+       }
+       bitmap = e4b.bd_bitmap;
 
-       bitmap = e4b->bd_bitmap;
-       group = e4b->bd_group;
-       start = (e4b->bd_info->bb_first_free > start) ?
-               e4b->bd_info->bb_first_free : start;
        ext4_lock_group(sb, group);
+       start = (e4b.bd_info->bb_first_free > start) ?
+               e4b.bd_info->bb_first_free : start;
 
        while (start < max) {
                start = mb_find_next_zero_bit(bitmap, max, start);
@@ -4783,10 +4846,8 @@ ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b,
                next = mb_find_next_bit(bitmap, max, start);
 
                if ((next - start) >= minblocks) {
-                       ret = ext4_trim_extent(sb, start,
-                               next - start, group, e4b);
-                       if (ret < 0)
-                               break;
+                       ext4_trim_extent(sb, start,
+                                        next - start, group, &e4b);
                        count += next - start;
                }
                start = next + 1;
@@ -4802,17 +4863,15 @@ ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b,
                        ext4_lock_group(sb, group);
                }
 
-               if ((e4b->bd_info->bb_free - count) < minblocks)
+               if ((e4b.bd_info->bb_free - count) < minblocks)
                        break;
        }
        ext4_unlock_group(sb, group);
+       ext4_mb_unload_buddy(&e4b);
 
        ext4_debug("trimmed %d blocks in the group %d\n",
                count, group);
 
-       if (ret < 0)
-               count = ret;
-
        return count;
 }
 
@@ -4830,11 +4889,11 @@ ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b,
  */
 int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
 {
-       struct ext4_buddy e4b;
+       struct ext4_group_info *grp;
        ext4_group_t first_group, last_group;
        ext4_group_t group, ngroups = ext4_get_groups_count(sb);
        ext4_grpblk_t cnt = 0, first_block, last_block;
-       uint64_t start, len, minlen, trimmed;
+       uint64_t start, len, minlen, trimmed = 0;
        ext4_fsblk_t first_data_blk =
                        le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
        int ret = 0;
@@ -4842,7 +4901,6 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
        start = range->start >> sb->s_blocksize_bits;
        len = range->len >> sb->s_blocksize_bits;
        minlen = range->minlen >> sb->s_blocksize_bits;
-       trimmed = 0;
 
        if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb)))
                return -EINVAL;
@@ -4863,11 +4921,12 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
                return -EINVAL;
 
        for (group = first_group; group <= last_group; group++) {
-               ret = ext4_mb_load_buddy(sb, group, &e4b);
-               if (ret) {
-                       ext4_error(sb, "Error in loading buddy "
-                                       "information for %u", group);
-                       break;
+               grp = ext4_get_group_info(sb, group);
+               /* We only do this if the grp has never been initialized */
+               if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
+                       ret = ext4_mb_init_group(sb, group);
+                       if (ret)
+                               break;
                }
 
                /*
@@ -4880,16 +4939,14 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
                        last_block = first_block + len;
                len -= last_block - first_block;
 
-               if (e4b.bd_info->bb_free >= minlen) {
-                       cnt = ext4_trim_all_free(sb, &e4b, first_block,
+               if (grp->bb_free >= minlen) {
+                       cnt = ext4_trim_all_free(sb, group, first_block,
                                                last_block, minlen);
                        if (cnt < 0) {
                                ret = cnt;
-                               ext4_mb_unload_buddy(&e4b);
                                break;
                        }
                }
-               ext4_mb_unload_buddy(&e4b);
                trimmed += cnt;
                first_block = 0;
        }
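
The restructured FITRIM path above has ext4_trim_all_free() load and release the buddy cache for its group itself, while ext4_trim_fs() decides per group whether the walk is worth doing at all: initialize the group's buddy metadata only if it has never been loaded, and skip the group unless grp->bb_free reaches the requested minimum. A minimal userspace sketch of that gating logic, assuming a made-up group summary (not the real struct ext4_group_info):

#include <stdio.h>

/* Hypothetical per-group summary standing in for struct ext4_group_info. */
struct group_info {
        int needs_init;            /* models EXT4_MB_GRP_NEED_INIT(grp) */
        unsigned int free_blocks;  /* models grp->bb_free               */
};

/* Decide whether a FITRIM pass should scan this group at all. */
static int group_worth_trimming(struct group_info *g, unsigned int minlen)
{
        if (g->needs_init)
                g->needs_init = 0;  /* the real code calls ext4_mb_init_group() */
        return g->free_blocks >= minlen;
}

int main(void)
{
        struct group_info groups[] = { { 0, 4096 }, { 1, 0 }, { 0, 3 } };
        unsigned int minlen = 16;

        for (unsigned int i = 0; i < sizeof(groups) / sizeof(groups[0]); i++)
                printf("group %u: %s\n", i,
                       group_worth_trimming(&groups[i], minlen) ? "scan" : "skip");
        return 0;
}
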
index 22bd4d7f289b834b277fb55ce92cefa019c9d354..20b5e7bfebd175e27db63f959c0d5d89a9f929b0 100644 (file)
@@ -193,11 +193,6 @@ struct ext4_allocation_context {
        __u8 ac_op;             /* operation, for history only */
        struct page *ac_bitmap_page;
        struct page *ac_buddy_page;
-       /*
-        * pointer to the held semaphore upon successful
-        * block allocation
-        */
-       struct rw_semaphore *alloc_semp;
        struct ext4_prealloc_space *ac_pa;
        struct ext4_locality_group *ac_lg;
 };
@@ -215,7 +210,6 @@ struct ext4_buddy {
        struct super_block *bd_sb;
        __u16 bd_blkbits;
        ext4_group_t bd_group;
-       struct rw_semaphore *alloc_semp;
 };
 #define EXT4_MB_BITMAP(e4b)    ((e4b)->bd_bitmap)
 #define EXT4_MB_BUDDY(e4b)     ((e4b)->bd_buddy)
index 92816b4e0f16a143f555d539cb9cb47abc7e1d48..b57b98fb44d1457ec9f98e60290d6da3a90fb6c8 100644 (file)
@@ -376,7 +376,7 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
         * We have the extent map build with the tmp inode.
         * Now copy the i_data across
         */
-       ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS);
+       ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
        memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));
 
        /*
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
new file mode 100644 (file)
index 0000000..9bdef3f
--- /dev/null
@@ -0,0 +1,351 @@
+#include <linux/fs.h>
+#include <linux/random.h>
+#include <linux/buffer_head.h>
+#include <linux/utsname.h>
+#include <linux/kthread.h>
+
+#include "ext4.h"
+
+/*
+ * Write the MMP block using WRITE_SYNC to try to get the block on-disk
+ * faster.
+ */
+static int write_mmp_block(struct buffer_head *bh)
+{
+       mark_buffer_dirty(bh);
+       lock_buffer(bh);
+       bh->b_end_io = end_buffer_write_sync;
+       get_bh(bh);
+       submit_bh(WRITE_SYNC, bh);
+       wait_on_buffer(bh);
+       if (unlikely(!buffer_uptodate(bh)))
+               return 1;
+
+       return 0;
+}
+
+/*
+ * Read the MMP block. It _must_ be read from disk and hence we clear the
+ * uptodate flag on the buffer.
+ */
+static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
+                         ext4_fsblk_t mmp_block)
+{
+       struct mmp_struct *mmp;
+
+       if (*bh)
+               clear_buffer_uptodate(*bh);
+
+       /* This would be sb_bread(sb, mmp_block), except we need to be sure
+        * that the MD RAID device cache has been bypassed, and that the read
+        * is not blocked in the elevator. */
+       if (!*bh)
+               *bh = sb_getblk(sb, mmp_block);
+       if (*bh) {
+               get_bh(*bh);
+               lock_buffer(*bh);
+               (*bh)->b_end_io = end_buffer_read_sync;
+               submit_bh(READ_SYNC, *bh);
+               wait_on_buffer(*bh);
+               if (!buffer_uptodate(*bh)) {
+                       brelse(*bh);
+                       *bh = NULL;
+               }
+       }
+       if (!*bh) {
+               ext4_warning(sb, "Error while reading MMP block %llu",
+                            mmp_block);
+               return -EIO;
+       }
+
+       mmp = (struct mmp_struct *)((*bh)->b_data);
+       if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC)
+               return -EINVAL;
+
+       return 0;
+}
+
+/*
+ * Dump as much information as possible to help the admin.
+ */
+void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
+                   const char *function, unsigned int line, const char *msg)
+{
+       __ext4_warning(sb, function, line, msg);
+       __ext4_warning(sb, function, line,
+                      "MMP failure info: last update time: %llu, last update "
+                      "node: %s, last update device: %s\n",
+                      (long long unsigned int) le64_to_cpu(mmp->mmp_time),
+                      mmp->mmp_nodename, mmp->mmp_bdevname);
+}
+
+/*
+ * kmmpd will update the MMP sequence every s_mmp_update_interval seconds
+ */
+static int kmmpd(void *data)
+{
+       struct super_block *sb = ((struct mmpd_data *) data)->sb;
+       struct buffer_head *bh = ((struct mmpd_data *) data)->bh;
+       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+       struct mmp_struct *mmp;
+       ext4_fsblk_t mmp_block;
+       u32 seq = 0;
+       unsigned long failed_writes = 0;
+       int mmp_update_interval = le16_to_cpu(es->s_mmp_update_interval);
+       unsigned mmp_check_interval;
+       unsigned long last_update_time;
+       unsigned long diff;
+       int retval;
+
+       mmp_block = le64_to_cpu(es->s_mmp_block);
+       mmp = (struct mmp_struct *)(bh->b_data);
+       mmp->mmp_time = cpu_to_le64(get_seconds());
+       /*
+        * Start with the higher mmp_check_interval and reduce it if
+        * the MMP block is being updated on time.
+        */
+       mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
+                                EXT4_MMP_MIN_CHECK_INTERVAL);
+       mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
+       bdevname(bh->b_bdev, mmp->mmp_bdevname);
+
+       memcpy(mmp->mmp_nodename, init_utsname()->sysname,
+              sizeof(mmp->mmp_nodename));
+
+       while (!kthread_should_stop()) {
+               if (++seq > EXT4_MMP_SEQ_MAX)
+                       seq = 1;
+
+               mmp->mmp_seq = cpu_to_le32(seq);
+               mmp->mmp_time = cpu_to_le64(get_seconds());
+               last_update_time = jiffies;
+
+               retval = write_mmp_block(bh);
+               /*
+                * Don't spew too many error messages. Print one every
+                * (s_mmp_update_interval * 60) seconds.
+                */
+               if (retval && (failed_writes % 60) == 0) {
+                       ext4_error(sb, "Error writing to MMP block");
+                       failed_writes++;
+               }
+
+               if (!(le32_to_cpu(es->s_feature_incompat) &
+                   EXT4_FEATURE_INCOMPAT_MMP)) {
+                       ext4_warning(sb, "kmmpd being stopped since MMP feature"
+                                    " has been disabled.");
+                       EXT4_SB(sb)->s_mmp_tsk = NULL;
+                       goto failed;
+               }
+
+               if (sb->s_flags & MS_RDONLY) {
+                       ext4_warning(sb, "kmmpd being stopped since filesystem "
+                                    "has been remounted as readonly.");
+                       EXT4_SB(sb)->s_mmp_tsk = NULL;
+                       goto failed;
+               }
+
+               diff = jiffies - last_update_time;
+               if (diff < mmp_update_interval * HZ)
+                       schedule_timeout_interruptible(mmp_update_interval *
+                                                      HZ - diff);
+
+               /*
+                * We need to make sure that more than mmp_check_interval
+                * seconds have not passed since writing. If that has happened
+                * we need to check if the MMP block is as we left it.
+                */
+               diff = jiffies - last_update_time;
+               if (diff > mmp_check_interval * HZ) {
+                       struct buffer_head *bh_check = NULL;
+                       struct mmp_struct *mmp_check;
+
+                       retval = read_mmp_block(sb, &bh_check, mmp_block);
+                       if (retval) {
+                               ext4_error(sb, "error reading MMP data: %d",
+                                          retval);
+
+                               EXT4_SB(sb)->s_mmp_tsk = NULL;
+                               goto failed;
+                       }
+
+                       mmp_check = (struct mmp_struct *)(bh_check->b_data);
+                       if (mmp->mmp_seq != mmp_check->mmp_seq ||
+                           memcmp(mmp->mmp_nodename, mmp_check->mmp_nodename,
+                                  sizeof(mmp->mmp_nodename))) {
+                               dump_mmp_msg(sb, mmp_check,
+                                            "Error while updating MMP info. "
+                                            "The filesystem seems to have been"
+                                            " multiply mounted.");
+                               ext4_error(sb, "abort");
+                               goto failed;
+                       }
+                       put_bh(bh_check);
+               }
+
+                /*
+                * Adjust the mmp_check_interval depending on how much time
+                * it took for the MMP block to be written.
+                */
+               mmp_check_interval = max(min(EXT4_MMP_CHECK_MULT * diff / HZ,
+                                            EXT4_MMP_MAX_CHECK_INTERVAL),
+                                        EXT4_MMP_MIN_CHECK_INTERVAL);
+               mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
+       }
+
+       /*
+        * Unmount seems to be clean.
+        */
+       mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN);
+       mmp->mmp_time = cpu_to_le64(get_seconds());
+
+       retval = write_mmp_block(bh);
+
+failed:
+       kfree(data);
+       brelse(bh);
+       return retval;
+}
+
+/*
+ * Get a random new sequence number but make sure it is not greater than
+ * EXT4_MMP_SEQ_MAX.
+ */
+static unsigned int mmp_new_seq(void)
+{
+       u32 new_seq;
+
+       do {
+               get_random_bytes(&new_seq, sizeof(u32));
+       } while (new_seq > EXT4_MMP_SEQ_MAX);
+
+       return new_seq;
+}
+
+/*
+ * Protect the filesystem from being mounted more than once.
+ */
+int ext4_multi_mount_protect(struct super_block *sb,
+                                   ext4_fsblk_t mmp_block)
+{
+       struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+       struct buffer_head *bh = NULL;
+       struct mmp_struct *mmp = NULL;
+       struct mmpd_data *mmpd_data;
+       u32 seq;
+       unsigned int mmp_check_interval = le16_to_cpu(es->s_mmp_update_interval);
+       unsigned int wait_time = 0;
+       int retval;
+
+       if (mmp_block < le32_to_cpu(es->s_first_data_block) ||
+           mmp_block >= ext4_blocks_count(es)) {
+               ext4_warning(sb, "Invalid MMP block in superblock");
+               goto failed;
+       }
+
+       retval = read_mmp_block(sb, &bh, mmp_block);
+       if (retval)
+               goto failed;
+
+       mmp = (struct mmp_struct *)(bh->b_data);
+
+       if (mmp_check_interval < EXT4_MMP_MIN_CHECK_INTERVAL)
+               mmp_check_interval = EXT4_MMP_MIN_CHECK_INTERVAL;
+
+       /*
+        * If check_interval in MMP block is larger, use that instead of
+        * update_interval from the superblock.
+        */
+       if (mmp->mmp_check_interval > mmp_check_interval)
+               mmp_check_interval = mmp->mmp_check_interval;
+
+       seq = le32_to_cpu(mmp->mmp_seq);
+       if (seq == EXT4_MMP_SEQ_CLEAN)
+               goto skip;
+
+       if (seq == EXT4_MMP_SEQ_FSCK) {
+               dump_mmp_msg(sb, mmp, "fsck is running on the filesystem");
+               goto failed;
+       }
+
+       wait_time = min(mmp_check_interval * 2 + 1,
+                       mmp_check_interval + 60);
+
+       /* Print MMP interval if more than 20 secs. */
+       if (wait_time > EXT4_MMP_MIN_CHECK_INTERVAL * 4)
+               ext4_warning(sb, "MMP interval %u higher than expected, please"
+                            " wait.\n", wait_time * 2);
+
+       if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
+               ext4_warning(sb, "MMP startup interrupted, failing mount\n");
+               goto failed;
+       }
+
+       retval = read_mmp_block(sb, &bh, mmp_block);
+       if (retval)
+               goto failed;
+       mmp = (struct mmp_struct *)(bh->b_data);
+       if (seq != le32_to_cpu(mmp->mmp_seq)) {
+               dump_mmp_msg(sb, mmp,
+                            "Device is already active on another node.");
+               goto failed;
+       }
+
+skip:
+       /*
+        * write a new random sequence number.
+        */
+       mmp->mmp_seq = seq = cpu_to_le32(mmp_new_seq());
+
+       retval = write_mmp_block(bh);
+       if (retval)
+               goto failed;
+
+       /*
+        * wait for MMP interval and check mmp_seq.
+        */
+       if (schedule_timeout_interruptible(HZ * wait_time) != 0) {
+               ext4_warning(sb, "MMP startup interrupted, failing mount\n");
+               goto failed;
+       }
+
+       retval = read_mmp_block(sb, &bh, mmp_block);
+       if (retval)
+               goto failed;
+       mmp = (struct mmp_struct *)(bh->b_data);
+       if (seq != le32_to_cpu(mmp->mmp_seq)) {
+               dump_mmp_msg(sb, mmp,
+                            "Device is already active on another node.");
+               goto failed;
+       }
+
+       mmpd_data = kmalloc(sizeof(struct mmpd_data), GFP_KERNEL);
+       if (!mmpd_data) {
+               ext4_warning(sb, "not enough memory for mmpd_data");
+               goto failed;
+       }
+       mmpd_data->sb = sb;
+       mmpd_data->bh = bh;
+
+       /*
+        * Start a kernel thread to update the MMP block periodically.
+        */
+       EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%s",
+                                            bdevname(bh->b_bdev,
+                                                     mmp->mmp_bdevname));
+       if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
+               EXT4_SB(sb)->s_mmp_tsk = NULL;
+               kfree(mmpd_data);
+               ext4_warning(sb, "Unable to create kmmpd thread for %s.",
+                            sb->s_id);
+               goto failed;
+       }
+
+       return 0;
+
+failed:
+       brelse(bh);
+       return 1;
+}
+
+
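
The new fs/ext4/mmp.c above implements a simple liveness handshake: the mounting node samples mmp_seq, waits min(2 * check_interval + 1, check_interval + 60) seconds, samples again, and refuses the mount if the sequence moved (another node's kmmpd is still updating the block). The real code then writes a fresh random sequence and re-checks once more before starting its own kmmpd. Below is a minimal userspace sketch of the handshake only, with disk I/O and the wait replaced by stubs; the constants and the "remote kmmpd" flag are illustrative.

#include <stdio.h>

#define MMP_SEQ_CLEAN 0xFF4D4D50u   /* stands in for EXT4_MMP_SEQ_CLEAN */
#define MMP_SEQ_FSCK  0xE24D4D50u   /* stands in for EXT4_MMP_SEQ_FSCK  */

struct mmp_disk {
        unsigned int seq;
        int remote_kmmpd;   /* simulates another node bumping the sequence */
};

/* Stand-in for read_mmp_block(): returns what is currently "on disk". */
static unsigned int read_seq(struct mmp_disk *d)
{
        if (d->remote_kmmpd)
                d->seq++;   /* the other node updated the block meanwhile */
        return d->seq;
}

/* Mount-time check: clean/fsck markers short-circuit, otherwise the
 * sequence must be unchanged across one check interval. */
static int may_mount(struct mmp_disk *d)
{
        unsigned int seq = read_seq(d);

        if (seq == MMP_SEQ_CLEAN)
                return 1;
        if (seq == MMP_SEQ_FSCK)
                return 0;
        /* ... schedule_timeout_interruptible(HZ * wait_time) ... */
        return read_seq(d) == seq;
}

int main(void)
{
        struct mmp_disk clean = { MMP_SEQ_CLEAN, 0 };
        struct mmp_disk contested = { 5, 1 };
        struct mmp_disk stale = { 5, 0 };

        printf("clean:     %s\n", may_mount(&clean)     ? "mount" : "refuse");
        printf("contested: %s\n", may_mount(&contested) ? "mount" : "refuse");
        printf("stale:     %s\n", may_mount(&stale)     ? "mount" : "refuse");
        return 0;
}
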
index b9f3e7862f13834b2c166b21a1b96d3e36dde428..2b8304bf3c50e1d1363287e79a66d5641fccceaa 100644 (file)
@@ -876,8 +876,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
         * It needs to call wait_on_page_writeback() to wait for the
         * writeback of the page.
         */
-       if (PageWriteback(page))
-               wait_on_page_writeback(page);
+       wait_on_page_writeback(page);
 
        /* Release old bh and drop refs */
        try_to_release_page(page, 0);
index 67fd0b0258589ae64428d26530807b898e79854b..b754b7721f51fea4dd943a68e436fe5677f301a0 100644 (file)
@@ -1413,10 +1413,22 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
        frame->at = entries;
        frame->bh = bh;
        bh = bh2;
+
+       ext4_handle_dirty_metadata(handle, dir, frame->bh);
+       ext4_handle_dirty_metadata(handle, dir, bh);
+
        de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
-       dx_release (frames);
-       if (!(de))
+       if (!de) {
+               /*
+                * Even if the block split failed, we have to properly write
+                * out all the changes we did so far. Otherwise we can end up
+                * with corrupted filesystem.
+                */
+               ext4_mark_inode_dirty(handle, dir);
+               dx_release(frames);
                return retval;
+       }
+       dx_release(frames);
 
        retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
        brelse(bh);
@@ -2240,6 +2252,7 @@ static int ext4_symlink(struct inode *dir,
        handle_t *handle;
        struct inode *inode;
        int l, err, retries = 0;
+       int credits;
 
        l = strlen(symname)+1;
        if (l > dir->i_sb->s_blocksize)
@@ -2247,10 +2260,26 @@ static int ext4_symlink(struct inode *dir,
 
        dquot_initialize(dir);
 
+       if (l > EXT4_N_BLOCKS * 4) {
+               /*
+                * For non-fast symlinks, we just allocate inode and put it on
+                * orphan list in the first transaction => we need bitmap,
+                * group descriptor, sb, inode block, quota blocks.
+                */
+               credits = 4 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb);
+       } else {
+               /*
+                * Fast symlink. We have to add entry to directory
+                * (EXT4_DATA_TRANS_BLOCKS + EXT4_INDEX_EXTRA_TRANS_BLOCKS),
+                * allocate new inode (bitmap, group descriptor, inode block,
+                * quota blocks, sb is already counted in previous macros).
+                */
+               credits = EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
+                         EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
+                         EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb);
+       }
 retry:
-       handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
-                                       EXT4_INDEX_EXTRA_TRANS_BLOCKS + 5 +
-                                       EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
+       handle = ext4_journal_start(dir, credits);
        if (IS_ERR(handle))
                return PTR_ERR(handle);
 
@@ -2263,21 +2292,44 @@ retry:
        if (IS_ERR(inode))
                goto out_stop;
 
-       if (l > sizeof(EXT4_I(inode)->i_data)) {
+       if (l > EXT4_N_BLOCKS * 4) {
                inode->i_op = &ext4_symlink_inode_operations;
                ext4_set_aops(inode);
                /*
-                * page_symlink() calls into ext4_prepare/commit_write.
-                * We have a transaction open.  All is sweetness.  It also sets
-                * i_size in generic_commit_write().
+                * We cannot call page_symlink() with transaction started
+                * because it calls into ext4_write_begin() which can wait
+                * for transaction commit if we are running out of space
+                * and thus we deadlock. So we have to stop transaction now
+                * and restart it when the symlink contents are written.
+                *
+                * To keep the fs consistent in case of a crash, we have to
+                * put the inode on the orphan list in the meantime.
                 */
+               drop_nlink(inode);
+               err = ext4_orphan_add(handle, inode);
+               ext4_journal_stop(handle);
+               if (err)
+                       goto err_drop_inode;
                err = __page_symlink(inode, symname, l, 1);
+               if (err)
+                       goto err_drop_inode;
+               /*
+                * Now inode is being linked into dir (EXT4_DATA_TRANS_BLOCKS
+                * + EXT4_INDEX_EXTRA_TRANS_BLOCKS), inode is also modified
+                */
+               handle = ext4_journal_start(dir,
+                               EXT4_DATA_TRANS_BLOCKS(dir->i_sb) +
+                               EXT4_INDEX_EXTRA_TRANS_BLOCKS + 1);
+               if (IS_ERR(handle)) {
+                       err = PTR_ERR(handle);
+                       goto err_drop_inode;
+               }
+               inc_nlink(inode);
+               err = ext4_orphan_del(handle, inode);
                if (err) {
+                       ext4_journal_stop(handle);
                        clear_nlink(inode);
-                       unlock_new_inode(inode);
-                       ext4_mark_inode_dirty(handle, inode);
-                       iput(inode);
-                       goto out_stop;
+                       goto err_drop_inode;
                }
        } else {
                /* clear the extent format for fast symlink */
@@ -2293,6 +2345,10 @@ out_stop:
        if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries))
                goto retry;
        return err;
+err_drop_inode:
+       unlock_new_inode(inode);
+       iput(inode);
+       return err;
 }
 
 static int ext4_link(struct dentry *old_dentry,
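
The symlink hunks above size the initial transaction by symlink type: targets longer than EXT4_N_BLOCKS * 4 bytes only allocate the inode and park it on the orphan list (the target itself is written with the transaction stopped), while fast symlinks add the directory entry and the new inode in a single transaction. A small sketch of the credit arithmetic; the macro values below are placeholders, since the real ones depend on block size, quota and journaling configuration.

#include <stdio.h>

/* Placeholder values only -- the real macros are derived from the sb. */
#define EXT4_N_BLOCKS                  15
#define EXT4_DATA_TRANS_BLOCKS          8
#define EXT4_INDEX_EXTRA_TRANS_BLOCKS  12
#define EXT4_MAXQUOTAS_INIT_BLOCKS      6

static int symlink_credits(size_t target_len_with_nul)
{
        if (target_len_with_nul > EXT4_N_BLOCKS * 4)
                /* long symlink: inode allocation plus orphan-list add in the
                 * first transaction -- bitmap, group descriptor, sb, inode
                 * block, quota blocks. */
                return 4 + EXT4_MAXQUOTAS_INIT_BLOCKS;

        /* fast symlink: directory entry + new inode in one transaction */
        return EXT4_DATA_TRANS_BLOCKS + EXT4_INDEX_EXTRA_TRANS_BLOCKS +
               3 + EXT4_MAXQUOTAS_INIT_BLOCKS;
}

int main(void)
{
        printf("fast symlink credits: %d\n", symlink_credits(20));
        printf("long symlink credits: %d\n", symlink_credits(200));
        return 0;
}
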
index b6dbd056fcb1d7f532f428e34cae4ef5248680ce..7bb8f76d470a019dd3801c6b9d8e6d76825f96c2 100644 (file)
@@ -203,46 +203,29 @@ static void ext4_end_bio(struct bio *bio, int error)
        for (i = 0; i < io_end->num_io_pages; i++) {
                struct page *page = io_end->pages[i]->p_page;
                struct buffer_head *bh, *head;
-               int partial_write = 0;
+               loff_t offset;
+               loff_t io_end_offset;
 
-               head = page_buffers(page);
-               if (error)
+               if (error) {
                        SetPageError(page);
-               BUG_ON(!head);
-               if (head->b_size != PAGE_CACHE_SIZE) {
-                       loff_t offset;
-                       loff_t io_end_offset = io_end->offset + io_end->size;
+                       set_bit(AS_EIO, &page->mapping->flags);
+                       head = page_buffers(page);
+                       BUG_ON(!head);
+
+                       io_end_offset = io_end->offset + io_end->size;
 
                        offset = (sector_t) page->index << PAGE_CACHE_SHIFT;
                        bh = head;
                        do {
                                if ((offset >= io_end->offset) &&
-                                   (offset+bh->b_size <= io_end_offset)) {
-                                       if (error)
-                                               buffer_io_error(bh);
-
-                               }
-                               if (buffer_delay(bh))
-                                       partial_write = 1;
-                               else if (!buffer_mapped(bh))
-                                       clear_buffer_dirty(bh);
-                               else if (buffer_dirty(bh))
-                                       partial_write = 1;
+                                   (offset+bh->b_size <= io_end_offset))
+                                       buffer_io_error(bh);
+
                                offset += bh->b_size;
                                bh = bh->b_this_page;
                        } while (bh != head);
                }
 
-               /*
-                * If this is a partial write which happened to make
-                * all buffers uptodate then we can optimize away a
-                * bogus readpage() for the next read(). Here we
-                * 'discover' whether the page went uptodate as a
-                * result of this (potentially partial) write.
-                */
-               if (!partial_write)
-                       SetPageUptodate(page);
-
                put_io_page(io_end->pages[i]);
        }
        io_end->num_io_pages = 0;
index 8553dfb310afd7ac2209d186125287e99a867f61..cc5c157aa11df4cef77167e870dd64c6273cd4fa 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/ctype.h>
 #include <linux/log2.h>
 #include <linux/crc16.h>
+#include <linux/cleancache.h>
 #include <asm/uaccess.h>
 
 #include <linux/kthread.h>
@@ -75,11 +76,27 @@ static void ext4_write_super(struct super_block *sb);
 static int ext4_freeze(struct super_block *sb);
 static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
                       const char *dev_name, void *data);
+static inline int ext2_feature_set_ok(struct super_block *sb);
+static inline int ext3_feature_set_ok(struct super_block *sb);
 static int ext4_feature_set_ok(struct super_block *sb, int readonly);
 static void ext4_destroy_lazyinit_thread(void);
 static void ext4_unregister_li_request(struct super_block *sb);
 static void ext4_clear_request_list(void);
 
+#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
+static struct file_system_type ext2_fs_type = {
+       .owner          = THIS_MODULE,
+       .name           = "ext2",
+       .mount          = ext4_mount,
+       .kill_sb        = kill_block_super,
+       .fs_flags       = FS_REQUIRES_DEV,
+};
+#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
+#else
+#define IS_EXT2_SB(sb) (0)
+#endif
+
+
 #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
 static struct file_system_type ext3_fs_type = {
        .owner          = THIS_MODULE,
@@ -806,6 +823,8 @@ static void ext4_put_super(struct super_block *sb)
                invalidate_bdev(sbi->journal_bdev);
                ext4_blkdev_remove(sbi);
        }
+       if (sbi->s_mmp_tsk)
+               kthread_stop(sbi->s_mmp_tsk);
        sb->s_fs_info = NULL;
        /*
         * Now that we are completely done shutting down the
@@ -1096,7 +1115,7 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
 
        if (!test_opt(sb, INIT_INODE_TABLE))
                seq_puts(seq, ",noinit_inode_table");
-       else if (sbi->s_li_wait_mult)
+       else if (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)
                seq_printf(seq, ",init_inode_table=%u",
                           (unsigned) sbi->s_li_wait_mult);
 
@@ -1187,9 +1206,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
                                const char *data, size_t len, loff_t off);
 
 static const struct dquot_operations ext4_quota_operations = {
-#ifdef CONFIG_QUOTA
        .get_reserved_space = ext4_get_reserved_space,
-#endif
        .write_dquot    = ext4_write_dquot,
        .acquire_dquot  = ext4_acquire_dquot,
        .release_dquot  = ext4_release_dquot,
@@ -1900,7 +1917,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
                ext4_msg(sb, KERN_WARNING,
                         "warning: mounting fs with errors, "
                         "running e2fsck is recommended");
-       else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 &&
+       else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 &&
                 le16_to_cpu(es->s_mnt_count) >=
                 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
                ext4_msg(sb, KERN_WARNING,
@@ -1932,6 +1949,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
                        EXT4_INODES_PER_GROUP(sb),
                        sbi->s_mount_opt, sbi->s_mount_opt2);
 
+       cleancache_init_fs(sb);
        return res;
 }
 
@@ -2425,6 +2443,18 @@ static ssize_t lifetime_write_kbytes_show(struct ext4_attr *a,
                          EXT4_SB(sb)->s_sectors_written_start) >> 1)));
 }
 
+static ssize_t extent_cache_hits_show(struct ext4_attr *a,
+                                     struct ext4_sb_info *sbi, char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->extent_cache_hits);
+}
+
+static ssize_t extent_cache_misses_show(struct ext4_attr *a,
+                                       struct ext4_sb_info *sbi, char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->extent_cache_misses);
+}
+
 static ssize_t inode_readahead_blks_store(struct ext4_attr *a,
                                          struct ext4_sb_info *sbi,
                                          const char *buf, size_t count)
@@ -2482,6 +2512,8 @@ static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store)
 EXT4_RO_ATTR(delayed_allocation_blocks);
 EXT4_RO_ATTR(session_write_kbytes);
 EXT4_RO_ATTR(lifetime_write_kbytes);
+EXT4_RO_ATTR(extent_cache_hits);
+EXT4_RO_ATTR(extent_cache_misses);
 EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, sbi_ui_show,
                 inode_readahead_blks_store, s_inode_readahead_blks);
 EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
@@ -2497,6 +2529,8 @@ static struct attribute *ext4_attrs[] = {
        ATTR_LIST(delayed_allocation_blocks),
        ATTR_LIST(session_write_kbytes),
        ATTR_LIST(lifetime_write_kbytes),
+       ATTR_LIST(extent_cache_hits),
+       ATTR_LIST(extent_cache_misses),
        ATTR_LIST(inode_readahead_blks),
        ATTR_LIST(inode_goal),
        ATTR_LIST(mb_stats),
@@ -2659,12 +2693,6 @@ static void print_daily_error_info(unsigned long arg)
        mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
 }
 
-static void ext4_lazyinode_timeout(unsigned long data)
-{
-       struct task_struct *p = (struct task_struct *)data;
-       wake_up_process(p);
-}
-
 /* Find next suitable group and run ext4_init_inode_table */
 static int ext4_run_li_request(struct ext4_li_request *elr)
 {
@@ -2696,11 +2724,8 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
                ret = ext4_init_inode_table(sb, group,
                                            elr->lr_timeout ? 0 : 1);
                if (elr->lr_timeout == 0) {
-                       timeout = jiffies - timeout;
-                       if (elr->lr_sbi->s_li_wait_mult)
-                               timeout *= elr->lr_sbi->s_li_wait_mult;
-                       else
-                               timeout *= 20;
+                       timeout = (jiffies - timeout) *
+                                 elr->lr_sbi->s_li_wait_mult;
                        elr->lr_timeout = timeout;
                }
                elr->lr_next_sched = jiffies + elr->lr_timeout;
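
With s_li_wait_mult now guaranteed to be set (a later hunk in this file makes EXT4_DEF_LI_WAIT_MULT the mount-time default), the backoff above reduces to multiplying the duration of the last zeroing pass by the wait multiplier. A tiny worked sketch; HZ and the multiplier value are placeholders.

#include <stdio.h>

#define HZ                     250   /* placeholder tick rate          */
#define EXT4_DEF_LI_WAIT_MULT   10   /* placeholder default multiplier */

/* How long the lazyinit thread sleeps before zeroing the next group,
 * given how long the previous ext4_init_inode_table() pass took. */
static unsigned long next_delay(unsigned long pass_jiffies, unsigned int mult)
{
        return pass_jiffies * mult;
}

int main(void)
{
        unsigned long pass = 2 * HZ;   /* a pass that took two seconds */
        unsigned long delay = next_delay(pass, EXT4_DEF_LI_WAIT_MULT);

        printf("sleep %lu jiffies (~%lu s) before the next group\n",
               delay, delay / HZ);
        return 0;
}
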
@@ -2712,7 +2737,7 @@ static int ext4_run_li_request(struct ext4_li_request *elr)
 
 /*
  * Remove lr_request from the list_request and free the
- * request tructure. Should be called with li_list_mtx held
+ * request structure. Should be called with li_list_mtx held
  */
 static void ext4_remove_li_request(struct ext4_li_request *elr)
 {
@@ -2730,14 +2755,16 @@ static void ext4_remove_li_request(struct ext4_li_request *elr)
 
 static void ext4_unregister_li_request(struct super_block *sb)
 {
-       struct ext4_li_request *elr = EXT4_SB(sb)->s_li_request;
-
-       if (!ext4_li_info)
+       mutex_lock(&ext4_li_mtx);
+       if (!ext4_li_info) {
+               mutex_unlock(&ext4_li_mtx);
                return;
+       }
 
        mutex_lock(&ext4_li_info->li_list_mtx);
-       ext4_remove_li_request(elr);
+       ext4_remove_li_request(EXT4_SB(sb)->s_li_request);
        mutex_unlock(&ext4_li_info->li_list_mtx);
+       mutex_unlock(&ext4_li_mtx);
 }
 
 static struct task_struct *ext4_lazyinit_task;
@@ -2756,17 +2783,10 @@ static int ext4_lazyinit_thread(void *arg)
        struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg;
        struct list_head *pos, *n;
        struct ext4_li_request *elr;
-       unsigned long next_wakeup;
-       DEFINE_WAIT(wait);
+       unsigned long next_wakeup, cur;
 
        BUG_ON(NULL == eli);
 
-       eli->li_timer.data = (unsigned long)current;
-       eli->li_timer.function = ext4_lazyinode_timeout;
-
-       eli->li_task = current;
-       wake_up(&eli->li_wait_task);
-
 cont_thread:
        while (true) {
                next_wakeup = MAX_JIFFY_OFFSET;
@@ -2797,19 +2817,15 @@ cont_thread:
                if (freezing(current))
                        refrigerator();
 
-               if ((time_after_eq(jiffies, next_wakeup)) ||
+               cur = jiffies;
+               if ((time_after_eq(cur, next_wakeup)) ||
                    (MAX_JIFFY_OFFSET == next_wakeup)) {
                        cond_resched();
                        continue;
                }
 
-               eli->li_timer.expires = next_wakeup;
-               add_timer(&eli->li_timer);
-               prepare_to_wait(&eli->li_wait_daemon, &wait,
-                               TASK_INTERRUPTIBLE);
-               if (time_before(jiffies, next_wakeup))
-                       schedule();
-               finish_wait(&eli->li_wait_daemon, &wait);
+               schedule_timeout_interruptible(next_wakeup - cur);
+
                if (kthread_should_stop()) {
                        ext4_clear_request_list();
                        goto exit_thread;
@@ -2833,12 +2849,7 @@ exit_thread:
                goto cont_thread;
        }
        mutex_unlock(&eli->li_list_mtx);
-       del_timer_sync(&ext4_li_info->li_timer);
-       eli->li_task = NULL;
-       wake_up(&eli->li_wait_task);
-
        kfree(ext4_li_info);
-       ext4_lazyinit_task = NULL;
        ext4_li_info = NULL;
        mutex_unlock(&ext4_li_mtx);
 
@@ -2866,7 +2877,6 @@ static int ext4_run_lazyinit_thread(void)
        if (IS_ERR(ext4_lazyinit_task)) {
                int err = PTR_ERR(ext4_lazyinit_task);
                ext4_clear_request_list();
-               del_timer_sync(&ext4_li_info->li_timer);
                kfree(ext4_li_info);
                ext4_li_info = NULL;
                printk(KERN_CRIT "EXT4: error %d creating inode table "
@@ -2875,8 +2885,6 @@ static int ext4_run_lazyinit_thread(void)
                return err;
        }
        ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING;
-
-       wait_event(ext4_li_info->li_wait_task, ext4_li_info->li_task != NULL);
        return 0;
 }
 
@@ -2911,13 +2919,9 @@ static int ext4_li_info_new(void)
        if (!eli)
                return -ENOMEM;
 
-       eli->li_task = NULL;
        INIT_LIST_HEAD(&eli->li_request_list);
        mutex_init(&eli->li_list_mtx);
 
-       init_waitqueue_head(&eli->li_wait_daemon);
-       init_waitqueue_head(&eli->li_wait_task);
-       init_timer(&eli->li_timer);
        eli->li_state |= EXT4_LAZYINIT_QUIT;
 
        ext4_li_info = eli;
@@ -2960,20 +2964,19 @@ static int ext4_register_li_request(struct super_block *sb,
        ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
        int ret = 0;
 
-       if (sbi->s_li_request != NULL)
+       if (sbi->s_li_request != NULL) {
+               /*
+                * Reset timeout so it can be computed again, because
+                * s_li_wait_mult might have changed.
+                */
+               sbi->s_li_request->lr_timeout = 0;
                return 0;
+       }
 
        if (first_not_zeroed == ngroups ||
            (sb->s_flags & MS_RDONLY) ||
-           !test_opt(sb, INIT_INODE_TABLE)) {
-               sbi->s_li_request = NULL;
+           !test_opt(sb, INIT_INODE_TABLE))
                return 0;
-       }
-
-       if (first_not_zeroed == ngroups) {
-               sbi->s_li_request = NULL;
-               return 0;
-       }
 
        elr = ext4_li_request_new(sb, first_not_zeroed);
        if (!elr)
@@ -3166,6 +3169,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
            ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
                set_opt(sb, DELALLOC);
 
+       /*
+        * set default s_li_wait_mult for lazyinit, for the case there is
+        * no mount option specified.
+        */
+       sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
+
        if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
                           &journal_devnum, &journal_ioprio, NULL, 0)) {
                ext4_msg(sb, KERN_WARNING,
@@ -3187,6 +3196,28 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                       "feature flags set on rev 0 fs, "
                       "running e2fsck is recommended");
 
+       if (IS_EXT2_SB(sb)) {
+               if (ext2_feature_set_ok(sb))
+                       ext4_msg(sb, KERN_INFO, "mounting ext2 file system "
+                                "using the ext4 subsystem");
+               else {
+                       ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due "
+                                "to feature incompatibilities");
+                       goto failed_mount;
+               }
+       }
+
+       if (IS_EXT3_SB(sb)) {
+               if (ext3_feature_set_ok(sb))
+                       ext4_msg(sb, KERN_INFO, "mounting ext3 file system "
+                                "using the ext4 subsystem");
+               else {
+                       ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due "
+                                "to feature incompatibilities");
+                       goto failed_mount;
+               }
+       }
+
        /*
         * Check feature flags regardless of the revision level, since we
         * previously didn't change the revision level when setting the flags,
@@ -3459,6 +3490,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                          EXT4_HAS_INCOMPAT_FEATURE(sb,
                                    EXT4_FEATURE_INCOMPAT_RECOVER));
 
+       if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_MMP) &&
+           !(sb->s_flags & MS_RDONLY))
+               if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
+                       goto failed_mount3;
+
        /*
         * The first inode we look at is the journal inode.  Don't try
         * root first: it may be modified in the journal!
@@ -3474,7 +3510,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                goto failed_mount_wq;
        } else {
                clear_opt(sb, DATA_FLAGS);
-               set_opt(sb, WRITEBACK_DATA);
                sbi->s_journal = NULL;
                needs_recovery = 0;
                goto no_journal;
@@ -3707,6 +3742,8 @@ failed_mount3:
        percpu_counter_destroy(&sbi->s_freeinodes_counter);
        percpu_counter_destroy(&sbi->s_dirs_counter);
        percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
+       if (sbi->s_mmp_tsk)
+               kthread_stop(sbi->s_mmp_tsk);
 failed_mount2:
        for (i = 0; i < db_count; i++)
                brelse(sbi->s_group_desc[i]);
@@ -4242,7 +4279,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        int enable_quota = 0;
        ext4_group_t g;
        unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
-       int err;
+       int err = 0;
 #ifdef CONFIG_QUOTA
        int i;
 #endif
@@ -4368,6 +4405,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                                goto restore_opts;
                        if (!ext4_setup_super(sb, es, 0))
                                sb->s_flags &= ~MS_RDONLY;
+                       if (EXT4_HAS_INCOMPAT_FEATURE(sb,
+                                                    EXT4_FEATURE_INCOMPAT_MMP))
+                               if (ext4_multi_mount_protect(sb,
+                                               le64_to_cpu(es->s_mmp_block))) {
+                                       err = -EROFS;
+                                       goto restore_opts;
+                               }
                        enable_quota = 1;
                }
        }
@@ -4432,6 +4476,7 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_super_block *es = sbi->s_es;
        u64 fsid;
+       s64 bfree;
 
        if (test_opt(sb, MINIX_DF)) {
                sbi->s_overhead_last = 0;
@@ -4475,8 +4520,10 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
        buf->f_type = EXT4_SUPER_MAGIC;
        buf->f_bsize = sb->s_blocksize;
        buf->f_blocks = ext4_blocks_count(es) - sbi->s_overhead_last;
-       buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter) -
+       bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter) -
                       percpu_counter_sum_positive(&sbi->s_dirtyblocks_counter);
+       /* prevent underflow in case little free space is available */
+       buf->f_bfree = max_t(s64, bfree, 0);
        buf->f_bavail = buf->f_bfree - ext4_r_blocks_count(es);
        if (buf->f_bfree < ext4_r_blocks_count(es))
                buf->f_bavail = 0;
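
The statfs hunk above computes the free-block estimate into a signed 64-bit temporary and clamps it at zero, because the dirty-block counter can briefly exceed the free-block counter on a nearly full filesystem and the unsigned subtraction would otherwise wrap. A minimal sketch of the clamp with invented counter values.

#include <stdio.h>
#include <stdint.h>

/* Clamp so a transient excess of dirty (delalloc-reserved) blocks cannot
 * underflow an unsigned f_bfree. */
static uint64_t free_blocks(int64_t freeb, int64_t dirtyb)
{
        int64_t bfree = freeb - dirtyb;

        return bfree > 0 ? (uint64_t)bfree : 0;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)free_blocks(100, 40));  /* 60 */
        printf("%llu\n", (unsigned long long)free_blocks(10, 25));   /* 0  */
        return 0;
}
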
@@ -4652,6 +4699,9 @@ static int ext4_quota_off(struct super_block *sb, int type)
        if (test_opt(sb, DELALLOC))
                sync_filesystem(sb);
 
+       if (!inode)
+               goto out;
+
        /* Update modification times of quota files when userspace can
         * start looking at them */
        handle = ext4_journal_start(inode, 1);
@@ -4772,14 +4822,6 @@ static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
 }
 
 #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
-static struct file_system_type ext2_fs_type = {
-       .owner          = THIS_MODULE,
-       .name           = "ext2",
-       .mount          = ext4_mount,
-       .kill_sb        = kill_block_super,
-       .fs_flags       = FS_REQUIRES_DEV,
-};
-
 static inline void register_as_ext2(void)
 {
        int err = register_filesystem(&ext2_fs_type);
@@ -4792,10 +4834,22 @@ static inline void unregister_as_ext2(void)
 {
        unregister_filesystem(&ext2_fs_type);
 }
+
+static inline int ext2_feature_set_ok(struct super_block *sb)
+{
+       if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP))
+               return 0;
+       if (sb->s_flags & MS_RDONLY)
+               return 1;
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))
+               return 0;
+       return 1;
+}
 MODULE_ALIAS("ext2");
 #else
 static inline void register_as_ext2(void) { }
 static inline void unregister_as_ext2(void) { }
+static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; }
 #endif
 
 #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
@@ -4811,10 +4865,24 @@ static inline void unregister_as_ext3(void)
 {
        unregister_filesystem(&ext3_fs_type);
 }
+
+static inline int ext3_feature_set_ok(struct super_block *sb)
+{
+       if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT3_FEATURE_INCOMPAT_SUPP))
+               return 0;
+       if (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
+               return 0;
+       if (sb->s_flags & MS_RDONLY)
+               return 1;
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP))
+               return 0;
+       return 1;
+}
 MODULE_ALIAS("ext3");
 #else
 static inline void register_as_ext3(void) { }
 static inline void unregister_as_ext3(void) { }
+static inline int ext3_feature_set_ok(struct super_block *sb) { return 0; }
 #endif
 
 static struct file_system_type ext4_fs_type = {
@@ -4898,8 +4966,8 @@ static int __init ext4_init_fs(void)
        err = init_inodecache();
        if (err)
                goto out1;
-       register_as_ext2();
        register_as_ext3();
+       register_as_ext2();
        err = register_filesystem(&ext4_fs_type);
        if (err)
                goto out;
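
The ext2_feature_set_ok()/ext3_feature_set_ok() helpers added above are plain mask tests: unknown incompat bits always reject the mount, unknown ro_compat bits only reject a read-write mount, and ext3 additionally requires the has_journal compat feature. A minimal userspace sketch of the same rule; the masks are made up and the journal check is left out.

#include <stdio.h>
#include <stdint.h>

/* Purely illustrative masks -- not the real EXT*_FEATURE_*_SUPP values. */
#define SUPP_INCOMPAT   0x0007u
#define SUPP_RO_COMPAT  0x0003u

static int feature_set_ok(uint32_t incompat, uint32_t ro_compat, int readonly)
{
        if (incompat & ~SUPP_INCOMPAT)
                return 0;           /* unknown incompat feature       */
        if (readonly)
                return 1;           /* ro mount tolerates ro_compat   */
        if (ro_compat & ~SUPP_RO_COMPAT)
                return 0;           /* unknown ro_compat on rw mount  */
        return 1;
}

int main(void)
{
        printf("%d\n", feature_set_ok(0x0001, 0x0001, 0)); /* 1: all known      */
        printf("%d\n", feature_set_ok(0x0008, 0x0001, 0)); /* 0: bad incompat   */
        printf("%d\n", feature_set_ok(0x0001, 0x0010, 1)); /* 1: ro mount is ok */
        return 0;
}
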
index b545ca1c459c42e2cc6aef35ce49dea3fcd2694d..c757adc972506d672c78b7de03e654ede7eb8b1e 100644 (file)
@@ -820,8 +820,8 @@ inserted:
                        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                                goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
 
-                       block = ext4_new_meta_blocks(handle, inode,
-                                                 goal, NULL, &error);
+                       block = ext4_new_meta_blocks(handle, inode, goal, 0,
+                                                    NULL, &error);
                        if (error)
                                goto cleanup;
 
index 3b222dafd15b7eb6fa8f8b425a407f8a70dc7a19..be15437c272e913b10e50278bcb28c01cf93fe21 100644 (file)
@@ -326,6 +326,8 @@ static int msdos_rmdir(struct inode *dir, struct dentry *dentry)
        struct fat_slot_info sinfo;
        int err;
 
+       dentry_unhash(dentry);
+
        lock_super(sb);
        /*
         * Check whether the directory is not in use, then check
@@ -457,6 +459,9 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name,
        old_inode = old_dentry->d_inode;
        new_inode = new_dentry->d_inode;
 
+       if (new_inode && S_ISDIR(new_inode->i_mode))
+               dentry_unhash(new_dentry);
+
        err = fat_scan(old_dir, old_name, &old_sinfo);
        if (err) {
                err = -EIO;
index 20b4ea53fdc4e3be434dc7601dce74a3a4658473..c61a6789f36cb2642b324c05ea8f7a6fd936fe28 100644 (file)
@@ -824,6 +824,8 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
        struct fat_slot_info sinfo;
        int err;
 
+       dentry_unhash(dentry);
+
        lock_super(sb);
 
        err = fat_dir_empty(inode);
@@ -931,6 +933,9 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
        int err, is_dir, update_dotdot, corrupt = 0;
        struct super_block *sb = old_dir->i_sb;
 
+       if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
+               dentry_unhash(new_dentry);
+
        old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
        old_inode = old_dentry->d_inode;
        new_inode = new_dentry->d_inode;
index b32eb29a4e6ff4f8dd892b1af9ea74d712f148ee..0d0e3faddcfa0bbd9ddac898d9e7caa0fc2b57bf 100644 (file)
@@ -667,6 +667,8 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry)
        if (IS_ERR(req))
                return PTR_ERR(req);
 
+       dentry_unhash(entry);
+
        req->in.h.opcode = FUSE_RMDIR;
        req->in.h.nodeid = get_node_id(dir);
        req->in.numargs = 1;
@@ -691,6 +693,10 @@ static int fuse_rename(struct inode *olddir, struct dentry *oldent,
        struct fuse_rename_in inarg;
        struct fuse_conn *fc = get_fuse_conn(olddir);
        struct fuse_req *req = fuse_get_req(fc);
+
+       if (newent->d_inode && S_ISDIR(newent->d_inode->i_mode))
+               dentry_unhash(newent);
+
        if (IS_ERR(req))
                return PTR_ERR(req);
 
index cfa327d331942aae3b9ee99497be51110e1a2843..c2b34cd2abe0e81f7be6eab08189817507d9f307 100644 (file)
@@ -146,7 +146,7 @@ static int __init init_gfs2_fs(void)
 
        gfs2_register_debugfs();
 
-       printk("GFS2 (built %s %s) installed\n", __DATE__, __TIME__);
+       printk("GFS2 installed\n");
 
        return 0;
 
index b4d70b13be92548c6ac2cd72ae5e34f3ab53ec21..1cb70cdba2c1dbf61cb5bbe47f6bba47d5a70d6b 100644 (file)
@@ -253,6 +253,9 @@ static int hfs_remove(struct inode *dir, struct dentry *dentry)
        struct inode *inode = dentry->d_inode;
        int res;
 
+       if (S_ISDIR(inode->i_mode))
+               dentry_unhash(dentry);
+
        if (S_ISDIR(inode->i_mode) && inode->i_size != 2)
                return -ENOTEMPTY;
        res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
@@ -283,6 +286,9 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        /* Unlink destination if it already exists */
        if (new_dentry->d_inode) {
+               if (S_ISDIR(new_dentry->d_inode->i_mode))
+                       dentry_unhash(new_dentry);
+
                res = hfs_remove(new_dir, new_dentry);
                if (res)
                        return res;
index 4df5059c25da67c4b1ddd2a868057951e6e54d60..b28835091dd084d9f1fae4709dafbc2cb1d7fc0b 100644 (file)
@@ -370,6 +370,8 @@ static int hfsplus_rmdir(struct inode *dir, struct dentry *dentry)
        struct inode *inode = dentry->d_inode;
        int res;
 
+       dentry_unhash(dentry);
+
        if (inode->i_size != 2)
                return -ENOTEMPTY;
 
@@ -467,10 +469,12 @@ static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        /* Unlink destination if it already exists */
        if (new_dentry->d_inode) {
-               if (S_ISDIR(new_dentry->d_inode->i_mode))
+               if (S_ISDIR(new_dentry->d_inode->i_mode)) {
+                       dentry_unhash(new_dentry);
                        res = hfsplus_rmdir(new_dir, new_dentry);
-               else
+               } else {
                        res = hfsplus_unlink(new_dir, new_dentry);
+               }
                if (res)
                        return res;
        }
index 2638c834ed281db783f6a7cd5ef24e2edb2047e4..e6816b9e6903e0d80d4ab7505ad84b276ec12baa 100644 (file)
@@ -683,6 +683,8 @@ int hostfs_rmdir(struct inode *ino, struct dentry *dentry)
        char *file;
        int err;
 
+       dentry_unhash(dentry);
+
        if ((file = dentry_name(dentry)) == NULL)
                return -ENOMEM;
        err = do_rmdir(file);
@@ -736,6 +738,9 @@ int hostfs_rename(struct inode *from_ino, struct dentry *from,
        char *from_name, *to_name;
        int err;
 
+       if (to->d_inode && S_ISDIR(to->d_inode->i_mode))
+               dentry_unhash(to);
+
        if ((from_name = dentry_name(from)) == NULL)
                return -ENOMEM;
        if ((to_name = dentry_name(to)) == NULL) {
index 1f05839c27a7fca1f24c99dda71adf07bb2930c5..ff0ce21c0867065f2efbcb30fbfee929ee6e4e09 100644 (file)
@@ -395,7 +395,6 @@ again:
 
                dentry_unhash(dentry);
                if (!d_unhashed(dentry)) {
-                       dput(dentry);
                        hpfs_unlock(dir->i_sb);
                        return -ENOSPC;
                }
@@ -403,7 +402,6 @@ again:
                    !S_ISREG(inode->i_mode) ||
                    get_write_access(inode)) {
                        d_rehash(dentry);
-                       dput(dentry);
                } else {
                        struct iattr newattrs;
                        /*printk("HPFS: truncating file before delete.\n");*/
@@ -411,7 +409,6 @@ again:
                        newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
                        err = notify_change(dentry, &newattrs);
                        put_write_access(inode);
-                       dput(dentry);
                        if (!err)
                                goto again;
                }
@@ -442,6 +439,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
        int err;
        int r;
 
+       dentry_unhash(dentry);
+
        hpfs_adjust_length(name, &len);
        hpfs_lock(dir->i_sb);
        err = -ENOENT;
@@ -535,6 +534,10 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct buffer_head *bh;
        struct fnode *fnode;
        int err;
+
+       if (new_inode && S_ISDIR(new_inode->i_mode))
+               dentry_unhash(new_dentry);
+
        if ((err = hpfs_chk_name(new_name, &new_len))) return err;
        err = 0;
        hpfs_adjust_length(old_name, &old_len);
index e7a035781b7dd30694acd0b3ee104758a3ffcf87..7aafeb8fa3005eb14fd09f75d94c17dbeefe8481 100644 (file)
@@ -921,7 +921,8 @@ static int can_do_hugetlb_shm(void)
        return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group);
 }
 
-struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag,
+struct file *hugetlb_file_setup(const char *name, size_t size,
+                               vm_flags_t acctflag,
                                struct user_struct **user, int creat_flags)
 {
        int error = -ENOMEM;
index 29148a81c783728928cf4ea63d46160cc5c4e85c..7f21cf3aaf92e66d892259289cae31553423312b 100644 (file)
@@ -219,7 +219,6 @@ static int journal_submit_data_buffers(journal_t *journal,
                        ret = err;
                spin_lock(&journal->j_list_lock);
                J_ASSERT(jinode->i_transaction == commit_transaction);
-               commit_transaction->t_flushed_data_blocks = 1;
                clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
                smp_mb__after_clear_bit();
                wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
@@ -672,12 +671,16 @@ start_journal_io:
                err = 0;
        }
 
+       write_lock(&journal->j_state_lock);
+       J_ASSERT(commit_transaction->t_state == T_COMMIT);
+       commit_transaction->t_state = T_COMMIT_DFLUSH;
+       write_unlock(&journal->j_state_lock);
        /* 
         * If the journal is not located on the file system device,
         * then we must flush the file system device before we issue
         * the commit record
         */
-       if (commit_transaction->t_flushed_data_blocks &&
+       if (commit_transaction->t_need_data_flush &&
            (journal->j_fs_dev != journal->j_dev) &&
            (journal->j_flags & JBD2_BARRIER))
                blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
@@ -754,8 +757,13 @@ wait_for_iobuf:
                    required. */
                JBUFFER_TRACE(jh, "file as BJ_Forget");
                jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
-               /* Wake up any transactions which were waiting for this
-                  IO to complete */
+               /*
+                * Wake up any transactions which were waiting for this IO to
+                * complete. The barrier must be here so that changes by
+                * jbd2_journal_file_buffer() take effect before wake_up_bit()
+                * does the waitqueue check.
+                */
+               smp_mb();
                wake_up_bit(&bh->b_state, BH_Unshadow);
                JBUFFER_TRACE(jh, "brelse shadowed buffer");
                __brelse(bh);
@@ -794,6 +802,10 @@ wait_for_iobuf:
                jbd2_journal_abort(journal, err);
 
        jbd_debug(3, "JBD: commit phase 5\n");
+       write_lock(&journal->j_state_lock);
+       J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
+       commit_transaction->t_state = T_COMMIT_JFLUSH;
+       write_unlock(&journal->j_state_lock);
 
        if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
                                       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
@@ -949,7 +961,7 @@ restart_loop:
 
        jbd_debug(3, "JBD: commit phase 7\n");
 
-       J_ASSERT(commit_transaction->t_state == T_COMMIT);
+       J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);
 
        commit_transaction->t_start = jiffies;
        stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
index e0ec3db1c395b6c338acc6f7c14af6a2c11eb067..9a78269903041934f0896148e1999e8467fd0872 100644 (file)
@@ -479,9 +479,12 @@ int __jbd2_log_space_left(journal_t *journal)
 int __jbd2_log_start_commit(journal_t *journal, tid_t target)
 {
        /*
-        * Are we already doing a recent enough commit?
+        * The only transaction we can possibly wait upon is the
+        * currently running transaction (if it exists).  Otherwise,
+        * the target tid must be an old one.
         */
-       if (!tid_geq(journal->j_commit_request, target)) {
+       if (journal->j_running_transaction &&
+           journal->j_running_transaction->t_tid == target) {
                /*
                 * We want a new commit: OK, mark the request and wakeup the
                 * commit thread.  We do _not_ do the commit ourselves.
@@ -493,7 +496,15 @@ int __jbd2_log_start_commit(journal_t *journal, tid_t target)
                          journal->j_commit_sequence);
                wake_up(&journal->j_wait_commit);
                return 1;
-       }
+       } else if (!tid_geq(journal->j_commit_request, target))
+               /* This should never happen, but if it does, preserve
+                  the evidence before kjournald goes into a loop and
+                  increments j_commit_sequence beyond all recognition. */
+               WARN_ONCE(1, "jbd: bad log_start_commit: %u %u %u %u\n",
+                         journal->j_commit_request,
+                         journal->j_commit_sequence,
+                         target, journal->j_running_transaction ? 
+                         journal->j_running_transaction->t_tid : 0);
        return 0;
 }
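
The tid_geq() check above compares transaction IDs in a wraparound-safe way. A minimal sketch of that helper as commonly defined in include/linux/jbd2.h; the exact form shown here is an assumption, not part of this patch:

    /* Assumed helper: x >= y for 32-bit tids, tolerating counter wraparound
     * as long as the two values are within 2^31 of each other. */
    static inline int tid_geq(tid_t x, tid_t y)
    {
            int difference = (x - y);
            return (difference >= 0);
    }
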
 
@@ -576,6 +587,47 @@ int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid)
        return ret;
 }
 
+/*
+ * Return 1 if a given transaction has not yet sent a barrier request
+ * connected with a transaction commit. If 0 is returned, the transaction
+ * may or may not have sent the barrier. Used to avoid sending the barrier
+ * twice in common cases.
+ */
+int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid)
+{
+       int ret = 0;
+       transaction_t *commit_trans;
+
+       if (!(journal->j_flags & JBD2_BARRIER))
+               return 0;
+       read_lock(&journal->j_state_lock);
+       /* Transaction already committed? */
+       if (tid_geq(journal->j_commit_sequence, tid))
+               goto out;
+       commit_trans = journal->j_committing_transaction;
+       if (!commit_trans || commit_trans->t_tid != tid) {
+               ret = 1;
+               goto out;
+       }
+       /*
+        * Transaction is being committed and we already proceeded to
+        * submitting a flush to fs partition?
+        */
+       if (journal->j_fs_dev != journal->j_dev) {
+               if (!commit_trans->t_need_data_flush ||
+                   commit_trans->t_state >= T_COMMIT_DFLUSH)
+                       goto out;
+       } else {
+               if (commit_trans->t_state >= T_COMMIT_JFLUSH)
+                       goto out;
+       }
+       ret = 1;
+out:
+       read_unlock(&journal->j_state_lock);
+       return ret;
+}
+EXPORT_SYMBOL(jbd2_trans_will_send_data_barrier);
+
 /*
  * Wait for a specified commit to complete.
  * The caller may not hold the journal lock.
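
A hedged sketch of how a filesystem fsync path might consume the new export to avoid issuing a duplicate cache flush; only jbd2_trans_will_send_data_barrier() and blkdev_issue_flush() come from this patch, while the wrapper name and its arguments are placeholders:

    /* Sketch: the committing transaction will flush for us when the helper
     * returns 1, so only flush the data device ourselves when it returns 0. */
    static int example_fsync_maybe_flush(journal_t *journal,
                                         struct block_device *bdev,
                                         tid_t commit_tid)
    {
            if (jbd2_trans_will_send_data_barrier(journal, commit_tid))
                    return 0;       /* commit code will issue the flush */
            return blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
    }
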
index 05fa77a23711f8db041cac46984452451daae70c..3eec82d32fd4c6886fdf823e7d3690ca2e665162 100644 (file)
@@ -82,7 +82,7 @@ jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
  */
 
 /*
- * Update transiaction's maximum wait time, if debugging is enabled.
+ * Update transaction's maximum wait time, if debugging is enabled.
  *
  * In order for t_max_wait to be reliable, it must be protected by a
  * lock.  But doing so will mean that start_this_handle() can not be
@@ -91,11 +91,10 @@ jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
  * means that maximum wait time reported by the jbd2_run_stats
  * tracepoint will always be zero.
  */
-static inline void update_t_max_wait(transaction_t *transaction)
+static inline void update_t_max_wait(transaction_t *transaction,
+                                    unsigned long ts)
 {
 #ifdef CONFIG_JBD2_DEBUG
-       unsigned long ts = jiffies;
-
        if (jbd2_journal_enable_debug &&
            time_after(transaction->t_start, ts)) {
                ts = jbd2_time_diff(ts, transaction->t_start);
@@ -121,6 +120,7 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
        tid_t           tid;
        int             needed, need_to_start;
        int             nblocks = handle->h_buffer_credits;
+       unsigned long ts = jiffies;
 
        if (nblocks > journal->j_max_transaction_buffers) {
                printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
@@ -271,7 +271,7 @@ repeat:
        /* OK, account for the buffers that this operation expects to
         * use and add the handle to the running transaction. 
         */
-       update_t_max_wait(transaction);
+       update_t_max_wait(transaction, ts);
        handle->h_transaction = transaction;
        atomic_inc(&transaction->t_updates);
        atomic_inc(&transaction->t_handle_count);
@@ -316,7 +316,8 @@ static handle_t *new_handle(int nblocks)
  * This function is visible to journal users (like ext3fs), so is not
  * called with the journal already locked.
  *
- * Return a pointer to a newly allocated handle, or NULL on failure
+ * Return a pointer to a newly allocated handle, or an ERR_PTR() value
+ * on failure.
  */
 handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int gfp_mask)
 {
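
Matching the corrected comment above (failure comes back as an ERR_PTR(), never NULL), a caller sketch; the wrapper function is hypothetical:

    /* Sketch of the calling convention the updated comment documents. */
    static int example_journalled_update(journal_t *journal)
    {
            handle_t *handle = jbd2__journal_start(journal, 1, GFP_NOFS);

            if (IS_ERR(handle))             /* not a NULL check */
                    return PTR_ERR(handle);

            /* ... do the journalled work with @handle ... */

            return jbd2_journal_stop(handle);
    }
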
@@ -921,8 +922,8 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
         */
        JBUFFER_TRACE(jh, "cancelling revoke");
        jbd2_journal_cancel_revoke(handle, jh);
-       jbd2_journal_put_journal_head(jh);
 out:
+       jbd2_journal_put_journal_head(jh);
        return err;
 }
 
@@ -2147,6 +2148,13 @@ int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
            jinode->i_next_transaction == transaction)
                goto done;
 
+       /*
+        * We only ever set this variable to 1 so the test is safe. Since
+        * t_need_data_flush is likely to be set, we do the test to save some
+        * cacheline bouncing
+        */
+       if (!transaction->t_need_data_flush)
+               transaction->t_need_data_flush = 1;
        /* On some different transaction's list - should be
         * the committing one */
        if (jinode->i_transaction) {
index 82faddd1f321b74e8efd4f11fe07449e409d7bbc..05f73328b28b40bf1ba30952faa64e7568d53a43 100644 (file)
@@ -609,6 +609,8 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
        int ret;
        uint32_t now = get_seconds();
 
+       dentry_unhash(dentry);
+
        for (fd = f->dents ; fd; fd = fd->next) {
                if (fd->ino)
                        return -ENOTEMPTY;
@@ -784,6 +786,9 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
        uint8_t type;
        uint32_t now;
 
+       if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
+               dentry_unhash(new_dentry);
+
        /* The VFS will check for us and prevent trying to rename a
         * file over a directory and vice versa, but if it's a directory,
         * the VFS can't check whether the victim is empty. The filesystem
index eaaf2b511e89a64613bd281f82029ee869358484..865df16a6cf3b23d230ccfa14a1f45eaf900998a 100644 (file)
@@ -360,6 +360,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry)
 
        jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name);
 
+       dentry_unhash(dentry);
+
        /* Init inode for quota operations. */
        dquot_initialize(dip);
        dquot_initialize(ip);
@@ -1095,6 +1097,9 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        jfs_info("jfs_rename: %s %s", old_dentry->d_name.name,
                 new_dentry->d_name.name);
 
+       if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
+               dentry_unhash(new_dentry);
+
        dquot_initialize(old_dir);
        dquot_initialize(new_dir);
 
index 9ed89d1663f839c86b84e133617a47f4d855ee25..f34c9cde9e94d0187462e22ca10cfcb52e2d1cd8 100644 (file)
@@ -273,6 +273,8 @@ static int logfs_rmdir(struct inode *dir, struct dentry *dentry)
 {
        struct inode *inode = dentry->d_inode;
 
+       dentry_unhash(dentry);
+
        if (!logfs_empty_dir(inode))
                return -ENOTEMPTY;
 
@@ -622,6 +624,9 @@ static int logfs_rename_cross(struct inode *old_dir, struct dentry *old_dentry,
        loff_t pos;
        int err;
 
+       if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
+               dentry_unhash(new_dentry);
+
        /* 1. locate source dd */
        err = logfs_get_dd(old_dir, old_dentry, &dd, &pos);
        if (err)
index 6e6777f1b4b208eb5b2767ac4b215f781ad7385d..f60aed8db9c4f2d1885a51616b4b89963a9958a0 100644 (file)
@@ -168,6 +168,8 @@ static int minix_rmdir(struct inode * dir, struct dentry *dentry)
        struct inode * inode = dentry->d_inode;
        int err = -ENOTEMPTY;
 
+       dentry_unhash(dentry);
+
        if (minix_empty_dir(inode)) {
                err = minix_unlink(dir, dentry);
                if (!err) {
@@ -190,6 +192,9 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
        struct minix_dir_entry * old_de;
        int err = -ENOENT;
 
+       if (new_inode && S_ISDIR(new_inode->i_mode))
+               dentry_unhash(new_dentry);
+
        old_de = minix_find_entry(old_dentry, &old_page);
        if (!old_de)
                goto out;
index 0afc809e46e09b53cb767e56795fba3139c4b181..fdfae9fa98cda52f26b730ab6786db084124d279 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/writeback.h>
 #include <linux/backing-dev.h>
 #include <linux/pagevec.h>
+#include <linux/cleancache.h>
 
 /*
  * I/O completion handler for multipage BIOs.
@@ -271,6 +272,12 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
                SetPageMappedToDisk(page);
        }
 
+       if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
+           cleancache_get_page(page) == 0) {
+               SetPageUptodate(page);
+               goto confused;
+       }
+
        /*
         * This page will go to BIO.  Do we need to send this BIO off first?
         */
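
The hunk above is one instance of the generic cleancache consumer pattern: cleancache_get_page() returns 0 when the page could be filled from the transcendent memory pool, so the real block read can be skipped. A condensed, hedged sketch of that pattern; example_submit_block_read() is a placeholder for a filesystem's real read path:

    /* Placeholder for the filesystem's real block-read path. */
    static int example_submit_block_read(struct file *file, struct page *page);

    /* Sketch: consult cleancache before issuing real I/O. */
    static int example_readpage(struct file *file, struct page *page)
    {
            if (cleancache_get_page(page) == 0) {
                    /* Hit: page contents restored without touching the disk. */
                    SetPageUptodate(page);
                    unlock_page(page);
                    return 0;
            }
            /* Miss: fall back to reading the blocks from the device. */
            return example_submit_block_read(file, page);
    }
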
index 6ff858c049c030fd5ea1e142a02c39bb4b51a2dc..2358b326b2211ef93b912e7a43062763de8999a9 100644 (file)
@@ -391,79 +391,28 @@ void path_put(struct path *path)
 }
 EXPORT_SYMBOL(path_put);
 
-/**
- * nameidata_drop_rcu - drop this nameidata out of rcu-walk
- * @nd: nameidata pathwalk data to drop
- * Returns: 0 on success, -ECHILD on failure
- *
+/*
  * Path walking has 2 modes, rcu-walk and ref-walk (see
- * Documentation/filesystems/path-lookup.txt). __drop_rcu* functions attempt
- * to drop out of rcu-walk mode and take normal reference counts on dentries
- * and vfsmounts to transition to rcu-walk mode. __drop_rcu* functions take
- * refcounts at the last known good point before rcu-walk got stuck, so
- * ref-walk may continue from there. If this is not successful (eg. a seqcount
- * has changed), then failure is returned and path walk restarts from the
- * beginning in ref-walk mode.
- *
- * nameidata_drop_rcu attempts to drop the current nd->path and nd->root into
- * ref-walk. Must be called from rcu-walk context.
+ * Documentation/filesystems/path-lookup.txt).  In situations when we can't
+ * continue in RCU mode, we attempt to drop out of rcu-walk mode and grab
+ * normal reference counts on dentries and vfsmounts to transition to rcu-walk
+ * mode.  Refcounts are grabbed at the last known good point before rcu-walk
+ * got stuck, so ref-walk may continue from there. If this is not successful
+ * (eg. a seqcount has changed), then failure is returned and it's up to the
+ * caller to restart the path walk from the beginning in ref-walk mode.
  */
-static int nameidata_drop_rcu(struct nameidata *nd)
-{
-       struct fs_struct *fs = current->fs;
-       struct dentry *dentry = nd->path.dentry;
-       int want_root = 0;
-
-       BUG_ON(!(nd->flags & LOOKUP_RCU));
-       if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
-               want_root = 1;
-               spin_lock(&fs->lock);
-               if (nd->root.mnt != fs->root.mnt ||
-                               nd->root.dentry != fs->root.dentry)
-                       goto err_root;
-       }
-       spin_lock(&dentry->d_lock);
-       if (!__d_rcu_to_refcount(dentry, nd->seq))
-               goto err;
-       BUG_ON(nd->inode != dentry->d_inode);
-       spin_unlock(&dentry->d_lock);
-       if (want_root) {
-               path_get(&nd->root);
-               spin_unlock(&fs->lock);
-       }
-       mntget(nd->path.mnt);
-
-       rcu_read_unlock();
-       br_read_unlock(vfsmount_lock);
-       nd->flags &= ~LOOKUP_RCU;
-       return 0;
-err:
-       spin_unlock(&dentry->d_lock);
-err_root:
-       if (want_root)
-               spin_unlock(&fs->lock);
-       return -ECHILD;
-}
-
-/* Try to drop out of rcu-walk mode if we were in it, otherwise do nothing.  */
-static inline int nameidata_drop_rcu_maybe(struct nameidata *nd)
-{
-       if (nd->flags & LOOKUP_RCU)
-               return nameidata_drop_rcu(nd);
-       return 0;
-}
 
 /**
- * nameidata_dentry_drop_rcu - drop nameidata and dentry out of rcu-walk
- * @nd: nameidata pathwalk data to drop
- * @dentry: dentry to drop
+ * unlazy_walk - try to switch to ref-walk mode.
+ * @nd: nameidata pathwalk data
+ * @dentry: child of nd->path.dentry or NULL
  * Returns: 0 on success, -ECHILD on failure
  *
- * nameidata_dentry_drop_rcu attempts to drop the current nd->path and nd->root,
- * and dentry into ref-walk. @dentry must be a path found by a do_lookup call on
- * @nd. Must be called from rcu-walk context.
+ * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry
+ * for ref-walk mode.  @dentry must be a path found by a do_lookup call on
+ * @nd or NULL.  Must be called from rcu-walk context.
  */
-static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry)
+static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
 {
        struct fs_struct *fs = current->fs;
        struct dentry *parent = nd->path.dentry;
@@ -478,18 +427,25 @@ static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry
                        goto err_root;
        }
        spin_lock(&parent->d_lock);
-       spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
-       if (!__d_rcu_to_refcount(dentry, nd->seq))
-               goto err;
-       /*
-        * If the sequence check on the child dentry passed, then the child has
-        * not been removed from its parent. This means the parent dentry must
-        * be valid and able to take a reference at this point.
-        */
-       BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent);
-       BUG_ON(!parent->d_count);
-       parent->d_count++;
-       spin_unlock(&dentry->d_lock);
+       if (!dentry) {
+               if (!__d_rcu_to_refcount(parent, nd->seq))
+                       goto err_parent;
+               BUG_ON(nd->inode != parent->d_inode);
+       } else {
+               spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+               if (!__d_rcu_to_refcount(dentry, nd->seq))
+                       goto err_child;
+               /*
+                * If the sequence check on the child dentry passed, then
+                * the child has not been removed from its parent. This
+                * means the parent dentry must be valid and able to take
+                * a reference at this point.
+                */
+               BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent);
+               BUG_ON(!parent->d_count);
+               parent->d_count++;
+               spin_unlock(&dentry->d_lock);
+       }
        spin_unlock(&parent->d_lock);
        if (want_root) {
                path_get(&nd->root);
@@ -501,8 +457,10 @@ static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry
        br_read_unlock(vfsmount_lock);
        nd->flags &= ~LOOKUP_RCU;
        return 0;
-err:
+
+err_child:
        spin_unlock(&dentry->d_lock);
+err_parent:
        spin_unlock(&parent->d_lock);
 err_root:
        if (want_root)
@@ -510,59 +468,6 @@ err_root:
        return -ECHILD;
 }
 
-/* Try to drop out of rcu-walk mode if we were in it, otherwise do nothing.  */
-static inline int nameidata_dentry_drop_rcu_maybe(struct nameidata *nd, struct dentry *dentry)
-{
-       if (nd->flags & LOOKUP_RCU) {
-               if (unlikely(nameidata_dentry_drop_rcu(nd, dentry))) {
-                       nd->flags &= ~LOOKUP_RCU;
-                       if (!(nd->flags & LOOKUP_ROOT))
-                               nd->root.mnt = NULL;
-                       rcu_read_unlock();
-                       br_read_unlock(vfsmount_lock);
-                       return -ECHILD;
-               }
-       }
-       return 0;
-}
-
-/**
- * nameidata_drop_rcu_last - drop nameidata ending path walk out of rcu-walk
- * @nd: nameidata pathwalk data to drop
- * Returns: 0 on success, -ECHILD on failure
- *
- * nameidata_drop_rcu_last attempts to drop the current nd->path into ref-walk.
- * nd->path should be the final element of the lookup, so nd->root is discarded.
- * Must be called from rcu-walk context.
- */
-static int nameidata_drop_rcu_last(struct nameidata *nd)
-{
-       struct dentry *dentry = nd->path.dentry;
-
-       BUG_ON(!(nd->flags & LOOKUP_RCU));
-       nd->flags &= ~LOOKUP_RCU;
-       if (!(nd->flags & LOOKUP_ROOT))
-               nd->root.mnt = NULL;
-       spin_lock(&dentry->d_lock);
-       if (!__d_rcu_to_refcount(dentry, nd->seq))
-               goto err_unlock;
-       BUG_ON(nd->inode != dentry->d_inode);
-       spin_unlock(&dentry->d_lock);
-
-       mntget(nd->path.mnt);
-
-       rcu_read_unlock();
-       br_read_unlock(vfsmount_lock);
-
-       return 0;
-
-err_unlock:
-       spin_unlock(&dentry->d_lock);
-       rcu_read_unlock();
-       br_read_unlock(vfsmount_lock);
-       return -ECHILD;
-}
-
 /**
  * release_open_intent - free up open intent resources
  * @nd: pointer to nameidata
@@ -606,26 +511,39 @@ do_revalidate(struct dentry *dentry, struct nameidata *nd)
        return dentry;
 }
 
-/*
- * handle_reval_path - force revalidation of a dentry
- *
- * In some situations the path walking code will trust dentries without
- * revalidating them. This causes problems for filesystems that depend on
- * d_revalidate to handle file opens (e.g. NFSv4). When FS_REVAL_DOT is set
- * (which indicates that it's possible for the dentry to go stale), force
- * a d_revalidate call before proceeding.
+/**
+ * complete_walk - successful completion of path walk
+ * @nd:  pointer to nameidata
  *
- * Returns 0 if the revalidation was successful. If the revalidation fails,
- * either return the error returned by d_revalidate or -ESTALE if the
- * revalidation it just returned 0. If d_revalidate returns 0, we attempt to
- * invalidate the dentry. It's up to the caller to handle putting references
- * to the path if necessary.
+ * If we had been in RCU mode, drop out of it and legitimize nd->path.
+ * Revalidate the final result, unless we'd already done that during
+ * the path walk or the filesystem doesn't ask for it.  Return 0 on
+ * success, -error on failure.  In case of failure caller does not
+ * need to drop nd->path.
  */
-static inline int handle_reval_path(struct nameidata *nd)
+static int complete_walk(struct nameidata *nd)
 {
        struct dentry *dentry = nd->path.dentry;
        int status;
 
+       if (nd->flags & LOOKUP_RCU) {
+               nd->flags &= ~LOOKUP_RCU;
+               if (!(nd->flags & LOOKUP_ROOT))
+                       nd->root.mnt = NULL;
+               spin_lock(&dentry->d_lock);
+               if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) {
+                       spin_unlock(&dentry->d_lock);
+                       rcu_read_unlock();
+                       br_read_unlock(vfsmount_lock);
+                       return -ECHILD;
+               }
+               BUG_ON(nd->inode != dentry->d_inode);
+               spin_unlock(&dentry->d_lock);
+               mntget(nd->path.mnt);
+               rcu_read_unlock();
+               br_read_unlock(vfsmount_lock);
+       }
+
        if (likely(!(nd->flags & LOOKUP_JUMPED)))
                return 0;
 
@@ -643,6 +561,7 @@ static inline int handle_reval_path(struct nameidata *nd)
        if (!status)
                status = -ESTALE;
 
+       path_put(&nd->path);
        return status;
 }
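
A caller sketch matching the contract documented for complete_walk() above: on failure the path has already been dropped, so the caller only propagates the error. The wrapper name is hypothetical:

    /* Sketch of complete_walk()'s calling convention. */
    static int example_finish_lookup(struct nameidata *nd, struct path *result)
    {
            int err = complete_walk(nd);    /* leaves RCU mode, revalidates */

            if (err)
                    return err;             /* nd->path already put on error */

            *result = nd->path;             /* legitimized, referenced path */
            return 0;
    }
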
 
@@ -1241,13 +1160,8 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
                if (likely(__follow_mount_rcu(nd, path, inode, false)))
                        return 0;
 unlazy:
-               if (dentry) {
-                       if (nameidata_dentry_drop_rcu(nd, dentry))
-                               return -ECHILD;
-               } else {
-                       if (nameidata_drop_rcu(nd))
-                               return -ECHILD;
-               }
+               if (unlazy_walk(nd, dentry))
+                       return -ECHILD;
        } else {
                dentry = __d_lookup(parent, name);
        }
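
The unlazy: label above shows the pattern that all the former drop-out call sites now reduce to; a condensed sketch, with the wrapper name hypothetical (unlazy_walk() itself is static to fs/namei.c):

    /* Sketch: leave rcu-walk for ref-walk, or give up with -ECHILD so the
     * caller restarts the whole lookup in ref-walk mode. */
    static int example_leave_rcu(struct nameidata *nd, struct dentry *child)
    {
            if (!(nd->flags & LOOKUP_RCU))
                    return 0;               /* already in ref-walk */
            if (unlazy_walk(nd, child))     /* @child may be NULL */
                    return -ECHILD;
            return 0;
    }
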
@@ -1303,7 +1217,7 @@ static inline int may_lookup(struct nameidata *nd)
                int err = exec_permission(nd->inode, IPERM_FLAG_RCU);
                if (err != -ECHILD)
                        return err;
-               if (nameidata_drop_rcu(nd))
+               if (unlazy_walk(nd, NULL))
                        return -ECHILD;
        }
        return exec_permission(nd->inode, 0);
@@ -1357,8 +1271,12 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
                return -ENOENT;
        }
        if (unlikely(inode->i_op->follow_link) && follow) {
-               if (nameidata_dentry_drop_rcu_maybe(nd, path->dentry))
-                       return -ECHILD;
+               if (nd->flags & LOOKUP_RCU) {
+                       if (unlikely(unlazy_walk(nd, path->dentry))) {
+                               terminate_walk(nd);
+                               return -ECHILD;
+                       }
+               }
                BUG_ON(inode != path->dentry->d_inode);
                return 1;
        }
@@ -1657,18 +1575,8 @@ static int path_lookupat(int dfd, const char *name,
                }
        }
 
-       if (nd->flags & LOOKUP_RCU) {
-               /* went all way through without dropping RCU */
-               BUG_ON(err);
-               if (nameidata_drop_rcu_last(nd))
-                       err = -ECHILD;
-       }
-
-       if (!err) {
-               err = handle_reval_path(nd);
-               if (err)
-                       path_put(&nd->path);
-       }
+       if (!err)
+               err = complete_walk(nd);
 
        if (!err && nd->flags & LOOKUP_DIRECTORY) {
                if (!nd->inode->i_op->lookup) {
@@ -2134,13 +2042,9 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
                        return ERR_PTR(error);
                /* fallthrough */
        case LAST_ROOT:
-               if (nd->flags & LOOKUP_RCU) {
-                       if (nameidata_drop_rcu_last(nd))
-                               return ERR_PTR(-ECHILD);
-               }
-               error = handle_reval_path(nd);
+               error = complete_walk(nd);
                if (error)
-                       goto exit;
+                       return ERR_PTR(error);
                audit_inode(pathname, nd->path.dentry);
                if (open_flag & O_CREAT) {
                        error = -EISDIR;
@@ -2148,10 +2052,9 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
                }
                goto ok;
        case LAST_BIND:
-               /* can't be RCU mode here */
-               error = handle_reval_path(nd);
+               error = complete_walk(nd);
                if (error)
-                       goto exit;
+                       return ERR_PTR(error);
                audit_inode(pathname, dir);
                goto ok;
        }
@@ -2170,10 +2073,9 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
                if (error) /* symlink */
                        return NULL;
                /* sayonara */
-               if (nd->flags & LOOKUP_RCU) {
-                       if (nameidata_drop_rcu_last(nd))
-                               return ERR_PTR(-ECHILD);
-               }
+               error = complete_walk(nd);
+               if (error)
+                       return ERR_PTR(-ECHILD);
 
                error = -ENOTDIR;
                if (nd->flags & LOOKUP_DIRECTORY) {
@@ -2185,11 +2087,9 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
        }
 
        /* create side of things */
-
-       if (nd->flags & LOOKUP_RCU) {
-               if (nameidata_drop_rcu_last(nd))
-                       return ERR_PTR(-ECHILD);
-       }
+       error = complete_walk(nd);
+       if (error)
+               return ERR_PTR(error);
 
        audit_inode(pathname, dir);
        error = -EISDIR;
@@ -2629,10 +2529,10 @@ SYSCALL_DEFINE2(mkdir, const char __user *, pathname, int, mode)
 }
 
 /*
- * We try to drop the dentry early: we should have
- * a usage count of 2 if we're the only user of this
- * dentry, and if that is true (possibly after pruning
- * the dcache), then we drop the dentry now.
+ * The dentry_unhash() helper will try to drop the dentry early: we
+ * should have a usage count of 2 if we're the only user of this
+ * dentry, and if that is true (possibly after pruning the dcache),
+ * then we drop the dentry now.
  *
  * A low-level filesystem can, if it chooses, legally
  * do a
@@ -2645,10 +2545,9 @@ SYSCALL_DEFINE2(mkdir, const char __user *, pathname, int, mode)
  */
 void dentry_unhash(struct dentry *dentry)
 {
-       dget(dentry);
        shrink_dcache_parent(dentry);
        spin_lock(&dentry->d_lock);
-       if (dentry->d_count == 2)
+       if (dentry->d_count == 1)
                __d_drop(dentry);
        spin_unlock(&dentry->d_lock);
 }
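
The filesystem-side half of this change shows up in the jffs2, jfs, logfs, minix, ncpfs and nilfs2 hunks earlier in this diff: ->rmdir() and ->rename() over a directory now call dentry_unhash() themselves rather than having the VFS do it. A condensed sketch of that pattern for a hypothetical filesystem; examplefs_dir_is_empty() is a placeholder:

    /* Placeholder for the filesystem's own emptiness check. */
    static int examplefs_dir_is_empty(struct inode *inode);

    static int examplefs_rmdir(struct inode *dir, struct dentry *dentry)
    {
            dentry_unhash(dentry);          /* previously done by the VFS */

            if (!examplefs_dir_is_empty(dentry->d_inode))
                    return -ENOTEMPTY;

            /* ... drop the on-disk directory entry ... */
            return 0;
    }

    static int examplefs_rename(struct inode *old_dir, struct dentry *old_dentry,
                                struct inode *new_dir, struct dentry *new_dentry)
    {
            /* Only a directory being overwritten needs unhashing. */
            if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
                    dentry_unhash(new_dentry);

            /* ... perform the rename on disk ... */
            return 0;
    }
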
@@ -2664,25 +2563,26 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
                return -EPERM;
 
        mutex_lock(&dentry->d_inode->i_mutex);
-       dentry_unhash(dentry);
+
+       error = -EBUSY;
        if (d_mountpoint(dentry))
-               error = -EBUSY;
-       else {
-               error = security_inode_rmdir(dir, dentry);
-               if (!error) {
-                       error = dir->i_op->rmdir(dir, dentry);
-                       if (!error) {
-                               dentry->d_inode->i_flags |= S_DEAD;
-                               dont_mount(dentry);
-                       }
-               }
-       }
+               goto out;
+
+       error = security_inode_rmdir(dir, dentry);
+       if (error)
+               goto out;
+
+       error = dir->i_op->rmdir(dir, dentry);
+       if (error)
+               goto out;
+
+       dentry->d_inode->i_flags |= S_DEAD;
+       dont_mount(dentry);
+
+out:
        mutex_unlock(&dentry->d_inode->i_mutex);
-       if (!error) {
+       if (!error)
                d_delete(dentry);
-       }
-       dput(dentry);
-
        return error;
 }
 
@@ -3053,12 +2953,7 @@ SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname
  *        HOWEVER, it relies on the assumption that any object with ->lookup()
  *        has no more than 1 dentry.  If "hybrid" objects will ever appear,
  *        we'd better make sure that there's no link(2) for them.
- *     d) some filesystems don't support opened-but-unlinked directories,
- *        either because of layout or because they are not ready to deal with
- *        all cases correctly. The latter will be fixed (taking this sort of
- *        stuff into VFS), but the former is not going away. Solution: the same
- *        trick as in rmdir().
- *     e) conversion from fhandle to dentry may come in the wrong moment - when
+ *     d) conversion from fhandle to dentry may come in the wrong moment - when
  *        we are removing the target. Solution: we will have to grab ->i_mutex
  *        in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
  *        ->i_mutex on parents, which works but leads to some truly excessive
@@ -3068,7 +2963,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
                          struct inode *new_dir, struct dentry *new_dentry)
 {
        int error = 0;
-       struct inode *target;
+       struct inode *target = new_dentry->d_inode;
 
        /*
         * If we are going to change the parent - check write permissions,
@@ -3084,26 +2979,24 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
        if (error)
                return error;
 
-       target = new_dentry->d_inode;
        if (target)
                mutex_lock(&target->i_mutex);
-       if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
-               error = -EBUSY;
-       else {
-               if (target)
-                       dentry_unhash(new_dentry);
-               error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
-       }
+
+       error = -EBUSY;
+       if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry))
+               goto out;
+
+       error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
+       if (error)
+               goto out;
+
        if (target) {
-               if (!error) {
-                       target->i_flags |= S_DEAD;
-                       dont_mount(new_dentry);
-               }
-               mutex_unlock(&target->i_mutex);
-               if (d_unhashed(new_dentry))
-                       d_rehash(new_dentry);
-               dput(new_dentry);
+               target->i_flags |= S_DEAD;
+               dont_mount(new_dentry);
        }
+out:
+       if (target)
+               mutex_unlock(&target->i_mutex);
        if (!error)
                if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
                        d_move(old_dentry,new_dentry);
@@ -3113,7 +3006,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
 static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
                            struct inode *new_dir, struct dentry *new_dentry)
 {
-       struct inode *target;
+       struct inode *target = new_dentry->d_inode;
        int error;
 
        error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
@@ -3121,19 +3014,22 @@ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
                return error;
 
        dget(new_dentry);
-       target = new_dentry->d_inode;
        if (target)
                mutex_lock(&target->i_mutex);
+
+       error = -EBUSY;
        if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry))
-               error = -EBUSY;
-       else
-               error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
-       if (!error) {
-               if (target)
-                       dont_mount(new_dentry);
-               if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
-                       d_move(old_dentry, new_dentry);
-       }
+               goto out;
+
+       error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
+       if (error)
+               goto out;
+
+       if (target)
+               dont_mount(new_dentry);
+       if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
+               d_move(old_dentry, new_dentry);
+out:
        if (target)
                mutex_unlock(&target->i_mutex);
        dput(new_dentry);
index d99bcf59e4c2fd23b2c03d2c9ed7471f7b2b230e..fe59bd145d214b0157b75dd7b441c91e24dcb56c 100644 (file)
@@ -1695,7 +1695,7 @@ static int graft_tree(struct vfsmount *mnt, struct path *path)
 
 static int flags_to_propagation_type(int flags)
 {
-       int type = flags & ~MS_REC;
+       int type = flags & ~(MS_REC | MS_SILENT);
 
        /* Fail if any non-propagation flags are set */
        if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
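
The practical effect of masking MS_SILENT here: a propagation-type change that also carries MS_SILENT (which mount tools commonly pass) is no longer rejected with EINVAL. A small userspace illustration; the mount point path is a placeholder and the call needs CAP_SYS_ADMIN:

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* Mark /mnt (placeholder) shared; MS_SILENT used to trip the
             * non-propagation-flag check in flags_to_propagation_type(). */
            if (mount(NULL, "/mnt", NULL, MS_SHARED | MS_SILENT, NULL) != 0) {
                    perror("mount");
                    return 1;
            }
            return 0;
    }
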
index f6946bb5cb5530a655dba93d0aedb072ceb627cc..e3e646b06404dbce84b78d6c1d9df302d0e33dc1 100644 (file)
@@ -1033,6 +1033,8 @@ static int ncp_rmdir(struct inode *dir, struct dentry *dentry)
        DPRINTK("ncp_rmdir: removing %s/%s\n",
                dentry->d_parent->d_name.name, dentry->d_name.name);
 
+       dentry_unhash(dentry);
+
        error = -EBUSY;
        if (!d_unhashed(dentry))
                goto out;
@@ -1139,6 +1141,9 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
                old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
                new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
 
+       if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
+               dentry_unhash(new_dentry);
+
        ncp_age_dentry(server, old_dentry);
        ncp_age_dentry(server, new_dentry);
 
index a7c07b44b100b499593fb1ef7c6a24e1b9a3c15d..e5d71b27a5b0588be72426a598d80197d042a521 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/mman.h>
 #include <linux/string.h>
 #include <linux/fcntl.h>
+#include <linux/memcontrol.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -92,6 +93,7 @@ static int ncp_file_mmap_fault(struct vm_area_struct *area,
         * -- wli
         */
        count_vm_event(PGMAJFAULT);
+       mem_cgroup_count_vm_event(area->vm_mm, PGMAJFAULT);
        return VM_FAULT_MAJOR;
 }
 
index 546849b3e88f1935585067628e158f4233631d17..1102a5fbb7449c6769d6af85c048636f6cb8615b 100644 (file)
@@ -334,6 +334,8 @@ static int nilfs_rmdir(struct inode *dir, struct dentry *dentry)
        struct nilfs_transaction_info ti;
        int err;
 
+       dentry_unhash(dentry);
+
        err = nilfs_transaction_begin(dir->i_sb, &ti, 0);
        if (err)
                return err;
@@ -369,6 +371,9 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct nilfs_transaction_info ti;
        int err;
 
+       if (new_inode && S_ISDIR(new_inode->i_mode))
+               dentry_unhash(new_dentry);
+
        err = nilfs_transaction_begin(old_dir->i_sb, &ti, 1);
        if (unlikely(err))
                return err;
index d8a0313e99e6af42b8412e91bb44cb2d7bb410b8..f17e58b32989a53f4f0413065bbd5e07d7901d5d 100644 (file)
@@ -30,6 +30,7 @@ ocfs2-objs := \
        namei.o                 \
        refcounttree.o          \
        reservations.o          \
+       move_extents.o          \
        resize.o                \
        slot_map.o              \
        suballoc.o              \
index 48aa9c7401c77aa5487fbc2fb1c472efa6217faa..ed553c60de827e0ebad24e3501e0e00d21c82cfc 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/highmem.h>
 #include <linux/swap.h>
 #include <linux/quotaops.h>
+#include <linux/blkdev.h>
 
 #include <cluster/masklog.h>
 
@@ -7184,3 +7185,168 @@ out_commit:
 out:
        return ret;
 }
+
+static int ocfs2_trim_extent(struct super_block *sb,
+                            struct ocfs2_group_desc *gd,
+                            u32 start, u32 count)
+{
+       u64 discard, bcount;
+
+       bcount = ocfs2_clusters_to_blocks(sb, count);
+       discard = le64_to_cpu(gd->bg_blkno) +
+                       ocfs2_clusters_to_blocks(sb, start);
+
+       trace_ocfs2_trim_extent(sb, (unsigned long long)discard, bcount);
+
+       return sb_issue_discard(sb, discard, bcount, GFP_NOFS, 0);
+}
+
+static int ocfs2_trim_group(struct super_block *sb,
+                           struct ocfs2_group_desc *gd,
+                           u32 start, u32 max, u32 minbits)
+{
+       int ret = 0, count = 0, next;
+       void *bitmap = gd->bg_bitmap;
+
+       if (le16_to_cpu(gd->bg_free_bits_count) < minbits)
+               return 0;
+
+       trace_ocfs2_trim_group((unsigned long long)le64_to_cpu(gd->bg_blkno),
+                              start, max, minbits);
+
+       while (start < max) {
+               start = ocfs2_find_next_zero_bit(bitmap, max, start);
+               if (start >= max)
+                       break;
+               next = ocfs2_find_next_bit(bitmap, max, start);
+
+               if ((next - start) >= minbits) {
+                       ret = ocfs2_trim_extent(sb, gd,
+                                               start, next - start);
+                       if (ret < 0) {
+                               mlog_errno(ret);
+                               break;
+                       }
+                       count += next - start;
+               }
+               start = next + 1;
+
+               if (fatal_signal_pending(current)) {
+                       count = -ERESTARTSYS;
+                       break;
+               }
+
+               if ((le16_to_cpu(gd->bg_free_bits_count) - count) < minbits)
+                       break;
+       }
+
+       if (ret < 0)
+               count = ret;
+
+       return count;
+}
+
+int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range)
+{
+       struct ocfs2_super *osb = OCFS2_SB(sb);
+       u64 start, len, trimmed, first_group, last_group, group;
+       int ret, cnt;
+       u32 first_bit, last_bit, minlen;
+       struct buffer_head *main_bm_bh = NULL;
+       struct inode *main_bm_inode = NULL;
+       struct buffer_head *gd_bh = NULL;
+       struct ocfs2_dinode *main_bm;
+       struct ocfs2_group_desc *gd = NULL;
+
+       start = range->start >> osb->s_clustersize_bits;
+       len = range->len >> osb->s_clustersize_bits;
+       minlen = range->minlen >> osb->s_clustersize_bits;
+       trimmed = 0;
+
+       if (!len) {
+               range->len = 0;
+               return 0;
+       }
+
+       if (minlen >= osb->bitmap_cpg)
+               return -EINVAL;
+
+       main_bm_inode = ocfs2_get_system_file_inode(osb,
+                                                   GLOBAL_BITMAP_SYSTEM_INODE,
+                                                   OCFS2_INVALID_SLOT);
+       if (!main_bm_inode) {
+               ret = -EIO;
+               mlog_errno(ret);
+               goto out;
+       }
+
+       mutex_lock(&main_bm_inode->i_mutex);
+
+       ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 0);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out_mutex;
+       }
+       main_bm = (struct ocfs2_dinode *)main_bm_bh->b_data;
+
+       if (start >= le32_to_cpu(main_bm->i_clusters)) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
+
+       if (start + len > le32_to_cpu(main_bm->i_clusters))
+               len = le32_to_cpu(main_bm->i_clusters) - start;
+
+       trace_ocfs2_trim_fs(start, len, minlen);
+
+       /* Determine first and last group to examine based on start and len */
+       first_group = ocfs2_which_cluster_group(main_bm_inode, start);
+       if (first_group == osb->first_cluster_group_blkno)
+               first_bit = start;
+       else
+               first_bit = start - ocfs2_blocks_to_clusters(sb, first_group);
+       last_group = ocfs2_which_cluster_group(main_bm_inode, start + len - 1);
+       last_bit = osb->bitmap_cpg;
+
+       for (group = first_group; group <= last_group;) {
+               if (first_bit + len >= osb->bitmap_cpg)
+                       last_bit = osb->bitmap_cpg;
+               else
+                       last_bit = first_bit + len;
+
+               ret = ocfs2_read_group_descriptor(main_bm_inode,
+                                                 main_bm, group,
+                                                 &gd_bh);
+               if (ret < 0) {
+                       mlog_errno(ret);
+                       break;
+               }
+
+               gd = (struct ocfs2_group_desc *)gd_bh->b_data;
+               cnt = ocfs2_trim_group(sb, gd, first_bit, last_bit, minlen);
+               brelse(gd_bh);
+               gd_bh = NULL;
+               if (cnt < 0) {
+                       ret = cnt;
+                       mlog_errno(ret);
+                       break;
+               }
+
+               trimmed += cnt;
+               len -= osb->bitmap_cpg - first_bit;
+               first_bit = 0;
+               if (group == osb->first_cluster_group_blkno)
+                       group = ocfs2_clusters_to_blocks(sb, osb->bitmap_cpg);
+               else
+                       group += ocfs2_clusters_to_blocks(sb, osb->bitmap_cpg);
+       }
+       range->len = trimmed * sb->s_blocksize;
+out_unlock:
+       ocfs2_inode_unlock(main_bm_inode, 0);
+       brelse(main_bm_bh);
+out_mutex:
+       mutex_unlock(&main_bm_inode->i_mutex);
+       iput(main_bm_inode);
+out:
+       return ret;
+}
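
How ocfs2_trim_fs() gets invoked is not part of this hunk; a hedged sketch of the kind of FITRIM ioctl handler that would drive it, with the handler name hypothetical and discard-granularity clamping omitted:

    /* Sketch: FITRIM-style ioctl wrapper around ocfs2_trim_fs(). */
    static int example_ioctl_fitrim(struct super_block *sb, void __user *argp)
    {
            struct fstrim_range range;
            int ret;

            if (!capable(CAP_SYS_ADMIN))
                    return -EPERM;
            if (copy_from_user(&range, argp, sizeof(range)))
                    return -EFAULT;

            ret = ocfs2_trim_fs(sb, &range);
            if (ret < 0)
                    return ret;

            /* range.len now reports the number of bytes actually trimmed. */
            if (copy_to_user(argp, &range, sizeof(range)))
                    return -EFAULT;
            return 0;
    }
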
index 3bd08a03251c32fe904af81cb49305f72dba1e41..ca381c5841273433d279cd6aca8137ec01d03bbd 100644 (file)
@@ -239,6 +239,7 @@ int ocfs2_find_leaf(struct ocfs2_caching_info *ci,
                    struct buffer_head **leaf_bh);
 int ocfs2_search_extent_list(struct ocfs2_extent_list *el, u32 v_cluster);
 
+int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range);
 /*
  * Helper function to look at the # of clusters in an extent record.
  */
index bc702dab5d1f912be9e70ee1dd58fb8aaeb441dc..a4b07730b2e1d0abb257a126fce7f3911ae1d434 100644 (file)
@@ -57,7 +57,6 @@ static struct kset *o2cb_kset;
 void o2cb_sys_shutdown(void)
 {
        mlog_sys_shutdown();
-       sysfs_remove_link(NULL, "o2cb");
        kset_unregister(o2cb_kset);
 }
 
@@ -69,14 +68,6 @@ int o2cb_sys_init(void)
        if (!o2cb_kset)
                return -ENOMEM;
 
-       /*
-        * Create this symlink for backwards compatibility with old
-        * versions of ocfs2-tools which look for things in /sys/o2cb.
-        */
-       ret = sysfs_create_link(NULL, &o2cb_kset->kobj, "o2cb");
-       if (ret)
-               goto error;
-
        ret = sysfs_create_group(&o2cb_kset->kobj, &o2cb_attr_group);
        if (ret)
                goto error;
index 4bdf7baee344dd9533bf08bc3969f3b4b00b4ac8..d602abb51b610d525cc437daa05c25d2105fe0c3 100644 (file)
@@ -144,6 +144,7 @@ struct dlm_ctxt
        wait_queue_head_t dlm_join_events;
        unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+       unsigned long exit_domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        struct dlm_recovery_ctxt reco;
        spinlock_t master_lock;
@@ -401,6 +402,18 @@ static inline int dlm_lvb_is_empty(char *lvb)
        return 1;
 }
 
+static inline char *dlm_list_in_text(enum dlm_lockres_list idx)
+{
+       if (idx == DLM_GRANTED_LIST)
+               return "granted";
+       else if (idx == DLM_CONVERTING_LIST)
+               return "converting";
+       else if (idx == DLM_BLOCKED_LIST)
+               return "blocked";
+       else
+               return "unknown";
+}
+
 static inline struct list_head *
 dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
 {
@@ -448,6 +461,7 @@ enum {
        DLM_FINALIZE_RECO_MSG           = 518,
        DLM_QUERY_REGION                = 519,
        DLM_QUERY_NODEINFO              = 520,
+       DLM_BEGIN_EXIT_DOMAIN_MSG       = 521,
 };
 
 struct dlm_reco_node_data
index 04a32be0aeb92f6ec15fb9217e0cca789f895bca..56f82cb912e379e4149ab5454261a9bb6bdadf67 100644 (file)
@@ -756,6 +756,12 @@ static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len)
                                 buf + out, len - out);
        out += snprintf(buf + out, len - out, "\n");
 
+       /* Exit Domain Map: xx xx xx */
+       out += snprintf(buf + out, len - out, "Exit Domain Map: ");
+       out += stringify_nodemap(dlm->exit_domain_map, O2NM_MAX_NODES,
+                                buf + out, len - out);
+       out += snprintf(buf + out, len - out, "\n");
+
        /* Live Map: xx xx xx */
        out += snprintf(buf + out, len - out, "Live Map: ");
        out += stringify_nodemap(dlm->live_nodes_map, O2NM_MAX_NODES,
index 3b179d6cbde09be9017eb21fbefcc5df8b217a53..6ed6b95dcf935a6516e935b85a3ca9ffc0b8a9d8 100644 (file)
@@ -132,10 +132,12 @@ static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events);
  * New in version 1.1:
  *     - Message DLM_QUERY_REGION added to support global heartbeat
  *     - Message DLM_QUERY_NODEINFO added to allow online node removes
+ * New in version 1.2:
+ *     - Message DLM_BEGIN_EXIT_DOMAIN_MSG added to mark start of exit domain
  */
 static const struct dlm_protocol_version dlm_protocol = {
        .pv_major = 1,
-       .pv_minor = 1,
+       .pv_minor = 2,
 };
 
 #define DLM_DOMAIN_BACKOFF_MS 200
@@ -449,14 +451,18 @@ redo_bucket:
                        dropped = dlm_empty_lockres(dlm, res);
 
                        spin_lock(&res->spinlock);
-                       __dlm_lockres_calc_usage(dlm, res);
-                       iter = res->hash_node.next;
+                       if (dropped)
+                               __dlm_lockres_calc_usage(dlm, res);
+                       else
+                               iter = res->hash_node.next;
                        spin_unlock(&res->spinlock);
 
                        dlm_lockres_put(res);
 
-                       if (dropped)
+                       if (dropped) {
+                               cond_resched_lock(&dlm->spinlock);
                                goto redo_bucket;
+                       }
                }
                cond_resched_lock(&dlm->spinlock);
                num += n;
@@ -486,6 +492,28 @@ static int dlm_no_joining_node(struct dlm_ctxt *dlm)
        return ret;
 }
 
+static int dlm_begin_exit_domain_handler(struct o2net_msg *msg, u32 len,
+                                        void *data, void **ret_data)
+{
+       struct dlm_ctxt *dlm = data;
+       unsigned int node;
+       struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf;
+
+       if (!dlm_grab(dlm))
+               return 0;
+
+       node = exit_msg->node_idx;
+       mlog(0, "%s: Node %u sent a begin exit domain message\n", dlm->name, node);
+
+       spin_lock(&dlm->spinlock);
+       set_bit(node, dlm->exit_domain_map);
+       spin_unlock(&dlm->spinlock);
+
+       dlm_put(dlm);
+
+       return 0;
+}
+
 static void dlm_mark_domain_leaving(struct dlm_ctxt *dlm)
 {
        /* Yikes, a double spinlock! I need domain_lock for the dlm
@@ -542,6 +570,7 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
 
        spin_lock(&dlm->spinlock);
        clear_bit(node, dlm->domain_map);
+       clear_bit(node, dlm->exit_domain_map);
        __dlm_print_nodes(dlm);
 
        /* notify anything attached to the heartbeat events */
@@ -554,29 +583,56 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
        return 0;
 }
 
-static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm,
+static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm, u32 msg_type,
                                    unsigned int node)
 {
        int status;
        struct dlm_exit_domain leave_msg;
 
-       mlog(0, "Asking node %u if we can leave the domain %s me = %u\n",
-                 node, dlm->name, dlm->node_num);
+       mlog(0, "%s: Sending domain exit message %u to node %u\n", dlm->name,
+            msg_type, node);
 
        memset(&leave_msg, 0, sizeof(leave_msg));
        leave_msg.node_idx = dlm->node_num;
 
-       status = o2net_send_message(DLM_EXIT_DOMAIN_MSG, dlm->key,
-                                   &leave_msg, sizeof(leave_msg), node,
-                                   NULL);
+       status = o2net_send_message(msg_type, dlm->key, &leave_msg,
+                                   sizeof(leave_msg), node, NULL);
        if (status < 0)
-               mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
-                    "node %u\n", status, DLM_EXIT_DOMAIN_MSG, dlm->key, node);
-       mlog(0, "status return %d from o2net_send_message\n", status);
+               mlog(ML_ERROR, "Error %d sending domain exit message %u "
+                    "to node %u on domain %s\n", status, msg_type, node,
+                    dlm->name);
 
        return status;
 }
 
+static void dlm_begin_exit_domain(struct dlm_ctxt *dlm)
+{
+       int node = -1;
+
+       /* Support for begin exit domain was added in 1.2 */
+       if (dlm->dlm_locking_proto.pv_major == 1 &&
+           dlm->dlm_locking_proto.pv_minor < 2)
+               return;
+
+       /*
+        * Unlike DLM_EXIT_DOMAIN_MSG, DLM_BEGIN_EXIT_DOMAIN_MSG is purely
+        * informational. Meaning if a node does not receive the message,
+        * so be it.
+        */
+       spin_lock(&dlm->spinlock);
+       while (1) {
+               node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, node + 1);
+               if (node >= O2NM_MAX_NODES)
+                       break;
+               if (node == dlm->node_num)
+                       continue;
+
+               spin_unlock(&dlm->spinlock);
+               dlm_send_one_domain_exit(dlm, DLM_BEGIN_EXIT_DOMAIN_MSG, node);
+               spin_lock(&dlm->spinlock);
+       }
+       spin_unlock(&dlm->spinlock);
+}
 
 static void dlm_leave_domain(struct dlm_ctxt *dlm)
 {
@@ -602,7 +658,8 @@ static void dlm_leave_domain(struct dlm_ctxt *dlm)
 
                clear_node = 1;
 
-               status = dlm_send_one_domain_exit(dlm, node);
+               status = dlm_send_one_domain_exit(dlm, DLM_EXIT_DOMAIN_MSG,
+                                                 node);
                if (status < 0 &&
                    status != -ENOPROTOOPT &&
                    status != -ENOTCONN) {
@@ -677,6 +734,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm)
 
        if (leave) {
                mlog(0, "shutting down domain %s\n", dlm->name);
+               dlm_begin_exit_domain(dlm);
 
                /* We changed dlm state, notify the thread */
                dlm_kick_thread(dlm, NULL);
@@ -909,6 +967,7 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
                 * leftover join state. */
                BUG_ON(dlm->joining_node != assert->node_idx);
                set_bit(assert->node_idx, dlm->domain_map);
+               clear_bit(assert->node_idx, dlm->exit_domain_map);
                __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
 
                printk(KERN_NOTICE "o2dlm: Node %u joins domain %s\n",
@@ -1793,6 +1852,13 @@ static int dlm_register_domain_handlers(struct dlm_ctxt *dlm)
        if (status)
                goto bail;
 
+       status = o2net_register_handler(DLM_BEGIN_EXIT_DOMAIN_MSG, dlm->key,
+                                       sizeof(struct dlm_exit_domain),
+                                       dlm_begin_exit_domain_handler,
+                                       dlm, NULL, &dlm->dlm_domain_handlers);
+       if (status)
+               goto bail;
+
 bail:
        if (status)
                dlm_unregister_domain_handlers(dlm);
index 84d166328cf7448f36cfcb0a54f08eefda50e18d..11eefb8c12e98fb418f41c31a3b0a32201be3ca1 100644 (file)
@@ -2339,65 +2339,55 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
        dlm_lockres_put(res);
 }
 
-/* Checks whether the lockres can be migrated. Returns 0 if yes, < 0
- * if not. If 0, numlocks is set to the number of locks in the lockres.
+/*
+ * A migrateable resource is one that is:
+ * 1. locally mastered, and,
+ * 2. zero local locks, and,
+ * 3. one or more non-local locks, or, one or more references
+ * Returns 1 if yes, 0 if not.
  */
 static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
-                                     struct dlm_lock_resource *res,
-                                     int *numlocks,
-                                     int *hasrefs)
+                                     struct dlm_lock_resource *res)
 {
-       int ret;
-       int i;
-       int count = 0;
+       enum dlm_lockres_list idx;
+       int nonlocal = 0, node_ref;
        struct list_head *queue;
        struct dlm_lock *lock;
+       u64 cookie;
 
        assert_spin_locked(&res->spinlock);
 
-       *numlocks = 0;
-       *hasrefs = 0;
-
-       ret = -EINVAL;
-       if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
-               mlog(0, "cannot migrate lockres with unknown owner!\n");
-               goto leave;
-       }
-
-       if (res->owner != dlm->node_num) {
-               mlog(0, "cannot migrate lockres this node doesn't own!\n");
-               goto leave;
-       }
+       if (res->owner != dlm->node_num)
+               return 0;
 
-       ret = 0;
-       queue = &res->granted;
-       for (i = 0; i < 3; i++) {
+        for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
+               queue = dlm_list_idx_to_ptr(res, idx);
                list_for_each_entry(lock, queue, list) {
-                       ++count;
-                       if (lock->ml.node == dlm->node_num) {
-                               mlog(0, "found a lock owned by this node still "
-                                    "on the %s queue!  will not migrate this "
-                                    "lockres\n", (i == 0 ? "granted" :
-                                                  (i == 1 ? "converting" :
-                                                   "blocked")));
-                               ret = -ENOTEMPTY;
-                               goto leave;
+                       if (lock->ml.node != dlm->node_num) {
+                               nonlocal++;
+                               continue;
                        }
+                       cookie = be64_to_cpu(lock->ml.cookie);
+                       mlog(0, "%s: Not migrateable res %.*s, lock %u:%llu on "
+                            "%s list\n", dlm->name, res->lockname.len,
+                            res->lockname.name,
+                            dlm_get_lock_cookie_node(cookie),
+                            dlm_get_lock_cookie_seq(cookie),
+                            dlm_list_in_text(idx));
+                       return 0;
                }
-               queue++;
        }
 
-       *numlocks = count;
-
-       count = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
-       if (count < O2NM_MAX_NODES)
-               *hasrefs = 1;
+       if (!nonlocal) {
+               node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
+               if (node_ref >= O2NM_MAX_NODES)
+                       return 0;
+       }
 
-       mlog(0, "%s: res %.*s, Migrateable, locks %d, refs %d\n", dlm->name,
-            res->lockname.len, res->lockname.name, *numlocks, *hasrefs);
+       mlog(0, "%s: res %.*s, Migrateable\n", dlm->name, res->lockname.len,
+            res->lockname.name);
 
-leave:
-       return ret;
+       return 1;
 }
 
 /*
@@ -2406,8 +2396,7 @@ leave:
 
 
 static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
-                              struct dlm_lock_resource *res,
-                              u8 target)
+                              struct dlm_lock_resource *res, u8 target)
 {
        struct dlm_master_list_entry *mle = NULL;
        struct dlm_master_list_entry *oldmle = NULL;
@@ -2416,37 +2405,20 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
        const char *name;
        unsigned int namelen;
        int mle_added = 0;
-       int numlocks, hasrefs;
        int wake = 0;
 
        if (!dlm_grab(dlm))
                return -EINVAL;
 
+       BUG_ON(target == O2NM_MAX_NODES);
+
        name = res->lockname.name;
        namelen = res->lockname.len;
 
-       mlog(0, "%s: Migrating %.*s to %u\n", dlm->name, namelen, name, target);
-
-       /*
-        * ensure this lockres is a proper candidate for migration
-        */
-       spin_lock(&res->spinlock);
-       ret = dlm_is_lockres_migrateable(dlm, res, &numlocks, &hasrefs);
-       if (ret < 0) {
-               spin_unlock(&res->spinlock);
-               goto leave;
-       }
-       spin_unlock(&res->spinlock);
-
-       /* no work to do */
-       if (numlocks == 0 && !hasrefs)
-               goto leave;
-
-       /*
-        * preallocate up front
-        * if this fails, abort
-        */
+       mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name,
+            target);
 
+       /* preallocate up front. if this fails, abort */
        ret = -ENOMEM;
        mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
        if (!mres) {
@@ -2461,36 +2433,11 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
        }
        ret = 0;
 
-       /*
-        * find a node to migrate the lockres to
-        */
-
-       spin_lock(&dlm->spinlock);
-       /* pick a new node */
-       if (!test_bit(target, dlm->domain_map) ||
-           target >= O2NM_MAX_NODES) {
-               target = dlm_pick_migration_target(dlm, res);
-       }
-       mlog(0, "%s: res %.*s, Node %u chosen for migration\n", dlm->name,
-            namelen, name, target);
-
-       if (target >= O2NM_MAX_NODES ||
-           !test_bit(target, dlm->domain_map)) {
-               /* target chosen is not alive */
-               ret = -EINVAL;
-       }
-
-       if (ret) {
-               spin_unlock(&dlm->spinlock);
-               goto fail;
-       }
-
-       mlog(0, "continuing with target = %u\n", target);
-
        /*
         * clear any existing master requests and
         * add the migration mle to the list
         */
+       spin_lock(&dlm->spinlock);
        spin_lock(&dlm->master_lock);
        ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
                                    namelen, target, dlm->node_num);
@@ -2531,6 +2478,7 @@ fail:
                        dlm_put_mle(mle);
                } else if (mle) {
                        kmem_cache_free(dlm_mle_cache, mle);
+                       mle = NULL;
                }
                goto leave;
        }
@@ -2652,69 +2600,52 @@ leave:
        if (wake)
                wake_up(&res->wq);
 
-       /* TODO: cleanup */
        if (mres)
                free_page((unsigned long)mres);
 
        dlm_put(dlm);
 
-       mlog(0, "returning %d\n", ret);
+       mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen,
+            name, target, ret);
        return ret;
 }
 
 #define DLM_MIGRATION_RETRY_MS  100
 
-/* Should be called only after beginning the domain leave process.
+/*
+ * Should be called only after beginning the domain leave process.
  * There should not be any remaining locks on nonlocal lock resources,
  * and there should be no local locks left on locally mastered resources.
  *
  * Called with the dlm spinlock held, may drop it to do migration, but
  * will re-acquire before exit.
  *
- * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */
+ * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped
+ */
 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
 {
        int ret;
        int lock_dropped = 0;
-       int numlocks, hasrefs;
+       u8 target = O2NM_MAX_NODES;
+
+       assert_spin_locked(&dlm->spinlock);
 
        spin_lock(&res->spinlock);
-       if (res->owner != dlm->node_num) {
-               if (!__dlm_lockres_unused(res)) {
-                       mlog(ML_ERROR, "%s:%.*s: this node is not master, "
-                            "trying to free this but locks remain\n",
-                            dlm->name, res->lockname.len, res->lockname.name);
-               }
-               spin_unlock(&res->spinlock);
-               goto leave;
-       }
+       if (dlm_is_lockres_migrateable(dlm, res))
+               target = dlm_pick_migration_target(dlm, res);
+       spin_unlock(&res->spinlock);
 
-       /* No need to migrate a lockres having no locks */
-       ret = dlm_is_lockres_migrateable(dlm, res, &numlocks, &hasrefs);
-       if (ret >= 0 && numlocks == 0 && !hasrefs) {
-               spin_unlock(&res->spinlock);
+       if (target == O2NM_MAX_NODES)
                goto leave;
-       }
-       spin_unlock(&res->spinlock);
 
        /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
        spin_unlock(&dlm->spinlock);
        lock_dropped = 1;
-       while (1) {
-               ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
-               if (ret >= 0)
-                       break;
-               if (ret == -ENOTEMPTY) {
-                       mlog(ML_ERROR, "lockres %.*s still has local locks!\n",
-                               res->lockname.len, res->lockname.name);
-                       BUG();
-               }
-
-               mlog(0, "lockres %.*s: migrate failed, "
-                    "retrying\n", res->lockname.len,
-                    res->lockname.name);
-               msleep(DLM_MIGRATION_RETRY_MS);
-       }
+       ret = dlm_migrate_lockres(dlm, res, target);
+       if (ret)
+               mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n",
+                    dlm->name, res->lockname.len, res->lockname.name,
+                    target, ret);
        spin_lock(&dlm->spinlock);
 leave:
        return lock_dropped;
@@ -2898,61 +2829,55 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
        }
 }
 
-/* for now this is not too intelligent.  we will
- * need stats to make this do the right thing.
- * this just finds the first lock on one of the
- * queues and uses that node as the target. */
+/*
+ * Pick a node to migrate the lock resource to. This function selects a
+ * potential target based first on the lock queues and then on the refmap.
+ * It skips nodes that are in the process of exiting the domain.
+ */
 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
                                    struct dlm_lock_resource *res)
 {
-       int i;
+       enum dlm_lockres_list idx;
        struct list_head *queue = &res->granted;
        struct dlm_lock *lock;
-       int nodenum;
+       int noderef;
+       u8 nodenum = O2NM_MAX_NODES;
 
        assert_spin_locked(&dlm->spinlock);
+       assert_spin_locked(&res->spinlock);
 
-       spin_lock(&res->spinlock);
-       for (i=0; i<3; i++) {
+       /* Go through all the locks */
+       for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) {
+               queue = dlm_list_idx_to_ptr(res, idx);
                list_for_each_entry(lock, queue, list) {
-                       /* up to the caller to make sure this node
-                        * is alive */
-                       if (lock->ml.node != dlm->node_num) {
-                               spin_unlock(&res->spinlock);
-                               return lock->ml.node;
-                       }
+                       if (lock->ml.node == dlm->node_num)
+                               continue;
+                       if (test_bit(lock->ml.node, dlm->exit_domain_map))
+                               continue;
+                       nodenum = lock->ml.node;
+                       goto bail;
                }
-               queue++;
-       }
-
-       nodenum = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
-       if (nodenum < O2NM_MAX_NODES) {
-               spin_unlock(&res->spinlock);
-               return nodenum;
        }
-       spin_unlock(&res->spinlock);
-       mlog(0, "have not found a suitable target yet! checking domain map\n");
 
-       /* ok now we're getting desperate.  pick anyone alive. */
-       nodenum = -1;
+       /* Go through the refmap */
+       noderef = -1;
        while (1) {
-               nodenum = find_next_bit(dlm->domain_map,
-                                       O2NM_MAX_NODES, nodenum+1);
-               mlog(0, "found %d in domain map\n", nodenum);
-               if (nodenum >= O2NM_MAX_NODES)
+               noderef = find_next_bit(res->refmap, O2NM_MAX_NODES,
+                                       noderef + 1);
+               if (noderef >= O2NM_MAX_NODES)
                        break;
-               if (nodenum != dlm->node_num) {
-                       mlog(0, "picking %d\n", nodenum);
-                       return nodenum;
-               }
+               if (noderef == dlm->node_num)
+                       continue;
+               if (test_bit(noderef, dlm->exit_domain_map))
+                       continue;
+               nodenum = noderef;
+               goto bail;
        }
 
-       mlog(0, "giving up.  no master to migrate to\n");
-       return DLM_LOCK_RES_OWNER_UNKNOWN;
+bail:
+       return nodenum;
 }
 
-
-
 /* this is called by the new master once all lockres
  * data has been received */
 static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
index f1beb6fc254d1720ae10b10a0a7ea904703c7d0d..7efab6d28a21b4ee6a8376559d70f739a4e1da90 100644 (file)
@@ -2393,6 +2393,7 @@ static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
 
        mlog(0, "node %u being removed from domain map!\n", idx);
        clear_bit(idx, dlm->domain_map);
+       clear_bit(idx, dlm->exit_domain_map);
        /* wake up migration waiters if a node goes down.
         * perhaps later we can genericize this for other waiters. */
        wake_up(&dlm->migration_wq);
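As a side note on the dlm_pick_migration_target() rewrite further above: after scanning the lock queues it falls back to the refmap, skipping itself and any node set in exit_domain_map. The following is only a toy userspace rendition of that fallback (the node count and bitmap layout are stand-ins; the kernel walks the real maps with find_next_bit()):

#include <stdio.h>

#define MAX_NODES 32                    /* stand-in for O2NM_MAX_NODES */

static int pick_from_refmap(const unsigned char *refmap,
                            const unsigned char *exit_map, int self)
{
        int n;

        for (n = 0; n < MAX_NODES; n++) {
                int referenced = (refmap[n / 8] >> (n % 8)) & 1;
                int exiting = (exit_map[n / 8] >> (n % 8)) & 1;

                if (!referenced || exiting || n == self)
                        continue;
                return n;               /* first usable candidate */
        }
        return MAX_NODES;               /* no candidate found */
}

int main(void)
{
        unsigned char refmap[4] = { 0x0a, 0, 0, 0 };   /* nodes 1 and 3 hold refs */
        unsigned char exitmap[4] = { 0x02, 0, 0, 0 };  /* node 1 is leaving */

        /* node 1 is skipped because it is exiting, so node 3 is picked */
        printf("target: %d\n", pick_from_refmap(refmap, exitmap, 0));
        return 0;
}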
index 8c5c0eddc365a060d278d49dac042226fafa0515..b420767970492082b357cf67f8735894dd7e103f 100644 (file)
@@ -88,7 +88,7 @@ struct workqueue_struct *user_dlm_worker;
  *               signifies a bast fired on the lock.
  */
 #define DLMFS_CAPABILITIES "bast stackglue"
-extern int param_set_dlmfs_capabilities(const char *val,
+static int param_set_dlmfs_capabilities(const char *val,
                                        struct kernel_param *kp)
 {
        printk(KERN_ERR "%s: readonly parameter\n", kp->name);
index 89659d6dc2067276f2cebcea63a9bcc34a0a2486..b1e35a392ca5279d7aa7d645aa75fd8c101c6d05 100644 (file)
@@ -2670,6 +2670,7 @@ const struct file_operations ocfs2_fops_no_plocks = {
        .flock          = ocfs2_flock,
        .splice_read    = ocfs2_file_splice_read,
        .splice_write   = ocfs2_file_splice_write,
+       .fallocate      = ocfs2_fallocate,
 };
 
 const struct file_operations ocfs2_dops_no_plocks = {
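The hunk above adds a .fallocate handler to ocfs2_fops_no_plocks. Purely as an illustration (not part of this commit; the path and size are made up), a userspace preallocation that would now be dispatched to ocfs2_fallocate() on files using this file_operations table might look like:

#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        /* hypothetical file on an ocfs2 mount */
        int fd = open("/mnt/ocfs2/prealloc.dat", O_CREAT | O_WRONLY, 0644);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* reserve 16 MiB; mode 0 allocates blocks and extends i_size */
        if (fallocate(fd, 0, 0, 16 << 20) < 0)
                perror("fallocate");

        close(fd);
        return 0;
}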
index 8f13c5989eaeeecd543be983cec37176c1c54ece..bc91072b72196fd335c4b7cbc02ba08cb67254e6 100644 (file)
 #include "ioctl.h"
 #include "resize.h"
 #include "refcounttree.h"
+#include "sysfile.h"
+#include "dir.h"
+#include "buffer_head_io.h"
+#include "suballoc.h"
+#include "move_extents.h"
 
 #include <linux/ext2_fs.h>
 
  * be -EFAULT.  The error will be returned from the ioctl(2) call.  It's
  * just a best-effort to tell userspace that this request caused the error.
  */
-static inline void __o2info_set_request_error(struct ocfs2_info_request *kreq,
+static inline void o2info_set_request_error(struct ocfs2_info_request *kreq,
                                        struct ocfs2_info_request __user *req)
 {
        kreq->ir_flags |= OCFS2_INFO_FL_ERROR;
        (void)put_user(kreq->ir_flags, (__u32 __user *)&(req->ir_flags));
 }
 
-#define o2info_set_request_error(a, b) \
-               __o2info_set_request_error((struct ocfs2_info_request *)&(a), b)
-
-static inline void __o2info_set_request_filled(struct ocfs2_info_request *req)
+static inline void o2info_set_request_filled(struct ocfs2_info_request *req)
 {
        req->ir_flags |= OCFS2_INFO_FL_FILLED;
 }
 
-#define o2info_set_request_filled(a) \
-               __o2info_set_request_filled((struct ocfs2_info_request *)&(a))
-
-static inline void __o2info_clear_request_filled(struct ocfs2_info_request *req)
+static inline void o2info_clear_request_filled(struct ocfs2_info_request *req)
 {
        req->ir_flags &= ~OCFS2_INFO_FL_FILLED;
 }
 
-#define o2info_clear_request_filled(a) \
-               __o2info_clear_request_filled((struct ocfs2_info_request *)&(a))
+static inline int o2info_coherent(struct ocfs2_info_request *req)
+{
+       return (!(req->ir_flags & OCFS2_INFO_FL_NON_COHERENT));
+}
 
 static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags)
 {
@@ -153,7 +154,7 @@ int ocfs2_info_handle_blocksize(struct inode *inode,
 
        oib.ib_blocksize = inode->i_sb->s_blocksize;
 
-       o2info_set_request_filled(oib);
+       o2info_set_request_filled(&oib.ib_req);
 
        if (o2info_to_user(oib, req))
                goto bail;
@@ -161,7 +162,7 @@ int ocfs2_info_handle_blocksize(struct inode *inode,
        status = 0;
 bail:
        if (status)
-               o2info_set_request_error(oib, req);
+               o2info_set_request_error(&oib.ib_req, req);
 
        return status;
 }
@@ -178,7 +179,7 @@ int ocfs2_info_handle_clustersize(struct inode *inode,
 
        oic.ic_clustersize = osb->s_clustersize;
 
-       o2info_set_request_filled(oic);
+       o2info_set_request_filled(&oic.ic_req);
 
        if (o2info_to_user(oic, req))
                goto bail;
@@ -186,7 +187,7 @@ int ocfs2_info_handle_clustersize(struct inode *inode,
        status = 0;
 bail:
        if (status)
-               o2info_set_request_error(oic, req);
+               o2info_set_request_error(&oic.ic_req, req);
 
        return status;
 }
@@ -203,7 +204,7 @@ int ocfs2_info_handle_maxslots(struct inode *inode,
 
        oim.im_max_slots = osb->max_slots;
 
-       o2info_set_request_filled(oim);
+       o2info_set_request_filled(&oim.im_req);
 
        if (o2info_to_user(oim, req))
                goto bail;
@@ -211,7 +212,7 @@ int ocfs2_info_handle_maxslots(struct inode *inode,
        status = 0;
 bail:
        if (status)
-               o2info_set_request_error(oim, req);
+               o2info_set_request_error(&oim.im_req, req);
 
        return status;
 }
@@ -228,7 +229,7 @@ int ocfs2_info_handle_label(struct inode *inode,
 
        memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN);
 
-       o2info_set_request_filled(oil);
+       o2info_set_request_filled(&oil.il_req);
 
        if (o2info_to_user(oil, req))
                goto bail;
@@ -236,7 +237,7 @@ int ocfs2_info_handle_label(struct inode *inode,
        status = 0;
 bail:
        if (status)
-               o2info_set_request_error(oil, req);
+               o2info_set_request_error(&oil.il_req, req);
 
        return status;
 }
@@ -253,7 +254,7 @@ int ocfs2_info_handle_uuid(struct inode *inode,
 
        memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1);
 
-       o2info_set_request_filled(oiu);
+       o2info_set_request_filled(&oiu.iu_req);
 
        if (o2info_to_user(oiu, req))
                goto bail;
@@ -261,7 +262,7 @@ int ocfs2_info_handle_uuid(struct inode *inode,
        status = 0;
 bail:
        if (status)
-               o2info_set_request_error(oiu, req);
+               o2info_set_request_error(&oiu.iu_req, req);
 
        return status;
 }
@@ -280,7 +281,7 @@ int ocfs2_info_handle_fs_features(struct inode *inode,
        oif.if_incompat_features = osb->s_feature_incompat;
        oif.if_ro_compat_features = osb->s_feature_ro_compat;
 
-       o2info_set_request_filled(oif);
+       o2info_set_request_filled(&oif.if_req);
 
        if (o2info_to_user(oif, req))
                goto bail;
@@ -288,7 +289,7 @@ int ocfs2_info_handle_fs_features(struct inode *inode,
        status = 0;
 bail:
        if (status)
-               o2info_set_request_error(oif, req);
+               o2info_set_request_error(&oif.if_req, req);
 
        return status;
 }
@@ -305,7 +306,7 @@ int ocfs2_info_handle_journal_size(struct inode *inode,
 
        oij.ij_journal_size = osb->journal->j_inode->i_size;
 
-       o2info_set_request_filled(oij);
+       o2info_set_request_filled(&oij.ij_req);
 
        if (o2info_to_user(oij, req))
                goto bail;
@@ -313,7 +314,408 @@ int ocfs2_info_handle_journal_size(struct inode *inode,
        status = 0;
 bail:
        if (status)
-               o2info_set_request_error(oij, req);
+               o2info_set_request_error(&oij.ij_req, req);
+
+       return status;
+}
+
+int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb,
+                               struct inode *inode_alloc, u64 blkno,
+                               struct ocfs2_info_freeinode *fi, u32 slot)
+{
+       int status = 0, unlock = 0;
+
+       struct buffer_head *bh = NULL;
+       struct ocfs2_dinode *dinode_alloc = NULL;
+
+       if (inode_alloc)
+               mutex_lock(&inode_alloc->i_mutex);
+
+       if (o2info_coherent(&fi->ifi_req)) {
+               status = ocfs2_inode_lock(inode_alloc, &bh, 0);
+               if (status < 0) {
+                       mlog_errno(status);
+                       goto bail;
+               }
+               unlock = 1;
+       } else {
+               status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
+               if (status < 0) {
+                       mlog_errno(status);
+                       goto bail;
+               }
+       }
+
+       dinode_alloc = (struct ocfs2_dinode *)bh->b_data;
+
+       fi->ifi_stat[slot].lfi_total =
+               le32_to_cpu(dinode_alloc->id1.bitmap1.i_total);
+       fi->ifi_stat[slot].lfi_free =
+               le32_to_cpu(dinode_alloc->id1.bitmap1.i_total) -
+               le32_to_cpu(dinode_alloc->id1.bitmap1.i_used);
+
+bail:
+       if (unlock)
+               ocfs2_inode_unlock(inode_alloc, 0);
+
+       if (inode_alloc)
+               mutex_unlock(&inode_alloc->i_mutex);
+
+       brelse(bh);
+
+       return status;
+}
+
+int ocfs2_info_handle_freeinode(struct inode *inode,
+                               struct ocfs2_info_request __user *req)
+{
+       u32 i;
+       u64 blkno = -1;
+       char namebuf[40];
+       int status = -EFAULT, type = INODE_ALLOC_SYSTEM_INODE;
+       struct ocfs2_info_freeinode *oifi = NULL;
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+       struct inode *inode_alloc = NULL;
+
+       oifi = kzalloc(sizeof(struct ocfs2_info_freeinode), GFP_KERNEL);
+       if (!oifi) {
+               status = -ENOMEM;
+               mlog_errno(status);
+               goto bail;
+       }
+
+       if (o2info_from_user(*oifi, req))
+               goto bail;
+
+       oifi->ifi_slotnum = osb->max_slots;
+
+       for (i = 0; i < oifi->ifi_slotnum; i++) {
+               if (o2info_coherent(&oifi->ifi_req)) {
+                       inode_alloc = ocfs2_get_system_file_inode(osb, type, i);
+                       if (!inode_alloc) {
+                               mlog(ML_ERROR, "unable to get alloc inode in "
+                                   "slot %u\n", i);
+                               status = -EIO;
+                               goto bail;
+                       }
+               } else {
+                       ocfs2_sprintf_system_inode_name(namebuf,
+                                                       sizeof(namebuf),
+                                                       type, i);
+                       status = ocfs2_lookup_ino_from_name(osb->sys_root_inode,
+                                                           namebuf,
+                                                           strlen(namebuf),
+                                                           &blkno);
+                       if (status < 0) {
+                               status = -ENOENT;
+                               goto bail;
+                       }
+               }
+
+               status = ocfs2_info_scan_inode_alloc(osb, inode_alloc, blkno, oifi, i);
+               if (status < 0)
+                       goto bail;
+
+               iput(inode_alloc);
+               inode_alloc = NULL;
+       }
+
+       o2info_set_request_filled(&oifi->ifi_req);
+
+       if (o2info_to_user(*oifi, req))
+               goto bail;
+
+       status = 0;
+bail:
+       if (status)
+               o2info_set_request_error(&oifi->ifi_req, req);
+
+       kfree(oifi);
+
+       return status;
+}
+
+static void o2ffg_update_histogram(struct ocfs2_info_free_chunk_list *hist,
+                                  unsigned int chunksize)
+{
+       int index;
+
+       index = __ilog2_u32(chunksize);
+       if (index >= OCFS2_INFO_MAX_HIST)
+               index = OCFS2_INFO_MAX_HIST - 1;
+
+       hist->fc_chunks[index]++;
+       hist->fc_clusters[index] += chunksize;
+}
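A quick standalone sanity check of the bucketing above (MAX_HIST is a stand-in, since the real OCFS2_INFO_MAX_HIST value is not visible in this diff): a 6-cluster chunk lands in bucket floor(log2(6)) = 2, and oversized chunks are clamped into the last bucket.

#include <stdio.h>

#define MAX_HIST 32     /* stand-in for OCFS2_INFO_MAX_HIST */

static int ilog2_u32(unsigned int n)    /* floor(log2(n)), mirrors __ilog2_u32() */
{
        int i = -1;

        while (n) {
                n >>= 1;
                i++;
        }
        return i;
}

int main(void)
{
        unsigned int chunks[] = { 1, 6, 9, 300 };
        unsigned int i;

        for (i = 0; i < sizeof(chunks) / sizeof(chunks[0]); i++) {
                int bucket = ilog2_u32(chunks[i]);

                if (bucket >= MAX_HIST)
                        bucket = MAX_HIST - 1;
                printf("chunk of %3u clusters -> histogram bucket %d\n",
                       chunks[i], bucket);
        }
        return 0;
}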
+
+static void o2ffg_update_stats(struct ocfs2_info_freefrag_stats *stats,
+                              unsigned int chunksize)
+{
+       if (chunksize > stats->ffs_max)
+               stats->ffs_max = chunksize;
+
+       if (chunksize < stats->ffs_min)
+               stats->ffs_min = chunksize;
+
+       stats->ffs_avg += chunksize;
+       stats->ffs_free_chunks_real++;
+}
+
+void ocfs2_info_update_ffg(struct ocfs2_info_freefrag *ffg,
+                          unsigned int chunksize)
+{
+       o2ffg_update_histogram(&(ffg->iff_ffs.ffs_fc_hist), chunksize);
+       o2ffg_update_stats(&(ffg->iff_ffs), chunksize);
+}
+
+int ocfs2_info_freefrag_scan_chain(struct ocfs2_super *osb,
+                                  struct inode *gb_inode,
+                                  struct ocfs2_dinode *gb_dinode,
+                                  struct ocfs2_chain_rec *rec,
+                                  struct ocfs2_info_freefrag *ffg,
+                                  u32 chunks_in_group)
+{
+       int status = 0, used;
+       u64 blkno;
+
+       struct buffer_head *bh = NULL;
+       struct ocfs2_group_desc *bg = NULL;
+
+       unsigned int max_bits, num_clusters;
+       unsigned int offset = 0, cluster, chunk;
+       unsigned int chunk_free, last_chunksize = 0;
+
+       if (!le32_to_cpu(rec->c_free))
+               goto bail;
+
+       do {
+               if (!bg)
+                       blkno = le64_to_cpu(rec->c_blkno);
+               else
+                       blkno = le64_to_cpu(bg->bg_next_group);
+
+               if (bh) {
+                       brelse(bh);
+                       bh = NULL;
+               }
+
+               if (o2info_coherent(&ffg->iff_req))
+                       status = ocfs2_read_group_descriptor(gb_inode,
+                                                            gb_dinode,
+                                                            blkno, &bh);
+               else
+                       status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
+
+               if (status < 0) {
+                       mlog(ML_ERROR, "Can't read the group descriptor # "
+                            "%llu from device.", (unsigned long long)blkno);
+                       status = -EIO;
+                       goto bail;
+               }
+
+               bg = (struct ocfs2_group_desc *)bh->b_data;
+
+               if (!le16_to_cpu(bg->bg_free_bits_count))
+                       continue;
+
+               max_bits = le16_to_cpu(bg->bg_bits);
+               offset = 0;
+
+               for (chunk = 0; chunk < chunks_in_group; chunk++) {
+                       /*
+                        * The last chunk may not be a full one.
+                        */
+                       if ((offset + ffg->iff_chunksize) > max_bits)
+                               num_clusters = max_bits - offset;
+                       else
+                               num_clusters = ffg->iff_chunksize;
+
+                       chunk_free = 0;
+                       for (cluster = 0; cluster < num_clusters; cluster++) {
+                               used = ocfs2_test_bit(offset,
+                                               (unsigned long *)bg->bg_bitmap);
+                               /*
+                                * - chunk_free counts the free clusters in
+                                *   the current chunk.
+                                * - last_chunksize records the size (in
+                                *   clusters) of the last real free chunk
+                                *   being counted.
+                                */
+                               if (!used) {
+                                       last_chunksize++;
+                                       chunk_free++;
+                               }
+
+                               if (used && last_chunksize) {
+                                       ocfs2_info_update_ffg(ffg,
+                                                             last_chunksize);
+                                       last_chunksize = 0;
+                               }
+
+                               offset++;
+                       }
+
+                       if (chunk_free == ffg->iff_chunksize)
+                               ffg->iff_ffs.ffs_free_chunks++;
+               }
+
+               /*
+                * Update the stats for the last free chunk.
+                */
+               if (last_chunksize)
+                       ocfs2_info_update_ffg(ffg, last_chunksize);
+
+       } while (le64_to_cpu(bg->bg_next_group));
+
+bail:
+       brelse(bh);
+
+       return status;
+}
+
+int ocfs2_info_freefrag_scan_bitmap(struct ocfs2_super *osb,
+                                   struct inode *gb_inode, u64 blkno,
+                                   struct ocfs2_info_freefrag *ffg)
+{
+       u32 chunks_in_group;
+       int status = 0, unlock = 0, i;
+
+       struct buffer_head *bh = NULL;
+       struct ocfs2_chain_list *cl = NULL;
+       struct ocfs2_chain_rec *rec = NULL;
+       struct ocfs2_dinode *gb_dinode = NULL;
+
+       if (gb_inode)
+               mutex_lock(&gb_inode->i_mutex);
+
+       if (o2info_coherent(&ffg->iff_req)) {
+               status = ocfs2_inode_lock(gb_inode, &bh, 0);
+               if (status < 0) {
+                       mlog_errno(status);
+                       goto bail;
+               }
+               unlock = 1;
+       } else {
+               status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh);
+               if (status < 0) {
+                       mlog_errno(status);
+                       goto bail;
+               }
+       }
+
+       gb_dinode = (struct ocfs2_dinode *)bh->b_data;
+       cl = &(gb_dinode->id2.i_chain);
+
+       /*
+        * The chunk size (in clusters) passed from userspace must not
+        * exceed the number of clusters in a group.
+        */
+       if (ffg->iff_chunksize > le16_to_cpu(cl->cl_cpg)) {
+               status = -EINVAL;
+               goto bail;
+       }
+
+       memset(&ffg->iff_ffs, 0, sizeof(struct ocfs2_info_freefrag_stats));
+
+       ffg->iff_ffs.ffs_min = ~0U;
+       ffg->iff_ffs.ffs_clusters =
+                       le32_to_cpu(gb_dinode->id1.bitmap1.i_total);
+       ffg->iff_ffs.ffs_free_clusters = ffg->iff_ffs.ffs_clusters -
+                       le32_to_cpu(gb_dinode->id1.bitmap1.i_used);
+
+       chunks_in_group = le16_to_cpu(cl->cl_cpg) / ffg->iff_chunksize + 1;
+
+       for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {
+               rec = &(cl->cl_recs[i]);
+               status = ocfs2_info_freefrag_scan_chain(osb, gb_inode,
+                                                       gb_dinode,
+                                                       rec, ffg,
+                                                       chunks_in_group);
+               if (status)
+                       goto bail;
+       }
+
+       if (ffg->iff_ffs.ffs_free_chunks_real)
+               ffg->iff_ffs.ffs_avg = (ffg->iff_ffs.ffs_avg /
+                                       ffg->iff_ffs.ffs_free_chunks_real);
+bail:
+       if (unlock)
+               ocfs2_inode_unlock(gb_inode, 0);
+
+       if (gb_inode)
+               mutex_unlock(&gb_inode->i_mutex);
+
+       if (gb_inode)
+               iput(gb_inode);
+
+       brelse(bh);
+
+       return status;
+}
+
+int ocfs2_info_handle_freefrag(struct inode *inode,
+                              struct ocfs2_info_request __user *req)
+{
+       u64 blkno = -1;
+       char namebuf[40];
+       int status = -EFAULT, type = GLOBAL_BITMAP_SYSTEM_INODE;
+
+       struct ocfs2_info_freefrag *oiff;
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+       struct inode *gb_inode = NULL;
+
+       oiff = kzalloc(sizeof(struct ocfs2_info_freefrag), GFP_KERNEL);
+       if (!oiff) {
+               status = -ENOMEM;
+               mlog_errno(status);
+               goto bail;
+       }
+
+       if (o2info_from_user(*oiff, req))
+               goto bail;
+       /*
+        * The chunk size from userspace must be a power of 2.
+        */
+       if ((oiff->iff_chunksize & (oiff->iff_chunksize - 1)) ||
+           (!oiff->iff_chunksize)) {
+               status = -EINVAL;
+               goto bail;
+       }
+
+       if (o2info_coherent(&oiff->iff_req)) {
+               gb_inode = ocfs2_get_system_file_inode(osb, type,
+                                                      OCFS2_INVALID_SLOT);
+               if (!gb_inode) {
+                       mlog(ML_ERROR, "unable to get global_bitmap inode\n");
+                       status = -EIO;
+                       goto bail;
+               }
+       } else {
+               ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type,
+                                               OCFS2_INVALID_SLOT);
+               status = ocfs2_lookup_ino_from_name(osb->sys_root_inode,
+                                                   namebuf,
+                                                   strlen(namebuf),
+                                                   &blkno);
+               if (status < 0) {
+                       status = -ENOENT;
+                       goto bail;
+               }
+       }
+
+       status = ocfs2_info_freefrag_scan_bitmap(osb, gb_inode, blkno, oiff);
+       if (status < 0)
+               goto bail;
+
+       o2info_set_request_filled(&oiff->iff_req);
+
+       if (o2info_to_user(*oiff, req))
+               goto bail;
+
+       status = 0;
+bail:
+       if (status)
+               o2info_set_request_error(&oiff->iff_req, req);
+
+       kfree(oiff);
 
        return status;
 }
@@ -327,7 +729,7 @@ int ocfs2_info_handle_unknown(struct inode *inode,
        if (o2info_from_user(oir, req))
                goto bail;
 
-       o2info_clear_request_filled(oir);
+       o2info_clear_request_filled(&oir);
 
        if (o2info_to_user(oir, req))
                goto bail;
@@ -335,7 +737,7 @@ int ocfs2_info_handle_unknown(struct inode *inode,
        status = 0;
 bail:
        if (status)
-               o2info_set_request_error(oir, req);
+               o2info_set_request_error(&oir, req);
 
        return status;
 }
@@ -389,6 +791,14 @@ int ocfs2_info_handle_request(struct inode *inode,
                if (oir.ir_size == sizeof(struct ocfs2_info_journal_size))
                        status = ocfs2_info_handle_journal_size(inode, req);
                break;
+       case OCFS2_INFO_FREEINODE:
+               if (oir.ir_size == sizeof(struct ocfs2_info_freeinode))
+                       status = ocfs2_info_handle_freeinode(inode, req);
+               break;
+       case OCFS2_INFO_FREEFRAG:
+               if (oir.ir_size == sizeof(struct ocfs2_info_freefrag))
+                       status = ocfs2_info_handle_freefrag(inode, req);
+               break;
        default:
                status = ocfs2_info_handle_unknown(inode, req);
                break;
@@ -542,6 +952,31 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                        return -EFAULT;
 
                return ocfs2_info_handle(inode, &info, 0);
+       case FITRIM:
+       {
+               struct super_block *sb = inode->i_sb;
+               struct fstrim_range range;
+               int ret = 0;
+
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+
+               if (copy_from_user(&range, (struct fstrim_range *)arg,
+                   sizeof(range)))
+                       return -EFAULT;
+
+               ret = ocfs2_trim_fs(sb, &range);
+               if (ret < 0)
+                       return ret;
+
+               if (copy_to_user((struct fstrim_range *)arg, &range,
+                   sizeof(range)))
+                       return -EFAULT;
+
+               return 0;
+       }
+       case OCFS2_IOC_MOVE_EXT:
+               return ocfs2_ioctl_move_extents(filp, (void __user *)arg);
        default:
                return -ENOTTY;
        }
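The FITRIM case above copies a struct fstrim_range in from userspace, calls ocfs2_trim_fs(), and copies the range back. Purely as an illustration (the mount point is hypothetical), a minimal userspace caller might look like the following sketch:

#include <stdio.h>
#include <string.h>
#include <limits.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>           /* FITRIM, struct fstrim_range */

int main(int argc, char **argv)
{
        struct fstrim_range range;
        int fd = open(argc > 1 ? argv[1] : "/mnt/ocfs2", O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&range, 0, sizeof(range));
        range.len = ULLONG_MAX;         /* consider the whole filesystem */
        range.minlen = 0;               /* no minimum extent length */

        if (ioctl(fd, FITRIM, &range) < 0)
                perror("FITRIM");
        else
                /* range.len now holds whatever the filesystem reported back */
                printf("trim result: %llu bytes\n",
                       (unsigned long long)range.len);

        close(fd);
        return 0;
}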
@@ -569,6 +1004,7 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        case OCFS2_IOC_GROUP_EXTEND:
        case OCFS2_IOC_GROUP_ADD:
        case OCFS2_IOC_GROUP_ADD64:
+       case FITRIM:
                break;
        case OCFS2_IOC_REFLINK:
                if (copy_from_user(&args, (struct reflink_arguments *)arg,
@@ -584,6 +1020,8 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
                        return -EFAULT;
 
                return ocfs2_info_handle(inode, &info, 1);
+       case OCFS2_IOC_MOVE_EXT:
+               break;
        default:
                return -ENOIOCTLCMD;
        }
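OCFS2_IOC_MOVE_EXT, wired up above and implemented in move_extents.c below, takes a struct ocfs2_move_extents describing the range to move and the goal. The following is only a rough sketch: it assumes ocfs2_ioctl.h (not a standard uapi header) supplies the ioctl number and struct layout, the me_start field name and the units of me_len are inferred rather than visible in this hunk, and ocfs2_defrag_extent() below shows that the kernel clears OCFS2_MOVE_EXT_FL_COMPLETE in me_flags when only part of the range could be moved.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "ocfs2_ioctl.h"        /* OCFS2_IOC_MOVE_EXT, struct ocfs2_move_extents */

int main(void)
{
        struct ocfs2_move_extents me;
        int fd = open("/mnt/ocfs2/some.file", O_RDWR);  /* hypothetical path */

        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&me, 0, sizeof(me));
        me.me_start = 0;                /* start of the range (assumed field name) */
        me.me_len = 1024 * 1024;        /* range length; units per ocfs2_ioctl.h */
        me.me_goal = 0;                 /* preferred goal block, adjusted by the kernel */

        if (ioctl(fd, OCFS2_IOC_MOVE_EXT, &me) < 0)
                perror("OCFS2_IOC_MOVE_EXT");

        close(fd);
        return 0;
}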
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
new file mode 100644 (file)
index 0000000..cd94270
--- /dev/null
@@ -0,0 +1,1152 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * move_extents.c
+ *
+ * Copyright (C) 2011 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/mount.h>
+#include <linux/swap.h>
+
+#include <cluster/masklog.h>
+
+#include "ocfs2.h"
+#include "ocfs2_ioctl.h"
+
+#include "alloc.h"
+#include "aops.h"
+#include "dlmglue.h"
+#include "extent_map.h"
+#include "inode.h"
+#include "journal.h"
+#include "suballoc.h"
+#include "uptodate.h"
+#include "super.h"
+#include "dir.h"
+#include "buffer_head_io.h"
+#include "sysfile.h"
+#include "suballoc.h"
+#include "refcounttree.h"
+#include "move_extents.h"
+
+struct ocfs2_move_extents_context {
+       struct inode *inode;
+       struct file *file;
+       int auto_defrag;
+       int partial;
+       int credits;
+       u32 new_phys_cpos;
+       u32 clusters_moved;
+       u64 refcount_loc;
+       struct ocfs2_move_extents *range;
+       struct ocfs2_extent_tree et;
+       struct ocfs2_alloc_context *meta_ac;
+       struct ocfs2_alloc_context *data_ac;
+       struct ocfs2_cached_dealloc_ctxt dealloc;
+};
+
+static int __ocfs2_move_extent(handle_t *handle,
+                              struct ocfs2_move_extents_context *context,
+                              u32 cpos, u32 len, u32 p_cpos, u32 new_p_cpos,
+                              int ext_flags)
+{
+       int ret = 0, index;
+       struct inode *inode = context->inode;
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+       struct ocfs2_extent_rec *rec, replace_rec;
+       struct ocfs2_path *path = NULL;
+       struct ocfs2_extent_list *el;
+       u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
+       u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);
+
+       ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos,
+                                              p_cpos, new_p_cpos, len);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       memset(&replace_rec, 0, sizeof(replace_rec));
+       replace_rec.e_cpos = cpu_to_le32(cpos);
+       replace_rec.e_leaf_clusters = cpu_to_le16(len);
+       replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(inode->i_sb,
+                                                                  new_p_cpos));
+
+       path = ocfs2_new_path_from_et(&context->et);
+       if (!path) {
+               ret = -ENOMEM;
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ret = ocfs2_find_path(INODE_CACHE(inode), path, cpos);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       el = path_leaf_el(path);
+
+       index = ocfs2_search_extent_list(el, cpos);
+       if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
+               ocfs2_error(inode->i_sb,
+                           "Inode %llu has an extent at cpos %u which can no "
+                           "longer be found.\n",
+                           (unsigned long long)ino, cpos);
+               ret = -EROFS;
+               goto out;
+       }
+
+       rec = &el->l_recs[index];
+
+       BUG_ON(ext_flags != rec->e_flags);
+       /*
+        * After moving/defragging to the new location, the extent is no
+        * longer refcounted.
+        */
+       replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED;
+
+       ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode),
+                                     context->et.et_root_bh,
+                                     OCFS2_JOURNAL_ACCESS_WRITE);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ret = ocfs2_split_extent(handle, &context->et, path, index,
+                                &replace_rec, context->meta_ac,
+                                &context->dealloc);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ocfs2_journal_dirty(handle, context->et.et_root_bh);
+
+       context->new_phys_cpos = new_p_cpos;
+
+       /*
+        * Do we need to append to the truncate log for the old clusters?
+        */
+       if (old_blkno) {
+               if (ext_flags & OCFS2_EXT_REFCOUNTED)
+                       ret = ocfs2_decrease_refcount(inode, handle,
+                                       ocfs2_blocks_to_clusters(osb->sb,
+                                                                old_blkno),
+                                       len, context->meta_ac,
+                                       &context->dealloc, 1);
+               else
+                       ret = ocfs2_truncate_log_append(osb, handle,
+                                                       old_blkno, len);
+       }
+
+out:
+       return ret;
+}
+
+/*
+ * Lock the allocators and reserve the appropriate number of bits for
+ * metadata blocks and data clusters.
+ *
+ * In some cases we don't need to reserve clusters; just pass data_ac
+ * as NULL.
+ */
+static int ocfs2_lock_allocators_move_extents(struct inode *inode,
+                                       struct ocfs2_extent_tree *et,
+                                       u32 clusters_to_move,
+                                       u32 extents_to_split,
+                                       struct ocfs2_alloc_context **meta_ac,
+                                       struct ocfs2_alloc_context **data_ac,
+                                       int extra_blocks,
+                                       int *credits)
+{
+       int ret, num_free_extents;
+       unsigned int max_recs_needed = 2 * extents_to_split + clusters_to_move;
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+       num_free_extents = ocfs2_num_free_extents(osb, et);
+       if (num_free_extents < 0) {
+               ret = num_free_extents;
+               mlog_errno(ret);
+               goto out;
+       }
+
+       if (!num_free_extents ||
+           (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed))
+               extra_blocks += ocfs2_extend_meta_needed(et->et_root_el);
+
+       ret = ocfs2_reserve_new_metadata_blocks(osb, extra_blocks, meta_ac);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       if (data_ac) {
+               ret = ocfs2_reserve_clusters(osb, clusters_to_move, data_ac);
+               if (ret) {
+                       mlog_errno(ret);
+                       goto out;
+               }
+       }
+
+       *credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el,
+                                             clusters_to_move + 2);
+
+       mlog(0, "reserve metadata_blocks: %d, data_clusters: %u, credits: %d\n",
+            extra_blocks, clusters_to_move, *credits);
+out:
+       if (ret) {
+               if (*meta_ac) {
+                       ocfs2_free_alloc_context(*meta_ac);
+                       *meta_ac = NULL;
+               }
+       }
+
+       return ret;
+}
+
+/*
+ * Use a single journal handle to guarantee data consistency in case a
+ * crash happens at any point.
+ *
+ * XXX: defrag may end up moving only part of the requested extent when
+ * not enough contiguous clusters can be found in the allocator.
+ */
+static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context,
+                              u32 cpos, u32 phys_cpos, u32 *len, int ext_flags)
+{
+       int ret, credits = 0, extra_blocks = 0, partial = context->partial;
+       handle_t *handle;
+       struct inode *inode = context->inode;
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+       struct inode *tl_inode = osb->osb_tl_inode;
+       struct ocfs2_refcount_tree *ref_tree = NULL;
+       u32 new_phys_cpos, new_len;
+       u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
+
+       if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) {
+
+               BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
+                        OCFS2_HAS_REFCOUNT_FL));
+
+               BUG_ON(!context->refcount_loc);
+
+               ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
+                                              &ref_tree, NULL);
+               if (ret) {
+                       mlog_errno(ret);
+                       return ret;
+               }
+
+               ret = ocfs2_prepare_refcount_change_for_del(inode,
+                                                       context->refcount_loc,
+                                                       phys_blkno,
+                                                       *len,
+                                                       &credits,
+                                                       &extra_blocks);
+               if (ret) {
+                       mlog_errno(ret);
+                       goto out;
+               }
+       }
+
+       ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1,
+                                                &context->meta_ac,
+                                                &context->data_ac,
+                                                extra_blocks, &credits);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       /*
+        * Should we be using the allocation reservation strategy here?
+        *
+        * if (context->data_ac)
+        *      context->data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv;
+        */
+
+       mutex_lock(&tl_inode->i_mutex);
+
+       if (ocfs2_truncate_log_needs_flush(osb)) {
+               ret = __ocfs2_flush_truncate_log(osb);
+               if (ret < 0) {
+                       mlog_errno(ret);
+                       goto out_unlock_mutex;
+               }
+       }
+
+       handle = ocfs2_start_trans(osb, credits);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               mlog_errno(ret);
+               goto out_unlock_mutex;
+       }
+
+       ret = __ocfs2_claim_clusters(handle, context->data_ac, 1, *len,
+                                    &new_phys_cpos, &new_len);
+       if (ret) {
+               mlog_errno(ret);
+               goto out_commit;
+       }
+
+       /*
+        * Allowing partial extent moves has pros and cons: it makes the
+        * defragmentation as a whole less likely to fail, but it may leave
+        * the fs even more fragmented after the move.  Let userspace make
+        * the call here.
+        */
+       if (new_len != *len) {
+               mlog(0, "len_claimed: %u, len: %u\n", new_len, *len);
+               if (!partial) {
+                       context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE;
+                       ret = -ENOSPC;
+                       goto out_commit;
+               }
+       }
+
+       mlog(0, "cpos: %u, phys_cpos: %u, new_phys_cpos: %u\n", cpos,
+            phys_cpos, new_phys_cpos);
+
+       ret = __ocfs2_move_extent(handle, context, cpos, new_len, phys_cpos,
+                                 new_phys_cpos, ext_flags);
+       if (ret)
+               mlog_errno(ret);
+
+       if (partial && (new_len != *len))
+               *len = new_len;
+
+       /*
+        * Here we should write the new page out first if we are
+        * in write-back mode.
+        */
+       ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, *len);
+       if (ret)
+               mlog_errno(ret);
+
+out_commit:
+       ocfs2_commit_trans(osb, handle);
+
+out_unlock_mutex:
+       mutex_unlock(&tl_inode->i_mutex);
+
+       if (context->data_ac) {
+               ocfs2_free_alloc_context(context->data_ac);
+               context->data_ac = NULL;
+       }
+
+       if (context->meta_ac) {
+               ocfs2_free_alloc_context(context->meta_ac);
+               context->meta_ac = NULL;
+       }
+
+out:
+       if (ref_tree)
+               ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
+
+       return ret;
+}
+
+/*
+ * Find the victim alloc group, i.e. the group that #blkno falls into.
+ */
+static int ocfs2_find_victim_alloc_group(struct inode *inode,
+                                        u64 vict_blkno,
+                                        int type, int slot,
+                                        int *vict_bit,
+                                        struct buffer_head **ret_bh)
+{
+       int ret, i, bits_per_unit = 0;
+       u64 blkno;
+       char namebuf[40];
+
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+       struct buffer_head *ac_bh = NULL, *gd_bh = NULL;
+       struct ocfs2_chain_list *cl;
+       struct ocfs2_chain_rec *rec;
+       struct ocfs2_dinode *ac_dinode;
+       struct ocfs2_group_desc *bg;
+
+       ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type, slot);
+       ret = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf,
+                                        strlen(namebuf), &blkno);
+       if (ret) {
+               ret = -ENOENT;
+               goto out;
+       }
+
+       ret = ocfs2_read_blocks_sync(osb, blkno, 1, &ac_bh);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ac_dinode = (struct ocfs2_dinode *)ac_bh->b_data;
+       cl = &(ac_dinode->id2.i_chain);
+       rec = &(cl->cl_recs[0]);
+
+       if (type == GLOBAL_BITMAP_SYSTEM_INODE)
+               bits_per_unit = osb->s_clustersize_bits -
+                                       inode->i_sb->s_blocksize_bits;
+       /*
+        * Bail out if 'vict_blkno' is out of the valid range.
+        */
+       if ((vict_blkno < le64_to_cpu(rec->c_blkno)) ||
+           (vict_blkno >= (le32_to_cpu(ac_dinode->id1.bitmap1.i_total) <<
+                               bits_per_unit))) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) {
+
+               rec = &(cl->cl_recs[i]);
+               if (!rec)
+                       continue;
+
+               bg = NULL;
+
+               do {
+                       if (!bg)
+                               blkno = le64_to_cpu(rec->c_blkno);
+                       else
+                               blkno = le64_to_cpu(bg->bg_next_group);
+
+                       if (gd_bh) {
+                               brelse(gd_bh);
+                               gd_bh = NULL;
+                       }
+
+                       ret = ocfs2_read_blocks_sync(osb, blkno, 1, &gd_bh);
+                       if (ret) {
+                               mlog_errno(ret);
+                               goto out;
+                       }
+
+                       bg = (struct ocfs2_group_desc *)gd_bh->b_data;
+
+                       if (vict_blkno < (le64_to_cpu(bg->bg_blkno) +
+                                               le16_to_cpu(bg->bg_bits))) {
+
+                               *ret_bh = gd_bh;
+                               *vict_bit = (vict_blkno - blkno) >>
+                                                       bits_per_unit;
+                               mlog(0, "found the victim group: #%llu, "
+                                    "total_bits: %u, vict_bit: %u\n",
+                                    (unsigned long long)blkno,
+                                    le16_to_cpu(bg->bg_bits), *vict_bit);
+                               goto out;
+                       }
+
+               } while (le64_to_cpu(bg->bg_next_group));
+       }
+
+       ret = -EINVAL;
+out:
+       brelse(ac_bh);
+
+       /*
+        * caller has to release the gd_bh properly.
+        */
+       return ret;
+}
+
+/*
+ * XXX: helper to validate and adjust moving goal.
+ */
+static int ocfs2_validate_and_adjust_move_goal(struct inode *inode,
+                                              struct ocfs2_move_extents *range)
+{
+       int ret, goal_bit = 0;
+
+       struct buffer_head *gd_bh = NULL;
+       struct ocfs2_group_desc *bg = NULL;
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+       int c_to_b = 1 << (osb->s_clustersize_bits -
+                                       inode->i_sb->s_blocksize_bits);
+
+       /*
+        * Make the goal cluster-aligned.
+        */
+       range->me_goal = ocfs2_block_to_cluster_start(inode->i_sb,
+                                                     range->me_goal);
+
+       /*
+        * Validate that the goal sits within the global bitmap, and return
+        * the victim group descriptor.
+        */
+       ret = ocfs2_find_victim_alloc_group(inode, range->me_goal,
+                                           GLOBAL_BITMAP_SYSTEM_INODE,
+                                           OCFS2_INVALID_SLOT,
+                                           &goal_bit, &gd_bh);
+       if (ret)
+               goto out;
+
+       bg = (struct ocfs2_group_desc *)gd_bh->b_data;
+
+       /*
+        * The moving goal is not allowed to start at a group descriptor
+        * block (block #0 of the group); compromise and use the next
+        * cluster instead.  bg must be read from gd_bh before this check
+        * can dereference it.
+        */
+       if (range->me_goal == le64_to_cpu(bg->bg_blkno))
+               range->me_goal += c_to_b;
+
+       /*
+        * The move is not allowed to cross a group boundary.
+        */
+       if ((le16_to_cpu(bg->bg_bits) - goal_bit) * osb->s_clustersize <
+                                                               range->me_len) {
+               ret = -EINVAL;
+               goto out;
+       }
+       /*
+        * More exact validation/adjustment will be performed later during
+        * the move, for each extent range.
+        */
+       mlog(0, "extents are ready to be moved to block #%llu\n",
+            (unsigned long long)range->me_goal);
+
+out:
+       brelse(gd_bh);
+
+       return ret;
+}
+
+static void ocfs2_probe_alloc_group(struct inode *inode, struct buffer_head *bh,
+                                   int *goal_bit, u32 move_len, u32 max_hop,
+                                   u32 *phys_cpos)
+{
+       int i, used, last_free_bits = 0, base_bit = *goal_bit;
+       struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
+       u32 base_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
+                                                le64_to_cpu(gd->bg_blkno));
+
+       for (i = base_bit; i < le16_to_cpu(gd->bg_bits); i++) {
+
+               used = ocfs2_test_bit(i, (unsigned long *)gd->bg_bitmap);
+               if (used) {
+                       /*
+                        * We have searched as far as 'max_hop' bits past
+                        * the goal for a free chunk and still failed;
+                        * give up.
+                        */
+                       if ((i - base_bit) > max_hop) {
+                               *phys_cpos = 0;
+                               break;
+                       }
+
+                       if (last_free_bits)
+                               last_free_bits = 0;
+
+                       continue;
+               } else
+                       last_free_bits++;
+
+               if (last_free_bits == move_len) {
+                       *goal_bit = i;
+                       *phys_cpos = base_cpos + i;
+                       break;
+               }
+       }
+
+       mlog(0, "found phys_cpos: %u to fit the wanted move.\n", *phys_cpos);
+}
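The scan above looks for move_len consecutive free bits in the group bitmap, starting at the goal bit and giving up once it has wandered more than max_hop bits past the goal while hitting used bits. Below is only a toy userspace rendition of the same idea, over a plain byte bitmap with made-up data:

#include <stdio.h>

static int probe(const unsigned char *bitmap, int nbits, int goal,
                 int want, int max_hop)
{
        int i, run = 0;

        for (i = goal; i < nbits; i++) {
                int used = (bitmap[i / 8] >> (i % 8)) & 1;

                if (used) {
                        if (i - goal > max_hop)
                                return -1;      /* hopped too far, give up */
                        run = 0;
                        continue;
                }
                if (++run == want)
                        return i - want + 1;    /* first bit of the free run */
        }
        return -1;
}

int main(void)
{
        /* 0xb1 = 10110001b, LSB is bit 0: bits 1-3 are free */
        unsigned char map[] = { 0xb1, 0x00 };

        printf("free run of 3 starts at bit %d\n", probe(map, 16, 0, 3, 8));
        return 0;
}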
+
+static int ocfs2_alloc_dinode_update_counts(struct inode *inode,
+                                      handle_t *handle,
+                                      struct buffer_head *di_bh,
+                                      u32 num_bits,
+                                      u16 chain)
+{
+       int ret;
+       u32 tmp_used;
+       struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
+       struct ocfs2_chain_list *cl =
+                               (struct ocfs2_chain_list *) &di->id2.i_chain;
+
+       ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
+                                     OCFS2_JOURNAL_ACCESS_WRITE);
+       if (ret < 0) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       tmp_used = le32_to_cpu(di->id1.bitmap1.i_used);
+       di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used);
+       le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits);
+       ocfs2_journal_dirty(handle, di_bh);
+
+out:
+       return ret;
+}
+
+static inline int ocfs2_block_group_set_bits(handle_t *handle,
+                                            struct inode *alloc_inode,
+                                            struct ocfs2_group_desc *bg,
+                                            struct buffer_head *group_bh,
+                                            unsigned int bit_off,
+                                            unsigned int num_bits)
+{
+       int status;
+       void *bitmap = bg->bg_bitmap;
+       int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
+
+       /* All callers get the descriptor via
+        * ocfs2_read_group_descriptor().  Any corruption is a code bug. */
+       BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg));
+       BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits);
+
+       mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off,
+            num_bits);
+
+       if (ocfs2_is_cluster_bitmap(alloc_inode))
+               journal_type = OCFS2_JOURNAL_ACCESS_UNDO;
+
+       status = ocfs2_journal_access_gd(handle,
+                                        INODE_CACHE(alloc_inode),
+                                        group_bh,
+                                        journal_type);
+       if (status < 0) {
+               mlog_errno(status);
+               goto bail;
+       }
+
+       le16_add_cpu(&bg->bg_free_bits_count, -num_bits);
+       if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) {
+               ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit"
+                           " count %u but claims %u are freed. num_bits %d",
+                           (unsigned long long)le64_to_cpu(bg->bg_blkno),
+                           le16_to_cpu(bg->bg_bits),
+                           le16_to_cpu(bg->bg_free_bits_count), num_bits);
+               return -EROFS;
+       }
+       while (num_bits--)
+               ocfs2_set_bit(bit_off++, bitmap);
+
+       ocfs2_journal_dirty(handle, group_bh);
+
+bail:
+       return status;
+}
+
+static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
+                            u32 cpos, u32 phys_cpos, u32 *new_phys_cpos,
+                            u32 len, int ext_flags)
+{
+       int ret, credits = 0, extra_blocks = 0, goal_bit = 0;
+       handle_t *handle;
+       struct inode *inode = context->inode;
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+       struct inode *tl_inode = osb->osb_tl_inode;
+       struct inode *gb_inode = NULL;
+       struct buffer_head *gb_bh = NULL;
+       struct buffer_head *gd_bh = NULL;
+       struct ocfs2_group_desc *gd;
+       struct ocfs2_refcount_tree *ref_tree = NULL;
+       u32 move_max_hop = ocfs2_blocks_to_clusters(inode->i_sb,
+                                                   context->range->me_threshold);
+       u64 phys_blkno, new_phys_blkno;
+
+       phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
+
+       if ((ext_flags & OCFS2_EXT_REFCOUNTED) && len) {
+
+               BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
+                        OCFS2_HAS_REFCOUNT_FL));
+
+               BUG_ON(!context->refcount_loc);
+
+               ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1,
+                                              &ref_tree, NULL);
+               if (ret) {
+                       mlog_errno(ret);
+                       return ret;
+               }
+
+               ret = ocfs2_prepare_refcount_change_for_del(inode,
+                                                       context->refcount_loc,
+                                                       phys_blkno,
+                                                       len,
+                                                       &credits,
+                                                       &extra_blocks);
+               if (ret) {
+                       mlog_errno(ret);
+                       goto out;
+               }
+       }
+
+       ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1,
+                                                &context->meta_ac,
+                                                NULL, extra_blocks, &credits);
+       if (ret) {
+               mlog_errno(ret);
+               goto out;
+       }
+
+       /*
+        * need to count 2 extra credits for global_bitmap inode and
+        * group descriptor.
+        */
+       credits += OCFS2_INODE_UPDATE_CREDITS + 1;
+
+       /*
+        * ocfs2_move_extent() does not reserve any clusters via
+        * ocfs2_lock_allocators_move_extents(), but we still need to lock
+        * the global bitmap.
+        */
+       gb_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE,
+                                              OCFS2_INVALID_SLOT);
+       if (!gb_inode) {
+               mlog(ML_ERROR, "unable to get global_bitmap inode\n");
+               ret = -EIO;
+               goto out;
+       }
+
+       mutex_lock(&gb_inode->i_mutex);
+
+       ret = ocfs2_inode_lock(gb_inode, &gb_bh, 1);
+       if (ret) {
+               mlog_errno(ret);
+               goto out_unlock_gb_mutex;
+       }
+
+       mutex_lock(&tl_inode->i_mutex);
+
+       handle = ocfs2_start_trans(osb, credits);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               mlog_errno(ret);
+               goto out_unlock_tl_inode;
+       }
+
+       new_phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *new_phys_cpos);
+       ret = ocfs2_find_victim_alloc_group(inode, new_phys_blkno,
+                                           GLOBAL_BITMAP_SYSTEM_INODE,
+                                           OCFS2_INVALID_SLOT,
+                                           &goal_bit, &gd_bh);
+       if (ret) {
+               mlog_errno(ret);
+               goto out_commit;
+       }
+
+       /*
+        * probe the victim cluster group to find a region that
+        * fits the wanted movement; it will even perform a
+        * best-effort attempt by compromising to a threshold
+        * around the goal.
+        */
+       ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop,
+                               new_phys_cpos);
+       if (!*new_phys_cpos) {
+               ret = -ENOSPC;
+               goto out_commit;
+       }
+
+       ret = __ocfs2_move_extent(handle, context, cpos, len, phys_cpos,
+                                 *new_phys_cpos, ext_flags);
+       if (ret) {
+               mlog_errno(ret);
+               goto out_commit;
+       }
+
+       gd = (struct ocfs2_group_desc *)gd_bh->b_data;
+       ret = ocfs2_alloc_dinode_update_counts(gb_inode, handle, gb_bh, len,
+                                              le16_to_cpu(gd->bg_chain));
+       if (ret) {
+               mlog_errno(ret);
+               goto out_commit;
+       }
+
+       ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh,
+                                        goal_bit, len);
+       if (ret)
+               mlog_errno(ret);
+
+       /*
+        * Here we should write the new page out first if we are
+        * in write-back mode.
+        */
+       ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, len);
+       if (ret)
+               mlog_errno(ret);
+
+out_commit:
+       ocfs2_commit_trans(osb, handle);
+       brelse(gd_bh);
+
+out_unlock_tl_inode:
+       mutex_unlock(&tl_inode->i_mutex);
+
+       ocfs2_inode_unlock(gb_inode, 1);
+out_unlock_gb_mutex:
+       mutex_unlock(&gb_inode->i_mutex);
+       brelse(gb_bh);
+       iput(gb_inode);
+
+out:
+       if (context->meta_ac) {
+               ocfs2_free_alloc_context(context->meta_ac);
+               context->meta_ac = NULL;
+       }
+
+       if (ref_tree)
+               ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
+
+       return ret;
+}
+
+/*
+ * Helper to calculate the length to defragment in one run, according to the threshold.
+ */
+static void ocfs2_calc_extent_defrag_len(u32 *alloc_size, u32 *len_defraged,
+                                        u32 threshold, int *skip)
+{
+       if ((*alloc_size + *len_defraged) < threshold) {
+               /*
+                * keep defragmenting until we reach the threshold
+                */
+               *len_defraged += *alloc_size;
+       } else if (*len_defraged == 0) {
+               /*
+                * XXX: skip a large extent.
+                */
+               *skip = 1;
+       } else {
+               /*
+                * split this extent so that, together with the
+                * former pieces, it reaches the threshold.
+                *
+                * one cycle of defragmentation of size 'threshold'
+                * is done here; resetting 'len_defraged' forces a
+                * new defragmentation cycle.
+                */
+               *alloc_size = threshold - *len_defraged;
+               *len_defraged = 0;
+       }
+}
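Editor's aside, not part of the patch: the helper above accumulates consecutive extents into defrag cycles of at most 'threshold' clusters, skipping a lone extent that is already at least that large. A minimal stand-alone sketch of that behaviour, using made-up extent sizes and a hypothetical threshold of 8 clusters (the function below simply mirrors the kernel helper so it can be run in user space):

/* sketch only: mirrors ocfs2_calc_extent_defrag_len() with fake inputs */
#include <stdio.h>

typedef unsigned int u32;

static void calc_extent_defrag_len(u32 *alloc_size, u32 *len_defraged,
                                   u32 threshold, int *skip)
{
        if ((*alloc_size + *len_defraged) < threshold)
                *len_defraged += *alloc_size;            /* keep accumulating */
        else if (*len_defraged == 0)
                *skip = 1;                               /* lone large extent: skip it */
        else {
                *alloc_size = threshold - *len_defraged; /* split to reach the threshold */
                *len_defraged = 0;                       /* start a new cycle */
        }
}

int main(void)
{
        u32 extents[] = { 3, 10, 12 };   /* hypothetical extent sizes, in clusters */
        u32 threshold = 8, len_defraged = 0;
        unsigned int i;
        int skip;

        for (i = 0; i < sizeof(extents) / sizeof(extents[0]); i++) {
                u32 alloc_size = extents[i];

                skip = 0;
                calc_extent_defrag_len(&alloc_size, &len_defraged,
                                       threshold, &skip);
                printf("extent of %u clusters -> alloc_size=%u skip=%d len_defraged=%u\n",
                       extents[i], alloc_size, skip, len_defraged);
        }
        return 0;
}

With these numbers, the 3-cluster extent is accumulated, the 10-cluster extent is split so only 5 clusters are handled in this cycle, and the final 12-cluster extent is skipped because it alone already exceeds the threshold.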
+
+static int __ocfs2_move_extents_range(struct buffer_head *di_bh,
+                               struct ocfs2_move_extents_context *context)
+{
+       int ret = 0, flags, do_defrag, skip = 0;
+       u32 cpos, phys_cpos, move_start, len_to_move, alloc_size;
+       u32 len_defraged = 0, defrag_thresh = 0, new_phys_cpos = 0;
+
+       struct inode *inode = context->inode;
+       struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
+       struct ocfs2_move_extents *range = context->range;
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+       if ((inode->i_size == 0) || (range->me_len == 0))
+               return 0;
+
+       if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+               return 0;
+
+       context->refcount_loc = le64_to_cpu(di->i_refcount_loc);
+
+       ocfs2_init_dinode_extent_tree(&context->et, INODE_CACHE(inode), di_bh);
+       ocfs2_init_dealloc_ctxt(&context->dealloc);
+
+       /*
+        * TO-DO XXX:
+        *
+        * - xattr extents.
+        */
+
+       do_defrag = context->auto_defrag;
+
+       /*
+        * extent moving happens in units of clusters; for the sake
+        * of simplicity, we may ignore the two partial clusters that
+        * 'byte_start' and 'byte_start + len' fall within.
+        */
+       move_start = ocfs2_clusters_for_bytes(osb->sb, range->me_start);
+       len_to_move = (range->me_start + range->me_len) >>
+                                               osb->s_clustersize_bits;
+       if (len_to_move >= move_start)
+               len_to_move -= move_start;
+       else
+               len_to_move = 0;
+
+       if (do_defrag) {
+               defrag_thresh = range->me_threshold >> osb->s_clustersize_bits;
+               if (defrag_thresh <= 1)
+                       goto done;
+       } else
+               new_phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb,
+                                                        range->me_goal);
+
+       mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u, "
+            "thresh: %u\n",
+            (unsigned long long)OCFS2_I(inode)->ip_blkno,
+            (unsigned long long)range->me_start,
+            (unsigned long long)range->me_len,
+            move_start, len_to_move, defrag_thresh);
+
+       cpos = move_start;
+       while (len_to_move) {
+               ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &alloc_size,
+                                        &flags);
+               if (ret) {
+                       mlog_errno(ret);
+                       goto out;
+               }
+
+               if (alloc_size > len_to_move)
+                       alloc_size = len_to_move;
+
+               /*
+                * XXX: how to deal with a hole:
+                *
+                * - skip the hole of course
+                * - force a new defragmentation
+                */
+               if (!phys_cpos) {
+                       if (do_defrag)
+                               len_defraged = 0;
+
+                       goto next;
+               }
+
+               if (do_defrag) {
+                       ocfs2_calc_extent_defrag_len(&alloc_size, &len_defraged,
+                                                    defrag_thresh, &skip);
+                       /*
+                        * skip large extents
+                        */
+                       if (skip) {
+                               skip = 0;
+                               goto next;
+                       }
+
+                       mlog(0, "#Defrag: cpos: %u, phys_cpos: %u, "
+                            "alloc_size: %u, len_defraged: %u\n",
+                            cpos, phys_cpos, alloc_size, len_defraged);
+
+                       ret = ocfs2_defrag_extent(context, cpos, phys_cpos,
+                                                 &alloc_size, flags);
+               } else {
+                       ret = ocfs2_move_extent(context, cpos, phys_cpos,
+                                               &new_phys_cpos, alloc_size,
+                                               flags);
+
+                       new_phys_cpos += alloc_size;
+               }
+
+               if (ret < 0) {
+                       mlog_errno(ret);
+                       goto out;
+               }
+
+               context->clusters_moved += alloc_size;
+next:
+               cpos += alloc_size;
+               len_to_move -= alloc_size;
+       }
+
+done:
+       range->me_flags |= OCFS2_MOVE_EXT_FL_COMPLETE;
+
+out:
+       range->me_moved_len = ocfs2_clusters_to_bytes(osb->sb,
+                                                     context->clusters_moved);
+       range->me_new_offset = ocfs2_clusters_to_bytes(osb->sb,
+                                                      context->new_phys_cpos);
+
+       ocfs2_schedule_truncate_log_flush(osb, 1);
+       ocfs2_run_deallocs(osb, &context->dealloc);
+
+       return ret;
+}
+
+static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
+{
+       int status;
+       handle_t *handle;
+       struct inode *inode = context->inode;
+       struct ocfs2_dinode *di;
+       struct buffer_head *di_bh = NULL;
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+
+       if (!inode)
+               return -ENOENT;
+
+       if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+               return -EROFS;
+
+       mutex_lock(&inode->i_mutex);
+
+       /*
+        * This prevents concurrent writes from other nodes
+        */
+       status = ocfs2_rw_lock(inode, 1);
+       if (status) {
+               mlog_errno(status);
+               goto out;
+       }
+
+       status = ocfs2_inode_lock(inode, &di_bh, 1);
+       if (status) {
+               mlog_errno(status);
+               goto out_rw_unlock;
+       }
+
+       /*
+        * remember that ip_xattr_sem also needs to be held if necessary
+        */
+       down_write(&OCFS2_I(inode)->ip_alloc_sem);
+
+       status = __ocfs2_move_extents_range(di_bh, context);
+
+       up_write(&OCFS2_I(inode)->ip_alloc_sem);
+       if (status) {
+               mlog_errno(status);
+               goto out_inode_unlock;
+       }
+
+       /*
+        * We update ctime for these changes
+        */
+       handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+       if (IS_ERR(handle)) {
+               status = PTR_ERR(handle);
+               mlog_errno(status);
+               goto out_inode_unlock;
+       }
+
+       status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
+                                        OCFS2_JOURNAL_ACCESS_WRITE);
+       if (status) {
+               mlog_errno(status);
+               goto out_commit;
+       }
+
+       di = (struct ocfs2_dinode *)di_bh->b_data;
+       inode->i_ctime = CURRENT_TIME;
+       di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
+       di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
+
+       ocfs2_journal_dirty(handle, di_bh);
+
+out_commit:
+       ocfs2_commit_trans(osb, handle);
+
+out_inode_unlock:
+       brelse(di_bh);
+       ocfs2_inode_unlock(inode, 1);
+out_rw_unlock:
+       ocfs2_rw_unlock(inode, 1);
+out:
+       mutex_unlock(&inode->i_mutex);
+
+       return status;
+}
+
+int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
+{
+       int status;
+
+       struct inode *inode = filp->f_path.dentry->d_inode;
+       struct ocfs2_move_extents range;
+       struct ocfs2_move_extents_context *context = NULL;
+
+       status = mnt_want_write(filp->f_path.mnt);
+       if (status)
+               return status;
+
+       if ((!S_ISREG(inode->i_mode)) || !(filp->f_mode & FMODE_WRITE))
+               goto out;
+
+       if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
+               status = -EPERM;
+               goto out;
+       }
+
+       context = kzalloc(sizeof(struct ocfs2_move_extents_context), GFP_NOFS);
+       if (!context) {
+               status = -ENOMEM;
+               mlog_errno(status);
+               goto out;
+       }
+
+       context->inode = inode;
+       context->file = filp;
+
+       if (argp) {
+               if (copy_from_user(&range, (struct ocfs2_move_extents *)argp,
+                                  sizeof(range))) {
+                       status = -EFAULT;
+                       goto out;
+               }
+       } else {
+               status = -EINVAL;
+               goto out;
+       }
+
+       if (range.me_start > i_size_read(inode))
+               goto out;
+
+       if (range.me_start + range.me_len > i_size_read(inode))
+               range.me_len = i_size_read(inode) - range.me_start;
+
+       context->range = &range;
+
+       if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) {
+               context->auto_defrag = 1;
+               /*
+                * The default threshold for the defragmentation
+                * is 1MB, since our maximum cluster size is
+                * also 1MB.
+                */
+               if (!range.me_threshold)
+                       range.me_threshold = 1024 * 1024;
+
+               if (range.me_threshold > i_size_read(inode))
+                       range.me_threshold = i_size_read(inode);
+
+               if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG)
+                       context->partial = 1;
+       } else {
+               /*
+                * make a first, best-effort attempt to validate and
+                * adjust the goal (a physical address in blocks); it
+                * cannot guarantee that the later operation will always
+                * succeed, since the global_bitmap may change over time.
+                */
+
+               status = ocfs2_validate_and_adjust_move_goal(inode, &range);
+               if (status)
+                       goto out;
+       }
+
+       status = ocfs2_move_extents(context);
+       if (status)
+               mlog_errno(status);
+out:
+       /*
+        * movement/defragmentation may end up only partially completed,
+        * which is why we return the finished length and new_offset to
+        * userspace even if a failure happens somewhere.
+        */
+       if (argp) {
+               if (copy_to_user((struct ocfs2_move_extents *)argp, &range,
+                               sizeof(range)))
+                       status = -EFAULT;
+       }
+
+       kfree(context);
+
+       mnt_drop_write(filp->f_path.mnt);
+
+       return status;
+}
diff --git a/fs/ocfs2/move_extents.h b/fs/ocfs2/move_extents.h
new file mode 100644 (file)
index 0000000..4e143e8
--- /dev/null
@@ -0,0 +1,22 @@
+/* -*- mode: c; c-basic-offset: 8; -*-
+ * vim: noexpandtab sw=8 ts=8 sts=0:
+ *
+ * move_extents.h
+ *
+ * Copyright (C) 2011 Oracle.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#ifndef OCFS2_MOVE_EXTENTS_H
+#define OCFS2_MOVE_EXTENTS_H
+
+int ocfs2_ioctl_move_extents(struct file *filp,  void __user *argp);
+
+#endif /* OCFS2_MOVE_EXTENTS_H */
index b46f39bf7438d5048dd5637d5762b4d6c44990e4..5b27ff1fa577d95533c0b594349b23f8cc62bd62 100644 (file)
@@ -142,6 +142,38 @@ struct ocfs2_info_journal_size {
        __u64 ij_journal_size;
 };
 
+struct ocfs2_info_freeinode {
+       struct ocfs2_info_request ifi_req;
+       struct ocfs2_info_local_freeinode {
+               __u64 lfi_total;
+               __u64 lfi_free;
+       } ifi_stat[OCFS2_MAX_SLOTS];
+       __u32 ifi_slotnum; /* out */
+       __u32 ifi_pad;
+};
+
+#define OCFS2_INFO_MAX_HIST     (32)
+
+struct ocfs2_info_freefrag {
+       struct ocfs2_info_request iff_req;
+       struct ocfs2_info_freefrag_stats { /* (out) */
+               struct ocfs2_info_free_chunk_list {
+                       __u32 fc_chunks[OCFS2_INFO_MAX_HIST];
+                       __u32 fc_clusters[OCFS2_INFO_MAX_HIST];
+               } ffs_fc_hist;
+               __u32 ffs_clusters;
+               __u32 ffs_free_clusters;
+               __u32 ffs_free_chunks;
+               __u32 ffs_free_chunks_real;
+               __u32 ffs_min; /* Minimum free chunksize in clusters */
+               __u32 ffs_max;
+               __u32 ffs_avg;
+               __u32 ffs_pad;
+       } iff_ffs;
+       __u32 iff_chunksize; /* chunk size in clusters (in) */
+       __u32 iff_pad;
+};
+
 /* Codes for ocfs2_info_request */
 enum ocfs2_info_type {
        OCFS2_INFO_CLUSTERSIZE = 1,
@@ -151,6 +183,8 @@ enum ocfs2_info_type {
        OCFS2_INFO_UUID,
        OCFS2_INFO_FS_FEATURES,
        OCFS2_INFO_JOURNAL_SIZE,
+       OCFS2_INFO_FREEINODE,
+       OCFS2_INFO_FREEFRAG,
        OCFS2_INFO_NUM_TYPES
 };
 
@@ -171,4 +205,38 @@ enum ocfs2_info_type {
 
 #define OCFS2_IOC_INFO         _IOR('o', 5, struct ocfs2_info)
 
+struct ocfs2_move_extents {
+/* All values are in bytes */
+       /* in */
+       __u64 me_start;         /* Virtual start in the file to move */
+       __u64 me_len;           /* Length of the extents to be moved */
+       __u64 me_goal;          /* Physical offset of the goal,
+                                  in block units */
+       __u64 me_threshold;     /* Maximum distance from goal or threshold
+                                  for auto defragmentation */
+       __u64 me_flags;         /* Flags for the operation:
+                                * - auto defragmentation.
+                                * - refcount,xattr cases.
+                                */
+       /* out */
+       __u64 me_moved_len;     /* Moved/defragged length */
+       __u64 me_new_offset;    /* Resulting physical location */
+       __u32 me_reserved[2];   /* Reserved for future use */
+};
+
+#define OCFS2_MOVE_EXT_FL_AUTO_DEFRAG  (0x00000001)    /* Kernel claims new
+                                                          clusters itself as
+                                                          the goal place for
+                                                          extent moving */
+#define OCFS2_MOVE_EXT_FL_PART_DEFRAG  (0x00000002)    /* Allow partial extent
+                                                          moving; this makes
+                                                          movement less likely
+                                                          to fail, but may make
+                                                          the fs more fragmented */
+#define OCFS2_MOVE_EXT_FL_COMPLETE     (0x00000004)    /* Move or defragmentation
+                                                          completed entirely.
+                                                        */
+
+#define OCFS2_IOC_MOVE_EXT     _IOW('o', 6, struct ocfs2_move_extents)
+
 #endif /* OCFS2_IOCTL_H */
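Editor's aside, not part of the patch: the struct and flag definitions above are enough to sketch how user space might issue the new OCFS2_IOC_MOVE_EXT ioctl in auto-defrag mode. This is a hedged illustration only: the header include path, file path, length and threshold values are assumptions, not taken from the patch; the only names used from the patch are the struct fields, the two flags and the ioctl number.

/* hypothetical user-space sketch: auto-defragment the first 64MB of a file */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <ocfs2/ocfs2_ioctl.h>   /* assumed install location of the ioctl header */

int main(void)
{
        struct ocfs2_move_extents me;
        int fd = open("/mnt/ocfs2/testfile", O_RDWR);   /* needs FMODE_WRITE */

        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&me, 0, sizeof(me));
        me.me_start = 0;                       /* defrag from file offset 0 ... */
        me.me_len = 64ULL * 1024 * 1024;       /* ... for the first 64MB */
        me.me_threshold = 1024 * 1024;         /* 1MB defrag chunk (the default) */
        me.me_flags = OCFS2_MOVE_EXT_FL_AUTO_DEFRAG;

        if (ioctl(fd, OCFS2_IOC_MOVE_EXT, &me) < 0)
                perror("OCFS2_IOC_MOVE_EXT");
        else
                printf("moved %llu bytes, complete=%d\n",
                       (unsigned long long)me.me_moved_len,
                       !!(me.me_flags & OCFS2_MOVE_EXT_FL_COMPLETE));

        close(fd);
        return 0;
}

Even on failure the kernel copies the struct back, so me_moved_len reports how far a partially completed run got.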
index a1dae5bb54acda9d6e852d9d66e24c3d4cde8926..3b481f490633af2f483afd1817fe8ad538908b6a 100644 (file)
@@ -688,6 +688,31 @@ TRACE_EVENT(ocfs2_cache_block_dealloc,
                  __entry->blkno, __entry->bit)
 );
 
+TRACE_EVENT(ocfs2_trim_extent,
+       TP_PROTO(struct super_block *sb, unsigned long long blk,
+                unsigned long long count),
+       TP_ARGS(sb, blk, count),
+       TP_STRUCT__entry(
+               __field(int, dev_major)
+               __field(int, dev_minor)
+               __field(unsigned long long, blk)
+               __field(__u64,  count)
+       ),
+       TP_fast_assign(
+               __entry->dev_major = MAJOR(sb->s_dev);
+               __entry->dev_minor = MINOR(sb->s_dev);
+               __entry->blk = blk;
+               __entry->count = count;
+       ),
+       TP_printk("%d %d %llu %llu",
+                 __entry->dev_major, __entry->dev_minor,
+                 __entry->blk, __entry->count)
+);
+
+DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_trim_group);
+
+DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_trim_fs);
+
 /* End of trace events for fs/ocfs2/alloc.c. */
 
 /* Trace events for fs/ocfs2/localalloc.c. */
index 3c7606cff1ab4f3c7df789f14ef89dfdff70063e..ebfd3825f12a367b3c2786507146913a8191e56f 100644 (file)
@@ -66,7 +66,7 @@ struct ocfs2_cow_context {
                            u32 *num_clusters,
                            unsigned int *extent_flags);
        int (*cow_duplicate_clusters)(handle_t *handle,
-                                     struct ocfs2_cow_context *context,
+                                     struct file *file,
                                      u32 cpos, u32 old_cluster,
                                      u32 new_cluster, u32 new_len);
 };
@@ -2921,20 +2921,21 @@ static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
        return 0;
 }
 
-static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
-                                           struct ocfs2_cow_context *context,
-                                           u32 cpos, u32 old_cluster,
-                                           u32 new_cluster, u32 new_len)
+int ocfs2_duplicate_clusters_by_page(handle_t *handle,
+                                    struct file *file,
+                                    u32 cpos, u32 old_cluster,
+                                    u32 new_cluster, u32 new_len)
 {
        int ret = 0, partial;
-       struct ocfs2_caching_info *ci = context->data_et.et_ci;
+       struct inode *inode = file->f_path.dentry->d_inode;
+       struct ocfs2_caching_info *ci = INODE_CACHE(inode);
        struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
        u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
        struct page *page;
        pgoff_t page_index;
        unsigned int from, to, readahead_pages;
        loff_t offset, end, map_end;
-       struct address_space *mapping = context->inode->i_mapping;
+       struct address_space *mapping = inode->i_mapping;
 
        trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster,
                                               new_cluster, new_len);
@@ -2948,8 +2949,8 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
         * We only duplicate pages until we reach the page contains i_size - 1.
         * So trim 'end' to i_size.
         */
-       if (end > i_size_read(context->inode))
-               end = i_size_read(context->inode);
+       if (end > i_size_read(inode))
+               end = i_size_read(inode);
 
        while (offset < end) {
                page_index = offset >> PAGE_CACHE_SHIFT;
@@ -2972,10 +2973,9 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
                        BUG_ON(PageDirty(page));
 
-               if (PageReadahead(page) && context->file) {
+               if (PageReadahead(page)) {
                        page_cache_async_readahead(mapping,
-                                                  &context->file->f_ra,
-                                                  context->file,
+                                                  &file->f_ra, file,
                                                   page, page_index,
                                                   readahead_pages);
                }
@@ -2999,8 +2999,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
                        }
                }
 
-               ocfs2_map_and_dirty_page(context->inode,
-                                        handle, from, to,
+               ocfs2_map_and_dirty_page(inode, handle, from, to,
                                         page, 0, &new_block);
                mark_page_accessed(page);
 unlock:
@@ -3015,14 +3014,15 @@ unlock:
        return ret;
 }
 
-static int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
-                                          struct ocfs2_cow_context *context,
-                                          u32 cpos, u32 old_cluster,
-                                          u32 new_cluster, u32 new_len)
+int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
+                                   struct file *file,
+                                   u32 cpos, u32 old_cluster,
+                                   u32 new_cluster, u32 new_len)
 {
        int ret = 0;
-       struct super_block *sb = context->inode->i_sb;
-       struct ocfs2_caching_info *ci = context->data_et.et_ci;
+       struct inode *inode = file->f_path.dentry->d_inode;
+       struct super_block *sb = inode->i_sb;
+       struct ocfs2_caching_info *ci = INODE_CACHE(inode);
        int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
        u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster);
        u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
@@ -3145,8 +3145,8 @@ static int ocfs2_replace_clusters(handle_t *handle,
 
        /*If the old clusters is unwritten, no need to duplicate. */
        if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
-               ret = context->cow_duplicate_clusters(handle, context, cpos,
-                                                     old, new, len);
+               ret = context->cow_duplicate_clusters(handle, context->file,
+                                                     cpos, old, new, len);
                if (ret) {
                        mlog_errno(ret);
                        goto out;
@@ -3162,22 +3162,22 @@ out:
        return ret;
 }
 
-static int ocfs2_cow_sync_writeback(struct super_block *sb,
-                                   struct ocfs2_cow_context *context,
-                                   u32 cpos, u32 num_clusters)
+int ocfs2_cow_sync_writeback(struct super_block *sb,
+                            struct inode *inode,
+                            u32 cpos, u32 num_clusters)
 {
        int ret = 0;
        loff_t offset, end, map_end;
        pgoff_t page_index;
        struct page *page;
 
-       if (ocfs2_should_order_data(context->inode))
+       if (ocfs2_should_order_data(inode))
                return 0;
 
        offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
        end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits);
 
-       ret = filemap_fdatawrite_range(context->inode->i_mapping,
+       ret = filemap_fdatawrite_range(inode->i_mapping,
                                       offset, end - 1);
        if (ret < 0) {
                mlog_errno(ret);
@@ -3190,7 +3190,7 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb,
                if (map_end > end)
                        map_end = end;
 
-               page = find_or_create_page(context->inode->i_mapping,
+               page = find_or_create_page(inode->i_mapping,
                                           page_index, GFP_NOFS);
                BUG_ON(!page);
 
@@ -3349,7 +3349,7 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
         * in write-back mode.
         */
        if (context->get_clusters == ocfs2_di_get_clusters) {
-               ret = ocfs2_cow_sync_writeback(sb, context, cpos,
+               ret = ocfs2_cow_sync_writeback(sb, context->inode, cpos,
                                               orig_num_clusters);
                if (ret)
                        mlog_errno(ret);
index c8ce46f7d8e30ee842cc8966a8c034aefae3b98b..7754608c83a47b1b44425c8f9c5e13a2adc65675 100644 (file)
@@ -84,6 +84,17 @@ int ocfs2_refcount_cow_xattr(struct inode *inode,
                             struct buffer_head *ref_root_bh,
                             u32 cpos, u32 write_len,
                             struct ocfs2_post_refcount *post);
+int ocfs2_duplicate_clusters_by_page(handle_t *handle,
+                                    struct file *file,
+                                    u32 cpos, u32 old_cluster,
+                                    u32 new_cluster, u32 new_len);
+int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
+                                   struct file *file,
+                                   u32 cpos, u32 old_cluster,
+                                   u32 new_cluster, u32 new_len);
+int ocfs2_cow_sync_writeback(struct super_block *sb,
+                            struct inode *inode,
+                            u32 cpos, u32 num_clusters);
 int ocfs2_add_refcount_flag(struct inode *inode,
                            struct ocfs2_extent_tree *data_et,
                            struct ocfs2_caching_info *ref_ci,
index 5a521c748859945c76299e93fe01127c76408d8f..cdbaf5e97308f3564af7820c575c591e3cfcbf95 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/mount.h>
 #include <linux/seq_file.h>
 #include <linux/quotaops.h>
+#include <linux/cleancache.h>
 
 #define CREATE_TRACE_POINTS
 #include "ocfs2_trace.h"
@@ -1566,7 +1567,7 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
        if (osb->preferred_slot != OCFS2_INVALID_SLOT)
                seq_printf(s, ",preferred_slot=%d", osb->preferred_slot);
 
-       if (osb->s_atime_quantum != OCFS2_DEFAULT_ATIME_QUANTUM)
+       if (!(mnt->mnt_flags & MNT_NOATIME) && !(mnt->mnt_flags & MNT_RELATIME))
                seq_printf(s, ",atime_quantum=%u", osb->s_atime_quantum);
 
        if (osb->osb_commit_interval)
@@ -2352,6 +2353,7 @@ static int ocfs2_initialize_super(struct super_block *sb,
                mlog_errno(status);
                goto bail;
        }
+       cleancache_init_shared_fs((char *)&uuid_net_key, sb);
 
 bail:
        return status;
index de4ff29f1e0595c549877d16144064dae9bcbb7a..c368360c35a167be19592642f803dd4211f20b49 100644 (file)
@@ -240,8 +240,12 @@ static int omfs_remove(struct inode *dir, struct dentry *dentry)
        struct inode *inode = dentry->d_inode;
        int ret;
 
-       if (S_ISDIR(inode->i_mode) && !omfs_dir_is_empty(inode))
-               return -ENOTEMPTY;
+
+       if (S_ISDIR(inode->i_mode)) {
+               dentry_unhash(dentry);
+               if (!omfs_dir_is_empty(inode))
+                       return -ENOTEMPTY;
+       }
 
        ret = omfs_delete_entry(dentry);
        if (ret)
@@ -378,6 +382,9 @@ static int omfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        int err;
 
        if (new_inode) {
+               if (S_ISDIR(new_inode->i_mode))
+                       dentry_unhash(new_dentry);
+
                /* overwriting existing file/dir */
                err = omfs_remove(new_dir, new_dentry);
                if (err)
index 8ed4d3433199fb233913c3f2e6e88afc149c062c..f82e762eeca277683a49b23e4ce9534d29a672de 100644 (file)
@@ -256,10 +256,12 @@ ssize_t part_discard_alignment_show(struct device *dev,
 {
        struct hd_struct *p = dev_to_part(dev);
        struct gendisk *disk = dev_to_disk(dev);
+       unsigned int alignment = 0;
 
-       return sprintf(buf, "%u\n",
-                       queue_limit_discard_alignment(&disk->queue->limits,
-                                                       p->start_sect));
+       if (disk->queue)
+               alignment = queue_limit_discard_alignment(&disk->queue->limits,
+                                                               p->start_sect);
+       return sprintf(buf, "%u\n", alignment);
 }
 
 ssize_t part_stat_show(struct device *dev,
index 19d6750d1d6ce3b5571e463ee2fb2d768985d644..6296b403c67a3d5ca512b05e048ecf3525330153 100644 (file)
@@ -310,6 +310,15 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
                goto fail;
        }
 
+       /* Check the GUID Partition Table header size */
+       if (le32_to_cpu((*gpt)->header_size) >
+                       bdev_logical_block_size(state->bdev)) {
+               pr_debug("GUID Partition Table Header size is wrong: %u > %u\n",
+                       le32_to_cpu((*gpt)->header_size),
+                       bdev_logical_block_size(state->bdev));
+               goto fail;
+       }
+
        /* Check the GUID Partition Table CRC */
        origcrc = le32_to_cpu((*gpt)->header_crc32);
        (*gpt)->header_crc32 = 0;
index 5e4f776b0917a48bf0602d9ce8f0000f2b54d77d..9b45ee84fbccd584144fd942ffe8fcca34c47b50 100644 (file)
@@ -131,7 +131,7 @@ static inline void task_name(struct seq_file *m, struct task_struct *p)
  * you can test for combinations of others with
  * simple bit tests.
  */
-static const char *task_state_array[] = {
+static const char * const task_state_array[] = {
        "R (running)",          /*   0 */
        "S (sleeping)",         /*   1 */
        "D (disk sleep)",       /*   2 */
@@ -147,7 +147,7 @@ static const char *task_state_array[] = {
 static inline const char *get_task_state(struct task_struct *tsk)
 {
        unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state;
-       const char **p = &task_state_array[0];
+       const char * const *p = &task_state_array[0];
 
        BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(task_state_array));
 
index dc8bca72b002d9b01036bb7e659495eb972af322..4ede550517a66b42b0c646d227d32456d4245764 100644 (file)
@@ -894,20 +894,20 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
        if (!task)
                goto out_no_task;
 
+       copied = -ENOMEM;
+       page = (char *)__get_free_page(GFP_TEMPORARY);
+       if (!page)
+               goto out_task;
+
        mm = check_mem_permission(task);
        copied = PTR_ERR(mm);
        if (IS_ERR(mm))
-               goto out_task;
+               goto out_free;
 
        copied = -EIO;
        if (file->private_data != (void *)((long)current->self_exec_id))
                goto out_mm;
 
-       copied = -ENOMEM;
-       page = (char *)__get_free_page(GFP_TEMPORARY);
-       if (!page)
-               goto out_mm;
-
        copied = 0;
        while (count > 0) {
                int this_len, retval;
@@ -929,9 +929,11 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
                count -= retval;                        
        }
        *ppos = dst;
-       free_page((unsigned long) page);
+
 out_mm:
        mmput(mm);
+out_free:
+       free_page((unsigned long) page);
 out_task:
        put_task_struct(task);
 out_no_task:
@@ -1059,7 +1061,7 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
 {
        struct task_struct *task;
        char buffer[PROC_NUMBUF];
-       long oom_adjust;
+       int oom_adjust;
        unsigned long flags;
        int err;
 
@@ -1071,7 +1073,7 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf,
                goto out;
        }
 
-       err = strict_strtol(strstrip(buffer), 0, &oom_adjust);
+       err = kstrtoint(strstrip(buffer), 0, &oom_adjust);
        if (err)
                goto out;
        if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) &&
@@ -1168,7 +1170,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
        struct task_struct *task;
        char buffer[PROC_NUMBUF];
        unsigned long flags;
-       long oom_score_adj;
+       int oom_score_adj;
        int err;
 
        memset(buffer, 0, sizeof(buffer));
@@ -1179,7 +1181,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
                goto out;
        }
 
-       err = strict_strtol(strstrip(buffer), 0, &oom_score_adj);
+       err = kstrtoint(strstrip(buffer), 0, &oom_score_adj);
        if (err)
                goto out;
        if (oom_score_adj < OOM_SCORE_ADJ_MIN ||
@@ -1468,7 +1470,7 @@ sched_autogroup_write(struct file *file, const char __user *buf,
        struct inode *inode = file->f_path.dentry->d_inode;
        struct task_struct *p;
        char buffer[PROC_NUMBUF];
-       long nice;
+       int nice;
        int err;
 
        memset(buffer, 0, sizeof(buffer));
@@ -1477,9 +1479,9 @@ sched_autogroup_write(struct file *file, const char __user *buf,
        if (copy_from_user(buffer, buf, count))
                return -EFAULT;
 
-       err = strict_strtol(strstrip(buffer), 0, &nice);
-       if (err)
-               return -EINVAL;
+       err = kstrtoint(strstrip(buffer), 0, &nice);
+       if (err < 0)
+               return err;
 
        p = get_proc_task(inode);
        if (!p)
@@ -1576,57 +1578,6 @@ static const struct file_operations proc_pid_set_comm_operations = {
        .release        = single_release,
 };
 
-/*
- * We added or removed a vma mapping the executable. The vmas are only mapped
- * during exec and are not mapped with the mmap system call.
- * Callers must hold down_write() on the mm's mmap_sem for these
- */
-void added_exe_file_vma(struct mm_struct *mm)
-{
-       mm->num_exe_file_vmas++;
-}
-
-void removed_exe_file_vma(struct mm_struct *mm)
-{
-       mm->num_exe_file_vmas--;
-       if ((mm->num_exe_file_vmas == 0) && mm->exe_file){
-               fput(mm->exe_file);
-               mm->exe_file = NULL;
-       }
-
-}
-
-void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
-{
-       if (new_exe_file)
-               get_file(new_exe_file);
-       if (mm->exe_file)
-               fput(mm->exe_file);
-       mm->exe_file = new_exe_file;
-       mm->num_exe_file_vmas = 0;
-}
-
-struct file *get_mm_exe_file(struct mm_struct *mm)
-{
-       struct file *exe_file;
-
-       /* We need mmap_sem to protect against races with removal of
-        * VM_EXECUTABLE vmas */
-       down_read(&mm->mmap_sem);
-       exe_file = mm->exe_file;
-       if (exe_file)
-               get_file(exe_file);
-       up_read(&mm->mmap_sem);
-       return exe_file;
-}
-
-void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
-{
-       /* It's safe to write the exe_file pointer without exe_file_lock because
-        * this is called during fork when the task is not yet in /proc */
-       newmm->exe_file = get_mm_exe_file(oldmm);
-}
-
 static int proc_exe_link(struct inode *inode, struct path *exe_path)
 {
        struct task_struct *task;
index 1cffa2b8a2fcd5dc9a6b329ce29498ef9e24cab0..9758b654a1bcffe27c414ef322f34cd6a8658c33 100644 (file)
@@ -138,9 +138,9 @@ static int stat_open(struct inode *inode, struct file *file)
        struct seq_file *m;
        int res;
 
-       /* don't ask for more than the kmalloc() max size, currently 128 KB */
-       if (size > 128 * 1024)
-               size = 128 * 1024;
+       /* don't ask for more than the kmalloc() max size */
+       if (size > KMALLOC_MAX_SIZE)
+               size = KMALLOC_MAX_SIZE;
        buf = kmalloc(size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
index 2c9db29ea3581d7ad79a250cea453411a0b4c9e9..25b6a887adb916a93d20aa092481f67df451ff8c 100644 (file)
@@ -211,7 +211,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 {
        struct mm_struct *mm = vma->vm_mm;
        struct file *file = vma->vm_file;
-       int flags = vma->vm_flags;
+       vm_flags_t flags = vma->vm_flags;
        unsigned long ino = 0;
        unsigned long long pgoff = 0;
        unsigned long start, end;
@@ -536,15 +536,17 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
        char buffer[PROC_NUMBUF];
        struct mm_struct *mm;
        struct vm_area_struct *vma;
-       long type;
+       int type;
+       int rv;
 
        memset(buffer, 0, sizeof(buffer));
        if (count > sizeof(buffer) - 1)
                count = sizeof(buffer) - 1;
        if (copy_from_user(buffer, buf, count))
                return -EFAULT;
-       if (strict_strtol(strstrip(buffer), 10, &type))
-               return -EINVAL;
+       rv = kstrtoint(strstrip(buffer), 10, &type);
+       if (rv < 0)
+               return rv;
        if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
                return -EINVAL;
        task = get_proc_task(file->f_path.dentry->d_inode);
@@ -769,18 +771,12 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
        if (!task)
                goto out;
 
-       mm = mm_for_maps(task);
-       ret = PTR_ERR(mm);
-       if (!mm || IS_ERR(mm))
-               goto out_task;
-
        ret = -EINVAL;
        /* file position must be aligned */
        if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
                goto out_task;
 
        ret = 0;
-
        if (!count)
                goto out_task;
 
@@ -788,7 +784,12 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
        pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
        ret = -ENOMEM;
        if (!pm.buffer)
-               goto out_mm;
+               goto out_task;
+
+       mm = mm_for_maps(task);
+       ret = PTR_ERR(mm);
+       if (!mm || IS_ERR(mm))
+               goto out_free;
 
        pagemap_walk.pmd_entry = pagemap_pte_range;
        pagemap_walk.pte_hole = pagemap_pte_hole;
@@ -831,7 +832,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
                len = min(count, PM_ENTRY_BYTES * pm.pos);
                if (copy_to_user(buf, pm.buffer, len)) {
                        ret = -EFAULT;
-                       goto out_free;
+                       goto out_mm;
                }
                copied += len;
                buf += len;
@@ -841,10 +842,10 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
        if (!ret || ret == PM_END_OF_BUFFER)
                ret = copied;
 
-out_free:
-       kfree(pm.buffer);
 out_mm:
        mmput(mm);
+out_free:
+       kfree(pm.buffer);
 out_task:
        put_task_struct(task);
 out:
index 74802bc5ded95e09d510bcadfba94167e1390bc2..cd99bf557650c4c727cdd9ac060ec28800a0ffe5 100644 (file)
@@ -35,6 +35,46 @@ static u64 vmcore_size;
 
 static struct proc_dir_entry *proc_vmcore = NULL;
 
+/*
+ * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
+ * The called function has to take care of module refcounting.
+ */
+static int (*oldmem_pfn_is_ram)(unsigned long pfn);
+
+int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
+{
+       if (oldmem_pfn_is_ram)
+               return -EBUSY;
+       oldmem_pfn_is_ram = fn;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);
+
+void unregister_oldmem_pfn_is_ram(void)
+{
+       oldmem_pfn_is_ram = NULL;
+       wmb();
+}
+EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);
+
+static int pfn_is_ram(unsigned long pfn)
+{
+       int (*fn)(unsigned long pfn);
+       /* pfn is ram unless fn() checks pagetype */
+       int ret = 1;
+
+       /*
+        * Ask hypervisor if the pfn is really ram.
+        * A ballooned page contains no data and reading from such a page
+        * will cause high load in the hypervisor.
+        */
+       fn = oldmem_pfn_is_ram;
+       if (fn)
+               ret = fn(pfn);
+
+       return ret;
+}
+
 /* Reads a page from the oldmem device from given offset. */
 static ssize_t read_from_oldmem(char *buf, size_t count,
                                u64 *ppos, int userbuf)
@@ -55,9 +95,15 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
                else
                        nr_bytes = count;
 
-               tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
-               if (tmp < 0)
-                       return tmp;
+               /* If pfn is not ram, return zeros for sparse dump files */
+               if (pfn_is_ram(pfn) == 0)
+                       memset(buf, 0, nr_bytes);
+               else {
+                       tmp = copy_oldmem_page(pfn, buf, nr_bytes,
+                                               offset, userbuf);
+                       if (tmp < 0)
+                               return tmp;
+               }
                *ppos += nr_bytes;
                count -= nr_bytes;
                buf += nr_bytes;
index 118662690cdf7a88c88074a88fab26bff94fd3f3..76c8164d56513ace39925dd14ca1fdf64fe574f2 100644 (file)
@@ -831,6 +831,8 @@ static int reiserfs_rmdir(struct inode *dir, struct dentry *dentry)
        INITIALIZE_PATH(path);
        struct reiserfs_dir_entry de;
 
+       dentry_unhash(dentry);
+
        /* we will be doing 2 balancings and update 2 stat data, we change quotas
         * of the owner of the directory and of the owner of the parent directory.
         * The quota structure is possibly deleted only on last iput => outside
@@ -1225,6 +1227,9 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        unsigned long savelink = 1;
        struct timespec ctime;
 
+       if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
+               dentry_unhash(new_dentry);
+
        /* three balancings: (1) old name removal, (2) new name insertion
           and (3) maybe "save" link insertion
           stat data updates: (1) old directory,
index 47d2a4498b039cc0fa6c397ec1defd4f65c0db11..50f1abccd1cd1ceeb1e5a44b21b508747d834469 100644 (file)
@@ -105,7 +105,6 @@ static int xattr_rmdir(struct inode *dir, struct dentry *dentry)
        mutex_unlock(&dentry->d_inode->i_mutex);
        if (!error)
                d_delete(dentry);
-       dput(dentry);
 
        return error;
 }
index 8ab48bc2fa7d4f17f75288ac64544b9894f15dfc..ed0eb2a921f4bdb654bca99407b542db061b0054 100644 (file)
@@ -2,7 +2,7 @@
  * Squashfs - a compressed read only filesystem for Linux
  *
  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 4b5a3fbb1f1f6e3a611063fd0eadfe5f0579c248..f744be98cd5abc8dd6d93cf4a699981491f8e811 100644 (file)
@@ -2,7 +2,7 @@
  * Squashfs - a compressed read only filesystem for Linux
  *
  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -393,19 +393,36 @@ struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb,
 /*
  * Read a filesystem table (uncompressed sequence of bytes) from disk
  */
-int squashfs_read_table(struct super_block *sb, void *buffer, u64 block,
-       int length)
+void *squashfs_read_table(struct super_block *sb, u64 block, int length)
 {
        int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        int i, res;
-       void **data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
-       if (data == NULL)
-               return -ENOMEM;
+       void *table, *buffer, **data;
+
+       table = buffer = kmalloc(length, GFP_KERNEL);
+       if (table == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       data = kcalloc(pages, sizeof(void *), GFP_KERNEL);
+       if (data == NULL) {
+               res = -ENOMEM;
+               goto failed;
+       }
 
        for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE)
                data[i] = buffer;
+
        res = squashfs_read_data(sb, data, block, length |
                SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages);
+
        kfree(data);
-       return res;
+
+       if (res < 0)
+               goto failed;
+
+       return table;
+
+failed:
+       kfree(table);
+       return ERR_PTR(res);
 }
index e921bd213738fdfe1d15b50150c1060986d3e0ec..9f1b0bb96f138b49f67f008e9043a27ec997e480 100644 (file)
@@ -2,7 +2,7 @@
  * Squashfs - a compressed read only filesystem for Linux
  *
  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 099745ad5691ed1e129afba411eb56e821a4be27..8ba70cff09a60e89f39713eb88a51f2abb10145d 100644 (file)
@@ -4,7 +4,7 @@
  * Squashfs - a compressed read only filesystem for Linux
  *
  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 3f79cd1d0c197b4428356c120fbff7cead1042fe..9dfe2ce0fb70f0b0caee3d14c9f0f3026bf5d3f4 100644 (file)
@@ -2,7 +2,7 @@
  * Squashfs - a compressed read only filesystem for Linux
  *
  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 7f93d5a9ee056648a865ae2e2dbf45a3783ce9bb..730c56248c9baf4f65567aa8299a87e53e75b6ab 100644 (file)
@@ -2,7 +2,7 @@
  * Squashfs - a compressed read only filesystem for Linux
  *
  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -121,30 +121,38 @@ static struct dentry *squashfs_get_parent(struct dentry *child)
  * Read uncompressed inode lookup table indexes off disk into memory
  */
 __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
-               u64 lookup_table_start, unsigned int inodes)
+               u64 lookup_table_start, u64 next_table, unsigned int inodes)
 {
        unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes);
-       __le64 *inode_lookup_table;
-       int err;
+       __le64 *table;
 
        TRACE("In read_inode_lookup_table, length %d\n", length);
 
-       /* Allocate inode lookup table indexes */
-       inode_lookup_table = kmalloc(length, GFP_KERNEL);
-       if (inode_lookup_table == NULL) {
-               ERROR("Failed to allocate inode lookup table\n");
-               return ERR_PTR(-ENOMEM);
-       }
+       /* Sanity check values */
+
+       /* there should always be at least one inode */
+       if (inodes == 0)
+               return ERR_PTR(-EINVAL);
+
+       /* length bytes should not extend into the next table - this check
+        * also traps instances where lookup_table_start is incorrectly larger
+        * than the next table start
+        */
+       if (lookup_table_start + length > next_table)
+               return ERR_PTR(-EINVAL);
+
+       table = squashfs_read_table(sb, lookup_table_start, length);
 
-       err = squashfs_read_table(sb, inode_lookup_table, lookup_table_start,
-                       length);
-       if (err < 0) {
-               ERROR("unable to read inode lookup table\n");
-               kfree(inode_lookup_table);
-               return ERR_PTR(err);
+       /*
+        * table[0] points to the first inode lookup table metadata block,
+        * this should be less than lookup_table_start
+        */
+       if (!IS_ERR(table) && table[0] >= lookup_table_start) {
+               kfree(table);
+               return ERR_PTR(-EINVAL);
        }
 
-       return inode_lookup_table;
+       return table;
 }
 
 
index a25c5060bdcb5437726497e8bf43d621f09af599..38bb1c6405590fa92addaa44a88b3f411575a9a3 100644 (file)
@@ -2,7 +2,7 @@
  * Squashfs - a compressed read only filesystem for Linux
  *
  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 7eef571443c6d50936619357514fcc9d8df3be57..1516a6490bfb5a77b89f82eb6d280f2247d39f0c 100644 (file)
@@ -2,7 +2,7 @@
  * Squashfs - a compressed read only filesystem for Linux
  *
  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -71,26 +71,29 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
  * Read the uncompressed fragment lookup table indexes off disk into memory
  */
 __le64 *squashfs_read_fragment_index_table(struct super_block *sb,
-       u64 fragment_table_start, unsigned int fragments)
+       u64 fragment_table_start, u64 next_table, unsigned int fragments)
 {
        unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(fragments);
-       __le64 *fragment_index;
-       int err;
+       __le64 *table;
 
-       /* Allocate fragment lookup table indexes */
-       fragment_index = kmalloc(length, GFP_KERNEL);
-       if (fragment_index == NULL) {
-               ERROR("Failed to allocate fragment index table\n");
-               return ERR_PTR(-ENOMEM);
-       }
+       /*
+        * Sanity check, length bytes should not extend into the next table -
+        * this check also traps instances where fragment_table_start is
+        * incorrectly larger than the next table start
+        */
+       if (fragment_table_start + length > next_table)
+               return ERR_PTR(-EINVAL);
+
+       table = squashfs_read_table(sb, fragment_table_start, length);
 
-       err = squashfs_read_table(sb, fragment_index, fragment_table_start,
-                       length);
-       if (err < 0) {
-               ERROR("unable to read fragment index table\n");
-               kfree(fragment_index);
-               return ERR_PTR(err);
+       /*
+        * table[0] points to the first fragment table metadata block, this
+        * should be less than fragment_table_start
+        */
+       if (!IS_ERR(table) && table[0] >= fragment_table_start) {
+               kfree(table);
+               return ERR_PTR(-EINVAL);
        }
 
-       return fragment_index;
+       return table;
 }
index d8f32452638e192f4418ff7ffd226a27c7f373c3..a70858e0fb4457069d270f51ffd58b13d40606b4 100644 (file)
@@ -2,7 +2,7 @@
  * Squashfs - a compressed read only filesystem for Linux
  *
  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -66,27 +66,37 @@ int squashfs_get_id(struct super_block *sb, unsigned int index,
  * Read uncompressed id lookup table indexes from disk into memory
  */
 __le64 *squashfs_read_id_index_table(struct super_block *sb,
-                       u64 id_table_start, unsigned short no_ids)
+               u64 id_table_start, u64 next_table, unsigned short no_ids)
 {
        unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids);
-       __le64 *id_table;
-       int err;
+       __le64 *table;
 
        TRACE("In read_id_index_table, length %d\n", length);
 
-       /* Allocate id lookup table indexes */
-       id_table = kmalloc(length, GFP_KERNEL);
-       if (id_table == NULL) {
-               ERROR("Failed to allocate id index table\n");
-               return ERR_PTR(-ENOMEM);
-       }
+       /* Sanity check values */
+
+       /* there should always be at least one id */
+       if (no_ids == 0)
+               return ERR_PTR(-EINVAL);
+
+       /*
+        * length bytes should not extend into the next table - this check
+        * also traps instances where id_table_start is incorrectly larger
+        * than the next table start
+        */
+       if (id_table_start + length > next_table)
+               return ERR_PTR(-EINVAL);
+
+       table = squashfs_read_table(sb, id_table_start, length);
 
-       err = squashfs_read_table(sb, id_table, id_table_start, length);
-       if (err < 0) {
-               ERROR("unable to read id index table\n");
-               kfree(id_table);
-               return ERR_PTR(err);
+       /*
+        * table[0] points to the first id lookup table metadata block, this
+        * should be less than id_table_start
+        */
+       if (!IS_ERR(table) && table[0] >= id_table_start) {
+               kfree(table);
+               return ERR_PTR(-EINVAL);
        }
 
-       return id_table;
+       return table;
 }
index 62e63ad250755ed6d51fd56aa75f2c91f82e74e7..04bebcaa237331cd3a27d252dbdcc60dd4e6d98d 100644 (file)
@@ -2,7 +2,7 @@
  * Squashfs - a compressed read only filesystem for Linux
  *
  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 5d922a6701ab730bf27d70451dd5c37c17337c9e..4bc63ac64bc01bdbbc01cc6a22e5494b06a876ae 100644 (file)
@@ -2,7 +2,7 @@
  * Squashfs - a compressed read only filesystem for Linux
  *
  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 1f2e608b87858ffdd39911bd09155e4bbc845cac..e3be6a71cfa7508c4602c40cdab86641ac6742d7 100644 (file)
@@ -2,7 +2,7 @@
  * Squashfs - a compressed read only filesystem for Linux
  *
  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -44,24 +44,24 @@ extern struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *,
                                u64, int);
 extern struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *,
                                u64, int);
-extern int squashfs_read_table(struct super_block *, void *, u64, int);
+extern void *squashfs_read_table(struct super_block *, u64, int);
 
 /* decompressor.c */
 extern const struct squashfs_decompressor *squashfs_lookup_decompressor(int);
 extern void *squashfs_decompressor_init(struct super_block *, unsigned short);
 
 /* export.c */
-extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64,
+extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64, u64,
                                unsigned int);
 
 /* fragment.c */
 extern int squashfs_frag_lookup(struct super_block *, unsigned int, u64 *);
 extern __le64 *squashfs_read_fragment_index_table(struct super_block *,
-                               u64, unsigned int);
+                               u64, u64, unsigned int);
 
 /* id.c */
 extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *);
-extern __le64 *squashfs_read_id_index_table(struct super_block *, u64,
+extern __le64 *squashfs_read_id_index_table(struct super_block *, u64, u64,
                                unsigned short);
 
 /* inode.c */
index 4582c568ef4d115df3491af9f7c46216e5acb7bf..b4a4e539a08ca99846979777e738279962779568 100644 (file)
@@ -4,7 +4,7 @@
  * Squashfs
  *
  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 359baefc01fc00be8eae64f74e77b847061e5521..73588e7700ed8a38aa98b5765d0d331df9c83193 100644 (file)
@@ -4,7 +4,7 @@
  * Squashfs
  *
  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index d9037a5215f00b77044c030b37c1255fe4273a41..651f0b31d2966318bfd5ec7ce6ee4bd152486c8e 100644 (file)
@@ -4,7 +4,7 @@
  * Squashfs
  *
  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 5c8184c061a49e15920909b07d151b0a6b985ea6..6f26abee35970dc54bc5a88d96d07592f8fd24de 100644 (file)
@@ -2,7 +2,7 @@
  * Squashfs - a compressed read only filesystem for Linux
  *
  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -83,7 +83,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
        long long root_inode;
        unsigned short flags;
        unsigned int fragments;
-       u64 lookup_table_start, xattr_id_table_start;
+       u64 lookup_table_start, xattr_id_table_start, next_table;
        int err;
 
        TRACE("Entered squashfs_fill_superblock\n");
@@ -95,12 +95,6 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
        }
        msblk = sb->s_fs_info;
 
-       sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
-       if (sblk == NULL) {
-               ERROR("Failed to allocate squashfs_super_block\n");
-               goto failure;
-       }
-
        msblk->devblksize = sb_min_blocksize(sb, BLOCK_SIZE);
        msblk->devblksize_log2 = ffz(~msblk->devblksize);
 
@@ -114,10 +108,12 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
         * of bytes_used) we need to set it to an initial sensible dummy value
         */
        msblk->bytes_used = sizeof(*sblk);
-       err = squashfs_read_table(sb, sblk, SQUASHFS_START, sizeof(*sblk));
+       sblk = squashfs_read_table(sb, SQUASHFS_START, sizeof(*sblk));
 
-       if (err < 0) {
+       if (IS_ERR(sblk)) {
                ERROR("unable to read squashfs_super_block\n");
+               err = PTR_ERR(sblk);
+               sblk = NULL;
                goto failed_mount;
        }
 
@@ -218,18 +214,61 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
                goto failed_mount;
        }
 
+       /* Handle xattrs */
+       sb->s_xattr = squashfs_xattr_handlers;
+       xattr_id_table_start = le64_to_cpu(sblk->xattr_id_table_start);
+       if (xattr_id_table_start == SQUASHFS_INVALID_BLK) {
+               next_table = msblk->bytes_used;
+               goto allocate_id_index_table;
+       }
+
+       /* Allocate and read xattr id lookup table */
+       msblk->xattr_id_table = squashfs_read_xattr_id_table(sb,
+               xattr_id_table_start, &msblk->xattr_table, &msblk->xattr_ids);
+       if (IS_ERR(msblk->xattr_id_table)) {
+               ERROR("unable to read xattr id index table\n");
+               err = PTR_ERR(msblk->xattr_id_table);
+               msblk->xattr_id_table = NULL;
+               if (err != -ENOTSUPP)
+                       goto failed_mount;
+       }
+       next_table = msblk->xattr_table;
+
+allocate_id_index_table:
        /* Allocate and read id index table */
        msblk->id_table = squashfs_read_id_index_table(sb,
-               le64_to_cpu(sblk->id_table_start), le16_to_cpu(sblk->no_ids));
+               le64_to_cpu(sblk->id_table_start), next_table,
+               le16_to_cpu(sblk->no_ids));
        if (IS_ERR(msblk->id_table)) {
+               ERROR("unable to read id index table\n");
                err = PTR_ERR(msblk->id_table);
                msblk->id_table = NULL;
                goto failed_mount;
        }
+       next_table = msblk->id_table[0];
+
+       /* Handle inode lookup table */
+       lookup_table_start = le64_to_cpu(sblk->lookup_table_start);
+       if (lookup_table_start == SQUASHFS_INVALID_BLK)
+               goto handle_fragments;
+
+       /* Allocate and read inode lookup table */
+       msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb,
+               lookup_table_start, next_table, msblk->inodes);
+       if (IS_ERR(msblk->inode_lookup_table)) {
+               ERROR("unable to read inode lookup table\n");
+               err = PTR_ERR(msblk->inode_lookup_table);
+               msblk->inode_lookup_table = NULL;
+               goto failed_mount;
+       }
+       next_table = msblk->inode_lookup_table[0];
 
+       sb->s_export_op = &squashfs_export_ops;
+
+handle_fragments:
        fragments = le32_to_cpu(sblk->fragments);
        if (fragments == 0)
-               goto allocate_lookup_table;
+               goto check_directory_table;
 
        msblk->fragment_cache = squashfs_cache_init("fragment",
                SQUASHFS_CACHED_FRAGMENTS, msblk->block_size);
@@ -240,45 +279,29 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
 
        /* Allocate and read fragment index table */
        msblk->fragment_index = squashfs_read_fragment_index_table(sb,
-               le64_to_cpu(sblk->fragment_table_start), fragments);
+               le64_to_cpu(sblk->fragment_table_start), next_table, fragments);
        if (IS_ERR(msblk->fragment_index)) {
+               ERROR("unable to read fragment index table\n");
                err = PTR_ERR(msblk->fragment_index);
                msblk->fragment_index = NULL;
                goto failed_mount;
        }
+       next_table = msblk->fragment_index[0];
 
-allocate_lookup_table:
-       lookup_table_start = le64_to_cpu(sblk->lookup_table_start);
-       if (lookup_table_start == SQUASHFS_INVALID_BLK)
-               goto allocate_xattr_table;
-
-       /* Allocate and read inode lookup table */
-       msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb,
-               lookup_table_start, msblk->inodes);
-       if (IS_ERR(msblk->inode_lookup_table)) {
-               err = PTR_ERR(msblk->inode_lookup_table);
-               msblk->inode_lookup_table = NULL;
+check_directory_table:
+       /* Sanity check directory_table */
+       if (msblk->directory_table >= next_table) {
+               err = -EINVAL;
                goto failed_mount;
        }
 
-       sb->s_export_op = &squashfs_export_ops;
-
-allocate_xattr_table:
-       sb->s_xattr = squashfs_xattr_handlers;
-       xattr_id_table_start = le64_to_cpu(sblk->xattr_id_table_start);
-       if (xattr_id_table_start == SQUASHFS_INVALID_BLK)
-               goto allocate_root;
-
-       /* Allocate and read xattr id lookup table */
-       msblk->xattr_id_table = squashfs_read_xattr_id_table(sb,
-               xattr_id_table_start, &msblk->xattr_table, &msblk->xattr_ids);
-       if (IS_ERR(msblk->xattr_id_table)) {
-               err = PTR_ERR(msblk->xattr_id_table);
-               msblk->xattr_id_table = NULL;
-               if (err != -ENOTSUPP)
-                       goto failed_mount;
+       /* Sanity check inode_table */
+       if (msblk->inode_table >= msblk->directory_table) {
+               err = -EINVAL;
+               goto failed_mount;
        }
-allocate_root:
+
+       /* allocate root */
        root = new_inode(sb);
        if (!root) {
                err = -ENOMEM;
@@ -318,11 +341,6 @@ failed_mount:
        sb->s_fs_info = NULL;
        kfree(sblk);
        return err;
-
-failure:
-       kfree(sb->s_fs_info);
-       sb->s_fs_info = NULL;
-       return -ENOMEM;
 }
 
 
@@ -475,5 +493,5 @@ static const struct super_operations squashfs_super_ops = {
 module_init(init_squashfs_fs);
 module_exit(exit_squashfs_fs);
 MODULE_DESCRIPTION("squashfs 4.0, a compressed read-only filesystem");
-MODULE_AUTHOR("Phillip Lougher <phillip@lougher.demon.co.uk>");
+MODULE_AUTHOR("Phillip Lougher <phillip@squashfs.org.uk>");
 MODULE_LICENSE("GPL");
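
The reordered fill_super above reads the optional xattr id table first and then threads a next_table value through each subsequent table read, so every table start is checked against the start of whichever table follows it on disk, finishing with the directory table and inode table checks. A small self-contained sketch of that monotonic-ordering test, using a made-up layout structure rather than the real superblock:

#include <stdint.h>
#include <stdio.h>

/* hypothetical superblock layout; field names mirror the starts used above */
struct layout {
	uint64_t inode_table;
	uint64_t directory_table;
	uint64_t fragment_table;
	uint64_t lookup_table;
	uint64_t id_table;
	uint64_t xattr_id_table;
	uint64_t bytes_used;
};

/* each table must start strictly below the table that follows it on disk */
static int layout_is_sane(const struct layout *l)
{
	uint64_t next_table = l->bytes_used;
	const uint64_t starts[] = {
		l->xattr_id_table, l->id_table, l->lookup_table,
		l->fragment_table, l->directory_table, l->inode_table,
	};

	for (unsigned i = 0; i < sizeof(starts) / sizeof(starts[0]); i++) {
		if (starts[i] >= next_table)
			return 0;
		next_table = starts[i];
	}
	return 1;
}

int main(void)
{
	struct layout good = { 100, 200, 300, 400, 500, 600, 700 };
	struct layout bad  = good;

	bad.directory_table = 50;	/* below the inode table: corrupt image */
	printf("good image: %s\n", layout_is_sane(&good) ? "ok" : "rejected");
	printf("bad image:  %s\n", layout_is_sane(&bad) ? "ok" : "rejected");
	return 0;
}
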
index ec86434921e18c6a5bd180246439f84b56a5d00b..1191817264cc22dc1b07b6d5ca14aee99668e33c 100644 (file)
@@ -2,7 +2,7 @@
  * Squashfs - a compressed read only filesystem for Linux
  *
  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 3876c36699a1ca5a8d8303c11e9a5d99ab4c0492..92fcde7b4d6189c3d88f341d170dbd0fa3441012 100644 (file)
@@ -2,7 +2,7 @@
  * Squashfs - a compressed read only filesystem for Linux
  *
  * Copyright (c) 2010
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index b634efce4bded85e487849738af0cda27650048c..c83f5d9ec125c75a79d8578f261fe07988ad2254 100644 (file)
@@ -2,7 +2,7 @@
  * Squashfs - a compressed read only filesystem for Linux
  *
  * Copyright (c) 2010
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -31,6 +31,7 @@ static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb,
                u64 start, u64 *xattr_table_start, int *xattr_ids)
 {
        ERROR("Xattrs in filesystem, these will be ignored\n");
+       *xattr_table_start = start;
        return ERR_PTR(-ENOTSUPP);
 }
 
index 05385dbe1465f8ea4a2cec4cddba4042cb22ceed..c89607d690c48a8b1e6eb7cb3e8929a30dda4689 100644 (file)
@@ -2,7 +2,7 @@
  * Squashfs - a compressed read only filesystem for Linux
  *
  * Copyright (c) 2010
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -67,34 +67,29 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
                u64 *xattr_table_start, int *xattr_ids)
 {
        unsigned int len;
-       __le64 *xid_table;
-       struct squashfs_xattr_id_table id_table;
-       int err;
+       struct squashfs_xattr_id_table *id_table;
+
+       id_table = squashfs_read_table(sb, start, sizeof(*id_table));
+       if (IS_ERR(id_table))
+               return (__le64 *) id_table;
+
+       *xattr_table_start = le64_to_cpu(id_table->xattr_table_start);
+       *xattr_ids = le32_to_cpu(id_table->xattr_ids);
+       kfree(id_table);
+
+       /* Sanity check values */
+
+       /* there is always at least one xattr id */
+       if (*xattr_ids == 0)
+               return ERR_PTR(-EINVAL);
+
+       /* xattr_table should be less than start */
+       if (*xattr_table_start >= start)
+               return ERR_PTR(-EINVAL);
 
-       err = squashfs_read_table(sb, &id_table, start, sizeof(id_table));
-       if (err < 0) {
-               ERROR("unable to read xattr id table\n");
-               return ERR_PTR(err);
-       }
-       *xattr_table_start = le64_to_cpu(id_table.xattr_table_start);
-       *xattr_ids = le32_to_cpu(id_table.xattr_ids);
        len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
 
        TRACE("In read_xattr_index_table, length %d\n", len);
 
-       /* Allocate xattr id lookup table indexes */
-       xid_table = kmalloc(len, GFP_KERNEL);
-       if (xid_table == NULL) {
-               ERROR("Failed to allocate xattr id index table\n");
-               return ERR_PTR(-ENOMEM);
-       }
-
-       err = squashfs_read_table(sb, xid_table, start + sizeof(id_table), len);
-       if (err < 0) {
-               ERROR("unable to read xattr id index table\n");
-               kfree(xid_table);
-               return ERR_PTR(err);
-       }
-
-       return xid_table;
+       return squashfs_read_table(sb, start + sizeof(*id_table), len);
 }
index aa47a286d1f8e813b47e2d7fd30ee22957e9ced1..1760b7d108f66a55614102c43713ef315592374e 100644 (file)
@@ -2,7 +2,7 @@
  * Squashfs - a compressed read only filesystem for Linux
  *
  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index 517688b32ffaec065505ef967742d43c1295421f..55d918fd2d862605beb85dae54c3a177b776381d 100644 (file)
@@ -2,7 +2,7 @@
  * Squashfs - a compressed read only filesystem for Linux
  *
  * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
- * Phillip Lougher <phillip@lougher.demon.co.uk>
+ * Phillip Lougher <phillip@squashfs.org.uk>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
index c04f7e0b7ed260e906f49d6d295e47d63d0b7e87..c75593953c5275eb89b56b08358d25830728cf1e 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/mutex.h>
 #include <linux/backing-dev.h>
 #include <linux/rculist_bl.h>
+#include <linux/cleancache.h>
 #include "internal.h"
 
 
@@ -112,6 +113,7 @@ static struct super_block *alloc_super(struct file_system_type *type)
                s->s_maxbytes = MAX_NON_LFS;
                s->s_op = &default_op;
                s->s_time_gran = 1000000000;
+               s->cleancache_poolid = -1;
        }
 out:
        return s;
@@ -177,6 +179,7 @@ void deactivate_locked_super(struct super_block *s)
 {
        struct file_system_type *fs = s->s_type;
        if (atomic_dec_and_test(&s->s_active)) {
+               cleancache_flush_fs(s);
                fs->kill_sb(s);
                /*
                 * We need to call rcu_barrier so all the delayed rcu free
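
The two hunks above wire a freshly allocated superblock to cleancache by starting it with no pool (cleancache_poolid = -1) and by flushing the filesystem's pool in deactivate_locked_super() before kill_sb() tears it down. The toy program below only models that pool lifetime (lazy creation on first use, teardown at unmount); it does not use the real cleancache API, and all names are made up.

#include <stdio.h>

/* toy model of cleancache pool lifetime, not the real kernel API:
 * -1 means "no pool claimed yet"; an id is handed out lazily and must
 * be dropped before the filesystem goes away. */
struct toy_super { int cleancache_poolid; };

static int next_pool_id;

static void toy_init_fs(struct toy_super *s)
{
	s->cleancache_poolid = -1;		/* mirrors alloc_super() above */
}

static void toy_put_page(struct toy_super *s)
{
	if (s->cleancache_poolid < 0)		/* lazy pool creation on first use */
		s->cleancache_poolid = next_pool_id++;
	printf("cached clean page in pool %d\n", s->cleancache_poolid);
}

static void toy_flush_fs(struct toy_super *s)
{
	if (s->cleancache_poolid >= 0) {	/* mirrors cleancache_flush_fs() */
		printf("dropping pool %d\n", s->cleancache_poolid);
		s->cleancache_poolid = -1;
	}
}

int main(void)
{
	struct toy_super s;

	toy_init_fs(&s);
	toy_put_page(&s);
	toy_flush_fs(&s);			/* runs before kill_sb() on unmount */
	return 0;
}
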
index e474fbcf8bde991924da62e321d2be7eae8ac5f6..e2cc6756f3b1595600d8ae25f537b4e1b0b19855 100644 (file)
@@ -196,6 +196,8 @@ static int sysv_rmdir(struct inode * dir, struct dentry * dentry)
        struct inode *inode = dentry->d_inode;
        int err = -ENOTEMPTY;
 
+       dentry_unhash(dentry);
+
        if (sysv_empty_dir(inode)) {
                err = sysv_unlink(dir, dentry);
                if (!err) {
@@ -222,6 +224,9 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
        struct sysv_dir_entry * old_de;
        int err = -ENOENT;
 
+       if (new_inode && S_ISDIR(new_inode->i_mode))
+               dentry_unhash(new_dentry);
+
        old_de = sysv_find_entry(old_dentry, &old_page);
        if (!old_de)
                goto out;
index ef5abd38f0bf32a90f30ef6994c81b6d864f93e8..c2b80943560d6f9516468903a9847ae5089e28f8 100644 (file)
@@ -656,6 +656,8 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
        struct ubifs_inode *dir_ui = ubifs_inode(dir);
        struct ubifs_budget_req req = { .mod_dent = 1, .dirtied_ino = 2 };
 
+       dentry_unhash(dentry);
+
        /*
         * Budget request settings: deletion direntry, deletion inode and
         * changing the parent inode. If budgeting fails, go ahead anyway
@@ -976,6 +978,9 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        .dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) };
        struct timespec time;
 
+       if (new_inode && S_ISDIR(new_inode->i_mode))
+               dentry_unhash(new_dentry);
+
        /*
         * Budget request settings: deletion direntry, new direntry, removing
         * the old inode, and changing old and new parent directory inodes.
index f1dce848ef966ea1853c9f50ce2d9cc997114a18..4d76594c2a8fbe33c9a088f74d03637b87c11bdb 100644 (file)
@@ -783,6 +783,8 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry)
        struct fileIdentDesc *fi, cfi;
        struct kernel_lb_addr tloc;
 
+       dentry_unhash(dentry);
+
        retval = -ENOENT;
        fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi);
        if (!fi)
@@ -1081,6 +1083,9 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct kernel_lb_addr tloc;
        struct udf_inode_info *old_iinfo = UDF_I(old_inode);
 
+       if (new_inode && S_ISDIR(new_inode->i_mode))
+               dentry_unhash(new_dentry);
+
        ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi);
        if (ofi) {
                if (ofibh.sbh != ofibh.ebh)
index 46f7a807bbc1ec8313af3df1a4c08c2afb3498eb..42694e11c23de46b25f1177888b97ec2aac1cf32 100644 (file)
@@ -424,8 +424,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
                        ufs_cpu_to_data_ptr(sb, p, result);
                        *err = 0;
                        UFS_I(inode)->i_lastfrag =
-                               max_t(u32, UFS_I(inode)->i_lastfrag,
-                                     fragment + count);
+                               max(UFS_I(inode)->i_lastfrag, fragment + count);
                        ufs_clear_frags(inode, result + oldcount,
                                        newcount - oldcount, locked_page != NULL);
                }
@@ -440,7 +439,8 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
        result = ufs_add_fragments (inode, tmp, oldcount, newcount, err);
        if (result) {
                *err = 0;
-               UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
+               UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
+                                               fragment + count);
                ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
                                locked_page != NULL);
                unlock_super(sb);
@@ -479,7 +479,8 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
                                   uspi->s_sbbase + result, locked_page);
                ufs_cpu_to_data_ptr(sb, p, result);
                *err = 0;
-               UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
+               UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag,
+                                               fragment + count);
                unlock_super(sb);
                if (newcount < request)
                        ufs_free_fragments (inode, result + newcount, request - newcount);
index 29309e25417fdf30209f69d301ea55e4daaed401..953ebdfc5bf7ea18d8d87e8ccd98697a90e798a7 100644 (file)
@@ -258,6 +258,8 @@ static int ufs_rmdir (struct inode * dir, struct dentry *dentry)
        struct inode * inode = dentry->d_inode;
        int err= -ENOTEMPTY;
 
+       dentry_unhash(dentry);
+
        lock_ufs(dir->i_sb);
        if (ufs_empty_dir (inode)) {
                err = ufs_unlink(dir, dentry);
@@ -282,6 +284,9 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct ufs_dir_entry *old_de;
        int err = -ENOENT;
 
+       if (new_inode && S_ISDIR(new_inode->i_mode))
+               dentry_unhash(new_dentry);
+
        old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page);
        if (!old_de)
                goto out;
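
sysv, ubifs, udf and ufs all gain the same two calls in this series: dentry_unhash() on the victim at the start of rmdir, and on new_dentry in rename only when the target inode is a directory. The snippet below is a toy model of that convention with made-up types, not the real VFS interfaces.

#include <stdbool.h>
#include <stdio.h>

/* toy stand-ins: a "dentry" with a hashed flag and a directory marker */
struct toy_dentry { const char *name; bool is_dir; bool hashed; };

static void toy_dentry_unhash(struct toy_dentry *d)
{
	d->hashed = false;
	printf("%s unhashed\n", d->name);
}

static void toy_rmdir(struct toy_dentry *victim)
{
	toy_dentry_unhash(victim);		/* always, before the fs-specific work */
	/* ... directory emptiness check and removal would go here ... */
}

static void toy_rename(struct toy_dentry *old, struct toy_dentry *target)
{
	if (target && target->is_dir)		/* only directory targets get unhashed */
		toy_dentry_unhash(target);
	printf("rename %s over %s\n", old->name, target ? target->name : "(new name)");
}

int main(void)
{
	struct toy_dentry a = { "a", true, true };
	struct toy_dentry b = { "b", true, true };

	toy_rename(&b, &a);
	toy_rmdir(&b);
	return 0;
}
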
index 5f821dbc057905c379df686dfbb2f2ff67609c9a..f04f89fbd4d9914b0a1a16f2cce8c8e0799e020e 100644 (file)
@@ -84,7 +84,7 @@ static int ufs_trunc_direct(struct inode *inode)
        retry = 0;
        
        frag1 = DIRECT_FRAGMENT;
-       frag4 = min_t(u32, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
+       frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag);
        frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1);
        frag3 = frag4 & ~uspi->s_fpbmask;
        block1 = block2 = 0;
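
The ufs hunks above replace max_t(u32, ...) with plain max() and widen min_t() to u64 because the fragment numbers being compared can exceed 32 bits; forcing the comparison through u32 silently truncates the larger operand. A small demonstration of that bug class, with the kernel macros only roughly approximated in userspace:

#include <stdint.h>
#include <stdio.h>

/* rough userspace stand-ins for the kernel's max_t()/max() macros */
#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))
#define max(a, b)         ((a) > (b) ? (a) : (b))

int main(void)
{
	uint64_t lastfrag = 0x100000010ULL;	/* fragment number above 4G */
	uint64_t candidate = 0x20;

	/* casting both sides to u32 drops the high bits of lastfrag */
	printf("max_t(u32,...): 0x%llx\n",
	       (unsigned long long)max_t(uint32_t, lastfrag, candidate));
	/* comparing at full width keeps the larger 64-bit value */
	printf("max():          0x%llx\n",
	       (unsigned long long)max(lastfrag, candidate));
	return 0;
}
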
index d61611c88012c8daaab1840f9534983fac62bd2d..244e797dae327a7da95992e23697489397c4bbc1 100644 (file)
@@ -191,3 +191,32 @@ xfs_ioc_trim(
                return -XFS_ERROR(EFAULT);
        return 0;
 }
+
+int
+xfs_discard_extents(
+       struct xfs_mount        *mp,
+       struct list_head        *list)
+{
+       struct xfs_busy_extent  *busyp;
+       int                     error = 0;
+
+       list_for_each_entry(busyp, list, list) {
+               trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
+                                        busyp->length);
+
+               error = -blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
+                               XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
+                               XFS_FSB_TO_BB(mp, busyp->length),
+                               GFP_NOFS, 0);
+               if (error && error != EOPNOTSUPP) {
+                       xfs_info(mp,
+        "discard failed for extent [0x%llu,%u], error %d",
+                                (unsigned long long)busyp->bno,
+                                busyp->length,
+                                error);
+                       return error;
+               }
+       }
+
+       return 0;
+}
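
xfs_discard_extents() above negates the blkdev_issue_discard() return value because the block layer reports negative errnos while this code path works with positive ones, and EOPNOTSUPP is then tolerated so devices without discard support do not fail the whole operation. A tiny illustration of that sign convention, with a fake discard helper standing in for the block layer:

#include <errno.h>
#include <stdio.h>

/* stand-in for a block-layer helper that reports errors as negative errnos */
static int fake_issue_discard(int supported)
{
	return supported ? 0 : -EOPNOTSUPP;
}

int main(void)
{
	/* mirror the "-blkdev_issue_discard(...)" sign flip above:
	 * the caller works with positive errnos */
	int error = -fake_issue_discard(0);

	if (error && error != EOPNOTSUPP)
		printf("hard failure: %d\n", error);
	else
		printf("unsupported or ok, continuing (error=%d)\n", error);
	return 0;
}
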
index e82b6dd3e127707ed0a8e154ab1617da23444a90..344879aea646cfbaf3ac1c57916f9fce5e6a50c2 100644 (file)
@@ -2,7 +2,9 @@
 #define XFS_DISCARD_H 1
 
 struct fstrim_range;
+struct list_head;
 
 extern int     xfs_ioc_trim(struct xfs_mount *, struct fstrim_range __user *);
+extern int     xfs_discard_extents(struct xfs_mount *, struct list_head *);
 
 #endif /* XFS_DISCARD_H */
index b0aa59e51fd066377d29f92da999dff54b1a1620..98b9c91fcdf1d9d101339aadaea2740e74cb6506 100644 (file)
@@ -110,8 +110,10 @@ mempool_t *xfs_ioend_pool;
 #define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */
 #define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */
 #define MNTOPT_QUOTANOENF  "qnoenforce"        /* same as uqnoenforce */
-#define MNTOPT_DELAYLOG   "delaylog"   /* Delayed loging enabled */
-#define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed loging disabled */
+#define MNTOPT_DELAYLOG    "delaylog"  /* Delayed logging enabled */
+#define MNTOPT_NODELAYLOG  "nodelaylog"        /* Delayed logging disabled */
+#define MNTOPT_DISCARD    "discard"    /* Discard unused blocks */
+#define MNTOPT_NODISCARD   "nodiscard" /* Do not discard unused blocks */
 
 /*
  * Table driven mount option parser.
@@ -355,6 +357,10 @@ xfs_parseargs(
                        mp->m_flags |= XFS_MOUNT_DELAYLOG;
                } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
                        mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
+               } else if (!strcmp(this_char, MNTOPT_DISCARD)) {
+                       mp->m_flags |= XFS_MOUNT_DISCARD;
+               } else if (!strcmp(this_char, MNTOPT_NODISCARD)) {
+                       mp->m_flags &= ~XFS_MOUNT_DISCARD;
                } else if (!strcmp(this_char, "ihashsize")) {
                        xfs_warn(mp,
        "ihashsize no longer used, option is deprecated.");
@@ -388,6 +394,13 @@ xfs_parseargs(
                return EINVAL;
        }
 
+       if ((mp->m_flags & XFS_MOUNT_DISCARD) &&
+           !(mp->m_flags & XFS_MOUNT_DELAYLOG)) {
+               xfs_warn(mp,
+       "the discard option is incompatible with the nodelaylog option");
+               return EINVAL;
+       }
+
 #ifndef CONFIG_XFS_QUOTA
        if (XFS_IS_QUOTA_RUNNING(mp)) {
                xfs_warn(mp, "quota support not available in this kernel.");
@@ -488,6 +501,7 @@ xfs_showargs(
                { XFS_MOUNT_FILESTREAMS,        "," MNTOPT_FILESTREAM },
                { XFS_MOUNT_GRPID,              "," MNTOPT_GRPID },
                { XFS_MOUNT_DELAYLOG,           "," MNTOPT_DELAYLOG },
+               { XFS_MOUNT_DISCARD,            "," MNTOPT_DISCARD },
                { 0, NULL }
        };
        static struct proc_xfs_info xfs_info_unset[] = {
index da0a561ffba2abc2ed277992f21f44c1a38dbc54..6530769a999bdb045ba4ec5d1588440a04993e1c 100644 (file)
@@ -187,6 +187,9 @@ struct xfs_busy_extent {
        xfs_agnumber_t  agno;
        xfs_agblock_t   bno;
        xfs_extlen_t    length;
+       unsigned int    flags;
+#define XFS_ALLOC_BUSY_DISCARDED       0x01    /* undergoing a discard op. */
+#define XFS_ALLOC_BUSY_SKIP_DISCARD    0x02    /* do not discard */
 };
 
 /*
index acdced86413ce0d90e5fb07ac7ea08005727a2e4..95862bbff56bf0cf8c82d4e88178dc43408866d7 100644 (file)
@@ -2469,7 +2469,7 @@ xfs_free_extent(
 
        error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
        if (!error)
-               xfs_alloc_busy_insert(tp, args.agno, args.agbno, len);
+               xfs_alloc_busy_insert(tp, args.agno, args.agbno, len, 0);
 error0:
        xfs_perag_put(args.pag);
        return error;
@@ -2480,7 +2480,8 @@ xfs_alloc_busy_insert(
        struct xfs_trans        *tp,
        xfs_agnumber_t          agno,
        xfs_agblock_t           bno,
-       xfs_extlen_t            len)
+       xfs_extlen_t            len,
+       unsigned int            flags)
 {
        struct xfs_busy_extent  *new;
        struct xfs_busy_extent  *busyp;
@@ -2504,6 +2505,7 @@ xfs_alloc_busy_insert(
        new->bno = bno;
        new->length = len;
        INIT_LIST_HEAD(&new->list);
+       new->flags = flags;
 
        /* trace before insert to be able to see failed inserts */
        trace_xfs_alloc_busy(tp->t_mountp, agno, bno, len);
@@ -2608,6 +2610,18 @@ xfs_alloc_busy_update_extent(
        xfs_agblock_t           bbno = busyp->bno;
        xfs_agblock_t           bend = bbno + busyp->length;
 
+       /*
+        * This extent is currently being discarded.  Give the thread
+        * performing the discard a chance to mark the extent unbusy
+        * and retry.
+        */
+       if (busyp->flags & XFS_ALLOC_BUSY_DISCARDED) {
+               spin_unlock(&pag->pagb_lock);
+               delay(1);
+               spin_lock(&pag->pagb_lock);
+               return false;
+       }
+
        /*
         * If there is a busy extent overlapping a user allocation, we have
         * no choice but to force the log and retry the search.
@@ -2813,7 +2827,8 @@ restart:
                 * If this is a metadata allocation, try to reuse the busy
                 * extent instead of trimming the allocation.
                 */
-               if (!args->userdata) {
+               if (!args->userdata &&
+                   !(busyp->flags & XFS_ALLOC_BUSY_DISCARDED)) {
                        if (!xfs_alloc_busy_update_extent(args->mp, args->pag,
                                                          busyp, fbno, flen,
                                                          false))
@@ -2979,10 +2994,16 @@ xfs_alloc_busy_clear_one(
        kmem_free(busyp);
 }
 
+/*
+ * Remove all extents on the passed in list from the busy extents tree.
+ * If do_discard is set skip extents that need to be discarded, and mark
+ * these as undergoing a discard operation instead.
+ */
 void
 xfs_alloc_busy_clear(
        struct xfs_mount        *mp,
-       struct list_head        *list)
+       struct list_head        *list,
+       bool                    do_discard)
 {
        struct xfs_busy_extent  *busyp, *n;
        struct xfs_perag        *pag = NULL;
@@ -2999,7 +3020,11 @@ xfs_alloc_busy_clear(
                        agno = busyp->agno;
                }
 
-               xfs_alloc_busy_clear_one(mp, pag, busyp);
+               if (do_discard && busyp->length &&
+                   !(busyp->flags & XFS_ALLOC_BUSY_SKIP_DISCARD))
+                       busyp->flags = XFS_ALLOC_BUSY_DISCARDED;
+               else
+                       xfs_alloc_busy_clear_one(mp, pag, busyp);
        }
 
        if (pag) {
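
With the flags above, xfs_alloc_busy_clear() can leave an extent on the busy tree marked XFS_ALLOC_BUSY_DISCARDED while the discard is in flight, concurrent allocators back off (delay and retry) instead of trimming or reusing it, and XFS_ALLOC_BUSY_SKIP_DISCARD exempts freed btree blocks from being discarded at all. The following is a simplified single-threaded model of those states with hypothetical names, not the real per-AG rbtree code:

#include <stdbool.h>
#include <stdio.h>

#define BUSY_DISCARDED    0x01	/* discard in flight, do not reuse or trim */
#define BUSY_SKIP_DISCARD 0x02	/* freed metadata block, never discard */

/* toy busy extent; the real code keeps these in a per-AG rbtree */
struct busy_extent { unsigned int flags; bool on_tree; };

/* models xfs_alloc_busy_clear(..., do_discard): either drop the extent
 * or hand it to the discard path flagged as DISCARDED */
static void busy_clear(struct busy_extent *b, bool do_discard)
{
	if (do_discard && !(b->flags & BUSY_SKIP_DISCARD))
		b->flags = BUSY_DISCARDED;
	else
		b->on_tree = false;
}

/* models the allocator side: a DISCARDED extent cannot be trimmed or
 * reused, so the caller must back off and retry */
static const char *alloc_decision(const struct busy_extent *b)
{
	if (!b->on_tree)
		return "free, allocate";
	if (b->flags & BUSY_DISCARDED)
		return "discard in flight, delay and retry";
	return "busy, trim or reuse";
}

int main(void)
{
	struct busy_extent data = { 0, true };
	struct busy_extent meta = { BUSY_SKIP_DISCARD, true };

	busy_clear(&data, true);	/* goes to the discard path */
	busy_clear(&meta, true);	/* skip-discard: removed immediately */

	printf("data extent: %s\n", alloc_decision(&data));
	printf("meta extent: %s\n", alloc_decision(&meta));
	return 0;
}
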
index 240ad288f2f99d7b6805591460846ce6a4c905b8..2f52b924be79f424b0a3115849af602f719a6b79 100644 (file)
@@ -137,10 +137,11 @@ xfs_alloc_longest_free_extent(struct xfs_mount *mp,
 #ifdef __KERNEL__
 void
 xfs_alloc_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno,
-       xfs_agblock_t bno, xfs_extlen_t len);
+       xfs_agblock_t bno, xfs_extlen_t len, unsigned int flags);
 
 void
-xfs_alloc_busy_clear(struct xfs_mount *mp, struct list_head *list);
+xfs_alloc_busy_clear(struct xfs_mount *mp, struct list_head *list,
+       bool do_discard);
 
 int
 xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno,
index 8b469d53599fb4b35cd0851a0baef7e916787969..2b3518826a692640c46e3e4fb5f5192e418e868b 100644 (file)
@@ -120,7 +120,8 @@ xfs_allocbt_free_block(
        if (error)
                return error;
 
-       xfs_alloc_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1);
+       xfs_alloc_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
+                             XFS_ALLOC_BUSY_SKIP_DISCARD);
        xfs_trans_agbtree_delta(cur->bc_tp, -1);
        return 0;
 }
index fa00788de2f549acf0d51bdb913e64350f703b16..e546a33214c93b9d6d4e9e75450451c8592b40aa 100644 (file)
@@ -88,22 +88,6 @@ xfs_bmap_add_attrfork_local(
        xfs_bmap_free_t         *flist,         /* blocks to free at commit */
        int                     *flags);        /* inode logging flags */
 
-/*
- * Called by xfs_bmapi to update file extent records and the btree
- * after allocating space (or doing a delayed allocation).
- */
-STATIC int                             /* error */
-xfs_bmap_add_extent(
-       xfs_inode_t             *ip,    /* incore inode pointer */
-       xfs_extnum_t            idx,    /* extent number to update/insert */
-       xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
-       xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
-       xfs_fsblock_t           *first, /* pointer to firstblock variable */
-       xfs_bmap_free_t         *flist, /* list of extents to be freed */
-       int                     *logflagsp, /* inode logging flags */
-       int                     whichfork, /* data or attr fork */
-       int                     rsvd);  /* OK to allocate reserved blocks */
-
 /*
  * Called by xfs_bmap_add_extent to handle cases converting a delayed
  * allocation to a real allocation.
@@ -111,14 +95,13 @@ xfs_bmap_add_extent(
 STATIC int                             /* error */
 xfs_bmap_add_extent_delay_real(
        xfs_inode_t             *ip,    /* incore inode pointer */
-       xfs_extnum_t            idx,    /* extent number to update/insert */
+       xfs_extnum_t            *idx,   /* extent number to update/insert */
        xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
        xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
        xfs_filblks_t           *dnew,  /* new delayed-alloc indirect blocks */
        xfs_fsblock_t           *first, /* pointer to firstblock variable */
        xfs_bmap_free_t         *flist, /* list of extents to be freed */
-       int                     *logflagsp, /* inode logging flags */
-       int                     rsvd);  /* OK to allocate reserved blocks */
+       int                     *logflagsp); /* inode logging flags */
 
 /*
  * Called by xfs_bmap_add_extent to handle cases converting a hole
@@ -127,10 +110,9 @@ xfs_bmap_add_extent_delay_real(
 STATIC int                             /* error */
 xfs_bmap_add_extent_hole_delay(
        xfs_inode_t             *ip,    /* incore inode pointer */
-       xfs_extnum_t            idx,    /* extent number to update/insert */
+       xfs_extnum_t            *idx,   /* extent number to update/insert */
        xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
-       int                     *logflagsp,/* inode logging flags */
-       int                     rsvd);  /* OK to allocate reserved blocks */
+       int                     *logflagsp); /* inode logging flags */
 
 /*
  * Called by xfs_bmap_add_extent to handle cases converting a hole
@@ -139,7 +121,7 @@ xfs_bmap_add_extent_hole_delay(
 STATIC int                             /* error */
 xfs_bmap_add_extent_hole_real(
        xfs_inode_t             *ip,    /* incore inode pointer */
-       xfs_extnum_t            idx,    /* extent number to update/insert */
+       xfs_extnum_t            *idx,   /* extent number to update/insert */
        xfs_btree_cur_t         *cur,   /* if null, not a btree */
        xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
        int                     *logflagsp, /* inode logging flags */
@@ -152,7 +134,7 @@ xfs_bmap_add_extent_hole_real(
 STATIC int                             /* error */
 xfs_bmap_add_extent_unwritten_real(
        xfs_inode_t             *ip,    /* incore inode pointer */
-       xfs_extnum_t            idx,    /* extent number to update/insert */
+       xfs_extnum_t            *idx,   /* extent number to update/insert */
        xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
        xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
        int                     *logflagsp); /* inode logging flags */
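
These prototype changes pass the extent index to xfs_bmap_add_extent() and its helpers as xfs_extnum_t *idx, so that when neighbouring records merge or new ones are inserted the caller's index follows the surviving record instead of being recomputed (the if_lastex updates disappear further down). A generic sketch of index-by-pointer updating, using made-up structures unrelated to the actual bmap code:

#include <stdio.h>

struct ext { int start, len; };

/* insert a one-block extent at *idx, merging with the left neighbour when
 * contiguous; *idx is updated in place so the caller ends up pointing at
 * the record that actually holds the new block -- the reason the bmap
 * helpers above now take "xfs_extnum_t *idx" instead of a plain value */
static void add_block(struct ext *list, int *nr, int *idx, int block)
{
	if (*idx > 0 && list[*idx - 1].start + list[*idx - 1].len == block) {
		--*idx;				/* merged left: index moves back */
		list[*idx].len++;
	} else {
		list[*idx] = (struct ext){ block, 1 };
		(*nr)++;
	}
}

int main(void)
{
	struct ext list[4] = { { 10, 2 } };	/* existing extent: blocks 10-11 */
	int nr = 1, idx = 1;

	add_block(list, &nr, &idx, 12);		/* contiguous: merges into list[0] */
	printf("idx=%d nr=%d -> [%d,+%d]\n", idx, nr, list[idx].start, list[idx].len);
	return 0;
}
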
@@ -179,22 +161,6 @@ xfs_bmap_btree_to_extents(
        int                     *logflagsp, /* inode logging flags */
        int                     whichfork); /* data or attr fork */
 
-/*
- * Called by xfs_bmapi to update file extent records and the btree
- * after removing space (or undoing a delayed allocation).
- */
-STATIC int                             /* error */
-xfs_bmap_del_extent(
-       xfs_inode_t             *ip,    /* incore inode pointer */
-       xfs_trans_t             *tp,    /* current trans pointer */
-       xfs_extnum_t            idx,    /* extent number to update/insert */
-       xfs_bmap_free_t         *flist, /* list of extents to be freed */
-       xfs_btree_cur_t         *cur,   /* if null, not a btree */
-       xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
-       int                     *logflagsp,/* inode logging flags */
-       int                     whichfork, /* data or attr fork */
-       int                     rsvd);   /* OK to allocate reserved blocks */
-
 /*
  * Remove the entry "free" from the free item list.  Prev points to the
  * previous entry, unless "free" is the head of the list.
@@ -474,14 +440,13 @@ xfs_bmap_add_attrfork_local(
 STATIC int                             /* error */
 xfs_bmap_add_extent(
        xfs_inode_t             *ip,    /* incore inode pointer */
-       xfs_extnum_t            idx,    /* extent number to update/insert */
+       xfs_extnum_t            *idx,   /* extent number to update/insert */
        xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
        xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
        xfs_fsblock_t           *first, /* pointer to firstblock variable */
        xfs_bmap_free_t         *flist, /* list of extents to be freed */
        int                     *logflagsp, /* inode logging flags */
-       int                     whichfork, /* data or attr fork */
-       int                     rsvd)   /* OK to use reserved data blocks */
+       int                     whichfork) /* data or attr fork */
 {
        xfs_btree_cur_t         *cur;   /* btree cursor or null */
        xfs_filblks_t           da_new; /* new count del alloc blocks used */
@@ -492,23 +457,27 @@ xfs_bmap_add_extent(
        xfs_extnum_t            nextents; /* number of extents in file now */
 
        XFS_STATS_INC(xs_add_exlist);
+
        cur = *curp;
        ifp = XFS_IFORK_PTR(ip, whichfork);
        nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
-       ASSERT(idx <= nextents);
        da_old = da_new = 0;
        error = 0;
+
+       ASSERT(*idx >= 0);
+       ASSERT(*idx <= nextents);
+
        /*
         * This is the first extent added to a new/empty file.
         * Special case this one, so other routines get to assume there are
         * already extents in the list.
         */
        if (nextents == 0) {
-               xfs_iext_insert(ip, 0, 1, new,
+               xfs_iext_insert(ip, *idx, 1, new,
                                whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
 
                ASSERT(cur == NULL);
-               ifp->if_lastex = 0;
+
                if (!isnullstartblock(new->br_startblock)) {
                        XFS_IFORK_NEXT_SET(ip, whichfork, 1);
                        logflags = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
@@ -522,27 +491,25 @@ xfs_bmap_add_extent(
                if (cur)
                        ASSERT((cur->bc_private.b.flags &
                                XFS_BTCUR_BPRV_WASDEL) == 0);
-               if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, new,
-                               &logflags, rsvd)))
-                       goto done;
+               error = xfs_bmap_add_extent_hole_delay(ip, idx, new,
+                                                      &logflags);
        }
        /*
         * Real allocation off the end of the file.
         */
-       else if (idx == nextents) {
+       else if (*idx == nextents) {
                if (cur)
                        ASSERT((cur->bc_private.b.flags &
                                XFS_BTCUR_BPRV_WASDEL) == 0);
-               if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new,
-                               &logflags, whichfork)))
-                       goto done;
+               error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new,
+                               &logflags, whichfork);
        } else {
                xfs_bmbt_irec_t prev;   /* old extent at offset idx */
 
                /*
                 * Get the record referred to by idx.
                 */
-               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &prev);
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &prev);
                /*
                 * If it's a real allocation record, and the new allocation ends
                 * after the start of the referred to record, then we're filling
@@ -557,22 +524,18 @@ xfs_bmap_add_extent(
                                if (cur)
                                        ASSERT(cur->bc_private.b.flags &
                                                XFS_BTCUR_BPRV_WASDEL);
-                               if ((error = xfs_bmap_add_extent_delay_real(ip,
-                                       idx, &cur, new, &da_new, first, flist,
-                                       &logflags, rsvd)))
-                                       goto done;
-                       } else if (new->br_state == XFS_EXT_NORM) {
-                               ASSERT(new->br_state == XFS_EXT_NORM);
-                               if ((error = xfs_bmap_add_extent_unwritten_real(
-                                       ip, idx, &cur, new, &logflags)))
-                                       goto done;
+                               error = xfs_bmap_add_extent_delay_real(ip,
+                                               idx, &cur, new, &da_new,
+                                               first, flist, &logflags);
                        } else {
-                               ASSERT(new->br_state == XFS_EXT_UNWRITTEN);
-                               if ((error = xfs_bmap_add_extent_unwritten_real(
-                                       ip, idx, &cur, new, &logflags)))
+                               ASSERT(new->br_state == XFS_EXT_NORM ||
+                                      new->br_state == XFS_EXT_UNWRITTEN);
+
+                               error = xfs_bmap_add_extent_unwritten_real(ip,
+                                               idx, &cur, new, &logflags);
+                               if (error)
                                        goto done;
                        }
-                       ASSERT(*curp == cur || *curp == NULL);
                }
                /*
                 * Otherwise we're filling in a hole with an allocation.
@@ -581,13 +544,15 @@ xfs_bmap_add_extent(
                        if (cur)
                                ASSERT((cur->bc_private.b.flags &
                                        XFS_BTCUR_BPRV_WASDEL) == 0);
-                       if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur,
-                                       new, &logflags, whichfork)))
-                               goto done;
+                       error = xfs_bmap_add_extent_hole_real(ip, idx, cur,
+                                       new, &logflags, whichfork);
                }
        }
 
+       if (error)
+               goto done;
        ASSERT(*curp == cur || *curp == NULL);
+
        /*
         * Convert to a btree if necessary.
         */
@@ -615,7 +580,7 @@ xfs_bmap_add_extent(
                ASSERT(nblks <= da_old);
                if (nblks < da_old)
                        xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
-                               (int64_t)(da_old - nblks), rsvd);
+                               (int64_t)(da_old - nblks), 0);
        }
        /*
         * Clear out the allocated field, done with it now in any case.
@@ -640,14 +605,13 @@ done:
 STATIC int                             /* error */
 xfs_bmap_add_extent_delay_real(
        xfs_inode_t             *ip,    /* incore inode pointer */
-       xfs_extnum_t            idx,    /* extent number to update/insert */
+       xfs_extnum_t            *idx,   /* extent number to update/insert */
        xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
        xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
        xfs_filblks_t           *dnew,  /* new delayed-alloc indirect blocks */
        xfs_fsblock_t           *first, /* pointer to firstblock variable */
        xfs_bmap_free_t         *flist, /* list of extents to be freed */
-       int                     *logflagsp, /* inode logging flags */
-       int                     rsvd)   /* OK to use reserved data block allocation */
+       int                     *logflagsp) /* inode logging flags */
 {
        xfs_btree_cur_t         *cur;   /* btree cursor */
        int                     diff;   /* temp value */
@@ -673,7 +637,7 @@ xfs_bmap_add_extent_delay_real(
         */
        cur = *curp;
        ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
-       ep = xfs_iext_get_ext(ifp, idx);
+       ep = xfs_iext_get_ext(ifp, *idx);
        xfs_bmbt_get_all(ep, &PREV);
        new_endoff = new->br_startoff + new->br_blockcount;
        ASSERT(PREV.br_startoff <= new->br_startoff);
@@ -692,9 +656,9 @@ xfs_bmap_add_extent_delay_real(
         * Check and set flags if this segment has a left neighbor.
         * Don't set contiguous if the combined extent would be too large.
         */
-       if (idx > 0) {
+       if (*idx > 0) {
                state |= BMAP_LEFT_VALID;
-               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
 
                if (isnullstartblock(LEFT.br_startblock))
                        state |= BMAP_LEFT_DELAY;
@@ -712,9 +676,9 @@ xfs_bmap_add_extent_delay_real(
         * Don't set contiguous if the combined extent would be too large.
         * Also check for all-three-contiguous being too large.
         */
-       if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
+       if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
                state |= BMAP_RIGHT_VALID;
-               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
 
                if (isnullstartblock(RIGHT.br_startblock))
                        state |= BMAP_RIGHT_DELAY;
@@ -745,14 +709,14 @@ xfs_bmap_add_extent_delay_real(
                 * Filling in all of a previously delayed allocation extent.
                 * The left and right neighbors are both contiguous with new.
                 */
-               trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
-               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
+               --*idx;
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
                        LEFT.br_blockcount + PREV.br_blockcount +
                        RIGHT.br_blockcount);
-               trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
 
-               xfs_iext_remove(ip, idx, 2, state);
-               ip->i_df.if_lastex = idx - 1;
+               xfs_iext_remove(ip, *idx + 1, 2, state);
                ip->i_d.di_nextents--;
                if (cur == NULL)
                        rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -784,13 +748,14 @@ xfs_bmap_add_extent_delay_real(
                 * Filling in all of a previously delayed allocation extent.
                 * The left neighbor is contiguous, the right is not.
                 */
-               trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
-               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
+               --*idx;
+
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
                        LEFT.br_blockcount + PREV.br_blockcount);
-               trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
 
-               ip->i_df.if_lastex = idx - 1;
-               xfs_iext_remove(ip, idx, 1, state);
+               xfs_iext_remove(ip, *idx + 1, 1, state);
                if (cur == NULL)
                        rval = XFS_ILOG_DEXT;
                else {
@@ -814,14 +779,13 @@ xfs_bmap_add_extent_delay_real(
                 * Filling in all of a previously delayed allocation extent.
                 * The right neighbor is contiguous, the left is not.
                 */
-               trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
                xfs_bmbt_set_startblock(ep, new->br_startblock);
                xfs_bmbt_set_blockcount(ep,
                        PREV.br_blockcount + RIGHT.br_blockcount);
-               trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
 
-               ip->i_df.if_lastex = idx;
-               xfs_iext_remove(ip, idx + 1, 1, state);
+               xfs_iext_remove(ip, *idx + 1, 1, state);
                if (cur == NULL)
                        rval = XFS_ILOG_DEXT;
                else {
@@ -837,6 +801,7 @@ xfs_bmap_add_extent_delay_real(
                                        RIGHT.br_blockcount, PREV.br_state)))
                                goto done;
                }
+
                *dnew = 0;
                break;
 
@@ -846,11 +811,10 @@ xfs_bmap_add_extent_delay_real(
                 * Neither the left nor right neighbors are contiguous with
                 * the new one.
                 */
-               trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
                xfs_bmbt_set_startblock(ep, new->br_startblock);
-               trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
 
-               ip->i_df.if_lastex = idx;
                ip->i_d.di_nextents++;
                if (cur == NULL)
                        rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -866,6 +830,7 @@ xfs_bmap_add_extent_delay_real(
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(i == 1, done);
                }
+
                *dnew = 0;
                break;
 
@@ -874,17 +839,16 @@ xfs_bmap_add_extent_delay_real(
                 * Filling in the first part of a previous delayed allocation.
                 * The left neighbor is contiguous.
                 */
-               trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
-               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
+               trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
                        LEFT.br_blockcount + new->br_blockcount);
                xfs_bmbt_set_startoff(ep,
                        PREV.br_startoff + new->br_blockcount);
-               trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
 
                temp = PREV.br_blockcount - new->br_blockcount;
-               trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
                xfs_bmbt_set_blockcount(ep, temp);
-               ip->i_df.if_lastex = idx - 1;
                if (cur == NULL)
                        rval = XFS_ILOG_DEXT;
                else {
@@ -904,7 +868,9 @@ xfs_bmap_add_extent_delay_real(
                temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
                        startblockval(PREV.br_startblock));
                xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
-               trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+
+               --*idx;
                *dnew = temp;
                break;
 
@@ -913,12 +879,11 @@ xfs_bmap_add_extent_delay_real(
                 * Filling in the first part of a previous delayed allocation.
                 * The left neighbor is not contiguous.
                 */
-               trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
                xfs_bmbt_set_startoff(ep, new_endoff);
                temp = PREV.br_blockcount - new->br_blockcount;
                xfs_bmbt_set_blockcount(ep, temp);
-               xfs_iext_insert(ip, idx, 1, new, state);
-               ip->i_df.if_lastex = idx;
+               xfs_iext_insert(ip, *idx, 1, new, state);
                ip->i_d.di_nextents++;
                if (cur == NULL)
                        rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -946,9 +911,10 @@ xfs_bmap_add_extent_delay_real(
                temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
                        startblockval(PREV.br_startblock) -
                        (cur ? cur->bc_private.b.allocated : 0));
-               ep = xfs_iext_get_ext(ifp, idx + 1);
+               ep = xfs_iext_get_ext(ifp, *idx + 1);
                xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
-               trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx + 1, state, _THIS_IP_);
+
                *dnew = temp;
                break;
 
@@ -958,15 +924,13 @@ xfs_bmap_add_extent_delay_real(
                 * The right neighbor is contiguous with the new allocation.
                 */
                temp = PREV.br_blockcount - new->br_blockcount;
-               trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
-               trace_xfs_bmap_pre_update(ip, idx + 1, state, _THIS_IP_);
+               trace_xfs_bmap_pre_update(ip, *idx + 1, state, _THIS_IP_);
                xfs_bmbt_set_blockcount(ep, temp);
-               xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
+               xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx + 1),
                        new->br_startoff, new->br_startblock,
                        new->br_blockcount + RIGHT.br_blockcount,
                        RIGHT.br_state);
-               trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
-               ip->i_df.if_lastex = idx + 1;
+               trace_xfs_bmap_post_update(ip, *idx + 1, state, _THIS_IP_);
                if (cur == NULL)
                        rval = XFS_ILOG_DEXT;
                else {
@@ -983,10 +947,14 @@ xfs_bmap_add_extent_delay_real(
                                        RIGHT.br_state)))
                                goto done;
                }
+
                temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
                        startblockval(PREV.br_startblock));
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
                xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
-               trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+
+               ++*idx;
                *dnew = temp;
                break;
 
@@ -996,10 +964,9 @@ xfs_bmap_add_extent_delay_real(
                 * The right neighbor is not contiguous.
                 */
                temp = PREV.br_blockcount - new->br_blockcount;
-               trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
                xfs_bmbt_set_blockcount(ep, temp);
-               xfs_iext_insert(ip, idx + 1, 1, new, state);
-               ip->i_df.if_lastex = idx + 1;
+               xfs_iext_insert(ip, *idx + 1, 1, new, state);
                ip->i_d.di_nextents++;
                if (cur == NULL)
                        rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1027,9 +994,11 @@ xfs_bmap_add_extent_delay_real(
                temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
                        startblockval(PREV.br_startblock) -
                        (cur ? cur->bc_private.b.allocated : 0));
-               ep = xfs_iext_get_ext(ifp, idx);
+               ep = xfs_iext_get_ext(ifp, *idx);
                xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
-               trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+
+               ++*idx;
                *dnew = temp;
                break;
 
@@ -1056,7 +1025,7 @@ xfs_bmap_add_extent_delay_real(
                 */
                temp = new->br_startoff - PREV.br_startoff;
                temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
-               trace_xfs_bmap_pre_update(ip, idx, 0, _THIS_IP_);
+               trace_xfs_bmap_pre_update(ip, *idx, 0, _THIS_IP_);
                xfs_bmbt_set_blockcount(ep, temp);      /* truncate PREV */
                LEFT = *new;
                RIGHT.br_state = PREV.br_state;
@@ -1065,8 +1034,7 @@ xfs_bmap_add_extent_delay_real(
                RIGHT.br_startoff = new_endoff;
                RIGHT.br_blockcount = temp2;
                /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
-               xfs_iext_insert(ip, idx + 1, 2, &LEFT, state);
-               ip->i_df.if_lastex = idx + 1;
+               xfs_iext_insert(ip, *idx + 1, 2, &LEFT, state);
                ip->i_d.di_nextents++;
                if (cur == NULL)
                        rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1097,7 +1065,7 @@ xfs_bmap_add_extent_delay_real(
                        (cur ? cur->bc_private.b.allocated : 0));
                if (diff > 0 &&
                    xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
-                                            -((int64_t)diff), rsvd)) {
+                                            -((int64_t)diff), 0)) {
                        /*
                         * Ick gross gag me with a spoon.
                         */
@@ -1109,7 +1077,7 @@ xfs_bmap_add_extent_delay_real(
                                        if (!diff ||
                                            !xfs_icsb_modify_counters(ip->i_mount,
                                                    XFS_SBS_FDBLOCKS,
-                                                   -((int64_t)diff), rsvd))
+                                                   -((int64_t)diff), 0))
                                                break;
                                }
                                if (temp2) {
@@ -1118,18 +1086,20 @@ xfs_bmap_add_extent_delay_real(
                                        if (!diff ||
                                            !xfs_icsb_modify_counters(ip->i_mount,
                                                    XFS_SBS_FDBLOCKS,
-                                                   -((int64_t)diff), rsvd))
+                                                   -((int64_t)diff), 0))
                                                break;
                                }
                        }
                }
-               ep = xfs_iext_get_ext(ifp, idx);
+               ep = xfs_iext_get_ext(ifp, *idx);
                xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
-               trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
-               trace_xfs_bmap_pre_update(ip, idx + 2, state, _THIS_IP_);
-               xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2),
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               trace_xfs_bmap_pre_update(ip, *idx + 2, state, _THIS_IP_);
+               xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx + 2),
                        nullstartblock((int)temp2));
-               trace_xfs_bmap_post_update(ip, idx + 2, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx + 2, state, _THIS_IP_);
+
+               ++*idx;
                *dnew = temp + temp2;
                break;
 
@@ -1161,7 +1131,7 @@ done:
 STATIC int                             /* error */
 xfs_bmap_add_extent_unwritten_real(
        xfs_inode_t             *ip,    /* incore inode pointer */
-       xfs_extnum_t            idx,    /* extent number to update/insert */
+       xfs_extnum_t            *idx,   /* extent number to update/insert */
        xfs_btree_cur_t         **curp, /* if *curp is null, not a btree */
        xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
        int                     *logflagsp) /* inode logging flags */
@@ -1188,7 +1158,7 @@ xfs_bmap_add_extent_unwritten_real(
        error = 0;
        cur = *curp;
        ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
-       ep = xfs_iext_get_ext(ifp, idx);
+       ep = xfs_iext_get_ext(ifp, *idx);
        xfs_bmbt_get_all(ep, &PREV);
        newext = new->br_state;
        oldext = (newext == XFS_EXT_UNWRITTEN) ?
@@ -1211,9 +1181,9 @@ xfs_bmap_add_extent_unwritten_real(
         * Check and set flags if this segment has a left neighbor.
         * Don't set contiguous if the combined extent would be too large.
         */
-       if (idx > 0) {
+       if (*idx > 0) {
                state |= BMAP_LEFT_VALID;
-               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
 
                if (isnullstartblock(LEFT.br_startblock))
                        state |= BMAP_LEFT_DELAY;
@@ -1231,9 +1201,9 @@ xfs_bmap_add_extent_unwritten_real(
         * Don't set contiguous if the combined extent would be too large.
         * Also check for all-three-contiguous being too large.
         */
-       if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
+       if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
                state |= BMAP_RIGHT_VALID;
-               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
                if (isnullstartblock(RIGHT.br_startblock))
                        state |= BMAP_RIGHT_DELAY;
        }
@@ -1262,14 +1232,15 @@ xfs_bmap_add_extent_unwritten_real(
                 * Setting all of a previous oldext extent to newext.
                 * The left and right neighbors are both contiguous with new.
                 */
-               trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
-               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
+               --*idx;
+
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
                        LEFT.br_blockcount + PREV.br_blockcount +
                        RIGHT.br_blockcount);
-               trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
 
-               xfs_iext_remove(ip, idx, 2, state);
-               ip->i_df.if_lastex = idx - 1;
+               xfs_iext_remove(ip, *idx + 1, 2, state);
                ip->i_d.di_nextents -= 2;
                if (cur == NULL)
                        rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1305,13 +1276,14 @@ xfs_bmap_add_extent_unwritten_real(
                 * Setting all of a previous oldext extent to newext.
                 * The left neighbor is contiguous, the right is not.
                 */
-               trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
-               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
+               --*idx;
+
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
                        LEFT.br_blockcount + PREV.br_blockcount);
-               trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
 
-               ip->i_df.if_lastex = idx - 1;
-               xfs_iext_remove(ip, idx, 1, state);
+               xfs_iext_remove(ip, *idx + 1, 1, state);
                ip->i_d.di_nextents--;
                if (cur == NULL)
                        rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1341,13 +1313,12 @@ xfs_bmap_add_extent_unwritten_real(
                 * Setting all of a previous oldext extent to newext.
                 * The right neighbor is contiguous, the left is not.
                 */
-               trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
                xfs_bmbt_set_blockcount(ep,
                        PREV.br_blockcount + RIGHT.br_blockcount);
                xfs_bmbt_set_state(ep, newext);
-               trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
-               ip->i_df.if_lastex = idx;
-               xfs_iext_remove(ip, idx + 1, 1, state);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               xfs_iext_remove(ip, *idx + 1, 1, state);
                ip->i_d.di_nextents--;
                if (cur == NULL)
                        rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1378,11 +1349,10 @@ xfs_bmap_add_extent_unwritten_real(
                 * Neither the left nor right neighbors are contiguous with
                 * the new one.
                 */
-               trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
                xfs_bmbt_set_state(ep, newext);
-               trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
 
-               ip->i_df.if_lastex = idx;
                if (cur == NULL)
                        rval = XFS_ILOG_DEXT;
                else {
@@ -1404,21 +1374,22 @@ xfs_bmap_add_extent_unwritten_real(
                 * Setting the first part of a previous oldext extent to newext.
                 * The left neighbor is contiguous.
                 */
-               trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
-               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
+               trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
                        LEFT.br_blockcount + new->br_blockcount);
                xfs_bmbt_set_startoff(ep,
                        PREV.br_startoff + new->br_blockcount);
-               trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
 
-               trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
                xfs_bmbt_set_startblock(ep,
                        new->br_startblock + new->br_blockcount);
                xfs_bmbt_set_blockcount(ep,
                        PREV.br_blockcount - new->br_blockcount);
-               trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+
+               --*idx;
 
-               ip->i_df.if_lastex = idx - 1;
                if (cur == NULL)
                        rval = XFS_ILOG_DEXT;
                else {
@@ -1449,17 +1420,16 @@ xfs_bmap_add_extent_unwritten_real(
                 * Setting the first part of a previous oldext extent to newext.
                 * The left neighbor is not contiguous.
                 */
-               trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
                ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
                xfs_bmbt_set_startoff(ep, new_endoff);
                xfs_bmbt_set_blockcount(ep,
                        PREV.br_blockcount - new->br_blockcount);
                xfs_bmbt_set_startblock(ep,
                        new->br_startblock + new->br_blockcount);
-               trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
 
-               xfs_iext_insert(ip, idx, 1, new, state);
-               ip->i_df.if_lastex = idx;
+               xfs_iext_insert(ip, *idx, 1, new, state);
                ip->i_d.di_nextents++;
                if (cur == NULL)
                        rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1488,17 +1458,19 @@ xfs_bmap_add_extent_unwritten_real(
                 * Setting the last part of a previous oldext extent to newext.
                 * The right neighbor is contiguous with the new allocation.
                 */
-               trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
-               trace_xfs_bmap_pre_update(ip, idx + 1, state, _THIS_IP_);
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
                xfs_bmbt_set_blockcount(ep,
                        PREV.br_blockcount - new->br_blockcount);
-               trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
-               xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1),
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+
+               ++*idx;
+
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
                        new->br_startoff, new->br_startblock,
                        new->br_blockcount + RIGHT.br_blockcount, newext);
-               trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
 
-               ip->i_df.if_lastex = idx + 1;
                if (cur == NULL)
                        rval = XFS_ILOG_DEXT;
                else {
@@ -1528,13 +1500,14 @@ xfs_bmap_add_extent_unwritten_real(
                 * Setting the last part of a previous oldext extent to newext.
                 * The right neighbor is not contiguous.
                 */
-               trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
                xfs_bmbt_set_blockcount(ep,
                        PREV.br_blockcount - new->br_blockcount);
-               trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+
+               ++*idx;
+               xfs_iext_insert(ip, *idx, 1, new, state);
 
-               xfs_iext_insert(ip, idx + 1, 1, new, state);
-               ip->i_df.if_lastex = idx + 1;
                ip->i_d.di_nextents++;
                if (cur == NULL)
                        rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1568,10 +1541,10 @@ xfs_bmap_add_extent_unwritten_real(
                 * newext.  Contiguity is impossible here.
                 * One extent becomes three extents.
                 */
-               trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
                xfs_bmbt_set_blockcount(ep,
                        new->br_startoff - PREV.br_startoff);
-               trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
 
                r[0] = *new;
                r[1].br_startoff = new_endoff;
@@ -1579,8 +1552,10 @@ xfs_bmap_add_extent_unwritten_real(
                        PREV.br_startoff + PREV.br_blockcount - new_endoff;
                r[1].br_startblock = new->br_startblock + new->br_blockcount;
                r[1].br_state = oldext;
-               xfs_iext_insert(ip, idx + 1, 2, &r[0], state);
-               ip->i_df.if_lastex = idx + 1;
+
+               ++*idx;
+               xfs_iext_insert(ip, *idx, 2, &r[0], state);
+
                ip->i_d.di_nextents += 2;
                if (cur == NULL)
                        rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
@@ -1650,12 +1625,10 @@ done:
 STATIC int                             /* error */
 xfs_bmap_add_extent_hole_delay(
        xfs_inode_t             *ip,    /* incore inode pointer */
-       xfs_extnum_t            idx,    /* extent number to update/insert */
+       xfs_extnum_t            *idx,   /* extent number to update/insert */
        xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
-       int                     *logflagsp, /* inode logging flags */
-       int                     rsvd)           /* OK to allocate reserved blocks */
+       int                     *logflagsp) /* inode logging flags */
 {
-       xfs_bmbt_rec_host_t     *ep;    /* extent record for idx */
        xfs_ifork_t             *ifp;   /* inode fork pointer */
        xfs_bmbt_irec_t         left;   /* left neighbor extent entry */
        xfs_filblks_t           newlen=0;       /* new indirect size */
@@ -1665,16 +1638,15 @@ xfs_bmap_add_extent_hole_delay(
        xfs_filblks_t           temp=0; /* temp for indirect calculations */
 
        ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
-       ep = xfs_iext_get_ext(ifp, idx);
        state = 0;
        ASSERT(isnullstartblock(new->br_startblock));
 
        /*
         * Check and set flags if this segment has a left neighbor
         */
-       if (idx > 0) {
+       if (*idx > 0) {
                state |= BMAP_LEFT_VALID;
-               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
 
                if (isnullstartblock(left.br_startblock))
                        state |= BMAP_LEFT_DELAY;
@@ -1684,9 +1656,9 @@ xfs_bmap_add_extent_hole_delay(
         * Check and set flags if the current (right) segment exists.
         * If it doesn't exist, we're converting the hole at end-of-file.
         */
-       if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
+       if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
                state |= BMAP_RIGHT_VALID;
-               xfs_bmbt_get_all(ep, &right);
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
 
                if (isnullstartblock(right.br_startblock))
                        state |= BMAP_RIGHT_DELAY;
@@ -1719,21 +1691,21 @@ xfs_bmap_add_extent_hole_delay(
                 * on the left and on the right.
                 * Merge all three into a single extent record.
                 */
+               --*idx;
                temp = left.br_blockcount + new->br_blockcount +
                        right.br_blockcount;
 
-               trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
-               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
                oldlen = startblockval(left.br_startblock) +
                        startblockval(new->br_startblock) +
                        startblockval(right.br_startblock);
                newlen = xfs_bmap_worst_indlen(ip, temp);
-               xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
+               xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
                        nullstartblock((int)newlen));
-               trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
 
-               xfs_iext_remove(ip, idx, 1, state);
-               ip->i_df.if_lastex = idx - 1;
+               xfs_iext_remove(ip, *idx + 1, 1, state);
                break;
 
        case BMAP_LEFT_CONTIG:
@@ -1742,17 +1714,17 @@ xfs_bmap_add_extent_hole_delay(
                 * on the left.
                 * Merge the new allocation with the left neighbor.
                 */
+               --*idx;
                temp = left.br_blockcount + new->br_blockcount;
-               trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
-               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
+
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
                oldlen = startblockval(left.br_startblock) +
                        startblockval(new->br_startblock);
                newlen = xfs_bmap_worst_indlen(ip, temp);
-               xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
+               xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
                        nullstartblock((int)newlen));
-               trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
-
-               ip->i_df.if_lastex = idx - 1;
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
                break;
 
        case BMAP_RIGHT_CONTIG:
@@ -1761,16 +1733,15 @@ xfs_bmap_add_extent_hole_delay(
                 * on the right.
                 * Merge the new allocation with the right neighbor.
                 */
-               trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
                temp = new->br_blockcount + right.br_blockcount;
                oldlen = startblockval(new->br_startblock) +
                        startblockval(right.br_startblock);
                newlen = xfs_bmap_worst_indlen(ip, temp);
-               xfs_bmbt_set_allf(ep, new->br_startoff,
+               xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
+                       new->br_startoff,
                        nullstartblock((int)newlen), temp, right.br_state);
-               trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
-
-               ip->i_df.if_lastex = idx;
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
                break;
 
        case 0:
@@ -1780,14 +1751,13 @@ xfs_bmap_add_extent_hole_delay(
                 * Insert a new entry.
                 */
                oldlen = newlen = 0;
-               xfs_iext_insert(ip, idx, 1, new, state);
-               ip->i_df.if_lastex = idx;
+               xfs_iext_insert(ip, *idx, 1, new, state);
                break;
        }
        if (oldlen != newlen) {
                ASSERT(oldlen > newlen);
                xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
-                       (int64_t)(oldlen - newlen), rsvd);
+                       (int64_t)(oldlen - newlen), 0);
                /*
                 * Nothing to do for disk quota accounting here.
                 */
@@ -1803,13 +1773,12 @@ xfs_bmap_add_extent_hole_delay(
 STATIC int                             /* error */
 xfs_bmap_add_extent_hole_real(
        xfs_inode_t             *ip,    /* incore inode pointer */
-       xfs_extnum_t            idx,    /* extent number to update/insert */
+       xfs_extnum_t            *idx,   /* extent number to update/insert */
        xfs_btree_cur_t         *cur,   /* if null, not a btree */
        xfs_bmbt_irec_t         *new,   /* new data to add to file extents */
        int                     *logflagsp, /* inode logging flags */
        int                     whichfork) /* data or attr fork */
 {
-       xfs_bmbt_rec_host_t     *ep;    /* pointer to extent entry ins. point */
        int                     error;  /* error return value */
        int                     i;      /* temp state */
        xfs_ifork_t             *ifp;   /* inode fork pointer */
@@ -1819,8 +1788,7 @@ xfs_bmap_add_extent_hole_real(
        int                     state;  /* state bits, accessed thru macros */
 
        ifp = XFS_IFORK_PTR(ip, whichfork);
-       ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
-       ep = xfs_iext_get_ext(ifp, idx);
+       ASSERT(*idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
        state = 0;
 
        if (whichfork == XFS_ATTR_FORK)
@@ -1829,9 +1797,9 @@ xfs_bmap_add_extent_hole_real(
        /*
         * Check and set flags if this segment has a left neighbor.
         */
-       if (idx > 0) {
+       if (*idx > 0) {
                state |= BMAP_LEFT_VALID;
-               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
                if (isnullstartblock(left.br_startblock))
                        state |= BMAP_LEFT_DELAY;
        }
@@ -1840,9 +1808,9 @@ xfs_bmap_add_extent_hole_real(
         * Check and set flags if this segment has a current value.
         * Not true if we're inserting into the "hole" at eof.
         */
-       if (idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
+       if (*idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
                state |= BMAP_RIGHT_VALID;
-               xfs_bmbt_get_all(ep, &right);
+               xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
                if (isnullstartblock(right.br_startblock))
                        state |= BMAP_RIGHT_DELAY;
        }
@@ -1879,14 +1847,15 @@ xfs_bmap_add_extent_hole_real(
                 * left and on the right.
                 * Merge all three into a single extent record.
                 */
-               trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
-               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
+               --*idx;
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
                        left.br_blockcount + new->br_blockcount +
                        right.br_blockcount);
-               trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+
+               xfs_iext_remove(ip, *idx + 1, 1, state);
 
-               xfs_iext_remove(ip, idx, 1, state);
-               ifp->if_lastex = idx - 1;
                XFS_IFORK_NEXT_SET(ip, whichfork,
                        XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
                if (cur == NULL) {
@@ -1921,12 +1890,12 @@ xfs_bmap_add_extent_hole_real(
                 * on the left.
                 * Merge the new allocation with the left neighbor.
                 */
-               trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_);
-               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1),
+               --*idx;
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
                        left.br_blockcount + new->br_blockcount);
-               trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
 
-               ifp->if_lastex = idx - 1;
                if (cur == NULL) {
                        rval = xfs_ilog_fext(whichfork);
                } else {
@@ -1952,13 +1921,13 @@ xfs_bmap_add_extent_hole_real(
                 * on the right.
                 * Merge the new allocation with the right neighbor.
                 */
-               trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
-               xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock,
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
+               xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
+                       new->br_startoff, new->br_startblock,
                        new->br_blockcount + right.br_blockcount,
                        right.br_state);
-               trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
 
-               ifp->if_lastex = idx;
                if (cur == NULL) {
                        rval = xfs_ilog_fext(whichfork);
                } else {
@@ -1984,8 +1953,7 @@ xfs_bmap_add_extent_hole_real(
                 * real allocation.
                 * Insert a new entry.
                 */
-               xfs_iext_insert(ip, idx, 1, new, state);
-               ifp->if_lastex = idx;
+               xfs_iext_insert(ip, *idx, 1, new, state);
                XFS_IFORK_NEXT_SET(ip, whichfork,
                        XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
                if (cur == NULL) {
@@ -2833,13 +2801,12 @@ STATIC int                              /* error */
 xfs_bmap_del_extent(
        xfs_inode_t             *ip,    /* incore inode pointer */
        xfs_trans_t             *tp,    /* current transaction pointer */
-       xfs_extnum_t            idx,    /* extent number to update/delete */
+       xfs_extnum_t            *idx,   /* extent number to update/delete */
        xfs_bmap_free_t         *flist, /* list of extents to be freed */
        xfs_btree_cur_t         *cur,   /* if null, not a btree */
        xfs_bmbt_irec_t         *del,   /* data to remove from extents */
        int                     *logflagsp, /* inode logging flags */
-       int                     whichfork, /* data or attr fork */
-       int                     rsvd)   /* OK to allocate reserved blocks */
+       int                     whichfork) /* data or attr fork */
 {
        xfs_filblks_t           da_new; /* new delay-alloc indirect blocks */
        xfs_filblks_t           da_old; /* old delay-alloc indirect blocks */
@@ -2870,10 +2837,10 @@ xfs_bmap_del_extent(
 
        mp = ip->i_mount;
        ifp = XFS_IFORK_PTR(ip, whichfork);
-       ASSERT((idx >= 0) && (idx < ifp->if_bytes /
+       ASSERT((*idx >= 0) && (*idx < ifp->if_bytes /
                (uint)sizeof(xfs_bmbt_rec_t)));
        ASSERT(del->br_blockcount > 0);
-       ep = xfs_iext_get_ext(ifp, idx);
+       ep = xfs_iext_get_ext(ifp, *idx);
        xfs_bmbt_get_all(ep, &got);
        ASSERT(got.br_startoff <= del->br_startoff);
        del_endoff = del->br_startoff + del->br_blockcount;
@@ -2947,11 +2914,12 @@ xfs_bmap_del_extent(
                /*
                 * Matches the whole extent.  Delete the entry.
                 */
-               xfs_iext_remove(ip, idx, 1,
+               xfs_iext_remove(ip, *idx, 1,
                                whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
-               ifp->if_lastex = idx;
+               --*idx;
                if (delay)
                        break;
+
                XFS_IFORK_NEXT_SET(ip, whichfork,
                        XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
                flags |= XFS_ILOG_CORE;
@@ -2968,21 +2936,20 @@ xfs_bmap_del_extent(
                /*
                 * Deleting the first part of the extent.
                 */
-               trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
                xfs_bmbt_set_startoff(ep, del_endoff);
                temp = got.br_blockcount - del->br_blockcount;
                xfs_bmbt_set_blockcount(ep, temp);
-               ifp->if_lastex = idx;
                if (delay) {
                        temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
                                da_old);
                        xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
-                       trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+                       trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
                        da_new = temp;
                        break;
                }
                xfs_bmbt_set_startblock(ep, del_endblock);
-               trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
                if (!cur) {
                        flags |= xfs_ilog_fext(whichfork);
                        break;
@@ -2998,18 +2965,17 @@ xfs_bmap_del_extent(
                 * Deleting the last part of the extent.
                 */
                temp = got.br_blockcount - del->br_blockcount;
-               trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
                xfs_bmbt_set_blockcount(ep, temp);
-               ifp->if_lastex = idx;
                if (delay) {
                        temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
                                da_old);
                        xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
-                       trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+                       trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
                        da_new = temp;
                        break;
                }
-               trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
                if (!cur) {
                        flags |= xfs_ilog_fext(whichfork);
                        break;
@@ -3026,7 +2992,7 @@ xfs_bmap_del_extent(
                 * Deleting the middle of the extent.
                 */
                temp = del->br_startoff - got.br_startoff;
-               trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_);
+               trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
                xfs_bmbt_set_blockcount(ep, temp);
                new.br_startoff = del_endoff;
                temp2 = got_endoff - del_endoff;
@@ -3113,9 +3079,9 @@ xfs_bmap_del_extent(
                                }
                        }
                }
-               trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_);
-               xfs_iext_insert(ip, idx + 1, 1, &new, state);
-               ifp->if_lastex = idx + 1;
+               trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
+               xfs_iext_insert(ip, *idx + 1, 1, &new, state);
+               ++*idx;
                break;
        }
        /*
@@ -3142,7 +3108,7 @@ xfs_bmap_del_extent(
        ASSERT(da_old >= da_new);
        if (da_old > da_new) {
                xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
-                       (int64_t)(da_old - da_new), rsvd);
+                       (int64_t)(da_old - da_new), 0);
        }
 done:
        *logflagsp = flags;
@@ -4562,29 +4528,24 @@ xfs_bmapi(
                                if (rt) {
                                        error = xfs_mod_incore_sb(mp,
                                                        XFS_SBS_FREXTENTS,
-                                                       -((int64_t)extsz), (flags &
-                                                       XFS_BMAPI_RSVBLOCKS));
+                                                       -((int64_t)extsz), 0);
                                } else {
                                        error = xfs_icsb_modify_counters(mp,
                                                        XFS_SBS_FDBLOCKS,
-                                                       -((int64_t)alen), (flags &
-                                                       XFS_BMAPI_RSVBLOCKS));
+                                                       -((int64_t)alen), 0);
                                }
                                if (!error) {
                                        error = xfs_icsb_modify_counters(mp,
                                                        XFS_SBS_FDBLOCKS,
-                                                       -((int64_t)indlen), (flags &
-                                                       XFS_BMAPI_RSVBLOCKS));
+                                                       -((int64_t)indlen), 0);
                                        if (error && rt)
                                                xfs_mod_incore_sb(mp,
                                                        XFS_SBS_FREXTENTS,
-                                                       (int64_t)extsz, (flags &
-                                                       XFS_BMAPI_RSVBLOCKS));
+                                                       (int64_t)extsz, 0);
                                        else if (error)
                                                xfs_icsb_modify_counters(mp,
                                                        XFS_SBS_FDBLOCKS,
-                                                       (int64_t)alen, (flags &
-                                                       XFS_BMAPI_RSVBLOCKS));
+                                                       (int64_t)alen, 0);
                                }
 
                                if (error) {
@@ -4701,13 +4662,12 @@ xfs_bmapi(
                                if (!wasdelay && (flags & XFS_BMAPI_PREALLOC))
                                        got.br_state = XFS_EXT_UNWRITTEN;
                        }
-                       error = xfs_bmap_add_extent(ip, lastx, &cur, &got,
+                       error = xfs_bmap_add_extent(ip, &lastx, &cur, &got,
                                firstblock, flist, &tmp_logflags,
-                               whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
+                               whichfork);
                        logflags |= tmp_logflags;
                        if (error)
                                goto error0;
-                       lastx = ifp->if_lastex;
                        ep = xfs_iext_get_ext(ifp, lastx);
                        nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
                        xfs_bmbt_get_all(ep, &got);
@@ -4803,13 +4763,12 @@ xfs_bmapi(
                        mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
                                                ? XFS_EXT_NORM
                                                : XFS_EXT_UNWRITTEN;
-                       error = xfs_bmap_add_extent(ip, lastx, &cur, mval,
+                       error = xfs_bmap_add_extent(ip, &lastx, &cur, mval,
                                firstblock, flist, &tmp_logflags,
-                               whichfork, (flags & XFS_BMAPI_RSVBLOCKS));
+                               whichfork);
                        logflags |= tmp_logflags;
                        if (error)
                                goto error0;
-                       lastx = ifp->if_lastex;
                        ep = xfs_iext_get_ext(ifp, lastx);
                        nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
                        xfs_bmbt_get_all(ep, &got);
@@ -4868,14 +4827,14 @@ xfs_bmapi(
                /*
                 * Else go on to the next record.
                 */
-               ep = xfs_iext_get_ext(ifp, ++lastx);
                prev = got;
-               if (lastx >= nextents)
-                       eof = 1;
-               else
+               if (++lastx < nextents) {
+                       ep = xfs_iext_get_ext(ifp, lastx);
                        xfs_bmbt_get_all(ep, &got);
+               } else {
+                       eof = 1;
+               }
        }
-       ifp->if_lastex = lastx;
        *nmap = n;
        /*
         * Transform from btree to extents, give it cur.
@@ -4984,7 +4943,6 @@ xfs_bmapi_single(
        ASSERT(!isnullstartblock(got.br_startblock));
        ASSERT(bno < got.br_startoff + got.br_blockcount);
        *fsb = got.br_startblock + (bno - got.br_startoff);
-       ifp->if_lastex = lastx;
        return 0;
 }
 
@@ -5026,7 +4984,6 @@ xfs_bunmapi(
        int                     tmp_logflags;   /* partial logging flags */
        int                     wasdel;         /* was a delayed alloc extent */
        int                     whichfork;      /* data or attribute fork */
-       int                     rsvd;           /* OK to allocate reserved blocks */
        xfs_fsblock_t           sum;
 
        trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
@@ -5044,7 +5001,7 @@ xfs_bunmapi(
        mp = ip->i_mount;
        if (XFS_FORCED_SHUTDOWN(mp))
                return XFS_ERROR(EIO);
-       rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0;
+
        ASSERT(len > 0);
        ASSERT(nexts >= 0);
        ASSERT(ifp->if_ext_max ==
@@ -5160,9 +5117,9 @@ xfs_bunmapi(
                                del.br_blockcount = mod;
                        }
                        del.br_state = XFS_EXT_UNWRITTEN;
-                       error = xfs_bmap_add_extent(ip, lastx, &cur, &del,
+                       error = xfs_bmap_add_extent(ip, &lastx, &cur, &del,
                                firstblock, flist, &logflags,
-                               XFS_DATA_FORK, 0);
+                               XFS_DATA_FORK);
                        if (error)
                                goto error0;
                        goto nodelete;
@@ -5188,9 +5145,12 @@ xfs_bunmapi(
                                 */
                                ASSERT(bno >= del.br_blockcount);
                                bno -= del.br_blockcount;
-                               if (bno < got.br_startoff) {
-                                       if (--lastx >= 0)
-                                               xfs_bmbt_get_all(--ep, &got);
+                               if (got.br_startoff > bno) {
+                                       if (--lastx >= 0) {
+                                               ep = xfs_iext_get_ext(ifp,
+                                                                     lastx);
+                                               xfs_bmbt_get_all(ep, &got);
+                                       }
                                }
                                continue;
                        } else if (del.br_state == XFS_EXT_UNWRITTEN) {
@@ -5214,18 +5174,19 @@ xfs_bunmapi(
                                        prev.br_startoff = start;
                                }
                                prev.br_state = XFS_EXT_UNWRITTEN;
-                               error = xfs_bmap_add_extent(ip, lastx - 1, &cur,
+                               lastx--;
+                               error = xfs_bmap_add_extent(ip, &lastx, &cur,
                                        &prev, firstblock, flist, &logflags,
-                                       XFS_DATA_FORK, 0);
+                                       XFS_DATA_FORK);
                                if (error)
                                        goto error0;
                                goto nodelete;
                        } else {
                                ASSERT(del.br_state == XFS_EXT_NORM);
                                del.br_state = XFS_EXT_UNWRITTEN;
-                               error = xfs_bmap_add_extent(ip, lastx, &cur,
+                               error = xfs_bmap_add_extent(ip, &lastx, &cur,
                                        &del, firstblock, flist, &logflags,
-                                       XFS_DATA_FORK, 0);
+                                       XFS_DATA_FORK);
                                if (error)
                                        goto error0;
                                goto nodelete;
@@ -5240,13 +5201,13 @@ xfs_bunmapi(
                                rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
                                do_div(rtexts, mp->m_sb.sb_rextsize);
                                xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
-                                               (int64_t)rtexts, rsvd);
+                                               (int64_t)rtexts, 0);
                                (void)xfs_trans_reserve_quota_nblks(NULL,
                                        ip, -((long)del.br_blockcount), 0,
                                        XFS_QMOPT_RES_RTBLKS);
                        } else {
                                xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
-                                               (int64_t)del.br_blockcount, rsvd);
+                                               (int64_t)del.br_blockcount, 0);
                                (void)xfs_trans_reserve_quota_nblks(NULL,
                                        ip, -((long)del.br_blockcount), 0,
                                        XFS_QMOPT_RES_REGBLKS);
@@ -5277,31 +5238,29 @@ xfs_bunmapi(
                        error = XFS_ERROR(ENOSPC);
                        goto error0;
                }
-               error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del,
-                               &tmp_logflags, whichfork, rsvd);
+               error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del,
+                               &tmp_logflags, whichfork);
                logflags |= tmp_logflags;
                if (error)
                        goto error0;
                bno = del.br_startoff - 1;
 nodelete:
-               lastx = ifp->if_lastex;
                /*
                 * If not done go on to the next (previous) record.
-                * Reset ep in case the extents array was re-alloced.
                 */
-               ep = xfs_iext_get_ext(ifp, lastx);
                if (bno != (xfs_fileoff_t)-1 && bno >= start) {
-                       if (lastx >= XFS_IFORK_NEXTENTS(ip, whichfork) ||
-                           xfs_bmbt_get_startoff(ep) > bno) {
-                               if (--lastx >= 0)
-                                       ep = xfs_iext_get_ext(ifp, lastx);
-                       }
-                       if (lastx >= 0)
+                       if (lastx >= 0) {
+                               ep = xfs_iext_get_ext(ifp, lastx);
+                               if (xfs_bmbt_get_startoff(ep) > bno) {
+                                       if (--lastx >= 0)
+                                               ep = xfs_iext_get_ext(ifp,
+                                                                     lastx);
+                               }
                                xfs_bmbt_get_all(ep, &got);
+                       }
                        extno++;
                }
        }
-       ifp->if_lastex = lastx;
        *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
        ASSERT(ifp->if_ext_max ==
               XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
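
All of the xfs_bmap.c hunks above implement one idea: the extent index used to be passed by value, with the updated position handed back through the now-removed ifp->if_lastex field, whereas it is now passed by pointer so each helper moves the caller's cursor directly with ++*idx / --*idx. A minimal standalone sketch of that calling convention, using invented names rather than the real XFS types:

#include <stdio.h>

/* Hypothetical stand-in for the real XFS extent records. */
struct extent {
        long    startoff;
        long    blockcount;
};

/*
 * Merge extent ext[*idx] into its left neighbour and leave the cursor on
 * the surviving record.  Passing idx by pointer replaces the old pattern
 * of writing the final position into a side channel (if_lastex) that the
 * caller then had to re-read.
 */
static void merge_with_left(struct extent *ext, int *idx)
{
        ext[*idx - 1].blockcount += ext[*idx].blockcount;
        --*idx;
}

int main(void)
{
        struct extent ext[2] = { { 0, 4 }, { 4, 4 } };
        int idx = 1;

        merge_with_left(ext, &idx);
        printf("cursor now at %d, blockcount %ld\n", idx, ext[idx].blockcount);
        return 0;
}
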
index 3651191daea10cd99bae79443a2b1b9305940250..c62234bde053de008d13d41d40d92f133649e446 100644 (file)
@@ -69,7 +69,6 @@ typedef       struct xfs_bmap_free
 #define XFS_BMAPI_ENTIRE       0x004   /* return entire extent, not trimmed */
 #define XFS_BMAPI_METADATA     0x008   /* mapping metadata not user data */
 #define XFS_BMAPI_ATTRFORK     0x010   /* use attribute fork not data */
-#define XFS_BMAPI_RSVBLOCKS    0x020   /* OK to alloc. reserved data blocks */
 #define        XFS_BMAPI_PREALLOC      0x040   /* preallocation op: unwritten space */
 #define        XFS_BMAPI_IGSTATE       0x080   /* Ignore state - */
                                        /* combine contig. space */
@@ -87,7 +86,6 @@ typedef       struct xfs_bmap_free
        { XFS_BMAPI_ENTIRE,     "ENTIRE" }, \
        { XFS_BMAPI_METADATA,   "METADATA" }, \
        { XFS_BMAPI_ATTRFORK,   "ATTRFORK" }, \
-       { XFS_BMAPI_RSVBLOCKS,  "RSVBLOCKS" }, \
        { XFS_BMAPI_PREALLOC,   "PREALLOC" }, \
        { XFS_BMAPI_IGSTATE,    "IGSTATE" }, \
        { XFS_BMAPI_CONTIG,     "CONTIG" }, \
index c8e3349c287c635275224b65f4200dd99ed5431f..a098a20ca63e29bbd021a266e287d87ca796fd8c 100644 (file)
@@ -920,7 +920,6 @@ xfs_iread_extents(
        /*
         * We know that the size is valid (it's checked in iformat_btree)
         */
-       ifp->if_lastex = NULLEXTNUM;
        ifp->if_bytes = ifp->if_real_bytes = 0;
        ifp->if_flags |= XFS_IFEXTENTS;
        xfs_iext_add(ifp, 0, nextents);
@@ -2558,12 +2557,9 @@ xfs_iflush_fork(
        case XFS_DINODE_FMT_EXTENTS:
                ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
                       !(iip->ili_format.ilf_fields & extflag[whichfork]));
-               ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) ||
-                       (ifp->if_bytes == 0));
-               ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) ||
-                       (ifp->if_bytes > 0));
                if ((iip->ili_format.ilf_fields & extflag[whichfork]) &&
                    (ifp->if_bytes > 0)) {
+                       ASSERT(xfs_iext_get_ext(ifp, 0));
                        ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
                        (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
                                whichfork);
@@ -3112,6 +3108,8 @@ xfs_iext_get_ext(
        xfs_extnum_t    idx)            /* index of target extent */
 {
        ASSERT(idx >= 0);
+       ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
+
        if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
                return ifp->if_u1.if_ext_irec->er_extbuf;
        } else if (ifp->if_flags & XFS_IFEXTIREC) {
@@ -3191,7 +3189,6 @@ xfs_iext_add(
                }
                ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
                ifp->if_real_bytes = 0;
-               ifp->if_lastex = nextents + ext_diff;
        }
        /*
         * Otherwise use a linear (direct) extent list.
@@ -3886,8 +3883,10 @@ xfs_iext_idx_to_irec(
        xfs_extnum_t    page_idx = *idxp; /* extent index in target list */
 
        ASSERT(ifp->if_flags & XFS_IFEXTIREC);
-       ASSERT(page_idx >= 0 && page_idx <=
-               ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
+       ASSERT(page_idx >= 0);
+       ASSERT(page_idx <= ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
+       ASSERT(page_idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t) || realloc);
+
        nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
        erp_idx = 0;
        low = 0;
index ff4e2a30227dcb4b86dd69b784a8bb94cca4b5d7..3ae6d58e54739b42b8ff5ff49dfaad7648201add 100644 (file)
@@ -67,7 +67,6 @@ typedef struct xfs_ifork {
        short                   if_broot_bytes; /* bytes allocated for root */
        unsigned char           if_flags;       /* per-fork flags */
        unsigned char           if_ext_max;     /* max # of extent records */
-       xfs_extnum_t            if_lastex;      /* last if_extents used */
        union {
                xfs_bmbt_rec_host_t *if_extents;/* linear map file exts */
                xfs_ext_irec_t  *if_ext_irec;   /* irec map file exts */
index 7d56e88a3f0eb6c671e863c4977d2f47a500b830..c7755d5a5fbe967ed58bb04ffcc0d47d082a87e4 100644 (file)
@@ -29,6 +29,7 @@
 #include "xfs_mount.h"
 #include "xfs_error.h"
 #include "xfs_alloc.h"
+#include "xfs_discard.h"
 
 /*
  * Perform initial CIL structure initialisation. If the CIL is not
@@ -361,18 +362,28 @@ xlog_cil_committed(
        int     abort)
 {
        struct xfs_cil_ctx      *ctx = args;
+       struct xfs_mount        *mp = ctx->cil->xc_log->l_mp;
 
        xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain,
                                        ctx->start_lsn, abort);
 
        xfs_alloc_busy_sort(&ctx->busy_extents);
-       xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, &ctx->busy_extents);
+       xfs_alloc_busy_clear(mp, &ctx->busy_extents,
+                            (mp->m_flags & XFS_MOUNT_DISCARD) && !abort);
 
        spin_lock(&ctx->cil->xc_cil_lock);
        list_del(&ctx->committing);
        spin_unlock(&ctx->cil->xc_cil_lock);
 
        xlog_cil_free_logvec(ctx->lv_chain);
+
+       if (!list_empty(&ctx->busy_extents)) {
+               ASSERT(mp->m_flags & XFS_MOUNT_DISCARD);
+
+               xfs_discard_extents(mp, &ctx->busy_extents);
+               xfs_alloc_busy_clear(mp, &ctx->busy_extents, false);
+       }
+
        kmem_free(ctx);
 }
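
The xfs_log_cil.c hunk above defers clearing of busy extents when XFS_MOUNT_DISCARD is set, so a checkpoint's freed ranges can be trimmed on the underlying device before they are reused. The real work is done by xfs_discard_extents(), which is added elsewhere in this series and not shown here; purely as an illustration, a discard pass over a hypothetical list of sector ranges might look like this:

#include <linux/blkdev.h>
#include <linux/list.h>

/* Hypothetical pending-discard record, not the real struct xfs_busy_extent. */
struct pending_discard {
        struct list_head        list;
        sector_t                start;          /* first 512-byte sector */
        sector_t                nr_sects;       /* length in 512-byte sectors */
};

/* Issue one discard per pending range; stop at the first error. */
static int issue_pending_discards(struct block_device *bdev,
                                  struct list_head *ranges)
{
        struct pending_discard  *pd;
        int                     error = 0;

        list_for_each_entry(pd, ranges, list) {
                error = blkdev_issue_discard(bdev, pd->start, pd->nr_sects,
                                             GFP_NOFS, 0);
                if (error)
                        break;
        }
        return error;
}
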
 
index 19af0ab0d0c6c6c1cab862016dbe258263e39d31..3d68bb267c5fc064279c5545cf2039974c89fa7b 100644 (file)
@@ -224,6 +224,7 @@ typedef struct xfs_mount {
 #define XFS_MOUNT_FS_SHUTDOWN  (1ULL << 4)     /* atomic stop of all filesystem
                                                   operations, typically for
                                                   disk errors in metadata */
+#define XFS_MOUNT_DISCARD      (1ULL << 5)     /* discard unused blocks */
 #define XFS_MOUNT_RETERR       (1ULL << 6)     /* return alignment errors to
                                                   user */
 #define XFS_MOUNT_NOALIGN      (1ULL << 7)     /* turn off stripe alignment
index d1f24858ccc4d365db467f8bdbe222ef4511d354..7c7bc2b786bd47d6ec89e31bcf966f6dc5121ec1 100644 (file)
@@ -609,7 +609,7 @@ xfs_trans_free(
        struct xfs_trans        *tp)
 {
        xfs_alloc_busy_sort(&tp->t_busy);
-       xfs_alloc_busy_clear(tp->t_mountp, &tp->t_busy);
+       xfs_alloc_busy_clear(tp->t_mountp, &tp->t_busy, false);
 
        atomic_dec(&tp->t_mountp->m_active_trans);
        xfs_trans_free_dqinfo(tp);
index 110fa700f85380e29397287ef597734f2a366149..71c778033f575e53b106c0fa649202d0a6a5ee3a 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _ASM_GENERIC_BITOPS_FIND_H_
 #define _ASM_GENERIC_BITOPS_FIND_H_
 
+#ifndef find_next_bit
 /**
  * find_next_bit - find the next set bit in a memory region
  * @addr: The address to base the search on
@@ -9,7 +10,9 @@
  */
 extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
                size, unsigned long offset);
+#endif
 
+#ifndef find_next_zero_bit
 /**
  * find_next_zero_bit - find the next cleared bit in a memory region
  * @addr: The address to base the search on
@@ -18,6 +21,7 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
  */
 extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
                long size, unsigned long offset);
+#endif
 
 #ifdef CONFIG_GENERIC_FIND_FIRST_BIT
 
index 946a21b1b5dc66dffae64c4b58f10efaa2e940fd..f95c663a6a4169f6a7ae2258623aeebec6d661c9 100644 (file)
@@ -30,13 +30,20 @@ static inline unsigned long find_first_zero_bit_le(const void *addr,
 
 #define BITOP_LE_SWIZZLE       ((BITS_PER_LONG-1) & ~0x7)
 
+#ifndef find_next_zero_bit_le
 extern unsigned long find_next_zero_bit_le(const void *addr,
                unsigned long size, unsigned long offset);
+#endif
+
+#ifndef find_next_bit_le
 extern unsigned long find_next_bit_le(const void *addr,
                unsigned long size, unsigned long offset);
+#endif
 
+#ifndef find_first_zero_bit_le
 #define find_first_zero_bit_le(addr, size) \
        find_next_zero_bit_le((addr), (size), 0)
+#endif
 
 #else
 #error "Please fix <asm/byteorder.h>"
index 91784841e4079e35fe4fab89aaaf8a60c4e7b081..dfb0ec666c9441d08ab1e81166e27c6a290b56c9 100644 (file)
@@ -162,46 +162,6 @@ extern void warn_slowpath_null(const char *file, const int line);
        unlikely(__ret_warn_once);                              \
 })
 
-#ifdef CONFIG_PRINTK
-
-#define WARN_ON_RATELIMIT(condition, state)                    \
-               WARN_ON((condition) && __ratelimit(state))
-
-#define __WARN_RATELIMIT(condition, state, format...)          \
-({                                                             \
-       int rtn = 0;                                            \
-       if (unlikely(__ratelimit(state)))                       \
-               rtn = WARN(condition, format);                  \
-       rtn;                                                    \
-})
-
-#define WARN_RATELIMIT(condition, format...)                   \
-({                                                             \
-       static DEFINE_RATELIMIT_STATE(_rs,                      \
-                                     DEFAULT_RATELIMIT_INTERVAL,       \
-                                     DEFAULT_RATELIMIT_BURST); \
-       __WARN_RATELIMIT(condition, &_rs, format);              \
-})
-
-#else
-
-#define WARN_ON_RATELIMIT(condition, state)                    \
-       WARN_ON(condition)
-
-#define __WARN_RATELIMIT(condition, state, format...)          \
-({                                                             \
-       int rtn = WARN(condition, format);                      \
-       rtn;                                                    \
-})
-
-#define WARN_RATELIMIT(condition, format...)                   \
-({                                                             \
-       int rtn = WARN(condition, format);                      \
-       rtn;                                                    \
-})
-
-#endif
-
 /*
  * WARN_ON_SMP() is for cases that the warning is either
  * meaningless for !SMP or may even cause failures.
diff --git a/include/asm-generic/ptrace.h b/include/asm-generic/ptrace.h
new file mode 100644 (file)
index 0000000..82e674f
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Common low level (register) ptrace helpers
+ *
+ * Copyright 2004-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef __ASM_GENERIC_PTRACE_H__
+#define __ASM_GENERIC_PTRACE_H__
+
+#ifndef __ASSEMBLY__
+
+/* Helpers for working with the instruction pointer */
+#ifndef GET_IP
+#define GET_IP(regs) ((regs)->pc)
+#endif
+#ifndef SET_IP
+#define SET_IP(regs, val) (GET_IP(regs) = (val))
+#endif
+
+static inline unsigned long instruction_pointer(struct pt_regs *regs)
+{
+       return GET_IP(regs);
+}
+static inline void instruction_pointer_set(struct pt_regs *regs,
+                                           unsigned long val)
+{
+       SET_IP(regs, val);
+}
+
+#ifndef profile_pc
+#define profile_pc(regs) instruction_pointer(regs)
+#endif
+
+/* Helpers for working with the user stack pointer */
+#ifndef GET_USP
+#define GET_USP(regs) ((regs)->usp)
+#endif
+#ifndef SET_USP
+#define SET_USP(regs, val) (GET_USP(regs) = (val))
+#endif
+
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+       return GET_USP(regs);
+}
+static inline void user_stack_pointer_set(struct pt_regs *regs,
+                                          unsigned long val)
+{
+       SET_USP(regs, val);
+}
+
+/* Helpers for working with the frame pointer */
+#ifndef GET_FP
+#define GET_FP(regs) ((regs)->fp)
+#endif
+#ifndef SET_FP
+#define SET_FP(regs, val) (GET_FP(regs) = (val))
+#endif
+
+static inline unsigned long frame_pointer(struct pt_regs *regs)
+{
+       return GET_FP(regs);
+}
+static inline void frame_pointer_set(struct pt_regs *regs,
+                                     unsigned long val)
+{
+       SET_FP(regs, val);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif
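
A sketch of how an architecture's <asm/ptrace.h> could hook into this new generic header. The struct layout and register names below are purely illustrative (not taken from any real port); the point is that any GET_*/SET_* overrides must be defined before the include so the #ifndef guards above pick them up:

        /* Hypothetical arch whose registers are named ip/sp/bp, not pc/usp/fp. */
        struct pt_regs {
                unsigned long ip;
                unsigned long sp;
                unsigned long bp;
        };

        #define GET_IP(regs)    ((regs)->ip)
        #define GET_USP(regs)   ((regs)->sp)
        #define GET_FP(regs)    ((regs)->bp)

        #include <asm-generic/ptrace.h> /* supplies instruction_pointer() etc. */
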
index 198087a16fc4f5407049e416abec17c846086ca1..1ae12710d7328a160bef4b793a912995a07c474e 100644 (file)
 #ifndef __BASIC_MMIO_GPIO_H
 #define __BASIC_MMIO_GPIO_H
 
+#include <linux/gpio.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+
 struct bgpio_pdata {
        int base;
+       int ngpio;
 };
 
+struct device;
+
+struct bgpio_chip {
+       struct gpio_chip gc;
+
+       unsigned long (*read_reg)(void __iomem *reg);
+       void (*write_reg)(void __iomem *reg, unsigned long data);
+
+       void __iomem *reg_dat;
+       void __iomem *reg_set;
+       void __iomem *reg_clr;
+       void __iomem *reg_dir;
+
+       /* Number of bits (GPIOs): <register width> * 8. */
+       int bits;
+
+       /*
+        * Some GPIO controllers work with the big-endian bits notation,
+        * e.g. in an 8-bit register, GPIO7 is the least significant bit.
+        */
+       unsigned long (*pin2mask)(struct bgpio_chip *bgc, unsigned int pin);
+
+       /*
+        * Used to lock bgpio_chip->data. Also, this is needed to keep
+        * shadowed and real data registers writes together.
+        */
+       spinlock_t lock;
+
+       /* Shadowed data register to clear/set bits safely. */
+       unsigned long data;
+
+       /* Shadowed direction registers to clear/set direction safely. */
+       unsigned long dir;
+};
+
+static inline struct bgpio_chip *to_bgpio_chip(struct gpio_chip *gc)
+{
+       return container_of(gc, struct bgpio_chip, gc);
+}
+
+int __devexit bgpio_remove(struct bgpio_chip *bgc);
+int __devinit bgpio_init(struct bgpio_chip *bgc,
+                        struct device *dev,
+                        unsigned long sz,
+                        void __iomem *dat,
+                        void __iomem *set,
+                        void __iomem *clr,
+                        void __iomem *dirout,
+                        void __iomem *dirin,
+                        bool big_endian);
+
 #endif /* __BASIC_MMIO_GPIO_H */
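
A minimal sketch of a driver registering a bank through the new bgpio helpers, assuming a single byte-wide data register that the caller has already ioremapped; there are no set/clear or direction registers in this example and the names are invented:

        #include <linux/basic_mmio_gpio.h>
        #include <linux/gpio.h>
        #include <linux/slab.h>

        static int __devinit demo_gpio_setup(struct device *dev, void __iomem *dat)
        {
                struct bgpio_chip *bgc;
                int err;

                bgc = kzalloc(sizeof(*bgc), GFP_KERNEL);
                if (!bgc)
                        return -ENOMEM;

                /* 1-byte data register, no set/clr/dir registers,
                 * little-endian bit numbering (GPIO0 is bit 0). */
                err = bgpio_init(bgc, dev, 1, dat, NULL, NULL, NULL, NULL, false);
                if (err) {
                        kfree(bgc);
                        return err;
                }

                return gpiochip_add(&bgc->gc);
        }
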
index 2184c6b97aebb699206e2eb7426bd716b8f0e54f..a3ef66a2a08303bb8cc7492a70bda66c22c2e473 100644 (file)
@@ -148,7 +148,7 @@ static inline unsigned long __ffs64(u64 word)
 
 #ifdef __KERNEL__
 
-#ifdef CONFIG_GENERIC_FIND_LAST_BIT
+#ifndef find_last_bit
 /**
  * find_last_bit - find the last set bit in a memory region
  * @addr: The address to start the search at
@@ -158,7 +158,7 @@ static inline unsigned long __ffs64(u64 word)
  */
 extern unsigned long find_last_bit(const unsigned long *addr,
                                   unsigned long size);
-#endif /* CONFIG_GENERIC_FIND_LAST_BIT */
+#endif
 
 #endif /* __KERNEL__ */
 #endif
index f5df23561b96d0428cfed1d26fdaa11127e46b0d..503c8a6b30792d8e725a9233be6298e801e439ac 100644 (file)
@@ -217,8 +217,24 @@ int cont_write_begin(struct file *, struct address_space *, loff_t,
                        get_block_t *, loff_t *);
 int generic_cont_expand_simple(struct inode *inode, loff_t size);
 int block_commit_write(struct page *page, unsigned from, unsigned to);
+int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+                               get_block_t get_block);
 int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
                                get_block_t get_block);
+/* Convert errno to return value from ->page_mkwrite() call */
+static inline int block_page_mkwrite_return(int err)
+{
+       if (err == 0)
+               return VM_FAULT_LOCKED;
+       if (err == -EFAULT)
+               return VM_FAULT_NOPAGE;
+       if (err == -ENOMEM)
+               return VM_FAULT_OOM;
+       if (err == -EAGAIN)
+               return VM_FAULT_RETRY;
+       /* -ENOSPC, -EDQUOT, -EIO ... */
+       return VM_FAULT_SIGBUS;
+}
 sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
 int block_truncate_page(struct address_space *, loff_t, get_block_t *);
 int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
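
A sketch of a ->page_mkwrite implementation built on the two additions above. myfs_get_block is a placeholder for the filesystem's get_block_t, and any filesystem-private locking against truncate (the reason for splitting out __block_page_mkwrite()) is elided:

        #include <linux/buffer_head.h>
        #include <linux/mm.h>

        static int myfs_get_block(struct inode *inode, sector_t iblock,
                                  struct buffer_head *bh_result, int create);

        static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        {
                int err;

                /* __block_page_mkwrite() returns 0 with the page locked on
                 * success, or a -errno; translate that into the VM_FAULT_*
                 * codes the fault handler expects. */
                err = __block_page_mkwrite(vma, vmf, myfs_get_block);
                return block_page_mkwrite_return(err);
        }
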
index 5ac7ebc36dbb335604acfa74007a325fc0c7d658..ab4ac0ccb857ac4c6b5197c19f3a31a2dc2a3504 100644 (file)
@@ -467,12 +467,14 @@ struct cgroup_subsys {
        int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
        void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp);
        int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
-                         struct task_struct *tsk, bool threadgroup);
+                         struct task_struct *tsk);
+       int (*can_attach_task)(struct cgroup *cgrp, struct task_struct *tsk);
        void (*cancel_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
-                         struct task_struct *tsk, bool threadgroup);
+                             struct task_struct *tsk);
+       void (*pre_attach)(struct cgroup *cgrp);
+       void (*attach_task)(struct cgroup *cgrp, struct task_struct *tsk);
        void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp,
-                       struct cgroup *old_cgrp, struct task_struct *tsk,
-                       bool threadgroup);
+                      struct cgroup *old_cgrp, struct task_struct *tsk);
        void (*fork)(struct cgroup_subsys *ss, struct task_struct *task);
        void (*exit)(struct cgroup_subsys *ss, struct cgroup *cgrp,
                        struct cgroup *old_cgrp, struct task_struct *task);
@@ -553,9 +555,6 @@ static inline struct cgroup* task_cgroup(struct task_struct *task,
        return task_subsys_state(task, subsys_id)->cgroup;
 }
 
-int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *ss,
-                                                       char *nodename);
-
 /* A cgroup_iter should be treated as an opaque object */
 struct cgroup_iter {
        struct list_head *cg_link;
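
A sketch of a controller filling in the split per-task attach callbacks introduced above. The subsystem is fictitious and only the fields relevant to this change are shown:

        static int demo_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
        {
                return 0;       /* no per-task veto */
        }

        static void demo_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
        {
                /* per-task bookkeeping when a single thread is moved */
        }

        struct cgroup_subsys demo_subsys = {
                .name            = "demo",
                .can_attach_task = demo_can_attach_task,
                .attach_task     = demo_attach_task,
        };
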
index cdbfcb8780ec94d4e69ea9ab0826e31d49ca8cfe..ac663c18776c95fdf77f7eb49ce2b4503d2cb795 100644 (file)
@@ -19,12 +19,6 @@ SUBSYS(debug)
 
 /* */
 
-#ifdef CONFIG_CGROUP_NS
-SUBSYS(ns)
-#endif
-
-/* */
-
 #ifdef CONFIG_CGROUP_SCHED
 SUBSYS(cpu_cgroup)
 #endif
diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
new file mode 100644 (file)
index 0000000..04ffb2e
--- /dev/null
@@ -0,0 +1,122 @@
+#ifndef _LINUX_CLEANCACHE_H
+#define _LINUX_CLEANCACHE_H
+
+#include <linux/fs.h>
+#include <linux/exportfs.h>
+#include <linux/mm.h>
+
+#define CLEANCACHE_KEY_MAX 6
+
+/*
+ * cleancache requires every file with a page in cleancache to have a
+ * unique key unless/until the file is removed/truncated.  For some
+ * filesystems, the inode number is unique, but for "modern" filesystems
+ * an exportable filehandle is required (see exportfs.h)
+ */
+struct cleancache_filekey {
+       union {
+               ino_t ino;
+               __u32 fh[CLEANCACHE_KEY_MAX];
+               u32 key[CLEANCACHE_KEY_MAX];
+       } u;
+};
+
+struct cleancache_ops {
+       int (*init_fs)(size_t);
+       int (*init_shared_fs)(char *uuid, size_t);
+       int (*get_page)(int, struct cleancache_filekey,
+                       pgoff_t, struct page *);
+       void (*put_page)(int, struct cleancache_filekey,
+                       pgoff_t, struct page *);
+       void (*flush_page)(int, struct cleancache_filekey, pgoff_t);
+       void (*flush_inode)(int, struct cleancache_filekey);
+       void (*flush_fs)(int);
+};
+
+extern struct cleancache_ops
+       cleancache_register_ops(struct cleancache_ops *ops);
+extern void __cleancache_init_fs(struct super_block *);
+extern void __cleancache_init_shared_fs(char *, struct super_block *);
+extern int  __cleancache_get_page(struct page *);
+extern void __cleancache_put_page(struct page *);
+extern void __cleancache_flush_page(struct address_space *, struct page *);
+extern void __cleancache_flush_inode(struct address_space *);
+extern void __cleancache_flush_fs(struct super_block *);
+extern int cleancache_enabled;
+
+#ifdef CONFIG_CLEANCACHE
+static inline bool cleancache_fs_enabled(struct page *page)
+{
+       return page->mapping->host->i_sb->cleancache_poolid >= 0;
+}
+static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping)
+{
+       return mapping->host->i_sb->cleancache_poolid >= 0;
+}
+#else
+#define cleancache_enabled (0)
+#define cleancache_fs_enabled(_page) (0)
+#define cleancache_fs_enabled_mapping(_page) (0)
+#endif
+
+/*
+ * The shim layer provided by these inline functions allows the compiler
+ * to reduce all cleancache hooks to nothingness if CONFIG_CLEANCACHE
+ * is disabled, to a single global variable check if CONFIG_CLEANCACHE
+ * is enabled but no cleancache "backend" has dynamically enabled it,
+ * and, for the most frequent cleancache ops, to a single global variable
+ * check plus a superblock element comparison if CONFIG_CLEANCACHE is enabled
+ * and a cleancache backend has dynamically enabled cleancache, but the
+ * filesystem referenced by that cleancache op has not enabled cleancache.
+ * As a result, CONFIG_CLEANCACHE can be enabled by default with essentially
+ * no measurable performance impact.
+ */
+
+static inline void cleancache_init_fs(struct super_block *sb)
+{
+       if (cleancache_enabled)
+               __cleancache_init_fs(sb);
+}
+
+static inline void cleancache_init_shared_fs(char *uuid, struct super_block *sb)
+{
+       if (cleancache_enabled)
+               __cleancache_init_shared_fs(uuid, sb);
+}
+
+static inline int cleancache_get_page(struct page *page)
+{
+       int ret = -1;
+
+       if (cleancache_enabled && cleancache_fs_enabled(page))
+               ret = __cleancache_get_page(page);
+       return ret;
+}
+
+static inline void cleancache_put_page(struct page *page)
+{
+       if (cleancache_enabled && cleancache_fs_enabled(page))
+               __cleancache_put_page(page);
+}
+
+static inline void cleancache_flush_page(struct address_space *mapping,
+                                       struct page *page)
+{
+       /* careful... page->mapping is NULL sometimes when this is called */
+       if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
+               __cleancache_flush_page(mapping, page);
+}
+
+static inline void cleancache_flush_inode(struct address_space *mapping)
+{
+       if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
+               __cleancache_flush_inode(mapping);
+}
+
+static inline void cleancache_flush_fs(struct super_block *sb)
+{
+       if (cleancache_enabled)
+               __cleancache_flush_fs(sb);
+}
+
+#endif /* _LINUX_CLEANCACHE_H */
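
A skeletal cleancache backend, purely to show how the ops table above is wired up: every get is treated as a miss and puts are discarded, so it is functionally a no-op. Pool-id handling is simplified and the shared-fs hook is omitted; a real backend fills in all of the callbacks:

        #include <linux/cleancache.h>
        #include <linux/module.h>

        static int demo_init_fs(size_t pagesize)
        {
                return 0;               /* hand out pool id 0 */
        }

        static int demo_get_page(int pool, struct cleancache_filekey key,
                                 pgoff_t index, struct page *page)
        {
                return -1;              /* always a miss */
        }

        static void demo_put_page(int pool, struct cleancache_filekey key,
                                  pgoff_t index, struct page *page)
        {
                /* a real backend would copy the page into its own store here */
        }

        static void demo_flush_page(int pool, struct cleancache_filekey key,
                                    pgoff_t index) { }
        static void demo_flush_inode(int pool, struct cleancache_filekey key) { }
        static void demo_flush_fs(int pool) { }

        static struct cleancache_ops demo_ops = {
                .init_fs     = demo_init_fs,
                .get_page    = demo_get_page,
                .put_page    = demo_put_page,
                .flush_page  = demo_flush_page,
                .flush_inode = demo_flush_inode,
                .flush_fs    = demo_flush_fs,
        };

        static int __init demo_cleancache_init(void)
        {
                (void)cleancache_register_ops(&demo_ops); /* returns old ops */
                return 0;
        }
        module_init(demo_cleancache_init);
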
index 088cd4ace4ef756c542cffc66e9b8a42f5ad1b38..74054074e876d8da1ae9de24dc8ae64f856391c7 100644 (file)
@@ -66,6 +66,11 @@ static inline void vmcore_unusable(void)
        if (is_kdump_kernel())
                elfcorehdr_addr = ELFCORE_ADDR_ERR;
 }
+
+#define HAVE_OLDMEM_PFN_IS_RAM 1
+extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn));
+extern void unregister_oldmem_pfn_is_ram(void);
+
 #else /* !CONFIG_CRASH_DUMP */
 static inline int is_kdump_kernel(void) { return 0; }
 #endif /* CONFIG_CRASH_DUMP */
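
A sketch of the new hook's use: a (hypothetical) paravirtualized backend telling the /proc/vmcore reader which old-kernel pfns are really backed by RAM so that reads of unbacked pages can be handled gracefully. The pfn check itself is made up:

        #include <linux/crash_dump.h>
        #include <linux/module.h>

        /* Claim every pfn below an arbitrary boundary is RAM; a real backend
         * would consult hypervisor/balloon state instead. */
        static int demo_oldmem_pfn_is_ram(unsigned long pfn)
        {
                return pfn < 0x100000 ? 1 : 0;
        }

        static int __init demo_register(void)
        {
                return register_oldmem_pfn_is_ram(demo_oldmem_pfn_is_ram);
        }
        module_init(demo_register);

        static void __exit demo_unregister(void)
        {
                unregister_oldmem_pfn_is_ram();
        }
        module_exit(demo_unregister);
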
index be16b61283ccb9855eabe82a97e4fd2345409f90..82607992f308aa4510f15c1becd64158e86947ca 100644 (file)
@@ -1,4 +1,4 @@
-/* Credentials management - see Documentation/credentials.txt
+/* Credentials management - see Documentation/security/credentials.txt
  *
  * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
index ebeb2f3ad068db1ec6d8cee92a1bcfb6cd097630..6843cf193a445134cde17f7d2db6893399ac8cda 100644 (file)
@@ -21,6 +21,8 @@ struct flex_array {
                struct {
                        int element_size;
                        int total_nr_elements;
+                       int elems_per_part;
+                       u32 reciprocal_elems;
                        struct flex_array_part *parts[];
                };
                /*
index 3f9d3251790d081529e51870a073682260b2212b..241609346dfb7f0853252b3549a2637f3cd3eb2c 100644 (file)
@@ -1428,6 +1428,11 @@ struct super_block {
         */
        char __rcu *s_options;
        const struct dentry_operations *s_d_op; /* default d_op for dentries */
+
+       /*
+        * Saved pool identifier for cleancache (-1 means none)
+        */
+       int cleancache_poolid;
 };
 
 extern struct timespec current_fs_time(struct super_block *sb);
index 943c76b3d4bb94d9536ecf8e324bea8ffda853e4..59225ef27d15062813e4799ad95e6df2f57241be 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _LINUX_HUGETLB_H
 #define _LINUX_HUGETLB_H
 
+#include <linux/mm_types.h>
 #include <linux/fs.h>
 #include <linux/hugetlb_inline.h>
 
@@ -41,7 +42,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long address, unsigned int flags);
 int hugetlb_reserve_pages(struct inode *inode, long from, long to,
                                                struct vm_area_struct *vma,
-                                               int acctflags);
+                                               vm_flags_t vm_flags);
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
 int dequeue_hwpoisoned_huge_page(struct page *page);
 void copy_huge_page(struct page *dst, struct page *src);
@@ -168,7 +169,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
 
 extern const struct file_operations hugetlbfs_file_operations;
 extern const struct vm_operations_struct hugetlb_vm_ops;
-struct file *hugetlb_file_setup(const char *name, size_t size, int acct,
+struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
                                struct user_struct **user, int creat_flags);
 int hugetlb_get_quota(struct address_space *mapping, long delta);
 void hugetlb_put_quota(struct address_space *mapping, long delta);
@@ -192,7 +193,7 @@ static inline void set_file_hugepages(struct file *file)
 #define is_file_hugepages(file)                        0
 #define set_file_hugepages(file)               BUG()
 static inline struct file *hugetlb_file_setup(const char *name, size_t size,
-               int acctflag, struct user_struct **user, int creat_flags)
+               vm_flags_t acctflag, struct user_struct **user, int creat_flags)
 {
        return ERR_PTR(-ENOSYS);
 }
index 6931489a5c14433e57b28493e306d92f8b1d28d4..2bb681fbeb35b3f4dcdece493931e0679c89d824 100644 (file)
@@ -7,7 +7,7 @@
 
 static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
 {
-       return vma->vm_flags & VM_HUGETLB;
+       return !!(vma->vm_flags & VM_HUGETLB);
 }
 
 #else
index 0c0d1ae79981f90e48fa71eea442a8d06026c7f1..ba4f88624fcd2321fbeffd11ce61ec983e81dd9d 100644 (file)
@@ -91,6 +91,7 @@
 #define BCI_INTR_OFFSET                2
 #define MADC_INTR_OFFSET       3
 #define USB_INTR_OFFSET                4
+#define CHARGERFAULT_INTR_OFFSET 5
 #define BCI_PRES_INTR_OFFSET   9
 #define USB_PRES_INTR_OFFSET   10
 #define RTC_INTR_OFFSET                11
 #define MMC_PU                         (0x1 << 3)
 #define MMC_PD                         (0x1 << 2)
 
-
+#define TWL_SIL_TYPE(rev)              ((rev) & 0x00FFFFFF)
+#define TWL_SIL_REV(rev)               ((rev) >> 24)
+#define TWL_SIL_5030                   0x09002F
+#define TWL5030_REV_1_0                        0x00
+#define TWL5030_REV_1_1                        0x10
+#define TWL5030_REV_1_2                        0x30
 
 #define TWL4030_CLASS_ID               0x4030
 #define TWL6030_CLASS_ID               0x6030
@@ -165,6 +171,8 @@ static inline int twl_class_is_ ##class(void)       \
 TWL_CLASS_IS(4030, TWL4030_CLASS_ID)
 TWL_CLASS_IS(6030, TWL6030_CLASS_ID)
 
+#define TWL6025_SUBCLASS       BIT(4)  /* TWL6025 has changed registers */
+
 /*
  * Read and write single 8-bit registers
  */
@@ -180,6 +188,9 @@ int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg);
 int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
 int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes);
 
+int twl_get_type(void);
+int twl_get_version(void);
+
 int twl6030_interrupt_unmask(u8 bit_mask, u8 offset);
 int twl6030_interrupt_mask(u8 bit_mask, u8 offset);
 
@@ -279,7 +290,12 @@ static inline int twl6030_mmc_card_detect(struct device *dev, int slot)
  *(Use TWL_4030_MODULE_INTBR)
  */
 
+#define REG_IDCODE_7_0                 0x00
+#define REG_IDCODE_15_8                        0x01
+#define REG_IDCODE_16_23               0x02
+#define REG_IDCODE_31_24               0x03
 #define REG_GPPUPDCTR1                 0x0F
+#define REG_UNLOCK_TEST_REG            0x12
 
 /*I2C1 and I2C4(SR) SDA/SCL pull-up control bits */
 
@@ -288,6 +304,8 @@ static inline int twl6030_mmc_card_detect(struct device *dev, int slot)
 #define SR_I2C_SCL_CTRL_PU             BIT(4)
 #define SR_I2C_SDA_CTRL_PU             BIT(6)
 
+#define TWL_EEPROM_R_UNLOCK            0x49
+
 /*----------------------------------------------------------------------*/
 
 /*
@@ -501,7 +519,7 @@ static inline int twl6030_mmc_card_detect(struct device *dev, int slot)
 #define RES_32KCLKOUT           26
 #define RES_RESET               27
 /* Power Reference */
-#define RES_Main_Ref            28
+#define RES_MAIN_REF            28
 
 #define TOTAL_RESOURCES                28
 /*
@@ -593,6 +611,7 @@ enum twl4030_usb_mode {
 
 struct twl4030_usb_data {
        enum twl4030_usb_mode   usb_mode;
+       unsigned long           features;
 
        int             (*phy_init)(struct device *dev);
        int             (*phy_exit)(struct device *dev);
@@ -699,6 +718,20 @@ struct twl4030_platform_data {
        struct regulator_init_data              *vcxio;
        struct regulator_init_data              *vusb;
        struct regulator_init_data              *clk32kg;
+       /* TWL6025 LDO regulators */
+       struct regulator_init_data              *ldo1;
+       struct regulator_init_data              *ldo2;
+       struct regulator_init_data              *ldo3;
+       struct regulator_init_data              *ldo4;
+       struct regulator_init_data              *ldo5;
+       struct regulator_init_data              *ldo6;
+       struct regulator_init_data              *ldo7;
+       struct regulator_init_data              *ldoln;
+       struct regulator_init_data              *ldousb;
+       /* TWL6025 DCDC regulators */
+       struct regulator_init_data              *smps3;
+       struct regulator_init_data              *smps4;
+       struct regulator_init_data              *vio6025;
 };
 
 /*----------------------------------------------------------------------*/
@@ -780,4 +813,21 @@ static inline int twl4030charger_usb_en(int enable) { return 0; }
 #define TWL6030_REG_VRTC       47
 #define TWL6030_REG_CLK32KG    48
 
+/* LDOs on 6025 have different names */
+#define TWL6025_REG_LDO2       49
+#define TWL6025_REG_LDO4       50
+#define TWL6025_REG_LDO3       51
+#define TWL6025_REG_LDO5       52
+#define TWL6025_REG_LDO1       53
+#define TWL6025_REG_LDO7       54
+#define TWL6025_REG_LDO6       55
+#define TWL6025_REG_LDOLN      56
+#define TWL6025_REG_LDOUSB     57
+
+/* 6025 DCDC supplies */
+#define TWL6025_REG_SMPS3      58
+#define TWL6025_REG_SMPS4      59
+#define TWL6025_REG_VIO                60
+
+
 #endif /* End of __TWL4030_H */
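
A sketch of a silicon-revision gated quirk using the new twl_get_type()/twl_get_version() helpers, assuming they return the TWL_SIL_* type and TWL5030_REV_* revision codes defined above; the quirk itself is invented:

        #include <linux/i2c/twl.h>

        static bool demo_needs_rev11_workaround(void)
        {
                return twl_get_type() == TWL_SIL_5030 &&
                       twl_get_version() == TWL5030_REV_1_1;
        }
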
index 0f1325d98295d5b8b88761fe46f34654ffee8011..0065ffd3226ba9dc17d272d80bee076b8c0f612f 100644 (file)
@@ -132,10 +132,6 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
 
 int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
 
-#ifdef CONFIG_SYSCTL
-extern struct ctl_table ether_table[];
-#endif
-
 int mac_pton(const char *s, u8 *mac);
 extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
 
index bafc58c00fc32b92fdbdb533c3e43d0e659c44b3..580f70c02391712f7cc0351d645361b683b82a8b 100644 (file)
 extern struct files_struct init_files;
 extern struct fs_struct init_fs;
 
+#ifdef CONFIG_CGROUPS
+#define INIT_THREADGROUP_FORK_LOCK(sig)                                        \
+       .threadgroup_fork_lock =                                        \
+               __RWSEM_INITIALIZER(sig.threadgroup_fork_lock),
+#else
+#define INIT_THREADGROUP_FORK_LOCK(sig)
+#endif
+
 #define INIT_SIGNALS(sig) {                                            \
        .nr_threads     = 1,                                            \
        .wait_chldexit  = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
@@ -38,6 +46,7 @@ extern struct fs_struct init_fs;
        },                                                              \
        .cred_guard_mutex =                                             \
                 __MUTEX_INITIALIZER(sig.cred_guard_mutex),             \
+       INIT_THREADGROUP_FORK_LOCK(sig)                                 \
 }
 
 extern struct nsproxy init_nsproxy;
diff --git a/include/linux/input/pmic8xxx-keypad.h b/include/linux/input/pmic8xxx-keypad.h
new file mode 100644 (file)
index 0000000..5f1e2f9
--- /dev/null
@@ -0,0 +1,52 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PMIC8XXX_KEYPAD_H__
+#define __PMIC8XXX_KEYPAD_H__
+
+#include <linux/input/matrix_keypad.h>
+
+#define PM8XXX_KEYPAD_DEV_NAME     "pm8xxx-keypad"
+
+/**
+ * struct pm8xxx_keypad_platform_data - platform data for keypad
+ * @keymap_data - matrix keymap data
+ * @input_name - input device name
+ * @input_phys_device - physical device name for the input device
+ * @num_cols - number of columns of keypad
+ * @num_rows - number of row of keypad
+ * @debounce_ms - debounce period in milliseconds
+ * @scan_delay_ms - scan delay in milliseconds
+ * @row_hold_ns - row hold period in nanoseconds
+ * @wakeup - configure keypad as wakeup
+ * @rep - enable or disable key repeat bit
+ */
+struct pm8xxx_keypad_platform_data {
+       const struct matrix_keymap_data *keymap_data;
+
+       const char *input_name;
+       const char *input_phys_device;
+
+       unsigned int num_cols;
+       unsigned int num_rows;
+       unsigned int rows_gpio_start;
+       unsigned int cols_gpio_start;
+
+       unsigned int debounce_ms;
+       unsigned int scan_delay_ms;
+       unsigned int row_hold_ns;
+
+       bool wakeup;
+       bool rep;
+};
+
+#endif /*__PMIC8XXX_KEYPAD_H__ */
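
An illustrative board-file snippet wiring up the new keypad platform data; the keymap, geometry, and timing values are invented for the example:

        #include <linux/input.h>
        #include <linux/input/matrix_keypad.h>
        #include <linux/input/pmic8xxx-keypad.h>

        static const unsigned int demo_keymap[] = {
                KEY(0, 0, KEY_VOLUMEUP),
                KEY(0, 1, KEY_VOLUMEDOWN),
                KEY(1, 0, KEY_CAMERA),
                KEY(1, 1, KEY_POWER),
        };

        static struct matrix_keymap_data demo_keymap_data = {
                .keymap         = demo_keymap,
                .keymap_size    = ARRAY_SIZE(demo_keymap),
        };

        static struct pm8xxx_keypad_platform_data demo_keypad_pdata = {
                .keymap_data    = &demo_keymap_data,
                .input_name     = "demo-keypad",
                .num_rows       = 2,
                .num_cols       = 2,
                .debounce_ms    = 15,
                .scan_delay_ms  = 32,
                .row_hold_ns    = 91500,
                .wakeup         = true,
                .rep            = true,
        };
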
diff --git a/include/linux/input/pmic8xxx-pwrkey.h b/include/linux/input/pmic8xxx-pwrkey.h
new file mode 100644 (file)
index 0000000..6d2974e
--- /dev/null
@@ -0,0 +1,31 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PMIC8XXX_PWRKEY_H__
+#define __PMIC8XXX_PWRKEY_H__
+
+#define PM8XXX_PWRKEY_DEV_NAME "pm8xxx-pwrkey"
+
+/**
+ * struct pm8xxx_pwrkey_platform_data - platform data for pwrkey driver
+ * @pull_up: power on register control for pull up/down configuration
+ * @kpd_trigger_delay_us: time delay for power key state change interrupt
+ *                  trigger.
+ * @wakeup: configure power key as wakeup source
+ */
+struct pm8xxx_pwrkey_platform_data  {
+       bool pull_up;
+       u32  kpd_trigger_delay_us;
+       u32  wakeup;
+};
+
+#endif /* __PMIC8XXX_PWRKEY_H__ */
index 906590aa69072959bbf9e758d28f00bae609089d..204f9cd26c16012126206106f5d36be3c0af21cf 100644 (file)
@@ -236,7 +236,7 @@ static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg)
    directory for this interface.  Note that the entry will
   automatically be destroyed when the interface is destroyed. */
 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
-                           read_proc_t *read_proc,
+                           const struct file_operations *proc_ops,
                            void *data);
 
 #endif /* __LINUX_IPMI_SMI_H */
index a32dcaec04e147917a3e085a5e83146189782336..4ecb7b16b278061a280d8f29bdeb7c88eae24979 100644 (file)
@@ -529,9 +529,10 @@ struct transaction_s
        enum {
                T_RUNNING,
                T_LOCKED,
-               T_RUNDOWN,
                T_FLUSH,
                T_COMMIT,
+               T_COMMIT_DFLUSH,
+               T_COMMIT_JFLUSH,
                T_FINISHED
        }                       t_state;
 
@@ -658,7 +659,9 @@ struct transaction_s
         * waiting for it to finish.
         */
        unsigned int t_synchronous_commit:1;
-       unsigned int t_flushed_data_blocks:1;
+
+       /* Disk flush needs to be sent to fs partition [no locking] */
+       int                     t_need_data_flush;
 
        /*
         * For use by the filesystem to store fs-specific data
@@ -1228,6 +1231,7 @@ int jbd2_journal_start_commit(journal_t *journal, tid_t *tid);
 int jbd2_journal_force_commit_nested(journal_t *journal);
 int jbd2_log_wait_commit(journal_t *journal, tid_t tid);
 int jbd2_log_do_checkpoint(journal_t *journal);
+int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid);
 
 void __jbd2_log_wait_for_space(journal_t *journal);
 extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *);
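
A sketch of what the new jbd2_trans_will_send_data_barrier() is for: an fsync-style path can skip issuing its own cache flush when the commit of the transaction it cares about will already flush the filesystem device. The surrounding variables and the exact policy are placeholders, not any particular filesystem's code:

        #include <linux/fs.h>
        #include <linux/jbd2.h>
        #include <linux/blkdev.h>

        static int demo_fsync_flush(struct super_block *sb, journal_t *journal,
                                    tid_t commit_tid)
        {
                /* If committing commit_tid will flush sb's device anyway,
                 * a second explicit flush here would be redundant. */
                if (jbd2_trans_will_send_data_barrier(journal, commit_tid))
                        return 0;
                return blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
        }
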
index ef19b99aff98d426c8e9b355a31360675daf9669..6ea4eebd3467b220f19bc5cd6e77214096ca2278 100644 (file)
@@ -9,7 +9,7 @@
  * 2 of the License, or (at your option) any later version.
  *
  *
- * See Documentation/keys.txt for information on keys/keyrings.
+ * See Documentation/security/keys.txt for information on keys/keyrings.
  */
 
 #ifndef _LINUX_KEY_H
index 5e9840f509804df5e49224a12bbbd079177ee4f6..9724a38ee69d5c9e292aa75c8fcc092486ebdbe7 100644 (file)
@@ -20,6 +20,8 @@
 #ifndef _LINUX_MEMCONTROL_H
 #define _LINUX_MEMCONTROL_H
 #include <linux/cgroup.h>
+#include <linux/vm_event_item.h>
+
 struct mem_cgroup;
 struct page_cgroup;
 struct page;
@@ -106,9 +108,10 @@ extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
  */
 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
 int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg);
-unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
-                                      struct zone *zone,
-                                      enum lru_list lru);
+int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
+unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
+                                               struct zone *zone,
+                                               enum lru_list lru);
 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
                                                      struct zone *zone);
 struct zone_reclaim_stat*
@@ -144,9 +147,11 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
-                                               gfp_t gfp_mask);
+                                               gfp_t gfp_mask,
+                                               unsigned long *total_scanned);
 u64 mem_cgroup_get_limit(struct mem_cgroup *mem);
 
+void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail);
 #endif
@@ -302,8 +307,8 @@ mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
 }
 
 static inline unsigned long
-mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone,
-                        enum lru_list lru)
+mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, struct zone *zone,
+                            enum lru_list lru)
 {
        return 0;
 }
@@ -338,7 +343,8 @@ static inline void mem_cgroup_dec_page_stat(struct page *page,
 
 static inline
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
-                                           gfp_t gfp_mask)
+                                           gfp_t gfp_mask,
+                                           unsigned long *total_scanned)
 {
        return 0;
 }
@@ -354,6 +360,10 @@ static inline void mem_cgroup_split_huge_fixup(struct page *head,
 {
 }
 
+static inline
+void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
+{
+}
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
index 8fba7972ff5f782f57c4db2609e70a847f308f89..63b4fb8e3b6f9aac05faef4534755a55dffc8733 100644 (file)
@@ -330,6 +330,11 @@ struct pm860x_led_pdata {
        unsigned long   flags;
 };
 
+struct pm860x_rtc_pdata {
+       int             (*sync)(unsigned int ticks);
+       int             vrtc;
+};
+
 struct pm860x_touch_pdata {
        int             gpadc_prebias;
        int             slot_cycle;
@@ -349,6 +354,7 @@ struct pm860x_power_pdata {
 struct pm860x_platform_data {
        struct pm860x_backlight_pdata   *backlight;
        struct pm860x_led_pdata         *led;
+       struct pm860x_rtc_pdata         *rtc;
        struct pm860x_touch_pdata       *touch;
        struct pm860x_power_pdata       *power;
        struct regulator_init_data      *regulator;
index 7d9b6ae1c203928ea28b0971d528054ad0a23162..896b5e47f16ec5a9ab0ef962de083e85ddbb0d19 100644 (file)
 #define AB5500_2_0     0x21
 #define AB5500_2_1     0x22
 
+/* AB8500 CIDs*/
+#define AB8500_CUTEARLY        0x00
+#define AB8500_CUT1P0  0x10
+#define AB8500_CUT1P1  0x11
+#define AB8500_CUT2P0  0x20
+#define AB8500_CUT3P0  0x30
+
 /*
  * AB3100, EVENTA1, A2 and A3 event register flags
  * these are catenated into a single 32-bit flag in the code
@@ -186,6 +193,7 @@ struct abx500_init_settings {
 struct ab3550_platform_data {
        struct {unsigned int base; unsigned int count; } irq;
        void *dev_data[AB3550_NUM_DEVICES];
+       size_t dev_data_sz[AB3550_NUM_DEVICES];
        struct abx500_init_settings *init_settings;
        unsigned int init_settings_sz;
 };
index de3c4ad19afb2def54bfceb28d2073d4d978b55f..ed793b77a1c5e2a77301964fc8822f18c4239e1c 100644 (file)
 
 #include <linux/types.h>
 
+struct led_classdev;
+struct asic3_led {
+       const char      *name;
+       const char      *default_trigger;
+       struct led_classdev *cdev;
+};
+
 struct asic3_platform_data {
        u16 *gpio_config;
        unsigned int gpio_config_num;
@@ -23,6 +30,8 @@ struct asic3_platform_data {
        unsigned int irq_base;
 
        unsigned int gpio_base;
+
+       struct asic3_led *leds;
 };
 
 #define ASIC3_NUM_GPIO_BANKS   4
@@ -111,9 +120,9 @@ struct asic3_platform_data {
 #define ASIC3_GPIOA11_PWM0             ASIC3_CONFIG_GPIO(11, 1, 1, 0)
 #define ASIC3_GPIOA12_PWM1             ASIC3_CONFIG_GPIO(12, 1, 1, 0)
 #define ASIC3_GPIOA15_CONTROL_CX       ASIC3_CONFIG_GPIO(15, 1, 1, 0)
-#define ASIC3_GPIOC0_LED0              ASIC3_CONFIG_GPIO(32, 1, 1, 0)
-#define ASIC3_GPIOC1_LED1              ASIC3_CONFIG_GPIO(33, 1, 1, 0)
-#define ASIC3_GPIOC2_LED2              ASIC3_CONFIG_GPIO(34, 1, 1, 0)
+#define ASIC3_GPIOC0_LED0              ASIC3_CONFIG_GPIO(32, 1, 0, 0)
+#define ASIC3_GPIOC1_LED1              ASIC3_CONFIG_GPIO(33, 1, 0, 0)
+#define ASIC3_GPIOC2_LED2              ASIC3_CONFIG_GPIO(34, 1, 0, 0)
 #define ASIC3_GPIOC3_SPI_RXD           ASIC3_CONFIG_GPIO(35, 1, 0, 0)
 #define ASIC3_GPIOC4_CF_nCD            ASIC3_CONFIG_GPIO(36, 1, 0, 0)
 #define ASIC3_GPIOC4_SPI_TXD           ASIC3_CONFIG_GPIO(36, 1, 1, 0)
@@ -152,6 +161,7 @@ struct asic3_platform_data {
 #define PWM_TIMEBASE_VALUE(x)    ((x)&0xf)   /* Low 4 bits sets time base */
 #define PWM_TIMEBASE_ENABLE     (1 << 4)   /* Enable clock */
 
+#define ASIC3_NUM_LEDS                  3
 #define ASIC3_LED_0_Base                0x0700
 #define ASIC3_LED_1_Base                0x0800
 #define ASIC3_LED_2_Base                     0x0900
@@ -287,10 +297,17 @@ struct asic3_platform_data {
  *
  *****************************************************************************/
 #define ASIC3_SD_CONFIG_BASE   0x0400 /* Assumes 32 bit addressing */
+#define ASIC3_SD_CONFIG_SIZE   0x0200 /* Assumes 32 bit addressing */
 #define ASIC3_SD_CTRL_BASE     0x1000
 #define ASIC3_SDIO_CTRL_BASE   0x1200
 
 #define ASIC3_MAP_SIZE_32BIT   0x2000
 #define ASIC3_MAP_SIZE_16BIT   0x1000
 
+/* Functions needed by leds-asic3 */
+
+struct asic3;
+extern void asic3_write_register(struct asic3 *asic, unsigned int reg, u32 val);
+extern u32 asic3_read_register(struct asic3 *asic, unsigned int reg);
+
 #endif /* __ASIC3_H__ */
index aef23309a742334d9f3fb33d3f3b5af38b7f024d..4e76163dd8624dec3dba23b7bdebb32ce8ad2d11 100644 (file)
@@ -33,8 +33,9 @@ struct mfd_cell {
        int                     (*suspend)(struct platform_device *dev);
        int                     (*resume)(struct platform_device *dev);
 
-       /* mfd_data can be used to pass data to client drivers */
-       void                    *mfd_data;
+       /* platform data passed to the sub devices drivers */
+       void                    *platform_data;
+       size_t                  pdata_size;
 
        /*
         * These resources can be specified relative to the parent device.
@@ -89,24 +90,6 @@ static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev)
        return pdev->mfd_cell;
 }
 
-/*
- * Given a platform device that's been created by mfd_add_devices(), fetch
- * the .mfd_data entry from the mfd_cell that created it.
- * Otherwise just return the platform_data pointer.
- * This maintains compatibility with platform drivers whose devices aren't
- * created by the mfd layer, and expect platform_data to contain what would've
- * otherwise been in mfd_data.
- */
-static inline void *mfd_get_data(struct platform_device *pdev)
-{
-       const struct mfd_cell *cell = mfd_get_cell(pdev);
-
-       if (cell)
-               return cell->mfd_data;
-       else
-               return pdev->dev.platform_data;
-}
-
 extern int mfd_add_devices(struct device *parent, int id,
                           struct mfd_cell *cells, int n_devs,
                           struct resource *mem_base,
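
With mfd_get_data() gone, a parent now hands data to its sub-devices through .platform_data/.pdata_size, and the sub-device driver reads it back the ordinary platform-device way. A sketch, with the cell name and pdata type invented:

        #include <linux/mfd/core.h>
        #include <linux/platform_device.h>

        struct demo_subdev_pdata {
                int threshold;
        };

        static struct demo_subdev_pdata demo_pdata = { .threshold = 42 };

        static struct mfd_cell demo_cells[] = {
                {
                        .name           = "demo-subdev",
                        .platform_data  = &demo_pdata,
                        .pdata_size     = sizeof(demo_pdata),
                },
        };

        /* In the sub-device driver's probe(): */
        static int __devinit demo_subdev_probe(struct platform_device *pdev)
        {
                struct demo_subdev_pdata *pdata = pdev->dev.platform_data;

                return pdata ? 0 : -EINVAL;
        }
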
index 69d1010e2e5146d7cd8d0a1c71cf8c05608024e0..5ff2400ad46cd4325b31027ff8b82554eb6bc592 100644 (file)
@@ -311,10 +311,6 @@ enum max8997_irq {
        MAX8997_IRQ_NR,
 };
 
-#define MAX8997_REG_BUCK1DVS(x)        (MAX8997_REG_BUCK1DVS1 + (x) - 1)
-#define MAX8997_REG_BUCK2DVS(x)        (MAX8997_REG_BUCK2DVS1 + (x) - 1)
-#define MAX8997_REG_BUCK5DVS(x)        (MAX8997_REG_BUCK5DVS1 + (x) - 1)
-
 #define MAX8997_NUM_GPIO       12
 struct max8997_dev {
        struct device *dev;
diff --git a/include/linux/mfd/pm8xxx/core.h b/include/linux/mfd/pm8xxx/core.h
new file mode 100644 (file)
index 0000000..bd2f4f6
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Qualcomm PMIC 8xxx driver header file
+ *
+ */
+
+#ifndef __MFD_PM8XXX_CORE_H
+#define __MFD_PM8XXX_CORE_H
+
+#include <linux/mfd/core.h>
+
+struct pm8xxx_drvdata {
+       int     (*pmic_readb) (const struct device *dev, u16 addr, u8 *val);
+       int     (*pmic_writeb) (const struct device *dev, u16 addr, u8 val);
+       int     (*pmic_read_buf) (const struct device *dev, u16 addr, u8 *buf,
+                                                                       int n);
+       int     (*pmic_write_buf) (const struct device *dev, u16 addr, u8 *buf,
+                                                                       int n);
+       int     (*pmic_read_irq_stat) (const struct device *dev, int irq);
+       void    *pm_chip_data;
+};
+
+static inline int pm8xxx_readb(const struct device *dev, u16 addr, u8 *val)
+{
+       struct pm8xxx_drvdata *dd = dev_get_drvdata(dev);
+
+       if (!dd)
+               return -EINVAL;
+       return dd->pmic_readb(dev, addr, val);
+}
+
+static inline int pm8xxx_writeb(const struct device *dev, u16 addr, u8 val)
+{
+       struct pm8xxx_drvdata *dd = dev_get_drvdata(dev);
+
+       if (!dd)
+               return -EINVAL;
+       return dd->pmic_writeb(dev, addr, val);
+}
+
+static inline int pm8xxx_read_buf(const struct device *dev, u16 addr, u8 *buf,
+                                                                       int n)
+{
+       struct pm8xxx_drvdata *dd = dev_get_drvdata(dev);
+
+       if (!dd)
+               return -EINVAL;
+       return dd->pmic_read_buf(dev, addr, buf, n);
+}
+
+static inline int pm8xxx_write_buf(const struct device *dev, u16 addr, u8 *buf,
+                                                                       int n)
+{
+       struct pm8xxx_drvdata *dd = dev_get_drvdata(dev);
+
+       if (!dd)
+               return -EINVAL;
+       return dd->pmic_write_buf(dev, addr, buf, n);
+}
+
+static inline int pm8xxx_read_irq_stat(const struct device *dev, int irq)
+{
+       struct pm8xxx_drvdata *dd = dev_get_drvdata(dev);
+
+       if (!dd)
+               return -EINVAL;
+       return dd->pmic_read_irq_stat(dev, irq);
+}
+
+#endif
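
A sketch of a PM8xxx sub-device using the inline accessors above; because the sub-devices are MFD children, the core's drvdata lives on the parent device. The register address is made up:

        #include <linux/mfd/pm8xxx/core.h>
        #include <linux/platform_device.h>

        static int demo_read_status(struct platform_device *pdev, u8 *status)
        {
                /* 0x01 is a hypothetical status register address */
                return pm8xxx_readb(pdev->dev.parent, 0x01, status);
        }
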
diff --git a/include/linux/mfd/pm8xxx/irq.h b/include/linux/mfd/pm8xxx/irq.h
new file mode 100644 (file)
index 0000000..4b21769
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Qualcomm PMIC irq 8xxx driver header file
+ *
+ */
+
+#ifndef __MFD_PM8XXX_IRQ_H
+#define __MFD_PM8XXX_IRQ_H
+
+#include <linux/errno.h>
+#include <linux/err.h>
+
+struct pm8xxx_irq_core_data {
+       u32             rev;
+       int             nirqs;
+};
+
+struct pm8xxx_irq_platform_data {
+       int                             irq_base;
+       struct pm8xxx_irq_core_data     irq_cdata;
+       int                             devirq;
+       int                             irq_trigger_flag;
+};
+
+struct pm_irq_chip;
+
+#ifdef CONFIG_MFD_PM8XXX_IRQ
+int pm8xxx_get_irq_stat(struct pm_irq_chip *chip, int irq);
+struct pm_irq_chip * __devinit pm8xxx_irq_init(struct device *dev,
+                               const struct pm8xxx_irq_platform_data *pdata);
+int __devexit pm8xxx_irq_exit(struct pm_irq_chip *chip);
+#else
+static inline int pm8xxx_get_irq_stat(struct pm_irq_chip *chip, int irq)
+{
+       return -ENXIO;
+}
+static inline struct pm_irq_chip * __devinit pm8xxx_irq_init(
+                               const struct device *dev,
+                               const struct pm8xxx_irq_platform_data *pdata)
+{
+       return ERR_PTR(-ENXIO);
+}
+static inline int __devexit pm8xxx_irq_exit(struct pm_irq_chip *chip)
+{
+       return -ENXIO;
+}
+#endif /* CONFIG_MFD_PM8XXX_IRQ */
+#endif /* __MFD_PM8XXX_IRQ_H */
diff --git a/include/linux/mfd/pm8xxx/pm8921.h b/include/linux/mfd/pm8xxx/pm8921.h
new file mode 100644 (file)
index 0000000..d5517fd
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Qualcomm PMIC 8921 driver header file
+ *
+ */
+
+#ifndef __MFD_PM8921_H
+#define __MFD_PM8921_H
+
+#include <linux/device.h>
+#include <linux/mfd/pm8xxx/irq.h>
+
+#define PM8921_NR_IRQS         256
+
+struct pm8921_platform_data {
+       int                                     irq_base;
+       struct pm8xxx_irq_platform_data         *irq_pdata;
+};
+
+#endif
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
new file mode 100644 (file)
index 0000000..8bb85b9
--- /dev/null
@@ -0,0 +1,800 @@
+/*
+ * tps65910.h  --  TI TPS6591x
+ *
+ * Copyright 2010-2011 Texas Instruments Inc.
+ *
+ * Author: Graeme Gregory <gg@slimlogic.co.uk>
+ * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>
+ * Author: Arnaud Deconinck <a-deconinck@ti.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under  the terms of the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ */
+
+#ifndef __LINUX_MFD_TPS65910_H
+#define __LINUX_MFD_TPS65910_H
+
+/* TPS chip id list */
+#define TPS65910                       0
+#define TPS65911                       1
+
+/* TPS regulator type list */
+#define REGULATOR_LDO                  0
+#define REGULATOR_DCDC                 1
+
+/*
+ * List of registers for component TPS65910
+ *
+ */
+
+#define TPS65910_SECONDS                               0x0
+#define TPS65910_MINUTES                               0x1
+#define TPS65910_HOURS                                 0x2
+#define TPS65910_DAYS                                  0x3
+#define TPS65910_MONTHS                                        0x4
+#define TPS65910_YEARS                                 0x5
+#define TPS65910_WEEKS                                 0x6
+#define TPS65910_ALARM_SECONDS                         0x8
+#define TPS65910_ALARM_MINUTES                         0x9
+#define TPS65910_ALARM_HOURS                           0xA
+#define TPS65910_ALARM_DAYS                            0xB
+#define TPS65910_ALARM_MONTHS                          0xC
+#define TPS65910_ALARM_YEARS                           0xD
+#define TPS65910_RTC_CTRL                              0x10
+#define TPS65910_RTC_STATUS                            0x11
+#define TPS65910_RTC_INTERRUPTS                                0x12
+#define TPS65910_RTC_COMP_LSB                          0x13
+#define TPS65910_RTC_COMP_MSB                          0x14
+#define TPS65910_RTC_RES_PROG                          0x15
+#define TPS65910_RTC_RESET_STATUS                      0x16
+#define TPS65910_BCK1                                  0x17
+#define TPS65910_BCK2                                  0x18
+#define TPS65910_BCK3                                  0x19
+#define TPS65910_BCK4                                  0x1A
+#define TPS65910_BCK5                                  0x1B
+#define TPS65910_PUADEN                                        0x1C
+#define TPS65910_REF                                   0x1D
+#define TPS65910_VRTC                                  0x1E
+#define TPS65910_VIO                                   0x20
+#define TPS65910_VDD1                                  0x21
+#define TPS65910_VDD1_OP                               0x22
+#define TPS65910_VDD1_SR                               0x23
+#define TPS65910_VDD2                                  0x24
+#define TPS65910_VDD2_OP                               0x25
+#define TPS65910_VDD2_SR                               0x26
+#define TPS65910_VDD3                                  0x27
+#define TPS65910_VDIG1                                 0x30
+#define TPS65910_VDIG2                                 0x31
+#define TPS65910_VAUX1                                 0x32
+#define TPS65910_VAUX2                                 0x33
+#define TPS65910_VAUX33                                        0x34
+#define TPS65910_VMMC                                  0x35
+#define TPS65910_VPLL                                  0x36
+#define TPS65910_VDAC                                  0x37
+#define TPS65910_THERM                                 0x38
+#define TPS65910_BBCH                                  0x39
+#define TPS65910_DCDCCTRL                              0x3E
+#define TPS65910_DEVCTRL                               0x3F
+#define TPS65910_DEVCTRL2                              0x40
+#define TPS65910_SLEEP_KEEP_LDO_ON                     0x41
+#define TPS65910_SLEEP_KEEP_RES_ON                     0x42
+#define TPS65910_SLEEP_SET_LDO_OFF                     0x43
+#define TPS65910_SLEEP_SET_RES_OFF                     0x44
+#define TPS65910_EN1_LDO_ASS                           0x45
+#define TPS65910_EN1_SMPS_ASS                          0x46
+#define TPS65910_EN2_LDO_ASS                           0x47
+#define TPS65910_EN2_SMPS_ASS                          0x48
+#define TPS65910_EN3_LDO_ASS                           0x49
+#define TPS65910_SPARE                                 0x4A
+#define TPS65910_INT_STS                               0x50
+#define TPS65910_INT_MSK                               0x51
+#define TPS65910_INT_STS2                              0x52
+#define TPS65910_INT_MSK2                              0x53
+#define TPS65910_INT_STS3                              0x54
+#define TPS65910_INT_MSK3                              0x55
+#define TPS65910_GPIO0                                 0x60
+#define TPS65910_GPIO1                                 0x61
+#define TPS65910_GPIO2                                 0x62
+#define TPS65910_GPIO3                                 0x63
+#define TPS65910_GPIO4                                 0x64
+#define TPS65910_GPIO5                                 0x65
+#define TPS65910_GPIO6                                 0x66
+#define TPS65910_GPIO7                                 0x67
+#define TPS65910_GPIO8                                 0x68
+#define TPS65910_JTAGVERNUM                            0x80
+#define TPS65910_MAX_REGISTER                          0x80
+
+/*
+ * List of registers specific to TPS65911
+ */
+#define TPS65911_VDDCTRL                               0x27
+#define TPS65911_VDDCTRL_OP                            0x28
+#define TPS65911_VDDCTRL_SR                            0x29
+#define TPS65911_LDO1                                  0x30
+#define TPS65911_LDO2                                  0x31
+#define TPS65911_LDO5                                  0x32
+#define TPS65911_LDO8                                  0x33
+#define TPS65911_LDO7                                  0x34
+#define TPS65911_LDO6                                  0x35
+#define TPS65911_LDO4                                  0x36
+#define TPS65911_LDO3                                  0x37
+#define TPS65911_VMBCH                                 0x6A
+#define TPS65911_VMBCH2                                        0x6B
+
+/*
+ * List of register bitfields for component TPS65910
+ *
+ */
+
+
+/*Register BCK1  (0x80) register.RegisterDescription */
+#define BCK1_BCKUP_MASK                                        0xFF
+#define BCK1_BCKUP_SHIFT                               0
+
+
+/*Register BCK2  (0x80) register.RegisterDescription */
+#define BCK2_BCKUP_MASK                                        0xFF
+#define BCK2_BCKUP_SHIFT                               0
+
+
+/*Register BCK3  (0x80) register.RegisterDescription */
+#define BCK3_BCKUP_MASK                                        0xFF
+#define BCK3_BCKUP_SHIFT                               0
+
+
+/*Register BCK4  (0x80) register.RegisterDescription */
+#define BCK4_BCKUP_MASK                                        0xFF
+#define BCK4_BCKUP_SHIFT                               0
+
+
+/*Register BCK5  (0x80) register.RegisterDescription */
+#define BCK5_BCKUP_MASK                                        0xFF
+#define BCK5_BCKUP_SHIFT                               0
+
+
+/*Register PUADEN  (0x80) register.RegisterDescription */
+#define PUADEN_EN3P_MASK                               0x80
+#define PUADEN_EN3P_SHIFT                              7
+#define PUADEN_I2CCTLP_MASK                            0x40
+#define PUADEN_I2CCTLP_SHIFT                           6
+#define PUADEN_I2CSRP_MASK                             0x20
+#define PUADEN_I2CSRP_SHIFT                            5
+#define PUADEN_PWRONP_MASK                             0x10
+#define PUADEN_PWRONP_SHIFT                            4
+#define PUADEN_SLEEPP_MASK                             0x08
+#define PUADEN_SLEEPP_SHIFT                            3
+#define PUADEN_PWRHOLDP_MASK                           0x04
+#define PUADEN_PWRHOLDP_SHIFT                          2
+#define PUADEN_BOOT1P_MASK                             0x02
+#define PUADEN_BOOT1P_SHIFT                            1
+#define PUADEN_BOOT0P_MASK                             0x01
+#define PUADEN_BOOT0P_SHIFT                            0
+
+
+/*Register REF (0x80) register.RegisterDescription */
+#define REF_VMBCH_SEL_MASK                             0x0C
+#define REF_VMBCH_SEL_SHIFT                            2
+#define REF_ST_MASK                                    0x03
+#define REF_ST_SHIFT                                   0
+
+
+/*Register VRTC  (0x80) register.RegisterDescription */
+#define VRTC_VRTC_OFFMASK_MASK                         0x08
+#define VRTC_VRTC_OFFMASK_SHIFT                                3
+#define VRTC_ST_MASK                                   0x03
+#define VRTC_ST_SHIFT                                  0
+
+
+/*Register VIO (0x80) register.RegisterDescription */
+#define VIO_ILMAX_MASK                                 0xC0
+#define VIO_ILMAX_SHIFT                                        6
+#define VIO_SEL_MASK                                   0x0C
+#define VIO_SEL_SHIFT                                  2
+#define VIO_ST_MASK                                    0x03
+#define VIO_ST_SHIFT                                   0
+
+
+/*Register VDD1  (0x80) register.RegisterDescription */
+#define VDD1_VGAIN_SEL_MASK                            0xC0
+#define VDD1_VGAIN_SEL_SHIFT                           6
+#define VDD1_ILMAX_MASK                                        0x20
+#define VDD1_ILMAX_SHIFT                               5
+#define VDD1_TSTEP_MASK                                        0x1C
+#define VDD1_TSTEP_SHIFT                               2
+#define VDD1_ST_MASK                                   0x03
+#define VDD1_ST_SHIFT                                  0
+
+
+/*Register VDD1_OP  (0x80) register.RegisterDescription */
+#define VDD1_OP_CMD_MASK                               0x80
+#define VDD1_OP_CMD_SHIFT                              7
+#define VDD1_OP_SEL_MASK                               0x7F
+#define VDD1_OP_SEL_SHIFT                              0
+
+
+/*Register VDD1_SR  (0x80) register.RegisterDescription */
+#define VDD1_SR_SEL_MASK                               0x7F
+#define VDD1_SR_SEL_SHIFT                              0
+
+
+/*Register VDD2  (0x80) register.RegisterDescription */
+#define VDD2_VGAIN_SEL_MASK                            0xC0
+#define VDD2_VGAIN_SEL_SHIFT                           6
+#define VDD2_ILMAX_MASK                                        0x20
+#define VDD2_ILMAX_SHIFT                               5
+#define VDD2_TSTEP_MASK                                        0x1C
+#define VDD2_TSTEP_SHIFT                               2
+#define VDD2_ST_MASK                                   0x03
+#define VDD2_ST_SHIFT                                  0
+
+
+/*Register VDD2_OP  (0x80) register.RegisterDescription */
+#define VDD2_OP_CMD_MASK                               0x80
+#define VDD2_OP_CMD_SHIFT                              7
+#define VDD2_OP_SEL_MASK                               0x7F
+#define VDD2_OP_SEL_SHIFT                              0
+
+/*Register VDD2_SR  (0x80) register.RegisterDescription */
+#define VDD2_SR_SEL_MASK                               0x7F
+#define VDD2_SR_SEL_SHIFT                              0
+
+
+/*Registers VDD1, VDD2 voltage values definitions */
+#define VDD1_2_NUM_VOLTS                               73
+#define VDD1_2_MIN_VOLT                                        6000
+#define VDD1_2_OFFSET                                  125
+
+
+/*Register VDD3  (0x80) register.RegisterDescription */
+#define VDD3_CKINEN_MASK                               0x04
+#define VDD3_CKINEN_SHIFT                              2
+#define VDD3_ST_MASK                                   0x03
+#define VDD3_ST_SHIFT                                  0
+#define VDDCTRL_MIN_VOLT                               6000
+#define VDDCTRL_OFFSET                                 125
+
+/*Registers VDIG (0x80) to VDAC register.RegisterDescription */
+#define LDO_SEL_MASK                                   0x0C
+#define LDO_SEL_SHIFT                                  2
+#define LDO_ST_MASK                                    0x03
+#define LDO_ST_SHIFT                                   0
+#define LDO_ST_ON_BIT                                  0x01
+#define LDO_ST_MODE_BIT                                        0x02    
+
+
+/* Registers LDO1 to LDO8 in tps65910 */
+#define LDO1_SEL_MASK                                  0xFC
+#define LDO3_SEL_MASK                                  0x7C
+#define LDO_MIN_VOLT                                   1000
+#define LDO_MAX_VOLT                                   3300
+
+
+/*Register VDIG1  (0x80) register.RegisterDescription */
+#define VDIG1_SEL_MASK                                 0x0C
+#define VDIG1_SEL_SHIFT                                        2
+#define VDIG1_ST_MASK                                  0x03
+#define VDIG1_ST_SHIFT                                 0
+
+
+/*Register VDIG2  (0x80) register.RegisterDescription */
+#define VDIG2_SEL_MASK                                 0x0C
+#define VDIG2_SEL_SHIFT                                        2
+#define VDIG2_ST_MASK                                  0x03
+#define VDIG2_ST_SHIFT                                 0
+
+
+/*Register VAUX1  (0x80) register.RegisterDescription */
+#define VAUX1_SEL_MASK                                 0x0C
+#define VAUX1_SEL_SHIFT                                        2
+#define VAUX1_ST_MASK                                  0x03
+#define VAUX1_ST_SHIFT                                 0
+
+
+/*Register VAUX2  (0x80) register.RegisterDescription */
+#define VAUX2_SEL_MASK                                 0x0C
+#define VAUX2_SEL_SHIFT                                        2
+#define VAUX2_ST_MASK                                  0x03
+#define VAUX2_ST_SHIFT                                 0
+
+
+/*Register VAUX33  (0x80) register.RegisterDescription */
+#define VAUX33_SEL_MASK                                        0x0C
+#define VAUX33_SEL_SHIFT                               2
+#define VAUX33_ST_MASK                                 0x03
+#define VAUX33_ST_SHIFT                                        0
+
+
+/*Register VMMC  (0x80) register.RegisterDescription */
+#define VMMC_SEL_MASK                                  0x0C
+#define VMMC_SEL_SHIFT                                 2
+#define VMMC_ST_MASK                                   0x03
+#define VMMC_ST_SHIFT                                  0
+
+
+/* Register VPLL (0x80) bit definitions */
+#define VPLL_SEL_MASK                                  0x0C
+#define VPLL_SEL_SHIFT                                 2
+#define VPLL_ST_MASK                                   0x03
+#define VPLL_ST_SHIFT                                  0
+
+
+/* Register VDAC (0x80) bit definitions */
+#define VDAC_SEL_MASK                                  0x0C
+#define VDAC_SEL_SHIFT                                 2
+#define VDAC_ST_MASK                                   0x03
+#define VDAC_ST_SHIFT                                  0
+
+
+/* Register THERM (0x80) bit definitions */
+#define THERM_THERM_HD_MASK                            0x20
+#define THERM_THERM_HD_SHIFT                           5
+#define THERM_THERM_TS_MASK                            0x10
+#define THERM_THERM_TS_SHIFT                           4
+#define THERM_THERM_HDSEL_MASK                         0x0C
+#define THERM_THERM_HDSEL_SHIFT                                2
+#define THERM_RSVD1_MASK                               0x02
+#define THERM_RSVD1_SHIFT                              1
+#define THERM_THERM_STATE_MASK                         0x01
+#define THERM_THERM_STATE_SHIFT                                0
+
+
+/* Register BBCH (0x80) bit definitions */
+#define BBCH_BBSEL_MASK                                        0x06
+#define BBCH_BBSEL_SHIFT                               1
+#define BBCH_BBCHEN_MASK                               0x01
+#define BBCH_BBCHEN_SHIFT                              0
+
+
+/* Register DCDCCTRL (0x80) bit definitions */
+#define DCDCCTRL_VDD2_PSKIP_MASK                       0x20
+#define DCDCCTRL_VDD2_PSKIP_SHIFT                      5
+#define DCDCCTRL_VDD1_PSKIP_MASK                       0x10
+#define DCDCCTRL_VDD1_PSKIP_SHIFT                      4
+#define DCDCCTRL_VIO_PSKIP_MASK                                0x08
+#define DCDCCTRL_VIO_PSKIP_SHIFT                       3
+#define DCDCCTRL_DCDCCKEXT_MASK                                0x04
+#define DCDCCTRL_DCDCCKEXT_SHIFT                       2
+#define DCDCCTRL_DCDCCKSYNC_MASK                       0x03
+#define DCDCCTRL_DCDCCKSYNC_SHIFT                      0
+
+
+/* Register DEVCTRL (0x80) bit definitions */
+#define DEVCTRL_RTC_PWDN_MASK                          0x40
+#define DEVCTRL_RTC_PWDN_SHIFT                         6
+#define DEVCTRL_CK32K_CTRL_MASK                                0x20
+#define DEVCTRL_CK32K_CTRL_SHIFT                       5
+#define DEVCTRL_SR_CTL_I2C_SEL_MASK                    0x10
+#define DEVCTRL_SR_CTL_I2C_SEL_SHIFT                   4
+#define DEVCTRL_DEV_OFF_RST_MASK                       0x08
+#define DEVCTRL_DEV_OFF_RST_SHIFT                      3
+#define DEVCTRL_DEV_ON_MASK                            0x04
+#define DEVCTRL_DEV_ON_SHIFT                           2
+#define DEVCTRL_DEV_SLP_MASK                           0x02
+#define DEVCTRL_DEV_SLP_SHIFT                          1
+#define DEVCTRL_DEV_OFF_MASK                           0x01
+#define DEVCTRL_DEV_OFF_SHIFT                          0
+
+
+/* Register DEVCTRL2 (0x80) bit definitions */
+#define DEVCTRL2_TSLOT_LENGTH_MASK                     0x30
+#define DEVCTRL2_TSLOT_LENGTH_SHIFT                    4
+#define DEVCTRL2_SLEEPSIG_POL_MASK                     0x08
+#define DEVCTRL2_SLEEPSIG_POL_SHIFT                    3
+#define DEVCTRL2_PWON_LP_OFF_MASK                      0x04
+#define DEVCTRL2_PWON_LP_OFF_SHIFT                     2
+#define DEVCTRL2_PWON_LP_RST_MASK                      0x02
+#define DEVCTRL2_PWON_LP_RST_SHIFT                     1
+#define DEVCTRL2_IT_POL_MASK                           0x01
+#define DEVCTRL2_IT_POL_SHIFT                          0
+
+
+/* Register SLEEP_KEEP_LDO_ON (0x80) bit definitions */
+#define SLEEP_KEEP_LDO_ON_VDAC_KEEPON_MASK             0x80
+#define SLEEP_KEEP_LDO_ON_VDAC_KEEPON_SHIFT            7
+#define SLEEP_KEEP_LDO_ON_VPLL_KEEPON_MASK             0x40
+#define SLEEP_KEEP_LDO_ON_VPLL_KEEPON_SHIFT            6
+#define SLEEP_KEEP_LDO_ON_VAUX33_KEEPON_MASK           0x20
+#define SLEEP_KEEP_LDO_ON_VAUX33_KEEPON_SHIFT          5
+#define SLEEP_KEEP_LDO_ON_VAUX2_KEEPON_MASK            0x10
+#define SLEEP_KEEP_LDO_ON_VAUX2_KEEPON_SHIFT           4
+#define SLEEP_KEEP_LDO_ON_VAUX1_KEEPON_MASK            0x08
+#define SLEEP_KEEP_LDO_ON_VAUX1_KEEPON_SHIFT           3
+#define SLEEP_KEEP_LDO_ON_VDIG2_KEEPON_MASK            0x04
+#define SLEEP_KEEP_LDO_ON_VDIG2_KEEPON_SHIFT           2
+#define SLEEP_KEEP_LDO_ON_VDIG1_KEEPON_MASK            0x02
+#define SLEEP_KEEP_LDO_ON_VDIG1_KEEPON_SHIFT           1
+#define SLEEP_KEEP_LDO_ON_VMMC_KEEPON_MASK             0x01
+#define SLEEP_KEEP_LDO_ON_VMMC_KEEPON_SHIFT            0
+
+
+/* Register SLEEP_KEEP_RES_ON (0x80) bit definitions */
+#define SLEEP_KEEP_RES_ON_THERM_KEEPON_MASK            0x80
+#define SLEEP_KEEP_RES_ON_THERM_KEEPON_SHIFT           7
+#define SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_MASK                0x40
+#define SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_SHIFT       6
+#define SLEEP_KEEP_RES_ON_VRTC_KEEPON_MASK             0x20
+#define SLEEP_KEEP_RES_ON_VRTC_KEEPON_SHIFT            5
+#define SLEEP_KEEP_RES_ON_I2CHS_KEEPON_MASK            0x10
+#define SLEEP_KEEP_RES_ON_I2CHS_KEEPON_SHIFT           4
+#define SLEEP_KEEP_RES_ON_VDD3_KEEPON_MASK             0x08
+#define SLEEP_KEEP_RES_ON_VDD3_KEEPON_SHIFT            3
+#define SLEEP_KEEP_RES_ON_VDD2_KEEPON_MASK             0x04
+#define SLEEP_KEEP_RES_ON_VDD2_KEEPON_SHIFT            2
+#define SLEEP_KEEP_RES_ON_VDD1_KEEPON_MASK             0x02
+#define SLEEP_KEEP_RES_ON_VDD1_KEEPON_SHIFT            1
+#define SLEEP_KEEP_RES_ON_VIO_KEEPON_MASK              0x01
+#define SLEEP_KEEP_RES_ON_VIO_KEEPON_SHIFT             0
+
+
+/* Register SLEEP_SET_LDO_OFF (0x80) bit definitions */
+#define SLEEP_SET_LDO_OFF_VDAC_SETOFF_MASK             0x80
+#define SLEEP_SET_LDO_OFF_VDAC_SETOFF_SHIFT            7
+#define SLEEP_SET_LDO_OFF_VPLL_SETOFF_MASK             0x40
+#define SLEEP_SET_LDO_OFF_VPLL_SETOFF_SHIFT            6
+#define SLEEP_SET_LDO_OFF_VAUX33_SETOFF_MASK           0x20
+#define SLEEP_SET_LDO_OFF_VAUX33_SETOFF_SHIFT          5
+#define SLEEP_SET_LDO_OFF_VAUX2_SETOFF_MASK            0x10
+#define SLEEP_SET_LDO_OFF_VAUX2_SETOFF_SHIFT           4
+#define SLEEP_SET_LDO_OFF_VAUX1_SETOFF_MASK            0x08
+#define SLEEP_SET_LDO_OFF_VAUX1_SETOFF_SHIFT           3
+#define SLEEP_SET_LDO_OFF_VDIG2_SETOFF_MASK            0x04
+#define SLEEP_SET_LDO_OFF_VDIG2_SETOFF_SHIFT           2
+#define SLEEP_SET_LDO_OFF_VDIG1_SETOFF_MASK            0x02
+#define SLEEP_SET_LDO_OFF_VDIG1_SETOFF_SHIFT           1
+#define SLEEP_SET_LDO_OFF_VMMC_SETOFF_MASK             0x01
+#define SLEEP_SET_LDO_OFF_VMMC_SETOFF_SHIFT            0
+
+
+/* Register SLEEP_SET_RES_OFF (0x80) bit definitions */
+#define SLEEP_SET_RES_OFF_DEFAULT_VOLT_MASK            0x80
+#define SLEEP_SET_RES_OFF_DEFAULT_VOLT_SHIFT           7
+#define SLEEP_SET_RES_OFF_RSVD_MASK                    0x60
+#define SLEEP_SET_RES_OFF_RSVD_SHIFT                   5
+#define SLEEP_SET_RES_OFF_SPARE_SETOFF_MASK            0x10
+#define SLEEP_SET_RES_OFF_SPARE_SETOFF_SHIFT           4
+#define SLEEP_SET_RES_OFF_VDD3_SETOFF_MASK             0x08
+#define SLEEP_SET_RES_OFF_VDD3_SETOFF_SHIFT            3
+#define SLEEP_SET_RES_OFF_VDD2_SETOFF_MASK             0x04
+#define SLEEP_SET_RES_OFF_VDD2_SETOFF_SHIFT            2
+#define SLEEP_SET_RES_OFF_VDD1_SETOFF_MASK             0x02
+#define SLEEP_SET_RES_OFF_VDD1_SETOFF_SHIFT            1
+#define SLEEP_SET_RES_OFF_VIO_SETOFF_MASK              0x01
+#define SLEEP_SET_RES_OFF_VIO_SETOFF_SHIFT             0
+
+
+/* Register EN1_LDO_ASS (0x80) bit definitions */
+#define EN1_LDO_ASS_VDAC_EN1_MASK                      0x80
+#define EN1_LDO_ASS_VDAC_EN1_SHIFT                     7
+#define EN1_LDO_ASS_VPLL_EN1_MASK                      0x40
+#define EN1_LDO_ASS_VPLL_EN1_SHIFT                     6
+#define EN1_LDO_ASS_VAUX33_EN1_MASK                    0x20
+#define EN1_LDO_ASS_VAUX33_EN1_SHIFT                   5
+#define EN1_LDO_ASS_VAUX2_EN1_MASK                     0x10
+#define EN1_LDO_ASS_VAUX2_EN1_SHIFT                    4
+#define EN1_LDO_ASS_VAUX1_EN1_MASK                     0x08
+#define EN1_LDO_ASS_VAUX1_EN1_SHIFT                    3
+#define EN1_LDO_ASS_VDIG2_EN1_MASK                     0x04
+#define EN1_LDO_ASS_VDIG2_EN1_SHIFT                    2
+#define EN1_LDO_ASS_VDIG1_EN1_MASK                     0x02
+#define EN1_LDO_ASS_VDIG1_EN1_SHIFT                    1
+#define EN1_LDO_ASS_VMMC_EN1_MASK                      0x01
+#define EN1_LDO_ASS_VMMC_EN1_SHIFT                     0
+
+
+/* Register EN1_SMPS_ASS (0x80) bit definitions */
+#define EN1_SMPS_ASS_RSVD_MASK                         0xE0
+#define EN1_SMPS_ASS_RSVD_SHIFT                                5
+#define EN1_SMPS_ASS_SPARE_EN1_MASK                    0x10
+#define EN1_SMPS_ASS_SPARE_EN1_SHIFT                   4
+#define EN1_SMPS_ASS_VDD3_EN1_MASK                     0x08
+#define EN1_SMPS_ASS_VDD3_EN1_SHIFT                    3
+#define EN1_SMPS_ASS_VDD2_EN1_MASK                     0x04
+#define EN1_SMPS_ASS_VDD2_EN1_SHIFT                    2
+#define EN1_SMPS_ASS_VDD1_EN1_MASK                     0x02
+#define EN1_SMPS_ASS_VDD1_EN1_SHIFT                    1
+#define EN1_SMPS_ASS_VIO_EN1_MASK                      0x01
+#define EN1_SMPS_ASS_VIO_EN1_SHIFT                     0
+
+
+/* Register EN2_LDO_ASS (0x80) bit definitions */
+#define EN2_LDO_ASS_VDAC_EN2_MASK                      0x80
+#define EN2_LDO_ASS_VDAC_EN2_SHIFT                     7
+#define EN2_LDO_ASS_VPLL_EN2_MASK                      0x40
+#define EN2_LDO_ASS_VPLL_EN2_SHIFT                     6
+#define EN2_LDO_ASS_VAUX33_EN2_MASK                    0x20
+#define EN2_LDO_ASS_VAUX33_EN2_SHIFT                   5
+#define EN2_LDO_ASS_VAUX2_EN2_MASK                     0x10
+#define EN2_LDO_ASS_VAUX2_EN2_SHIFT                    4
+#define EN2_LDO_ASS_VAUX1_EN2_MASK                     0x08
+#define EN2_LDO_ASS_VAUX1_EN2_SHIFT                    3
+#define EN2_LDO_ASS_VDIG2_EN2_MASK                     0x04
+#define EN2_LDO_ASS_VDIG2_EN2_SHIFT                    2
+#define EN2_LDO_ASS_VDIG1_EN2_MASK                     0x02
+#define EN2_LDO_ASS_VDIG1_EN2_SHIFT                    1
+#define EN2_LDO_ASS_VMMC_EN2_MASK                      0x01
+#define EN2_LDO_ASS_VMMC_EN2_SHIFT                     0
+
+
+/* Register EN2_SMPS_ASS (0x80) bit definitions */
+#define EN2_SMPS_ASS_RSVD_MASK                         0xE0
+#define EN2_SMPS_ASS_RSVD_SHIFT                                5
+#define EN2_SMPS_ASS_SPARE_EN2_MASK                    0x10
+#define EN2_SMPS_ASS_SPARE_EN2_SHIFT                   4
+#define EN2_SMPS_ASS_VDD3_EN2_MASK                     0x08
+#define EN2_SMPS_ASS_VDD3_EN2_SHIFT                    3
+#define EN2_SMPS_ASS_VDD2_EN2_MASK                     0x04
+#define EN2_SMPS_ASS_VDD2_EN2_SHIFT                    2
+#define EN2_SMPS_ASS_VDD1_EN2_MASK                     0x02
+#define EN2_SMPS_ASS_VDD1_EN2_SHIFT                    1
+#define EN2_SMPS_ASS_VIO_EN2_MASK                      0x01
+#define EN2_SMPS_ASS_VIO_EN2_SHIFT                     0
+
+
+/* Register EN3_LDO_ASS (0x80) bit definitions */
+#define EN3_LDO_ASS_VDAC_EN3_MASK                      0x80
+#define EN3_LDO_ASS_VDAC_EN3_SHIFT                     7
+#define EN3_LDO_ASS_VPLL_EN3_MASK                      0x40
+#define EN3_LDO_ASS_VPLL_EN3_SHIFT                     6
+#define EN3_LDO_ASS_VAUX33_EN3_MASK                    0x20
+#define EN3_LDO_ASS_VAUX33_EN3_SHIFT                   5
+#define EN3_LDO_ASS_VAUX2_EN3_MASK                     0x10
+#define EN3_LDO_ASS_VAUX2_EN3_SHIFT                    4
+#define EN3_LDO_ASS_VAUX1_EN3_MASK                     0x08
+#define EN3_LDO_ASS_VAUX1_EN3_SHIFT                    3
+#define EN3_LDO_ASS_VDIG2_EN3_MASK                     0x04
+#define EN3_LDO_ASS_VDIG2_EN3_SHIFT                    2
+#define EN3_LDO_ASS_VDIG1_EN3_MASK                     0x02
+#define EN3_LDO_ASS_VDIG1_EN3_SHIFT                    1
+#define EN3_LDO_ASS_VMMC_EN3_MASK                      0x01
+#define EN3_LDO_ASS_VMMC_EN3_SHIFT                     0
+
+
+/* Register SPARE (0x80) bit definitions */
+#define SPARE_SPARE_MASK                               0xFF
+#define SPARE_SPARE_SHIFT                              0
+
+
+/* Register INT_STS (0x80) bit definitions */
+#define INT_STS_RTC_PERIOD_IT_MASK                     0x80
+#define INT_STS_RTC_PERIOD_IT_SHIFT                    7
+#define INT_STS_RTC_ALARM_IT_MASK                      0x40
+#define INT_STS_RTC_ALARM_IT_SHIFT                     6
+#define INT_STS_HOTDIE_IT_MASK                         0x20
+#define INT_STS_HOTDIE_IT_SHIFT                                5
+#define INT_STS_PWRHOLD_IT_MASK                                0x10
+#define INT_STS_PWRHOLD_IT_SHIFT                       4
+#define INT_STS_PWRON_LP_IT_MASK                       0x08
+#define INT_STS_PWRON_LP_IT_SHIFT                      3
+#define INT_STS_PWRON_IT_MASK                          0x04
+#define INT_STS_PWRON_IT_SHIFT                         2
+#define INT_STS_VMBHI_IT_MASK                          0x02
+#define INT_STS_VMBHI_IT_SHIFT                         1
+#define INT_STS_VMBDCH_IT_MASK                         0x01
+#define INT_STS_VMBDCH_IT_SHIFT                                0
+
+
+/* Register INT_MSK (0x80) bit definitions */
+#define INT_MSK_RTC_PERIOD_IT_MSK_MASK                 0x80
+#define INT_MSK_RTC_PERIOD_IT_MSK_SHIFT                        7
+#define INT_MSK_RTC_ALARM_IT_MSK_MASK                  0x40
+#define INT_MSK_RTC_ALARM_IT_MSK_SHIFT                 6
+#define INT_MSK_HOTDIE_IT_MSK_MASK                     0x20
+#define INT_MSK_HOTDIE_IT_MSK_SHIFT                    5
+#define INT_MSK_PWRHOLD_IT_MSK_MASK                    0x10
+#define INT_MSK_PWRHOLD_IT_MSK_SHIFT                   4
+#define INT_MSK_PWRON_LP_IT_MSK_MASK                   0x08
+#define INT_MSK_PWRON_LP_IT_MSK_SHIFT                  3
+#define INT_MSK_PWRON_IT_MSK_MASK                      0x04
+#define INT_MSK_PWRON_IT_MSK_SHIFT                     2
+#define INT_MSK_VMBHI_IT_MSK_MASK                      0x02
+#define INT_MSK_VMBHI_IT_MSK_SHIFT                     1
+#define INT_MSK_VMBDCH_IT_MSK_MASK                     0x01
+#define INT_MSK_VMBDCH_IT_MSK_SHIFT                    0
+
+
+/* Register INT_STS2 (0x80) bit definitions */
+#define INT_STS2_GPIO3_F_IT_MASK                       0x80
+#define INT_STS2_GPIO3_F_IT_SHIFT                      7
+#define INT_STS2_GPIO3_R_IT_MASK                       0x40
+#define INT_STS2_GPIO3_R_IT_SHIFT                      6
+#define INT_STS2_GPIO2_F_IT_MASK                       0x20
+#define INT_STS2_GPIO2_F_IT_SHIFT                      5
+#define INT_STS2_GPIO2_R_IT_MASK                       0x10
+#define INT_STS2_GPIO2_R_IT_SHIFT                      4
+#define INT_STS2_GPIO1_F_IT_MASK                       0x08
+#define INT_STS2_GPIO1_F_IT_SHIFT                      3
+#define INT_STS2_GPIO1_R_IT_MASK                       0x04
+#define INT_STS2_GPIO1_R_IT_SHIFT                      2
+#define INT_STS2_GPIO0_F_IT_MASK                       0x02
+#define INT_STS2_GPIO0_F_IT_SHIFT                      1
+#define INT_STS2_GPIO0_R_IT_MASK                       0x01
+#define INT_STS2_GPIO0_R_IT_SHIFT                      0
+
+
+/* Register INT_MSK2 (0x80) bit definitions */
+#define INT_MSK2_GPIO3_F_IT_MSK_MASK                   0x80
+#define INT_MSK2_GPIO3_F_IT_MSK_SHIFT                  7
+#define INT_MSK2_GPIO3_R_IT_MSK_MASK                   0x40
+#define INT_MSK2_GPIO3_R_IT_MSK_SHIFT                  6
+#define INT_MSK2_GPIO2_F_IT_MSK_MASK                   0x20
+#define INT_MSK2_GPIO2_F_IT_MSK_SHIFT                  5
+#define INT_MSK2_GPIO2_R_IT_MSK_MASK                   0x10
+#define INT_MSK2_GPIO2_R_IT_MSK_SHIFT                  4
+#define INT_MSK2_GPIO1_F_IT_MSK_MASK                   0x08
+#define INT_MSK2_GPIO1_F_IT_MSK_SHIFT                  3
+#define INT_MSK2_GPIO1_R_IT_MSK_MASK                   0x04
+#define INT_MSK2_GPIO1_R_IT_MSK_SHIFT                  2
+#define INT_MSK2_GPIO0_F_IT_MSK_MASK                   0x02
+#define INT_MSK2_GPIO0_F_IT_MSK_SHIFT                  1
+#define INT_MSK2_GPIO0_R_IT_MSK_MASK                   0x01
+#define INT_MSK2_GPIO0_R_IT_MSK_SHIFT                  0
+
+
+/* Register INT_STS3 (0x80) bit definitions */
+#define INT_STS3_GPIO5_F_IT_MASK                       0x08
+#define INT_STS3_GPIO5_F_IT_SHIFT                      3
+#define INT_STS3_GPIO5_R_IT_MASK                       0x04
+#define INT_STS3_GPIO5_R_IT_SHIFT                      2
+#define INT_STS3_GPIO4_F_IT_MASK                       0x02
+#define INT_STS3_GPIO4_F_IT_SHIFT                      1
+#define INT_STS3_GPIO4_R_IT_MASK                       0x01
+#define INT_STS3_GPIO4_R_IT_SHIFT                      0
+
+
+/* Register INT_MSK3 (0x80) bit definitions */
+#define INT_MSK3_GPIO5_F_IT_MSK_MASK                   0x08
+#define INT_MSK3_GPIO5_F_IT_MSK_SHIFT                  3
+#define INT_MSK3_GPIO5_R_IT_MSK_MASK                   0x04
+#define INT_MSK3_GPIO5_R_IT_MSK_SHIFT                  2
+#define INT_MSK3_GPIO4_F_IT_MSK_MASK                   0x02
+#define INT_MSK3_GPIO4_F_IT_MSK_SHIFT                  1
+#define INT_MSK3_GPIO4_R_IT_MSK_MASK                   0x01
+#define INT_MSK3_GPIO4_R_IT_MSK_SHIFT                  0
+
+
+/* Register GPIO (0x80) bit definitions */
+#define GPIO_DEB_MASK                           0x10
+#define GPIO_DEB_SHIFT                          4
+#define GPIO_PUEN_MASK                          0x08
+#define GPIO_PUEN_SHIFT                         3
+#define GPIO_CFG_MASK                           0x04
+#define GPIO_CFG_SHIFT                          2
+#define GPIO_STS_MASK                           0x02
+#define GPIO_STS_SHIFT                          1
+#define GPIO_SET_MASK                           0x01
+#define GPIO_SET_SHIFT                          0
+
+
+/* Register JTAGVERNUM (0x80) bit definitions */
+#define JTAGVERNUM_VERNUM_MASK                         0x0F
+#define JTAGVERNUM_VERNUM_SHIFT                                0
+
+
+/* Register VDDCTRL (0x27) bit definitions */
+#define VDDCTRL_ST_MASK                                  0x03
+#define VDDCTRL_ST_SHIFT                                 0
+
+
+/* Register VDDCTRL_OP (0x28) bit definitions */
+#define VDDCTRL_OP_CMD_MASK                              0x80
+#define VDDCTRL_OP_CMD_SHIFT                             7
+#define VDDCTRL_OP_SEL_MASK                              0x7F
+#define VDDCTRL_OP_SEL_SHIFT                             0
+
+
+/* Register VDDCTRL_SR (0x29) bit definitions */
+#define VDDCTRL_SR_SEL_MASK                              0x7F
+#define VDDCTRL_SR_SEL_SHIFT                             0
+
+
+/* IRQ Definitions */
+#define TPS65910_IRQ_VBAT_VMBDCH                       0
+#define TPS65910_IRQ_VBAT_VMHI                         1
+#define TPS65910_IRQ_PWRON                             2
+#define TPS65910_IRQ_PWRON_LP                          3
+#define TPS65910_IRQ_PWRHOLD                           4
+#define TPS65910_IRQ_HOTDIE                            5
+#define TPS65910_IRQ_RTC_ALARM                         6
+#define TPS65910_IRQ_RTC_PERIOD                                7
+#define TPS65910_IRQ_GPIO_R                            8
+#define TPS65910_IRQ_GPIO_F                            9
+#define TPS65910_NUM_IRQ                               10
+
+#define TPS65911_IRQ_VBAT_VMBDCH                       0
+#define TPS65911_IRQ_VBAT_VMBDCH2L                     1
+#define TPS65911_IRQ_VBAT_VMBDCH2H                     2
+#define TPS65911_IRQ_VBAT_VMHI                         3
+#define TPS65911_IRQ_PWRON                             4
+#define TPS65911_IRQ_PWRON_LP                          5
+#define TPS65911_IRQ_PWRHOLD_F                         6
+#define TPS65911_IRQ_PWRHOLD_R                         7
+#define TPS65911_IRQ_HOTDIE                            8
+#define TPS65911_IRQ_RTC_ALARM                         9
+#define TPS65911_IRQ_RTC_PERIOD                                10
+#define TPS65911_IRQ_GPIO0_R                           11
+#define TPS65911_IRQ_GPIO0_F                           12
+#define TPS65911_IRQ_GPIO1_R                           13
+#define TPS65911_IRQ_GPIO1_F                           14
+#define TPS65911_IRQ_GPIO2_R                           15
+#define TPS65911_IRQ_GPIO2_F                           16
+#define TPS65911_IRQ_GPIO3_R                           17
+#define TPS65911_IRQ_GPIO3_F                           18
+#define TPS65911_IRQ_GPIO4_R                           19
+#define TPS65911_IRQ_GPIO4_F                           20
+#define TPS65911_IRQ_GPIO5_R                           21
+#define TPS65911_IRQ_GPIO5_F                           22
+#define TPS65911_IRQ_WTCHDG                            23
+#define TPS65911_IRQ_PWRDN                             24
+
+#define TPS65911_NUM_IRQ                               25
+
+
+/* GPIO Register Definitions */
+#define TPS65910_GPIO_DEB                              BIT(4)
+#define TPS65910_GPIO_PUEN                             BIT(3)
+#define TPS65910_GPIO_CFG                              BIT(2)
+#define TPS65910_GPIO_STS                              BIT(1)
+#define TPS65910_GPIO_SET                              BIT(0)
+
+/**
+ * struct tps65910_board - board platform data; may be used to initialize regulators
+ */
+
+struct tps65910_board {
+       int gpio_base;
+       int irq;
+       int irq_base;
+       int vmbch_threshold;
+       int vmbch2_threshold;
+       struct regulator_init_data *tps65910_pmic_init_data;
+};
+
+/**
+ * struct tps65910 - tps65910 sub-driver chip access routines
+ */
+
+struct tps65910 {
+       struct device *dev;
+       struct i2c_client *i2c_client;
+       struct mutex io_mutex;
+       unsigned int id;
+       int (*read)(struct tps65910 *tps65910, u8 reg, int size, void *dest);
+       int (*write)(struct tps65910 *tps65910, u8 reg, int size, void *src);
+
+       /* Client devices */
+       struct tps65910_pmic *pmic;
+       struct tps65910_rtc *rtc;
+       struct tps65910_power *power;
+
+       /* GPIO Handling */
+       struct gpio_chip gpio;
+
+       /* IRQ Handling */
+       struct mutex irq_lock;
+       int chip_irq;
+       int irq_base;
+       int irq_num;
+       u32 irq_mask;
+};
+
+struct tps65910_platform_data {
+       int irq;
+       int irq_base;
+};
+
+int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask);
+int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask);
+void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base);
+int tps65910_irq_init(struct tps65910 *tps65910, int irq,
+               struct tps65910_platform_data *pdata);
+
+static inline int tps65910_chip_id(struct tps65910 *tps65910)
+{
+       return tps65910->id;
+}
+
+#endif /*  __LINUX_MFD_TPS65910_H */
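A minimal usage sketch for the accessors above, assuming a valid struct tps65910 handle obtained from the MFD core and a TPS65910_VDIG1 register-offset constant defined earlier in this header (both assumed, neither appears in this hunk):

#include <linux/mfd/tps65910.h>

/* Illustrative only: drive the VDIG1 supply-state field to "on". */
static int example_vdig1_enable(struct tps65910 *tps65910)
{
        /* LDO_ST_ON_BIT sits in the two-bit ST field covered by LDO_ST_MASK. */
        return tps65910_set_bits(tps65910, TPS65910_VDIG1,
                                 LDO_ST_ON_BIT << LDO_ST_SHIFT);
}

tps65910_clear_bits() with the same mask would drop the supply back to the off state.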
index 2ec317c68e59acf6769775ae9c24b1bfddefd59f..5cc16bbd1da1a855aaa924f25f1a6f0c716f97d5 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * MFD driver for twl4030 codec submodule
  *
- * Author:     Peter Ujfalusi <peter.ujfalusi@nokia.com>
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
  *
  * Copyright:   (C) 2009 Nokia Corporation
  *
index 903280d21866a9ae690c37bb7f57537a4d61ae95..0d515ee1c24747319b561f0f0bda98dd1e50903e 100644 (file)
@@ -301,30 +301,4 @@ int wm831x_device_suspend(struct wm831x *wm831x);
 int wm831x_irq_init(struct wm831x *wm831x, int irq);
 void wm831x_irq_exit(struct wm831x *wm831x);
 
-static inline int __must_check wm831x_request_irq(struct wm831x *wm831x,
-                                                 unsigned int irq,
-                                                 irq_handler_t handler,
-                                                 unsigned long flags,
-                                                 const char *name,
-                                                 void *dev)
-{
-       return request_threaded_irq(irq, NULL, handler, flags, name, dev);
-}
-
-static inline void wm831x_free_irq(struct wm831x *wm831x,
-                                  unsigned int irq, void *dev)
-{
-       free_irq(irq, dev);
-}
-
-static inline void wm831x_disable_irq(struct wm831x *wm831x, int irq)
-{
-       disable_irq(irq);
-}
-
-static inline void wm831x_enable_irq(struct wm831x *wm831x, int irq)
-{
-       enable_irq(irq);
-}
-
 #endif
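With the thin wm831x_request_irq()/wm831x_free_irq() wrappers removed, sub-drivers use the generic IRQ API on the virtual IRQ number directly. A hedged sketch of that pattern follows; the handler name, flags and the way the virq is obtained are illustrative, not taken from this patch:

#include <linux/interrupt.h>

static irqreturn_t example_wm831x_handler(int irq, void *data)
{
        /* ... acknowledge and handle the event ... */
        return IRQ_HANDLED;
}

static int example_request(int virq, void *dev)
{
        /* IRQF_ONESHOT is required when no primary handler is supplied. */
        return request_threaded_irq(virq, NULL, example_wm831x_handler,
                                    IRQF_ONESHOT, "wm831x-example", dev);
}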
index 632d1567a1b676f4c74dfeec798951081fed108c..ff42d700293fc71485d80f092c978fd633919b27 100644 (file)
@@ -105,6 +105,9 @@ struct wm831x_watchdog_pdata {
 #define WM831X_MAX_LDO    11
 #define WM831X_MAX_ISINK  2
 
+#define WM831X_GPIO_CONFIGURE 0x10000
+#define WM831X_GPIO_NUM 16
+
 struct wm831x_pdata {
        /** Used to distinguish multiple WM831x chips */
        int wm831x_num;
@@ -119,6 +122,7 @@ struct wm831x_pdata {
 
        int irq_base;
        int gpio_base;
+       int gpio_defaults[WM831X_GPIO_NUM];
        struct wm831x_backlight_pdata *backlight;
        struct wm831x_backup_pdata *backup;
        struct wm831x_battery_pdata *battery;
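A board-file sketch of the new gpio_defaults[] field; the array indices and raw register values below are placeholders, and it is assumed that only entries carrying WM831X_GPIO_CONFIGURE are written by the core while zero entries keep their hardware defaults:

static struct wm831x_pdata example_wm831x_pdata = {
        .gpio_base = 200,                       /* arbitrary base */
        .gpio_defaults = {
                /* GPIO1: illustrative function/direction value */
                [0] = WM831X_GPIO_CONFIGURE | 0x0003,
                /* GPIO5: illustrative alternate-function value */
                [4] = WM831X_GPIO_CONFIGURE | 0x8000,
        },
};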
index 8eb969ebf90450c19414af90118c083388002f88..9670f71d7be961bbe56e9c1701f475bc4be7981c 100644 (file)
@@ -165,12 +165,12 @@ extern pgprot_t protection_map[16];
  */
 static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
 {
-       return (vma->vm_flags & VM_PFN_AT_MMAP);
+       return !!(vma->vm_flags & VM_PFN_AT_MMAP);
 }
 
 static inline int is_pfn_mapping(struct vm_area_struct *vma)
 {
-       return (vma->vm_flags & VM_PFNMAP);
+       return !!(vma->vm_flags & VM_PFNMAP);
 }
 
 /*
@@ -1408,17 +1408,11 @@ extern void exit_mmap(struct mm_struct *);
 extern int mm_take_all_locks(struct mm_struct *mm);
 extern void mm_drop_all_locks(struct mm_struct *mm);
 
-#ifdef CONFIG_PROC_FS
 /* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
 extern void added_exe_file_vma(struct mm_struct *mm);
 extern void removed_exe_file_vma(struct mm_struct *mm);
-#else
-static inline void added_exe_file_vma(struct mm_struct *mm)
-{}
-
-static inline void removed_exe_file_vma(struct mm_struct *mm)
-{}
-#endif /* CONFIG_PROC_FS */
+extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
+extern struct file *get_mm_exe_file(struct mm_struct *mm);
 
 extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
 extern int install_special_mapping(struct mm_struct *mm,
@@ -1432,7 +1426,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long flag, unsigned long pgoff);
 extern unsigned long mmap_region(struct file *file, unsigned long addr,
        unsigned long len, unsigned long flags,
-       unsigned int vm_flags, unsigned long pgoff);
+       vm_flags_t vm_flags, unsigned long pgoff);
 
 static inline unsigned long do_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
index 071d459e866bdccf55541c0b6147d73c3acd2670..2a78aae78c690b7897a6fbe835201a023a579a3c 100644 (file)
@@ -102,6 +102,8 @@ struct page {
 #endif
 };
 
+typedef unsigned long __nocast vm_flags_t;
+
 /*
  * A region containing a mapping of a non-memory backed file under NOMMU
  * conditions.  These are held in a global tree and are pinned by the VMAs that
@@ -109,7 +111,7 @@ struct page {
  */
 struct vm_region {
        struct rb_node  vm_rb;          /* link in global region tree */
-       unsigned long   vm_flags;       /* VMA vm_flags */
+       vm_flags_t      vm_flags;       /* VMA vm_flags */
        unsigned long   vm_start;       /* start address of region */
        unsigned long   vm_end;         /* region initialised to here */
        unsigned long   vm_top;         /* region allocated to here */
@@ -300,11 +302,9 @@ struct mm_struct {
        struct task_struct __rcu *owner;
 #endif
 
-#ifdef CONFIG_PROC_FS
        /* store ref to file /proc/<pid>/exe symlink points to */
        struct file *exe_file;
        unsigned long num_exe_file_vmas;
-#endif
 #ifdef CONFIG_MMU_NOTIFIER
        struct mmu_notifier_mm *mmu_notifier_mm;
 #endif
index 261f299c94412be90e5b24e6b2406bc9815ee608..c928dac6cad0c3b1022ebdca9d53ecef1865b797 100644 (file)
@@ -273,11 +273,6 @@ struct zone_reclaim_stat {
         */
        unsigned long           recent_rotated[2];
        unsigned long           recent_scanned[2];
-
-       /*
-        * accumulated for batching
-        */
-       unsigned long           nr_saved_scan[NR_LRU_LISTS];
 };
 
 struct zone {
index 1da55e9b6f01a9733f38b309e7b282f447764da7..b29923006b11e50b4aac2c99f9a81be14afc7fd1 100644 (file)
@@ -289,11 +289,5 @@ extern int kernel_sock_shutdown(struct socket *sock,
        MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
                     "-type-" __stringify(type))
 
-#ifdef CONFIG_SYSCTL
-#include <linux/sysctl.h>
-#include <linux/ratelimit.h>
-extern struct ratelimit_state net_ratelimit_state;
-#endif
-
 #endif /* __KERNEL__ */
 #endif /* _LINUX_NET_H */
index 7fa95df60146f2920cf89e2163289017230a91a6..857f5026ced65267ad77db343be0f486cb61a7d5 100644 (file)
@@ -13,6 +13,7 @@
 #endif
 #include <linux/types.h>
 #include <linux/compiler.h>
+#include <linux/sysctl.h>
 
 /* Responses from hook functions. */
 #define NF_DROP 0
index a0196ac790513b9c8dcbb8f33b04218a7fa67b01..ac3c822eb39a66e48253fd3e4193d755bed51bf5 100644 (file)
@@ -839,7 +839,7 @@ type_pf_tdel(struct ip_set *set, void *value, u32 timeout)
        struct htable *t = h->table;
        const struct type_pf_elem *d = value;
        struct hbucket *n;
-       int i, ret = 0;
+       int i;
        struct type_pf_elem *data;
        u32 key;
 
@@ -850,7 +850,7 @@ type_pf_tdel(struct ip_set *set, void *value, u32 timeout)
                if (!type_pf_data_equal(data, d))
                        continue;
                if (type_pf_data_expired(data))
-                       ret = -IPSET_ERR_EXIST;
+                       return -IPSET_ERR_EXIST;
                if (i != n->pos - 1)
                        /* Not last one */
                        type_pf_data_copy(data, ahash_tdata(n, n->pos - 1));
index 9f30c5f2ec1cfcd6f02bb3d76413f4e223402827..bcdd40ad39ed57c0d8f47cc9a0859d779d9b8021 100644 (file)
@@ -45,7 +45,7 @@ ip_set_timeout_test(unsigned long timeout)
 {
        return timeout != IPSET_ELEM_UNSET &&
               (timeout == IPSET_ELEM_PERMANENT ||
-               time_after(timeout, jiffies));
+               time_is_after_jiffies(timeout));
 }
 
 static inline bool
@@ -53,7 +53,7 @@ ip_set_timeout_expired(unsigned long timeout)
 {
        return timeout != IPSET_ELEM_UNSET &&
               timeout != IPSET_ELEM_PERMANENT &&
-              time_before(timeout, jiffies);
+              time_is_before_jiffies(timeout);
 }
 
 static inline unsigned long
@@ -64,7 +64,7 @@ ip_set_timeout_set(u32 timeout)
        if (!timeout)
                return IPSET_ELEM_PERMANENT;
 
-       t = timeout * HZ + jiffies;
+       t = msecs_to_jiffies(timeout * 1000) + jiffies;
        if (t == IPSET_ELEM_UNSET || t == IPSET_ELEM_PERMANENT)
                /* Bingo! */
                t++;
@@ -75,7 +75,8 @@ ip_set_timeout_set(u32 timeout)
 static inline u32
 ip_set_timeout_get(unsigned long timeout)
 {
-       return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ;
+       return timeout == IPSET_ELEM_PERMANENT ? 0 : 
+               jiffies_to_msecs(timeout - jiffies)/1000;
 }
 
 #else
@@ -89,14 +90,14 @@ static inline bool
 ip_set_timeout_test(unsigned long timeout)
 {
        return timeout == IPSET_ELEM_PERMANENT ||
-              time_after(timeout, jiffies);
+              time_is_after_jiffies(timeout);
 }
 
 static inline bool
 ip_set_timeout_expired(unsigned long timeout)
 {
        return timeout != IPSET_ELEM_PERMANENT &&
-              time_before(timeout, jiffies);
+              time_is_before_jiffies(timeout);
 }
 
 static inline unsigned long
@@ -107,7 +108,7 @@ ip_set_timeout_set(u32 timeout)
        if (!timeout)
                return IPSET_ELEM_PERMANENT;
 
-       t = timeout * HZ + jiffies;
+       t = msecs_to_jiffies(timeout * 1000) + jiffies;
        if (t == IPSET_ELEM_PERMANENT)
                /* Bingo! :-) */
                t++;
@@ -118,7 +119,8 @@ ip_set_timeout_set(u32 timeout)
 static inline u32
 ip_set_timeout_get(unsigned long timeout)
 {
-       return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ;
+       return timeout == IPSET_ELEM_PERMANENT ? 0 :
+               jiffies_to_msecs(timeout - jiffies)/1000;
 }
 #endif /* ! IP_SET_BITMAP_TIMEOUT */
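A minimal sketch of the per-element timeout round trip after the switch to msecs_to_jiffies()/jiffies_to_msecs(), assuming the timeout is given in whole seconds as elsewhere in ip_set:

static void example_timeout_roundtrip(void)
{
        unsigned long t = ip_set_timeout_set(30);  /* expires ~30 s from now */

        if (!ip_set_timeout_expired(t))
                pr_debug("seconds left: %u\n", ip_set_timeout_get(t));
}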
 
index 4c4ac3f3ce5a9968b0b5f2c7534ebc04faa74248..a9dd89552f9c61e804249e5b21d2504251925f2f 100644 (file)
@@ -24,6 +24,7 @@
 /* leave room for NETLINK_DM (DM Events) */
 #define NETLINK_SCSITRANSPORT  18      /* SCSI Transports */
 #define NETLINK_ECRYPTFS       19
+#define NETLINK_RDMA           20
 
 #define MAX_LINKS 32           
 
index 7b370c7cfeffb27db00d71fa2127381abdbfd518..50d20aba57d3067c4fdd42680c7ca0b7b938ede0 100644 (file)
@@ -81,13 +81,4 @@ static inline void get_nsproxy(struct nsproxy *ns)
        atomic_inc(&ns->count);
 }
 
-#ifdef CONFIG_CGROUP_NS
-int ns_cgroup_clone(struct task_struct *tsk, struct pid *pid);
-#else
-static inline int ns_cgroup_clone(struct task_struct *tsk, struct pid *pid)
-{
-       return 0;
-}
-#endif
-
 #endif
index cdced84261d79d125a8d6031c76638b72d259c20..b152d44fb18122e0659a7cdabf0d6b782ef63d59 100644 (file)
@@ -105,7 +105,7 @@ extern struct pid_namespace init_pid_ns;
  * or rcu_read_lock() held.
  *
  * find_pid_ns() finds the pid in the namespace specified
- * find_vpid() finr the pid by its virtual id, i.e. in the current namespace
+ * find_vpid() finds the pid by its virtual id, i.e. in the current namespace
  *
  * see also find_task_by_vpid() set in include/linux/sched.h
  */
diff --git a/include/linux/power/isp1704_charger.h b/include/linux/power/isp1704_charger.h
new file mode 100644 (file)
index 0000000..68096a6
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * ISP1704 USB Charger Detection driver
+ *
+ * Copyright (C) 2011 Nokia Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+#ifndef __ISP1704_CHARGER_H
+#define __ISP1704_CHARGER_H
+
+struct isp1704_charger_data {
+       void            (*set_power)(bool on);
+};
+
+#endif
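A hypothetical board-code snippet wiring up the single set_power() hook; the GPIO number, and the use of a GPIO at all, are assumptions made purely for illustration:

#include <linux/gpio.h>

static void example_isp1704_set_power(bool on)
{
        gpio_set_value(67, on);         /* hypothetical charger-enable GPIO */
}

static struct isp1704_charger_data example_isp1704_data = {
        .set_power = example_isp1704_set_power,
};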
diff --git a/include/linux/power/max8903_charger.h b/include/linux/power/max8903_charger.h
new file mode 100644 (file)
index 0000000..24f51db
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * max8903_charger.h - Maxim 8903 USB/Adapter Charger Driver
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#ifndef __MAX8903_CHARGER_H__
+#define __MAX8903_CHARGER_H__
+
+struct max8903_pdata {
+       /*
+        * GPIOs
+        * cen, chg, flt, and usus are optional.
+        * Whether dok, dcm, and uok are required depends on the
+        * dc_valid and usb_valid flags below.
+        */
+       int cen;        /* Charger Enable input */
+       int dok;        /* DC(Adapter) Power OK output */
+       int uok;        /* USB Power OK output */
+       int chg;        /* Charger status output */
+       int flt;        /* Fault output */
+       int dcm;        /* Current-Limit Mode input (1: DC, 2: USB) */
+       int usus;       /* USB Suspend Input (1: suspended) */
+
+       /*
+        * DC(Adapter/TA) is wired
+        * When dc_valid is true,
+        *      dok and dcm should be valid.
+        *
+        * At least one of dc_valid or usb_valid should be true.
+        */
+       bool dc_valid;
+       /*
+        * USB is wired
+        * When usb_valid is true,
+        *      uok should be valid.
+        */
+       bool usb_valid;
+};
+
+#endif /* __MAX8903_CHARGER_H__ */
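A hedged board-file sketch for an adapter-only wiring, where just the mandatory dok/dcm lines are connected; the GPIO numbers are placeholders and the optional pins are simply omitted (how unwired optional pins are flagged is left to the driver):

static struct max8903_pdata example_max8903_pdata = {
        .dok       = 42,        /* DC power-OK GPIO (placeholder) */
        .dcm       = 43,        /* current-limit mode GPIO (placeholder) */
        .dc_valid  = true,      /* adapter path is wired */
        .usb_valid = false,     /* USB path not wired, so uok is unused */
};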
index 648c9c58add7ccafd511d1864cd78f1c5b80df79..e7576cf9e32d7bf8cdc1bb50230a42fb80d184ad 100644 (file)
@@ -173,12 +173,6 @@ extern void proc_net_remove(struct net *net, const char *name);
 extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
        struct proc_dir_entry *parent);
 
-/* While the {get|set|dup}_mm_exe_file functions are for mm_structs, they are
- * only needed to implement /proc/<pid>|self/exe so we define them here. */
-extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
-extern struct file *get_mm_exe_file(struct mm_struct *mm);
-extern void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm);
-
 extern struct file *proc_ns_fget(int fd);
 
 #else
@@ -230,19 +224,6 @@ static inline void pid_ns_release_proc(struct pid_namespace *ns)
 {
 }
 
-static inline void set_mm_exe_file(struct mm_struct *mm,
-                                  struct file *new_exe_file)
-{}
-
-static inline struct file *get_mm_exe_file(struct mm_struct *mm)
-{
-       return NULL;
-}
-
-static inline void dup_mm_exe_file(struct mm_struct *oldmm,
-                                  struct mm_struct *newmm)
-{}
-
 static inline struct file *proc_ns_fget(int fd)
 {
        return ERR_PTR(-EINVAL);
index 03ff67b0cdf5403deab6f69924c3f9a419e644db..2f007157fab9b9f51370136658b0918f85d15e9a 100644 (file)
@@ -41,4 +41,44 @@ extern struct ratelimit_state printk_ratelimit_state;
 extern int ___ratelimit(struct ratelimit_state *rs, const char *func);
 #define __ratelimit(state) ___ratelimit(state, __func__)
 
+#ifdef CONFIG_PRINTK
+
+#define WARN_ON_RATELIMIT(condition, state)                    \
+               WARN_ON((condition) && __ratelimit(state))
+
+#define __WARN_RATELIMIT(condition, state, format...)          \
+({                                                             \
+       int rtn = 0;                                            \
+       if (unlikely(__ratelimit(state)))                       \
+               rtn = WARN(condition, format);                  \
+       rtn;                                                    \
+})
+
+#define WARN_RATELIMIT(condition, format...)                   \
+({                                                             \
+       static DEFINE_RATELIMIT_STATE(_rs,                      \
+                                     DEFAULT_RATELIMIT_INTERVAL,       \
+                                     DEFAULT_RATELIMIT_BURST); \
+       __WARN_RATELIMIT(condition, &_rs, format);              \
+})
+
+#else
+
+#define WARN_ON_RATELIMIT(condition, state)                    \
+       WARN_ON(condition)
+
+#define __WARN_RATELIMIT(condition, state, format...)          \
+({                                                             \
+       int rtn = WARN(condition, format);                      \
+       rtn;                                                    \
+})
+
+#define WARN_RATELIMIT(condition, format...)                   \
+({                                                             \
+       int rtn = WARN(condition, format);                      \
+       rtn;                                                    \
+})
+
+#endif
+
 #endif /* _LINUX_RATELIMIT_H */
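A usage sketch for the new WARN_RATELIMIT() helper; the function and message are invented for illustration:

#include <linux/ratelimit.h>

static void example_check_state(int unexpected)
{
        /* Emits at most DEFAULT_RATELIMIT_BURST warnings per interval. */
        WARN_RATELIMIT(unexpected, "example: unexpected state %d\n",
                       unexpected);
}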
index c4c4fc45f856ee7ee57b4a9234827aafa3242f61..ce3127a75c8866a27afeeb875bd5943c2dc61976 100644 (file)
@@ -68,6 +68,8 @@ struct regulator_state {
  *
  * @min_uV: Smallest voltage consumers may set.
  * @max_uV: Largest voltage consumers may set.
+ * @uV_offset: Offset applied to voltages from consumer to compensate for
+ *             voltage drops.
  *
  * @min_uA: Smallest consumers consumers may set.
  * @max_uA: Largest current consumers may set.
@@ -99,6 +101,8 @@ struct regulation_constraints {
        int min_uV;
        int max_uV;
 
+       int uV_offset;
+
        /* current output range (inclusive) - for current control */
        int min_uA;
        int max_uA;
@@ -160,8 +164,6 @@ struct regulator_consumer_supply {
  * @supply_regulator: Parent regulator.  Specified using the regulator name
  *                    as it appears in the name field in sysfs, which can
  *                    be explicitly set using the constraints field 'name'.
- * @supply_regulator_dev: Parent regulator (if any) - DEPRECATED in favour
- *                        of supply_regulator.
  *
  * @constraints: Constraints.  These must be specified for the regulator to
  *               be usable.
@@ -173,7 +175,6 @@ struct regulator_consumer_supply {
  */
 struct regulator_init_data {
        const char *supply_regulator;        /* or NULL for system supply */
-       struct device *supply_regulator_dev; /* or NULL for system supply */
 
        struct regulation_constraints constraints;
 
index 877ece45426f6c3eb0fa3850f0c93db0ee84468a..b27ebea25660bff86dbdf2f86040f26904ab45c7 100644 (file)
@@ -92,10 +92,10 @@ struct rtc_pll_info {
 #define RTC_PLL_SET    _IOW('p', 0x12, struct rtc_pll_info)  /* Set PLL correction */
 
 /* interrupt flags */
-#define RTC_IRQF 0x80 /* any of the following is active */
-#define RTC_PF 0x40
-#define RTC_AF 0x20
-#define RTC_UF 0x10
+#define RTC_IRQF 0x80  /* Any of the following is active */
+#define RTC_PF 0x40    /* Periodic interrupt */
+#define RTC_AF 0x20    /* Alarm interrupt */
+#define RTC_UF 0x10    /* Update interrupt for 1Hz RTC */
 
 #ifdef __KERNEL__
 
index f18300eddfcb51041eb833eeefb1284288e7f2c9..dc8871295a5ab59edb7d4a35aa5c71c02e2cd6e1 100644 (file)
@@ -513,6 +513,7 @@ struct thread_group_cputimer {
        spinlock_t lock;
 };
 
+#include <linux/rwsem.h>
 struct autogroup;
 
 /*
@@ -632,6 +633,16 @@ struct signal_struct {
        unsigned audit_tty;
        struct tty_audit_buf *tty_audit_buf;
 #endif
+#ifdef CONFIG_CGROUPS
+       /*
+        * The threadgroup_fork_lock prevents threads from forking with
+        * CLONE_THREAD while held for writing. Use this for fork-sensitive
+        * threadgroup-wide operations. It's taken for reading in fork.c in
+        * copy_process().
+        * Currently only needed write-side by cgroups.
+        */
+       struct rw_semaphore threadgroup_fork_lock;
+#endif
 
        int oom_adj;            /* OOM kill score adjustment (bit shift) */
        int oom_score_adj;      /* OOM kill score adjustment */
@@ -2323,6 +2334,31 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
        spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
 }
 
+/* See the declaration of threadgroup_fork_lock in signal_struct. */
+#ifdef CONFIG_CGROUPS
+static inline void threadgroup_fork_read_lock(struct task_struct *tsk)
+{
+       down_read(&tsk->signal->threadgroup_fork_lock);
+}
+static inline void threadgroup_fork_read_unlock(struct task_struct *tsk)
+{
+       up_read(&tsk->signal->threadgroup_fork_lock);
+}
+static inline void threadgroup_fork_write_lock(struct task_struct *tsk)
+{
+       down_write(&tsk->signal->threadgroup_fork_lock);
+}
+static inline void threadgroup_fork_write_unlock(struct task_struct *tsk)
+{
+       up_write(&tsk->signal->threadgroup_fork_lock);
+}
+#else
+static inline void threadgroup_fork_read_lock(struct task_struct *tsk) {}
+static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) {}
+static inline void threadgroup_fork_write_lock(struct task_struct *tsk) {}
+static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) {}
+#endif
+
 #ifndef __HAVE_THREAD_FUNCTIONS
 
 #define task_thread_info(task) ((struct thread_info *)(task)->stack)
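A sketch of the intended write-side locking pattern (the cgroup attach path); the function itself is illustrative, only the lock/unlock helpers come from the hunk above:

static void example_attach_threadgroup(struct task_struct *leader)
{
        threadgroup_fork_write_lock(leader);
        /* Operate on every thread of 'leader'; CLONE_THREAD forks into
         * this group are blocked until the unlock below. */
        threadgroup_fork_write_unlock(leader);
}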
index 06d69648fc86c3a731c528cde7fba3b72a2688be..e9811892844f1cacca117fae323abef084ab615f 100644 (file)
@@ -41,9 +41,6 @@ typedef struct {
 #define __SEQLOCK_UNLOCKED(lockname) \
                 { 0, __SPIN_LOCK_UNLOCKED(lockname) }
 
-#define SEQLOCK_UNLOCKED \
-                __SEQLOCK_UNLOCKED(old_style_seqlock_init)
-
 #define seqlock_init(x)                                        \
        do {                                            \
                (x)->sequence = 0;                      \
index 74243c86ba39fd475c3188083f95d0f367133498..7ad824d510a2138bca6b7e3d1ec8aae4c0ac014a 100644 (file)
@@ -98,16 +98,6 @@ void ipi_call_unlock_irq(void);
  */
 int on_each_cpu(smp_call_func_t func, void *info, int wait);
 
-#define MSG_ALL_BUT_SELF       0x8000  /* Assume <32768 CPU's */
-#define MSG_ALL                        0x8001
-
-#define MSG_INVALIDATE_TLB     0x0001  /* Remote processor TLB invalidate */
-#define MSG_STOP_CPU           0x0002  /* Sent to shut down slave CPU's
-                                        * when rebooting
-                                        */
-#define MSG_RESCHEDULE         0x0003  /* Reschedule request from master CPU*/
-#define MSG_CALL_FUNCTION       0x0004  /* Call function on all other CPUs */
-
 /*
  * Mark the boot cpu "online" so that it can call console drivers in
  * printk() and can access its per-cpu storage.
index b4d7710bc38d2b26effd6025243150490e0a26a9..bb4f5fbbbd8e8ba1cfcfaed6b99533ec8a68ca0e 100644 (file)
@@ -581,7 +581,7 @@ extern int spi_bus_unlock(struct spi_master *master);
  * Callable only from contexts that can sleep.
  */
 static inline int
-spi_write(struct spi_device *spi, const u8 *buf, size_t len)
+spi_write(struct spi_device *spi, const void *buf, size_t len)
 {
        struct spi_transfer     t = {
                        .tx_buf         = buf,
@@ -605,7 +605,7 @@ spi_write(struct spi_device *spi, const u8 *buf, size_t len)
  * Callable only from contexts that can sleep.
  */
 static inline int
-spi_read(struct spi_device *spi, u8 *buf, size_t len)
+spi_read(struct spi_device *spi, void *buf, size_t len)
 {
        struct spi_transfer     t = {
                        .rx_buf         = buf,
@@ -620,8 +620,8 @@ spi_read(struct spi_device *spi, u8 *buf, size_t len)
 
 /* this copies txbuf and rxbuf data; for small transfers only! */
 extern int spi_write_then_read(struct spi_device *spi,
-               const u8 *txbuf, unsigned n_tx,
-               u8 *rxbuf, unsigned n_rx);
+               const void *txbuf, unsigned n_tx,
+               void *rxbuf, unsigned n_rx);
 
 /**
  * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read
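What the relaxed const void */void * prototypes allow, sketched with an invented command structure (the opcode and layout are placeholders):

#include <linux/spi/spi.h>

struct example_cmd {
        u8      opcode;
        __be16  addr;
} __packed;

static int example_send(struct spi_device *spi, u16 addr)
{
        struct example_cmd cmd = {
                .opcode = 0x9f,                 /* illustrative opcode */
                .addr   = cpu_to_be16(addr),
        };

        /* No (const u8 *) cast is needed any more. */
        return spi_write(spi, &cmd, sizeof(cmd));
}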
index a5c6da5d8df8dd5ad28b6b9525bfa787caa3e74d..384eb5fe530b8e90c44c9f65ebb719f965cf65df 100644 (file)
@@ -257,7 +257,8 @@ extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
 extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                                                gfp_t gfp_mask, bool noswap,
                                                unsigned int swappiness,
-                                               struct zone *zone);
+                                               struct zone *zone,
+                                               unsigned long *nr_scanned);
 extern int __isolate_lru_page(struct page *page, int mode, int file);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
new file mode 100644 (file)
index 0000000..03b90cd
--- /dev/null
@@ -0,0 +1,64 @@
+#ifndef VM_EVENT_ITEM_H_INCLUDED
+#define VM_EVENT_ITEM_H_INCLUDED
+
+#ifdef CONFIG_ZONE_DMA
+#define DMA_ZONE(xx) xx##_DMA,
+#else
+#define DMA_ZONE(xx)
+#endif
+
+#ifdef CONFIG_ZONE_DMA32
+#define DMA32_ZONE(xx) xx##_DMA32,
+#else
+#define DMA32_ZONE(xx)
+#endif
+
+#ifdef CONFIG_HIGHMEM
+#define HIGHMEM_ZONE(xx) , xx##_HIGH
+#else
+#define HIGHMEM_ZONE(xx)
+#endif
+
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
+
+enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
+               FOR_ALL_ZONES(PGALLOC),
+               PGFREE, PGACTIVATE, PGDEACTIVATE,
+               PGFAULT, PGMAJFAULT,
+               FOR_ALL_ZONES(PGREFILL),
+               FOR_ALL_ZONES(PGSTEAL),
+               FOR_ALL_ZONES(PGSCAN_KSWAPD),
+               FOR_ALL_ZONES(PGSCAN_DIRECT),
+#ifdef CONFIG_NUMA
+               PGSCAN_ZONE_RECLAIM_FAILED,
+#endif
+               PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
+               KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
+               KSWAPD_SKIP_CONGESTION_WAIT,
+               PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+#ifdef CONFIG_COMPACTION
+               COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
+               COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
+#endif
+#ifdef CONFIG_HUGETLB_PAGE
+               HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
+#endif
+               UNEVICTABLE_PGCULLED,   /* culled to noreclaim list */
+               UNEVICTABLE_PGSCANNED,  /* scanned for reclaimability */
+               UNEVICTABLE_PGRESCUED,  /* rescued from noreclaim list */
+               UNEVICTABLE_PGMLOCKED,
+               UNEVICTABLE_PGMUNLOCKED,
+               UNEVICTABLE_PGCLEARED,  /* on COW, page truncate */
+               UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
+               UNEVICTABLE_MLOCKFREED,
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+               THP_FAULT_ALLOC,
+               THP_FAULT_FALLBACK,
+               THP_COLLAPSE_ALLOC,
+               THP_COLLAPSE_ALLOC_FAILED,
+               THP_SPLIT,
+#endif
+               NR_VM_EVENT_ITEMS
+};
+
+#endif         /* VM_EVENT_ITEM_H_INCLUDED */
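The counters declared here are still bumped through the helpers in linux/vmstat.h; a one-line sketch of a typical call site:

#include <linux/vmstat.h>

static inline void example_note_activation(void)
{
        count_vm_event(PGACTIVATE);     /* per-cpu event counter increment */
}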
index 51359837511ae4180ec22ac0ac71b625475dfaab..bcd942fa611cadd855a15078bbdb78ed58ff5404 100644 (file)
@@ -5,69 +5,9 @@
 #include <linux/percpu.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
+#include <linux/vm_event_item.h>
 #include <asm/atomic.h>
 
-#ifdef CONFIG_ZONE_DMA
-#define DMA_ZONE(xx) xx##_DMA,
-#else
-#define DMA_ZONE(xx)
-#endif
-
-#ifdef CONFIG_ZONE_DMA32
-#define DMA32_ZONE(xx) xx##_DMA32,
-#else
-#define DMA32_ZONE(xx)
-#endif
-
-#ifdef CONFIG_HIGHMEM
-#define HIGHMEM_ZONE(xx) , xx##_HIGH
-#else
-#define HIGHMEM_ZONE(xx)
-#endif
-
-
-#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
-
-enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
-               FOR_ALL_ZONES(PGALLOC),
-               PGFREE, PGACTIVATE, PGDEACTIVATE,
-               PGFAULT, PGMAJFAULT,
-               FOR_ALL_ZONES(PGREFILL),
-               FOR_ALL_ZONES(PGSTEAL),
-               FOR_ALL_ZONES(PGSCAN_KSWAPD),
-               FOR_ALL_ZONES(PGSCAN_DIRECT),
-#ifdef CONFIG_NUMA
-               PGSCAN_ZONE_RECLAIM_FAILED,
-#endif
-               PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
-               KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
-               KSWAPD_SKIP_CONGESTION_WAIT,
-               PAGEOUTRUN, ALLOCSTALL, PGROTATED,
-#ifdef CONFIG_COMPACTION
-               COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
-               COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
-#endif
-#ifdef CONFIG_HUGETLB_PAGE
-               HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
-#endif
-               UNEVICTABLE_PGCULLED,   /* culled to noreclaim list */
-               UNEVICTABLE_PGSCANNED,  /* scanned for reclaimability */
-               UNEVICTABLE_PGRESCUED,  /* rescued from noreclaim list */
-               UNEVICTABLE_PGMLOCKED,
-               UNEVICTABLE_PGMUNLOCKED,
-               UNEVICTABLE_PGCLEARED,  /* on COW, page truncate */
-               UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */
-               UNEVICTABLE_MLOCKFREED,
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-               THP_FAULT_ALLOC,
-               THP_FAULT_FALLBACK,
-               THP_COLLAPSE_ALLOC,
-               THP_COLLAPSE_ALLOC_FAILED,
-               THP_SPLIT,
-#endif
-               NR_VM_EVENT_ITEMS
-};
-
 extern int sysctl_stat_interval;
 
 #ifdef CONFIG_VM_EVENT_COUNTERS
diff --git a/include/media/m5mols.h b/include/media/m5mols.h
new file mode 100644 (file)
index 0000000..2d7e7ca
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Driver header for M-5MOLS 8M Pixel camera sensor with ISP
+ *
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * Author: HeungJun Kim, riverful.kim@samsung.com
+ *
+ * Copyright (C) 2009 Samsung Electronics Co., Ltd.
+ * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef MEDIA_M5MOLS_H
+#define MEDIA_M5MOLS_H
+
+/**
+ * struct m5mols_platform_data - platform data for M-5MOLS driver
+ * @irq:       GPIO connected to the interrupt pin of M-5MOLS
+ * @gpio_reset:        GPIO driving the reset pin of M-5MOLS
+ * @reset_polarity: active state of the gpio_reset pin, 0 or 1
+ * @set_power: an additional callback to the board setup code
+ *             to be called after enabling and before disabling
+ *             the sensor's supply regulators
+ */
+struct m5mols_platform_data {
+       int irq;
+       int gpio_reset;
+       u8 reset_polarity;
+       int (*set_power)(struct device *dev, int on);
+};
+
+#endif /* MEDIA_M5MOLS_H */
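A board sketch filling in the new platform data; the GPIO/IRQ numbers and the power callback body are placeholders:

static int example_sensor_power(struct device *dev, int on)
{
        /* toggle the sensor supply regulators; board specific */
        return 0;
}

static struct m5mols_platform_data example_m5mols_pdata = {
        .irq            = 84,   /* placeholder */
        .gpio_reset     = 85,   /* placeholder */
        .reset_polarity = 0,
        .set_power      = example_sensor_power,
};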
index 07cf4b9d0a656e56c28b3e6e16c4d6abc14f1b0a..bf365721d6b05f180169bfb4030c05b2c0345cab 100644 (file)
@@ -4,6 +4,9 @@
 #include <dvb_net.h>
 #include <dvb_frontend.h>
 
+#ifndef _VIDEOBUF_DVB_H_
+#define        _VIDEOBUF_DVB_H_
+
 struct videobuf_dvb {
        /* filled in by the driver */
        char                       *name;
@@ -54,6 +57,7 @@ void videobuf_dvb_dealloc_frontends(struct videobuf_dvb_frontends *f);
 struct videobuf_dvb_frontend * videobuf_dvb_get_frontend(struct videobuf_dvb_frontends *f, int id);
 int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f, struct dvb_frontend *p);
 
+#endif                 /* _VIDEOBUF_DVB_H_ */
 
 /*
  * Local variables:
index 4fff432aeade033b06f62e2526711035424e299d..481f856c650f8132f77f7f0c9afaa78fe83eb39b 100644 (file)
@@ -797,7 +797,8 @@ struct netns_ipvs {
        struct list_head        rs_table[IP_VS_RTAB_SIZE];
        /* ip_vs_app */
        struct list_head        app_list;
-
+       /* ip_vs_ftp */
+       struct ip_vs_app        *ftp_app;
        /* ip_vs_proto */
        #define IP_VS_PROTO_TAB_SIZE    32      /* must be power of 2 */
        struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE];
index dcc8f5749d3fa5d799e9bc7acff9ab279b864ca3..2bf9ed9ef26b266a583550238e8433861d218c80 100644 (file)
@@ -7,6 +7,7 @@
 #include <asm/atomic.h>
 #include <linux/workqueue.h>
 #include <linux/list.h>
+#include <linux/sysctl.h>
 
 #include <net/netns/core.h>
 #include <net/netns/mib.h>
diff --git a/include/net/net_ratelimit.h b/include/net/net_ratelimit.h
new file mode 100644 (file)
index 0000000..7727b42
--- /dev/null
@@ -0,0 +1,8 @@
+#ifndef _LINUX_NET_RATELIMIT_H
+#define _LINUX_NET_RATELIMIT_H
+
+#include <linux/ratelimit.h>
+
+extern struct ratelimit_state net_ratelimit_state;
+
+#endif /* _LINUX_NET_RATELIMIT_H */
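A sketch of a typical consumer of the exported state, using the generic __ratelimit() helper pulled in via linux/ratelimit.h; the message and call site are invented:

#include <linux/kernel.h>
#include <net/net_ratelimit.h>

static void example_drop_packet(void)
{
        if (__ratelimit(&net_ratelimit_state))
                pr_warn("example: dropping malformed packet\n");
}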
index e7c043216558eba802e4e1011dab4e08ed144c76..ea56f76c0c2208dd2a9aea0d599b67e3b51b6a18 100644 (file)
@@ -1 +1,6 @@
+header-y += ib_user_cm.h
 header-y += ib_user_mad.h
+header-y += ib_user_sa.h
+header-y += ib_user_verbs.h
+header-y += rdma_netlink.h
+header-y += rdma_user_cm.h
index bd3d380781e0bcc61fb464d9d50d217b24251d14..f79014aa28f9928da0a72904c8cbf2ebe9b7fb36 100644 (file)
@@ -34,6 +34,7 @@
 #ifndef IB_USER_CM_H
 #define IB_USER_CM_H
 
+#include <linux/types.h>
 #include <rdma/ib_user_sa.h>
 
 #define IB_USER_CM_ABI_VERSION 5
index 169f7a53fb0c696cf7cbb9f441a4e5fe021641bd..26977c149c414df2a9f166b3dcb9539f0aca6205 100644 (file)
@@ -111,6 +111,20 @@ struct rdma_cm_event {
        } param;
 };
 
+enum rdma_cm_state {
+       RDMA_CM_IDLE,
+       RDMA_CM_ADDR_QUERY,
+       RDMA_CM_ADDR_RESOLVED,
+       RDMA_CM_ROUTE_QUERY,
+       RDMA_CM_ROUTE_RESOLVED,
+       RDMA_CM_CONNECT,
+       RDMA_CM_DISCONNECT,
+       RDMA_CM_ADDR_BOUND,
+       RDMA_CM_LISTEN,
+       RDMA_CM_DEVICE_REMOVAL,
+       RDMA_CM_DESTROYING
+};
+
 struct rdma_cm_id;
 
 /**
@@ -130,6 +144,7 @@ struct rdma_cm_id {
        rdma_cm_event_handler    event_handler;
        struct rdma_route        route;
        enum rdma_port_space     ps;
+       enum ib_qp_type          qp_type;
        u8                       port_num;
 };
 
@@ -140,9 +155,11 @@ struct rdma_cm_id {
  *   returned rdma_id.
  * @context: User specified context associated with the id.
  * @ps: RDMA port space.
+ * @qp_type: type of queue pair associated with the id.
  */
 struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
-                                 void *context, enum rdma_port_space ps);
+                                 void *context, enum rdma_port_space ps,
+                                 enum ib_qp_type qp_type);
 
 /**
   * rdma_destroy_id - Destroys an RDMA identifier.
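
With the extra qp_type argument, callers of rdma_create_id() now declare the queue pair type up front. A minimal sketch of the updated call, assuming a TCP port space and a reliable-connected QP; the handler is a stub:

#include <rdma/rdma_cm.h>

static int example_cm_handler(struct rdma_cm_id *id,
                              struct rdma_cm_event *event)
{
        return 0;       /* a real handler dispatches on event->event */
}

static struct rdma_cm_id *example_create_id(void *ctx)
{
        /* the fourth argument is the new qp_type parameter */
        return rdma_create_id(example_cm_handler, ctx, RDMA_PS_TCP, IB_QPT_RC);
}
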
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
new file mode 100644 (file)
index 0000000..3c5363a
--- /dev/null
@@ -0,0 +1,92 @@
+#ifndef _RDMA_NETLINK_H
+#define _RDMA_NETLINK_H
+
+#include <linux/types.h>
+
+enum {
+       RDMA_NL_RDMA_CM = 1
+};
+
+#define RDMA_NL_GET_CLIENT(type) ((type & (((1 << 6) - 1) << 10)) >> 10)
+#define RDMA_NL_GET_OP(type) (type & ((1 << 10) - 1))
+#define RDMA_NL_GET_TYPE(client, op) ((client << 10) + op)
+
+enum {
+       RDMA_NL_RDMA_CM_ID_STATS = 0,
+       RDMA_NL_RDMA_CM_NUM_OPS
+};
+
+enum {
+       RDMA_NL_RDMA_CM_ATTR_SRC_ADDR = 1,
+       RDMA_NL_RDMA_CM_ATTR_DST_ADDR,
+       RDMA_NL_RDMA_CM_NUM_ATTR,
+};
+
+struct rdma_cm_id_stats {
+       __u32   qp_num;
+       __u32   bound_dev_if;
+       __u32   port_space;
+       __s32   pid;
+       __u8    cm_state;
+       __u8    node_type;
+       __u8    port_num;
+       __u8    qp_type;
+};
+
+#ifdef __KERNEL__
+
+#include <linux/netlink.h>
+
+struct ibnl_client_cbs {
+       int (*dump)(struct sk_buff *skb, struct netlink_callback *nlcb);
+};
+
+int ibnl_init(void);
+void ibnl_cleanup(void);
+
+/**
+ * Add a client to the list of IB netlink exporters.
+ * @index: Index of the added client
+ * @nops: Number of ops supported by the added client.
+ * @cb_table: A table for op->callback
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int ibnl_add_client(int index, int nops,
+                   const struct ibnl_client_cbs cb_table[]);
+
+/**
+ * Remove a client from IB netlink.
+ * @index: Index of the removed IB client.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int ibnl_remove_client(int index);
+
+/**
+ * Put a new message in a supplied skb.
+ * @skb: The netlink skb.
+ * @nlh: Pointer to put the header of the new netlink message.
+ * @seq: The message sequence number.
+ * @len: The requested message length to allocate.
+ * @client: Calling IB netlink client.
+ * @op: message content op.
+ * Returns the allocated buffer on success and NULL on failure.
+ */
+void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq,
+                  int len, int client, int op);
+/**
+ * Put a new attribute in a supplied skb.
+ * @skb: The netlink skb.
+ * @nlh: Header of the netlink message to append the attribute to.
+ * @len: The length of the attribute data.
+ * @data: The attribute data to put.
+ * @type: The attribute type.
+ * Returns 0 on success and a negative error code on failure.
+ */
+int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
+                 int len, void *data, int type);
+
+#endif /* __KERNEL__ */
+
+#endif /* _RDMA_NETLINK_H */
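
The RDMA_NL_* macros pack a 6-bit client index and a 10-bit op into the netlink message type; for example RDMA_NL_GET_TYPE(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_ID_STATS) is 0x400, which decomposes back into client 1 and op 0. A sketch of how an in-kernel client would register a dump callback through this interface (the callback body is a stub):

#include <rdma/rdma_netlink.h>

static int example_cm_id_stats_dump(struct sk_buff *skb,
                                    struct netlink_callback *cb)
{
        /* a real client fills the skb via ibnl_put_msg()/ibnl_put_attr() */
        return 0;
}

static const struct ibnl_client_cbs example_cb_table[] = {
        [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = example_cm_id_stats_dump },
};

static int example_register(void)
{
        return ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS,
                               example_cb_table);
}
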
diff --git a/include/trace/events/gpio.h b/include/trace/events/gpio.h
new file mode 100644 (file)
index 0000000..927a8ad
--- /dev/null
@@ -0,0 +1,56 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gpio
+
+#if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_GPIO_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(gpio_direction,
+
+       TP_PROTO(unsigned gpio, int in, int err),
+
+       TP_ARGS(gpio, in, err),
+
+       TP_STRUCT__entry(
+               __field(unsigned, gpio)
+               __field(int, in)
+               __field(int, err)
+       ),
+
+       TP_fast_assign(
+               __entry->gpio = gpio;
+               __entry->in = in;
+               __entry->err = err;
+       ),
+
+       TP_printk("%u %3s (%d)", __entry->gpio,
+               __entry->in ? "in" : "out", __entry->err)
+);
+
+TRACE_EVENT(gpio_value,
+
+       TP_PROTO(unsigned gpio, int get, int value),
+
+       TP_ARGS(gpio, get, value),
+
+       TP_STRUCT__entry(
+               __field(unsigned, gpio)
+               __field(int, get)
+               __field(int, value)
+       ),
+
+       TP_fast_assign(
+               __entry->gpio = gpio;
+               __entry->get = get;
+               __entry->value = value;
+       ),
+
+       TP_printk("%u %3s %d", __entry->gpio,
+               __entry->get ? "get" : "set", __entry->value)
+);
+
+#endif /* if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
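
As with other trace event headers, exactly one compilation unit defines CREATE_TRACE_POINTS before including this file; the generated trace_gpio_* helpers can then be called from the GPIO paths. A short sketch of the expected usage:

/* In exactly one .c file (e.g. the GPIO core): */
#define CREATE_TRACE_POINTS
#include <trace/events/gpio.h>

static void example_trace_direction(unsigned gpio, int want_input, int err)
{
        /* in = 1 for input, 0 for output; err is the result of the change */
        trace_gpio_direction(gpio, want_input, err);
}

static void example_trace_set_value(unsigned gpio, int value)
{
        /* get = 0 marks this as a "set" operation */
        trace_gpio_value(gpio, 0, value);
}
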
index b33257bc7e83b58562dd5abc9f12cd89eadbc813..70213b4515ebcd562e0995ac6f14b5c1ba07e9a4 100644 (file)
@@ -58,6 +58,7 @@
 #define __HYPERVISOR_event_channel_op     32
 #define __HYPERVISOR_physdev_op           33
 #define __HYPERVISOR_hvm_op               34
+#define __HYPERVISOR_tmem_op              38
 
 /* Architecture-specific hypercall definitions. */
 #define __HYPERVISOR_arch_0               48
@@ -461,6 +462,27 @@ typedef uint8_t xen_domain_handle_t[16];
 #define __mk_unsigned_long(x) x ## UL
 #define mk_unsigned_long(x) __mk_unsigned_long(x)
 
+#define TMEM_SPEC_VERSION 1
+
+struct tmem_op {
+       uint32_t cmd;
+       int32_t pool_id;
+       union {
+               struct {  /* for cmd == TMEM_NEW_POOL */
+                       uint64_t uuid[2];
+                       uint32_t flags;
+               } new;
+               struct {
+                       uint64_t oid[3];
+                       uint32_t index;
+                       uint32_t tmem_offset;
+                       uint32_t pfn_offset;
+                       uint32_t len;
+                       GUEST_HANDLE(void) gmfn; /* guest machine page frame */
+               } gen;
+       } u;
+};
+
 #else /* __ASSEMBLY__ */
 
 /* In assembly code we cannot use C numeric constant suffixes. */
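
A small sketch of filling the new-pool variant of struct tmem_op. The tmem command numbers (e.g. TMEM_NEW_POOL) come from the tmem specification rather than this hunk, so the command value is taken as a parameter here and the pool_id handling is an assumption.

#include <linux/types.h>
#include <xen/interface/xen.h>

static void example_fill_tmem_new_pool(struct tmem_op *op, uint32_t new_pool_cmd,
                                       uint64_t uuid_lo, uint64_t uuid_hi,
                                       uint32_t flags)
{
        op->cmd = new_pool_cmd;         /* spec-defined TMEM_NEW_POOL value */
        op->pool_id = 0;                /* assumed unused when creating a pool */
        op->u.new.uuid[0] = uuid_lo;
        op->u.new.uuid[1] = uuid_hi;
        op->u.new.flags = flags;
}
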
index 332aac6499667658e45b8be17181b81a75618bbb..ebafac4231eeff15883809419e6df0c211b90e1a 100644 (file)
@@ -589,14 +589,6 @@ config CGROUP_DEBUG
 
          Say N if unsure.
 
-config CGROUP_NS
-       bool "Namespace cgroup subsystem"
-       help
-         Provides a simple namespace cgroup subsystem to
-         provide hierarchical naming of sets of namespaces,
-         for instance virtual servers and checkpoint/restart
-         jobs.
-
 config CGROUP_FREEZER
        bool "Freezer cgroup subsystem"
        help
index 729acb7e31487f67660a3202d818f1f55b4fd6e5..ab3385a21b27ac9b528ea270f2d4567522d2c966 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -347,7 +347,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
        struct file * file;
        char name[13];
        int id;
-       int acctflag = 0;
+       vm_flags_t acctflag = 0;
 
        if (size < SHMMIN || size > ns->shm_ctlmax)
                return -EINVAL;
index e9cf19155b4620a061af103c9732fc5e7d7a8290..2d64cfcc8b42bd187bed3b3bf3124be74b392219 100644 (file)
@@ -61,7 +61,6 @@ obj-$(CONFIG_COMPAT) += compat.o
 obj-$(CONFIG_CGROUPS) += cgroup.o
 obj-$(CONFIG_CGROUP_FREEZER) += cgroup_freezer.o
 obj-$(CONFIG_CPUSETS) += cpuset.o
-obj-$(CONFIG_CGROUP_NS) += ns_cgroup.o
 obj-$(CONFIG_UTS_NS) += utsname.o
 obj-$(CONFIG_USER_NS) += user_namespace.o
 obj-$(CONFIG_PID_NS) += pid_namespace.o
index 909a35510af53656c684021b113977c10bfbbac8..2731d115d725c0a216f06d18cd60367eab4210da 100644 (file)
@@ -57,6 +57,7 @@
 #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
 #include <linux/eventfd.h>
 #include <linux/poll.h>
+#include <linux/flex_array.h> /* used in cgroup_attach_proc */
 
 #include <asm/atomic.h>
 
@@ -1735,6 +1736,76 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
 }
 EXPORT_SYMBOL_GPL(cgroup_path);
 
+/*
+ * cgroup_task_migrate - move a task from one cgroup to another.
+ *
+ * 'guarantee' is set if the caller promises that a new css_set for the task
+ * will already exist. If not set, this function might sleep, and can fail with
+ * -ENOMEM. Otherwise, it can only fail with -ESRCH.
+ */
+static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
+                              struct task_struct *tsk, bool guarantee)
+{
+       struct css_set *oldcg;
+       struct css_set *newcg;
+
+       /*
+        * get old css_set. we need to take task_lock and refcount it, because
+        * an exiting task can change its css_set to init_css_set and drop its
+        * old one without taking cgroup_mutex.
+        */
+       task_lock(tsk);
+       oldcg = tsk->cgroups;
+       get_css_set(oldcg);
+       task_unlock(tsk);
+
+       /* locate or allocate a new css_set for this task. */
+       if (guarantee) {
+               /* we know the css_set we want already exists. */
+               struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
+               read_lock(&css_set_lock);
+               newcg = find_existing_css_set(oldcg, cgrp, template);
+               BUG_ON(!newcg);
+               get_css_set(newcg);
+               read_unlock(&css_set_lock);
+       } else {
+               might_sleep();
+               /* find_css_set will give us newcg already referenced. */
+               newcg = find_css_set(oldcg, cgrp);
+               if (!newcg) {
+                       put_css_set(oldcg);
+                       return -ENOMEM;
+               }
+       }
+       put_css_set(oldcg);
+
+       /* if PF_EXITING is set, the tsk->cgroups pointer is no longer safe. */
+       task_lock(tsk);
+       if (tsk->flags & PF_EXITING) {
+               task_unlock(tsk);
+               put_css_set(newcg);
+               return -ESRCH;
+       }
+       rcu_assign_pointer(tsk->cgroups, newcg);
+       task_unlock(tsk);
+
+       /* Update the css_set linked lists if we're using them */
+       write_lock(&css_set_lock);
+       if (!list_empty(&tsk->cg_list))
+               list_move(&tsk->cg_list, &newcg->tasks);
+       write_unlock(&css_set_lock);
+
+       /*
+        * We just gained a reference on oldcg by taking it from the task. As
+        * trading it for newcg is protected by cgroup_mutex, we're safe to drop
+        * it here; it will be freed under RCU.
+        */
+       put_css_set(oldcg);
+
+       set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
+       return 0;
+}
+
 /**
  * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp'
  * @cgrp: the cgroup the task is attaching to
@@ -1745,11 +1816,9 @@ EXPORT_SYMBOL_GPL(cgroup_path);
  */
 int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
-       int retval = 0;
+       int retval;
        struct cgroup_subsys *ss, *failed_ss = NULL;
        struct cgroup *oldcgrp;
-       struct css_set *cg;
-       struct css_set *newcg;
        struct cgroupfs_root *root = cgrp->root;
 
        /* Nothing to do if the task is already in that cgroup */
@@ -1759,7 +1828,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 
        for_each_subsys(root, ss) {
                if (ss->can_attach) {
-                       retval = ss->can_attach(ss, cgrp, tsk, false);
+                       retval = ss->can_attach(ss, cgrp, tsk);
                        if (retval) {
                                /*
                                 * Remember on which subsystem the can_attach()
@@ -1771,46 +1840,29 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
                                goto out;
                        }
                }
+               if (ss->can_attach_task) {
+                       retval = ss->can_attach_task(cgrp, tsk);
+                       if (retval) {
+                               failed_ss = ss;
+                               goto out;
+                       }
+               }
        }
 
-       task_lock(tsk);
-       cg = tsk->cgroups;
-       get_css_set(cg);
-       task_unlock(tsk);
-       /*
-        * Locate or allocate a new css_set for this task,
-        * based on its final set of cgroups
-        */
-       newcg = find_css_set(cg, cgrp);
-       put_css_set(cg);
-       if (!newcg) {
-               retval = -ENOMEM;
-               goto out;
-       }
-
-       task_lock(tsk);
-       if (tsk->flags & PF_EXITING) {
-               task_unlock(tsk);
-               put_css_set(newcg);
-               retval = -ESRCH;
+       retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, false);
+       if (retval)
                goto out;
-       }
-       rcu_assign_pointer(tsk->cgroups, newcg);
-       task_unlock(tsk);
-
-       /* Update the css_set linked lists if we're using them */
-       write_lock(&css_set_lock);
-       if (!list_empty(&tsk->cg_list))
-               list_move(&tsk->cg_list, &newcg->tasks);
-       write_unlock(&css_set_lock);
 
        for_each_subsys(root, ss) {
+               if (ss->pre_attach)
+                       ss->pre_attach(cgrp);
+               if (ss->attach_task)
+                       ss->attach_task(cgrp, tsk);
                if (ss->attach)
-                       ss->attach(ss, cgrp, oldcgrp, tsk, false);
+                       ss->attach(ss, cgrp, oldcgrp, tsk);
        }
-       set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
+
        synchronize_rcu();
-       put_css_set(cg);
 
        /*
         * wake up rmdir() waiter. the rmdir should fail since the cgroup
@@ -1829,7 +1881,7 @@ out:
                                 */
                                break;
                        if (ss->cancel_attach)
-                               ss->cancel_attach(ss, cgrp, tsk, false);
+                               ss->cancel_attach(ss, cgrp, tsk);
                }
        }
        return retval;
@@ -1860,49 +1912,370 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
 
 /*
- * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
- * held. May take task_lock of task
+ * cgroup_attach_proc works in two stages, the first of which prefetches all
+ * new css_sets needed (to make sure we have enough memory before committing
+ * to the move) and stores them in a list of entries of the following type.
+ * TODO: possible optimization: use css_set->rcu_head for chaining instead
+ */
+struct cg_list_entry {
+       struct css_set *cg;
+       struct list_head links;
+};
+
+static bool css_set_check_fetched(struct cgroup *cgrp,
+                                 struct task_struct *tsk, struct css_set *cg,
+                                 struct list_head *newcg_list)
+{
+       struct css_set *newcg;
+       struct cg_list_entry *cg_entry;
+       struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];
+
+       read_lock(&css_set_lock);
+       newcg = find_existing_css_set(cg, cgrp, template);
+       if (newcg)
+               get_css_set(newcg);
+       read_unlock(&css_set_lock);
+
+       /* doesn't exist at all? */
+       if (!newcg)
+               return false;
+       /* see if it's already in the list */
+       list_for_each_entry(cg_entry, newcg_list, links) {
+               if (cg_entry->cg == newcg) {
+                       put_css_set(newcg);
+                       return true;
+               }
+       }
+
+       /* not found */
+       put_css_set(newcg);
+       return false;
+}
+
+/*
+ * Find the new css_set and store it in the list in preparation for moving the
+ * given task to the given cgroup. Returns 0 or -ENOMEM.
+ */
+static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg,
+                           struct list_head *newcg_list)
+{
+       struct css_set *newcg;
+       struct cg_list_entry *cg_entry;
+
+       /* ensure a new css_set will exist for this thread */
+       newcg = find_css_set(cg, cgrp);
+       if (!newcg)
+               return -ENOMEM;
+       /* add it to the list */
+       cg_entry = kmalloc(sizeof(struct cg_list_entry), GFP_KERNEL);
+       if (!cg_entry) {
+               put_css_set(newcg);
+               return -ENOMEM;
+       }
+       cg_entry->cg = newcg;
+       list_add(&cg_entry->links, newcg_list);
+       return 0;
+}
+
+/**
+ * cgroup_attach_proc - attach all threads in a threadgroup to a cgroup
+ * @cgrp: the cgroup to attach to
+ * @leader: the threadgroup leader task_struct of the group to be attached
+ *
+ * Call holding cgroup_mutex and the threadgroup_fork_lock of the leader. Will
+ * take task_lock of each thread in leader's threadgroup individually in turn.
+ */
+int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
+{
+       int retval, i, group_size;
+       struct cgroup_subsys *ss, *failed_ss = NULL;
+       bool cancel_failed_ss = false;
+       /* guaranteed to be initialized later, but the compiler needs this */
+       struct cgroup *oldcgrp = NULL;
+       struct css_set *oldcg;
+       struct cgroupfs_root *root = cgrp->root;
+       /* threadgroup list cursor and array */
+       struct task_struct *tsk;
+       struct flex_array *group;
+       /*
+        * we need to make sure we have css_sets for all the tasks we're
+        * going to move -before- we actually start moving them, so that in
+        * case we get an ENOMEM we can bail out before making any changes.
+        */
+       struct list_head newcg_list;
+       struct cg_list_entry *cg_entry, *temp_nobe;
+
+       /*
+        * step 0: in order to do expensive, possibly blocking operations for
+        * every thread, we cannot iterate the thread group list, since it needs
+        * rcu or tasklist locked. instead, build an array of all threads in the
+        * group - threadgroup_fork_lock prevents new threads from appearing,
+        * and if threads exit, this will just be an over-estimate.
+        */
+       group_size = get_nr_threads(leader);
+       /* flex_array supports very large thread-groups better than kmalloc. */
+       group = flex_array_alloc(sizeof(struct task_struct *), group_size,
+                                GFP_KERNEL);
+       if (!group)
+               return -ENOMEM;
+       /* pre-allocate to guarantee space while iterating in rcu read-side. */
+       retval = flex_array_prealloc(group, 0, group_size - 1, GFP_KERNEL);
+       if (retval)
+               goto out_free_group_list;
+
+       /* prevent changes to the threadgroup list while we take a snapshot. */
+       rcu_read_lock();
+       if (!thread_group_leader(leader)) {
+               /*
+                * a race with de_thread from another thread's exec() may strip
+                * us of our leadership, making while_each_thread unsafe to use
+                * on this task. if this happens, there is no choice but to
+                * throw this task away and try again (from cgroup_procs_write);
+                * this is "double-double-toil-and-trouble-check locking".
+                */
+               rcu_read_unlock();
+               retval = -EAGAIN;
+               goto out_free_group_list;
+       }
+       /* take a reference on each task in the group to go in the array. */
+       tsk = leader;
+       i = 0;
+       do {
+               /* as per above, nr_threads may decrease, but not increase. */
+               BUG_ON(i >= group_size);
+               get_task_struct(tsk);
+               /*
+                * saying GFP_ATOMIC has no effect here because we did prealloc
+                * earlier, but it's good form to communicate our expectations.
+                */
+               retval = flex_array_put_ptr(group, i, tsk, GFP_ATOMIC);
+               BUG_ON(retval != 0);
+               i++;
+       } while_each_thread(leader, tsk);
+       /* remember the number of threads in the array for later. */
+       group_size = i;
+       rcu_read_unlock();
+
+       /*
+        * step 1: check that we can legitimately attach to the cgroup.
+        */
+       for_each_subsys(root, ss) {
+               if (ss->can_attach) {
+                       retval = ss->can_attach(ss, cgrp, leader);
+                       if (retval) {
+                               failed_ss = ss;
+                               goto out_cancel_attach;
+                       }
+               }
+               /* a callback to be run on every thread in the threadgroup. */
+               if (ss->can_attach_task) {
+                       /* run on each task in the threadgroup. */
+                       for (i = 0; i < group_size; i++) {
+                               tsk = flex_array_get_ptr(group, i);
+                               retval = ss->can_attach_task(cgrp, tsk);
+                               if (retval) {
+                                       failed_ss = ss;
+                                       cancel_failed_ss = true;
+                                       goto out_cancel_attach;
+                               }
+                       }
+               }
+       }
+
+       /*
+        * step 2: make sure css_sets exist for all threads to be migrated.
+        * we use find_css_set, which allocates a new one if necessary.
+        */
+       INIT_LIST_HEAD(&newcg_list);
+       for (i = 0; i < group_size; i++) {
+               tsk = flex_array_get_ptr(group, i);
+               /* nothing to do if this task is already in the cgroup */
+               oldcgrp = task_cgroup_from_root(tsk, root);
+               if (cgrp == oldcgrp)
+                       continue;
+               /* get old css_set pointer */
+               task_lock(tsk);
+               if (tsk->flags & PF_EXITING) {
+                       /* ignore this task if it's going away */
+                       task_unlock(tsk);
+                       continue;
+               }
+               oldcg = tsk->cgroups;
+               get_css_set(oldcg);
+               task_unlock(tsk);
+               /* see if the new one for us is already in the list? */
+               if (css_set_check_fetched(cgrp, tsk, oldcg, &newcg_list)) {
+                       /* was already there, nothing to do. */
+                       put_css_set(oldcg);
+               } else {
+                       /* we don't already have it. get new one. */
+                       retval = css_set_prefetch(cgrp, oldcg, &newcg_list);
+                       put_css_set(oldcg);
+                       if (retval)
+                               goto out_list_teardown;
+               }
+       }
+
+       /*
+        * step 3: now that we're guaranteed success wrt the css_sets, proceed
+        * to move all tasks to the new cgroup, calling ss->attach_task for each
+        * one along the way. there are no failure cases after here, so this is
+        * the commit point.
+        */
+       for_each_subsys(root, ss) {
+               if (ss->pre_attach)
+                       ss->pre_attach(cgrp);
+       }
+       for (i = 0; i < group_size; i++) {
+               tsk = flex_array_get_ptr(group, i);
+               /* leave current thread as it is if it's already there */
+               oldcgrp = task_cgroup_from_root(tsk, root);
+               if (cgrp == oldcgrp)
+                       continue;
+               /* attach each task to each subsystem */
+               for_each_subsys(root, ss) {
+                       if (ss->attach_task)
+                               ss->attach_task(cgrp, tsk);
+               }
+               /* if the thread is PF_EXITING, it can just get skipped. */
+               retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, true);
+               BUG_ON(retval != 0 && retval != -ESRCH);
+       }
+       /* nothing is sensitive to fork() after this point. */
+
+       /*
+        * step 4: do expensive, non-thread-specific subsystem callbacks.
+        * TODO: if ever a subsystem needs to know the oldcgrp for each task
+        * being moved, this call will need to be reworked to communicate that.
+        */
+       for_each_subsys(root, ss) {
+               if (ss->attach)
+                       ss->attach(ss, cgrp, oldcgrp, leader);
+       }
+
+       /*
+        * step 5: success! and cleanup
+        */
+       synchronize_rcu();
+       cgroup_wakeup_rmdir_waiter(cgrp);
+       retval = 0;
+out_list_teardown:
+       /* clean up the list of prefetched css_sets. */
+       list_for_each_entry_safe(cg_entry, temp_nobe, &newcg_list, links) {
+               list_del(&cg_entry->links);
+               put_css_set(cg_entry->cg);
+               kfree(cg_entry);
+       }
+out_cancel_attach:
+       /* same deal as in cgroup_attach_task */
+       if (retval) {
+               for_each_subsys(root, ss) {
+                       if (ss == failed_ss) {
+                               if (cancel_failed_ss && ss->cancel_attach)
+                                       ss->cancel_attach(ss, cgrp, leader);
+                               break;
+                       }
+                       if (ss->cancel_attach)
+                               ss->cancel_attach(ss, cgrp, leader);
+               }
+       }
+       /* clean up the array of referenced threads in the group. */
+       for (i = 0; i < group_size; i++) {
+               tsk = flex_array_get_ptr(group, i);
+               put_task_struct(tsk);
+       }
+out_free_group_list:
+       flex_array_free(group);
+       return retval;
+}
+
+/*
+ * Find the task_struct of the task to attach by vpid and pass it along to the
+ * function to attach either it or all tasks in its threadgroup. Will take
+ * cgroup_mutex; may take task_lock of task.
  */
-static int attach_task_by_pid(struct cgroup *cgrp, u64 pid)
+static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup)
 {
        struct task_struct *tsk;
        const struct cred *cred = current_cred(), *tcred;
        int ret;
 
+       if (!cgroup_lock_live_group(cgrp))
+               return -ENODEV;
+
        if (pid) {
                rcu_read_lock();
                tsk = find_task_by_vpid(pid);
-               if (!tsk || tsk->flags & PF_EXITING) {
+               if (!tsk) {
                        rcu_read_unlock();
+                       cgroup_unlock();
+                       return -ESRCH;
+               }
+               if (threadgroup) {
+                       /*
+                        * RCU protects this access, since tsk was found in the
+                        * tid map. a race with de_thread may cause group_leader
+                        * to stop being the leader, but cgroup_attach_proc will
+                        * detect it later.
+                        */
+                       tsk = tsk->group_leader;
+               } else if (tsk->flags & PF_EXITING) {
+                       /* optimization for the single-task-only case */
+                       rcu_read_unlock();
+                       cgroup_unlock();
                        return -ESRCH;
                }
 
+               /*
+                * even if we're attaching all tasks in the thread group, we
+                * only need to check permissions on one of them.
+                */
                tcred = __task_cred(tsk);
                if (cred->euid &&
                    cred->euid != tcred->uid &&
                    cred->euid != tcred->suid) {
                        rcu_read_unlock();
+                       cgroup_unlock();
                        return -EACCES;
                }
                get_task_struct(tsk);
                rcu_read_unlock();
        } else {
-               tsk = current;
+               if (threadgroup)
+                       tsk = current->group_leader;
+               else
+                       tsk = current;
                get_task_struct(tsk);
        }
 
-       ret = cgroup_attach_task(cgrp, tsk);
+       if (threadgroup) {
+               threadgroup_fork_write_lock(tsk);
+               ret = cgroup_attach_proc(cgrp, tsk);
+               threadgroup_fork_write_unlock(tsk);
+       } else {
+               ret = cgroup_attach_task(cgrp, tsk);
+       }
        put_task_struct(tsk);
+       cgroup_unlock();
        return ret;
 }
 
 static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
+{
+       return attach_task_by_pid(cgrp, pid, false);
+}
+
+static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid)
 {
        int ret;
-       if (!cgroup_lock_live_group(cgrp))
-               return -ENODEV;
-       ret = attach_task_by_pid(cgrp, pid);
-       cgroup_unlock();
+       do {
+               /*
+                * attach_proc fails with -EAGAIN if threadgroup leadership
+                * changes in the middle of the operation, in which case we need
+                * to find the task_struct for the new leader and start over.
+                */
+               ret = attach_task_by_pid(cgrp, tgid, true);
+       } while (ret == -EAGAIN);
        return ret;
 }
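
From userspace, the visible change is that writing a tgid to cgroup.procs now moves the entire thread group, with the -EAGAIN retry handled inside the kernel. An illustrative userspace sketch (the cgroup path is an example):

#include <stdio.h>
#include <sys/types.h>

static int example_move_process(const char *cgroup_dir, pid_t tgid)
{
        char path[256];
        FILE *f;
        int ret = 0;

        snprintf(path, sizeof(path), "%s/cgroup.procs", cgroup_dir);
        f = fopen(path, "w");
        if (!f)
                return -1;
        if (fprintf(f, "%d\n", (int)tgid) < 0)
                ret = -1;
        if (fclose(f) != 0)
                ret = -1;
        return ret;
}
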
 
@@ -3259,9 +3632,9 @@ static struct cftype files[] = {
        {
                .name = CGROUP_FILE_GENERIC_PREFIX "procs",
                .open = cgroup_procs_open,
-               /* .write_u64 = cgroup_procs_write, TODO */
+               .write_u64 = cgroup_procs_write,
                .release = cgroup_pidlist_release,
-               .mode = S_IRUGO,
+               .mode = S_IRUGO | S_IWUSR,
        },
        {
                .name = "notify_on_release",
@@ -4256,122 +4629,6 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
                put_css_set_taskexit(cg);
 }
 
-/**
- * cgroup_clone - clone the cgroup the given subsystem is attached to
- * @tsk: the task to be moved
- * @subsys: the given subsystem
- * @nodename: the name for the new cgroup
- *
- * Duplicate the current cgroup in the hierarchy that the given
- * subsystem is attached to, and move this task into the new
- * child.
- */
-int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
-                                                       char *nodename)
-{
-       struct dentry *dentry;
-       int ret = 0;
-       struct cgroup *parent, *child;
-       struct inode *inode;
-       struct css_set *cg;
-       struct cgroupfs_root *root;
-       struct cgroup_subsys *ss;
-
-       /* We shouldn't be called by an unregistered subsystem */
-       BUG_ON(!subsys->active);
-
-       /* First figure out what hierarchy and cgroup we're dealing
-        * with, and pin them so we can drop cgroup_mutex */
-       mutex_lock(&cgroup_mutex);
- again:
-       root = subsys->root;
-       if (root == &rootnode) {
-               mutex_unlock(&cgroup_mutex);
-               return 0;
-       }
-
-       /* Pin the hierarchy */
-       if (!atomic_inc_not_zero(&root->sb->s_active)) {
-               /* We race with the final deactivate_super() */
-               mutex_unlock(&cgroup_mutex);
-               return 0;
-       }
-
-       /* Keep the cgroup alive */
-       task_lock(tsk);
-       parent = task_cgroup(tsk, subsys->subsys_id);
-       cg = tsk->cgroups;
-       get_css_set(cg);
-       task_unlock(tsk);
-
-       mutex_unlock(&cgroup_mutex);
-
-       /* Now do the VFS work to create a cgroup */
-       inode = parent->dentry->d_inode;
-
-       /* Hold the parent directory mutex across this operation to
-        * stop anyone else deleting the new cgroup */
-       mutex_lock(&inode->i_mutex);
-       dentry = lookup_one_len(nodename, parent->dentry, strlen(nodename));
-       if (IS_ERR(dentry)) {
-               printk(KERN_INFO
-                      "cgroup: Couldn't allocate dentry for %s: %ld\n", nodename,
-                      PTR_ERR(dentry));
-               ret = PTR_ERR(dentry);
-               goto out_release;
-       }
-
-       /* Create the cgroup directory, which also creates the cgroup */
-       ret = vfs_mkdir(inode, dentry, 0755);
-       child = __d_cgrp(dentry);
-       dput(dentry);
-       if (ret) {
-               printk(KERN_INFO
-                      "Failed to create cgroup %s: %d\n", nodename,
-                      ret);
-               goto out_release;
-       }
-
-       /* The cgroup now exists. Retake cgroup_mutex and check
-        * that we're still in the same state that we thought we
-        * were. */
-       mutex_lock(&cgroup_mutex);
-       if ((root != subsys->root) ||
-           (parent != task_cgroup(tsk, subsys->subsys_id))) {
-               /* Aargh, we raced ... */
-               mutex_unlock(&inode->i_mutex);
-               put_css_set(cg);
-
-               deactivate_super(root->sb);
-               /* The cgroup is still accessible in the VFS, but
-                * we're not going to try to rmdir() it at this
-                * point. */
-               printk(KERN_INFO
-                      "Race in cgroup_clone() - leaking cgroup %s\n",
-                      nodename);
-               goto again;
-       }
-
-       /* do any required auto-setup */
-       for_each_subsys(root, ss) {
-               if (ss->post_clone)
-                       ss->post_clone(ss, child);
-       }
-
-       /* All seems fine. Finish by moving the task into the new cgroup */
-       ret = cgroup_attach_task(child, tsk);
-       mutex_unlock(&cgroup_mutex);
-
- out_release:
-       mutex_unlock(&inode->i_mutex);
-
-       mutex_lock(&cgroup_mutex);
-       put_css_set(cg);
-       mutex_unlock(&cgroup_mutex);
-       deactivate_super(root->sb);
-       return ret;
-}
-
 /**
  * cgroup_is_descendant - see if @cgrp is a descendant of @task's cgrp
  * @cgrp: the cgroup in question
index e7bebb7c6c38ff7ddaefb807d7e50cd1bcc1adbc..e691818d7e450f5f8785b8b97a08cdbb2f4e1493 100644 (file)
@@ -160,7 +160,7 @@ static void freezer_destroy(struct cgroup_subsys *ss,
  */
 static int freezer_can_attach(struct cgroup_subsys *ss,
                              struct cgroup *new_cgroup,
-                             struct task_struct *task, bool threadgroup)
+                             struct task_struct *task)
 {
        struct freezer *freezer;
 
@@ -172,26 +172,17 @@ static int freezer_can_attach(struct cgroup_subsys *ss,
        if (freezer->state != CGROUP_THAWED)
                return -EBUSY;
 
+       return 0;
+}
+
+static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+{
        rcu_read_lock();
-       if (__cgroup_freezing_or_frozen(task)) {
+       if (__cgroup_freezing_or_frozen(tsk)) {
                rcu_read_unlock();
                return -EBUSY;
        }
        rcu_read_unlock();
-
-       if (threadgroup) {
-               struct task_struct *c;
-
-               rcu_read_lock();
-               list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
-                       if (__cgroup_freezing_or_frozen(c)) {
-                               rcu_read_unlock();
-                               return -EBUSY;
-                       }
-               }
-               rcu_read_unlock();
-       }
-
        return 0;
 }
 
@@ -390,6 +381,9 @@ struct cgroup_subsys freezer_subsys = {
        .populate       = freezer_populate,
        .subsys_id      = freezer_subsys_id,
        .can_attach     = freezer_can_attach,
+       .can_attach_task = freezer_can_attach_task,
+       .pre_attach     = NULL,
+       .attach_task    = NULL,
        .attach         = NULL,
        .fork           = freezer_fork,
        .exit           = NULL,
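
The callback split is visible above: group-wide checks stay in can_attach(), while per-thread checks and commit work move to can_attach_task() and attach_task(). A minimal sketch of how a subsystem fills in the new hooks; the subsystem name is hypothetical, and .subsys_id is omitted because real ids are generated from cgroup_subsys.h:

#include <linux/cgroup.h>

static int example_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                              struct task_struct *task)
{
        return 0;       /* group-wide checks only */
}

static int example_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
        return 0;       /* called once for every thread being moved */
}

static void example_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
        /* per-thread commit work */
}

struct cgroup_subsys example_subsys = {
        .name            = "example",
        .can_attach      = example_can_attach,
        .can_attach_task = example_can_attach_task,
        .pre_attach      = NULL,
        .attach_task     = example_attach_task,
        .attach          = NULL,
};
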
index 2bb8c2e98fff4074bce131a145966bfb1c788866..1ceeb049c82749e796fe8001f56fb5c7d67fe846 100644 (file)
@@ -1367,14 +1367,10 @@ static int fmeter_getrate(struct fmeter *fmp)
        return val;
 }
 
-/* Protected by cgroup_lock */
-static cpumask_var_t cpus_attach;
-
 /* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
 static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
-                            struct task_struct *tsk, bool threadgroup)
+                            struct task_struct *tsk)
 {
-       int ret;
        struct cpuset *cs = cgroup_cs(cont);
 
        if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
@@ -1391,29 +1387,42 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont,
        if (tsk->flags & PF_THREAD_BOUND)
                return -EINVAL;
 
-       ret = security_task_setscheduler(tsk);
-       if (ret)
-               return ret;
-       if (threadgroup) {
-               struct task_struct *c;
-
-               rcu_read_lock();
-               list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
-                       ret = security_task_setscheduler(c);
-                       if (ret) {
-                               rcu_read_unlock();
-                               return ret;
-                       }
-               }
-               rcu_read_unlock();
-       }
        return 0;
 }
 
-static void cpuset_attach_task(struct task_struct *tsk, nodemask_t *to,
-                              struct cpuset *cs)
+static int cpuset_can_attach_task(struct cgroup *cgrp, struct task_struct *task)
+{
+       return security_task_setscheduler(task);
+}
+
+/*
+ * Protected by cgroup_lock. The nodemasks must be stored globally because
+ * dynamically allocating them is not allowed in pre_attach, and they must
+ * persist among pre_attach, attach_task, and attach.
+ */
+static cpumask_var_t cpus_attach;
+static nodemask_t cpuset_attach_nodemask_from;
+static nodemask_t cpuset_attach_nodemask_to;
+
+/* Set-up work for before attaching each task. */
+static void cpuset_pre_attach(struct cgroup *cont)
+{
+       struct cpuset *cs = cgroup_cs(cont);
+
+       if (cs == &top_cpuset)
+               cpumask_copy(cpus_attach, cpu_possible_mask);
+       else
+               guarantee_online_cpus(cs, cpus_attach);
+
+       guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
+}
+
+/* Per-thread attachment work. */
+static void cpuset_attach_task(struct cgroup *cont, struct task_struct *tsk)
 {
        int err;
+       struct cpuset *cs = cgroup_cs(cont);
+
        /*
         * can_attach beforehand should guarantee that this doesn't fail.
         * TODO: have a better way to handle failure here
@@ -1421,45 +1430,29 @@ static void cpuset_attach_task(struct task_struct *tsk, nodemask_t *to,
        err = set_cpus_allowed_ptr(tsk, cpus_attach);
        WARN_ON_ONCE(err);
 
-       cpuset_change_task_nodemask(tsk, to);
+       cpuset_change_task_nodemask(tsk, &cpuset_attach_nodemask_to);
        cpuset_update_task_spread_flag(cs, tsk);
-
 }
 
 static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
-                         struct cgroup *oldcont, struct task_struct *tsk,
-                         bool threadgroup)
+                         struct cgroup *oldcont, struct task_struct *tsk)
 {
        struct mm_struct *mm;
        struct cpuset *cs = cgroup_cs(cont);
        struct cpuset *oldcs = cgroup_cs(oldcont);
-       static nodemask_t to;           /* protected by cgroup_mutex */
 
-       if (cs == &top_cpuset) {
-               cpumask_copy(cpus_attach, cpu_possible_mask);
-       } else {
-               guarantee_online_cpus(cs, cpus_attach);
-       }
-       guarantee_online_mems(cs, &to);
-
-       /* do per-task migration stuff possibly for each in the threadgroup */
-       cpuset_attach_task(tsk, &to, cs);
-       if (threadgroup) {
-               struct task_struct *c;
-               rcu_read_lock();
-               list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
-                       cpuset_attach_task(c, &to, cs);
-               }
-               rcu_read_unlock();
-       }
-
-       /* change mm; only needs to be done once even if threadgroup */
-       to = cs->mems_allowed;
+       /*
+        * Change mm, possibly for multiple threads in a threadgroup. This is
+        * expensive and may sleep.
+        */
+       cpuset_attach_nodemask_from = oldcs->mems_allowed;
+       cpuset_attach_nodemask_to = cs->mems_allowed;
        mm = get_task_mm(tsk);
        if (mm) {
-               mpol_rebind_mm(mm, &to);
+               mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
                if (is_memory_migrate(cs))
-                       cpuset_migrate_mm(mm, &oldcs->mems_allowed, &to);
+                       cpuset_migrate_mm(mm, &cpuset_attach_nodemask_from,
+                                         &cpuset_attach_nodemask_to);
                mmput(mm);
        }
 }
@@ -1809,10 +1802,9 @@ static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
 }
 
 /*
- * post_clone() is called at the end of cgroup_clone().
- * 'cgroup' was just created automatically as a result of
- * a cgroup_clone(), and the current task is about to
- * be moved into 'cgroup'.
+ * post_clone() is called during cgroup_create() when the
+ * clone_children mount argument was specified.  The cgroup
+ * can not yet have any tasks.
  *
  * Currently we refuse to set up the cgroup - thereby
  * refusing the task to be entered, and as a result refusing
@@ -1911,6 +1903,9 @@ struct cgroup_subsys cpuset_subsys = {
        .create = cpuset_create,
        .destroy = cpuset_destroy,
        .can_attach = cpuset_can_attach,
+       .can_attach_task = cpuset_can_attach_task,
+       .pre_attach = cpuset_pre_attach,
+       .attach_task = cpuset_attach_task,
        .attach = cpuset_attach,
        .populate = cpuset_populate,
        .post_clone = cpuset_post_clone,
index e12c8af793f8083c5fcda23a65ec33086b2ca8fb..174fa84eca303ced39cc03910114d495f7b777b0 100644 (file)
@@ -1,4 +1,4 @@
-/* Task credentials management - see Documentation/credentials.txt
+/* Task credentials management - see Documentation/security/credentials.txt
  *
  * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
index 8e7e135d08177e9f99e3ecf2c8142d56a1989c85..ca406d916713696c66e2a47b60cfd5d7a615f711 100644 (file)
@@ -59,7 +59,6 @@
 #include <linux/taskstats_kern.h>
 #include <linux/random.h>
 #include <linux/tty.h>
-#include <linux/proc_fs.h>
 #include <linux/blkdev.h>
 #include <linux/fs_struct.h>
 #include <linux/magic.h>
@@ -597,6 +596,57 @@ void mmput(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(mmput);
 
+/*
+ * We added or removed a vma mapping the executable. The vmas are only mapped
+ * during exec and are not mapped with the mmap system call.
+ * Callers must hold down_write() on the mm's mmap_sem while calling these.
+ */
+void added_exe_file_vma(struct mm_struct *mm)
+{
+       mm->num_exe_file_vmas++;
+}
+
+void removed_exe_file_vma(struct mm_struct *mm)
+{
+       mm->num_exe_file_vmas--;
+       if ((mm->num_exe_file_vmas == 0) && mm->exe_file){
+               fput(mm->exe_file);
+               mm->exe_file = NULL;
+       }
+
+}
+
+void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
+{
+       if (new_exe_file)
+               get_file(new_exe_file);
+       if (mm->exe_file)
+               fput(mm->exe_file);
+       mm->exe_file = new_exe_file;
+       mm->num_exe_file_vmas = 0;
+}
+
+struct file *get_mm_exe_file(struct mm_struct *mm)
+{
+       struct file *exe_file;
+
+       /* We need mmap_sem to protect against races with removal of
+        * VM_EXECUTABLE vmas */
+       down_read(&mm->mmap_sem);
+       exe_file = mm->exe_file;
+       if (exe_file)
+               get_file(exe_file);
+       up_read(&mm->mmap_sem);
+       return exe_file;
+}
+
+static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
+{
+       /* It's safe to write the exe_file pointer without exe_file_lock because
+        * this is called during fork when the task is not yet in /proc */
+       newmm->exe_file = get_mm_exe_file(oldmm);
+}
+
 /**
  * get_task_mm - acquire a reference to the task's mm
  *
@@ -957,6 +1007,10 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        tty_audit_fork(sig);
        sched_autogroup_fork(sig);
 
+#ifdef CONFIG_CGROUPS
+       init_rwsem(&sig->threadgroup_fork_lock);
+#endif
+
        sig->oom_adj = current->signal->oom_adj;
        sig->oom_score_adj = current->signal->oom_score_adj;
        sig->oom_score_adj_min = current->signal->oom_score_adj_min;
@@ -1138,6 +1192,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        monotonic_to_bootbased(&p->real_start_time);
        p->io_context = NULL;
        p->audit_context = NULL;
+       if (clone_flags & CLONE_THREAD)
+               threadgroup_fork_read_lock(current);
        cgroup_fork(p);
 #ifdef CONFIG_NUMA
        p->mempolicy = mpol_dup(p->mempolicy);
@@ -1223,12 +1279,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        if (clone_flags & CLONE_THREAD)
                p->tgid = current->tgid;
 
-       if (current->nsproxy != p->nsproxy) {
-               retval = ns_cgroup_clone(p, pid);
-               if (retval)
-                       goto bad_fork_free_pid;
-       }
-
        p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
        /*
         * Clear TID on mm_release()?
@@ -1342,6 +1392,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        write_unlock_irq(&tasklist_lock);
        proc_fork_connector(p);
        cgroup_post_fork(p);
+       if (clone_flags & CLONE_THREAD)
+               threadgroup_fork_read_unlock(current);
        perf_event_fork(p);
        return p;
 
@@ -1380,6 +1432,8 @@ bad_fork_cleanup_policy:
        mpol_put(p->mempolicy);
 bad_fork_cleanup_cgroup:
 #endif
+       if (clone_flags & CLONE_THREAD)
+               threadgroup_fork_read_unlock(current);
        cgroup_exit(p, cgroup_callbacks_done);
        delayacct_tsk_free(p);
        module_put(task_thread_info(p)->exec_domain->module);
index 64e3df6ab1efdd32b9ea8088b4b565d536548b0c..4bd4faa6323ac14d327addcf1012c9318c51f623 100644 (file)
@@ -352,6 +352,7 @@ void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
 #ifdef CONFIG_SMP
        remove_proc_entry("smp_affinity", desc->dir);
        remove_proc_entry("affinity_hint", desc->dir);
+       remove_proc_entry("smp_affinity_list", desc->dir);
        remove_proc_entry("node", desc->dir);
 #endif
        remove_proc_entry("spurious", desc->dir);
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c
deleted file mode 100644 (file)
index 2c98ad9..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * ns_cgroup.c - namespace cgroup subsystem
- *
- * Copyright 2006, 2007 IBM Corp
- */
-
-#include <linux/module.h>
-#include <linux/cgroup.h>
-#include <linux/fs.h>
-#include <linux/proc_fs.h>
-#include <linux/slab.h>
-#include <linux/nsproxy.h>
-
-struct ns_cgroup {
-       struct cgroup_subsys_state css;
-};
-
-struct cgroup_subsys ns_subsys;
-
-static inline struct ns_cgroup *cgroup_to_ns(
-               struct cgroup *cgroup)
-{
-       return container_of(cgroup_subsys_state(cgroup, ns_subsys_id),
-                           struct ns_cgroup, css);
-}
-
-int ns_cgroup_clone(struct task_struct *task, struct pid *pid)
-{
-       char name[PROC_NUMBUF];
-
-       snprintf(name, PROC_NUMBUF, "%d", pid_vnr(pid));
-       return cgroup_clone(task, &ns_subsys, name);
-}
-
-/*
- * Rules:
- *   1. you can only enter a cgroup which is a descendant of your current
- *     cgroup
- *   2. you can only place another process into a cgroup if
- *     a. you have CAP_SYS_ADMIN
- *     b. your cgroup is an ancestor of task's destination cgroup
- *       (hence either you are in the same cgroup as task, or in an
- *        ancestor cgroup thereof)
- */
-static int ns_can_attach(struct cgroup_subsys *ss, struct cgroup *new_cgroup,
-                        struct task_struct *task, bool threadgroup)
-{
-       if (current != task) {
-               if (!capable(CAP_SYS_ADMIN))
-                       return -EPERM;
-
-               if (!cgroup_is_descendant(new_cgroup, current))
-                       return -EPERM;
-       }
-
-       if (!cgroup_is_descendant(new_cgroup, task))
-               return -EPERM;
-
-       if (threadgroup) {
-               struct task_struct *c;
-               rcu_read_lock();
-               list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
-                       if (!cgroup_is_descendant(new_cgroup, c)) {
-                               rcu_read_unlock();
-                               return -EPERM;
-                       }
-               }
-               rcu_read_unlock();
-       }
-
-       return 0;
-}
-
-/*
- * Rules: you can only create a cgroup if
- *     1. you are capable(CAP_SYS_ADMIN)
- *     2. the target cgroup is a descendant of your own cgroup
- */
-static struct cgroup_subsys_state *ns_create(struct cgroup_subsys *ss,
-                                               struct cgroup *cgroup)
-{
-       struct ns_cgroup *ns_cgroup;
-
-       if (!capable(CAP_SYS_ADMIN))
-               return ERR_PTR(-EPERM);
-       if (!cgroup_is_descendant(cgroup, current))
-               return ERR_PTR(-EPERM);
-       if (test_bit(CGRP_CLONE_CHILDREN, &cgroup->flags)) {
-               printk("ns_cgroup can't be created with parent "
-                      "'clone_children' set.\n");
-               return ERR_PTR(-EINVAL);
-       }
-
-       printk_once("ns_cgroup deprecated: consider using the "
-                   "'clone_children' flag without the ns_cgroup.\n");
-
-       ns_cgroup = kzalloc(sizeof(*ns_cgroup), GFP_KERNEL);
-       if (!ns_cgroup)
-               return ERR_PTR(-ENOMEM);
-       return &ns_cgroup->css;
-}
-
-static void ns_destroy(struct cgroup_subsys *ss,
-                       struct cgroup *cgroup)
-{
-       struct ns_cgroup *ns_cgroup;
-
-       ns_cgroup = cgroup_to_ns(cgroup);
-       kfree(ns_cgroup);
-}
-
-struct cgroup_subsys ns_subsys = {
-       .name = "ns",
-       .can_attach = ns_can_attach,
-       .create = ns_create,
-       .destroy  = ns_destroy,
-       .subsys_id = ns_subsys_id,
-};
index 5424e37673ed0fc5df510c5a03db31e50fba5dae..d6a00f3de15d59b3e1bcb8ebcc32373e447df180 100644 (file)
@@ -201,10 +201,6 @@ int unshare_nsproxy_namespaces(unsigned long unshare_flags,
                goto out;
        }
 
-       err = ns_cgroup_clone(current, task_pid(current));
-       if (err)
-               put_nsproxy(*new_nsp);
-
 out:
        return err;
 }
index beb184689af9a1f225d3ab874a92c5690276c500..fd8d1e035df977e70b6148b3709a2b9cc18f42d2 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/string.h>
 #include <linux/platform_device.h>
 #include <linux/init.h>
+#include <linux/kernel.h>
 
 #include <linux/uaccess.h>
 
@@ -404,24 +405,36 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
                size_t count, loff_t *f_pos)
 {
        s32 value;
-       int x;
-       char ascii_value[11];
        struct pm_qos_request_list *pm_qos_req;
 
        if (count == sizeof(s32)) {
                if (copy_from_user(&value, buf, sizeof(s32)))
                        return -EFAULT;
-       } else if (count == 11) { /* len('0x12345678/0') */
-               if (copy_from_user(ascii_value, buf, 11))
+       } else if (count <= 11) { /* ASCII perhaps? */
+               char ascii_value[11];
+               unsigned long int ulval;
+               int ret;
+
+               if (copy_from_user(ascii_value, buf, count))
                        return -EFAULT;
-               if (strlen(ascii_value) != 10)
-                       return -EINVAL;
-               x = sscanf(ascii_value, "%x", &value);
-               if (x != 1)
+
+               if (count > 10) {
+                       if (ascii_value[10] == '\n')
+                               ascii_value[10] = '\0';
+                       else
+                               return -EINVAL;
+               } else {
+                       ascii_value[count] = '\0';
+               }
+               ret = strict_strtoul(ascii_value, 16, &ulval);
+               if (ret) {
+                       pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret);
                        return -EINVAL;
-               pr_debug("%s, %d, 0x%x\n", ascii_value, x, value);
-       } else
+               }
+               value = (s32)lower_32_bits(ulval);
+       } else {
                return -EINVAL;
+       }
 
        pm_qos_req = filp->private_data;
        pm_qos_update_request(pm_qos_req, value);
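
For reference, the consumer of this parser is the pm_qos misc device interface: userspace opens the device node, writes either a raw s32 or (now) a short ASCII hex string such as "0x0000000a", and keeps the file descriptor open for as long as the constraint should hold. A small userspace sketch against the usual /dev/cpu_dma_latency node:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static int example_request_low_latency(int32_t usec)
{
        int fd = open("/dev/cpu_dma_latency", O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, &usec, sizeof(usec)) != sizeof(usec)) {
                close(fd);
                return -1;
        }
        return fd;      /* request stays active until this fd is closed */
}
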
index f9bec56d88258992b61e8f9fb3306dd010e652e1..8f7b1db1ece1b3273f9eebbc128d7c884390c638 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/gfp.h>
 #include <linux/syscore_ops.h>
 #include <scsi/scsi_scan.h>
-#include <asm/suspend.h>
 
 #include "power.h"
 
@@ -55,10 +54,9 @@ static int hibernation_mode = HIBERNATION_SHUTDOWN;
 static const struct platform_hibernation_ops *hibernation_ops;
 
 /**
- * hibernation_set_ops - set the global hibernate operations
- * @ops: the hibernation operations to use in subsequent hibernation transitions
+ * hibernation_set_ops - Set the global hibernate operations.
+ * @ops: Hibernation operations to use in subsequent hibernation transitions.
  */
-
 void hibernation_set_ops(const struct platform_hibernation_ops *ops)
 {
        if (ops && !(ops->begin && ops->end &&  ops->pre_snapshot
@@ -115,10 +113,9 @@ static int hibernation_test(int level) { return 0; }
 #endif /* !CONFIG_PM_DEBUG */
 
 /**
- *     platform_begin - tell the platform driver that we're starting
- *     hibernation
+ * platform_begin - Call platform to start hibernation.
+ * @platform_mode: Whether or not to use the platform driver.
  */
-
 static int platform_begin(int platform_mode)
 {
        return (platform_mode && hibernation_ops) ?
@@ -126,10 +123,9 @@ static int platform_begin(int platform_mode)
 }
 
 /**
- *     platform_end - tell the platform driver that we've entered the
- *     working state
+ * platform_end - Call platform to finish transition to the working state.
+ * @platform_mode: Whether or not to use the platform driver.
  */
-
 static void platform_end(int platform_mode)
 {
        if (platform_mode && hibernation_ops)
@@ -137,8 +133,11 @@ static void platform_end(int platform_mode)
 }
 
 /**
- *     platform_pre_snapshot - prepare the machine for hibernation using the
- *     platform driver if so configured and return an error code if it fails
+ * platform_pre_snapshot - Call platform to prepare the machine for hibernation.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Use the platform driver to prepare the system for creating a hibernate image,
+ * if so configured, and return an error code if that fails.
  */
 
 static int platform_pre_snapshot(int platform_mode)
@@ -148,10 +147,14 @@ static int platform_pre_snapshot(int platform_mode)
 }
 
 /**
- *     platform_leave - prepare the machine for switching to the normal mode
- *     of operation using the platform driver (called with interrupts disabled)
+ * platform_leave - Call platform to prepare a transition to the working state.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Use the platform driver to prepare the machine for switching to the normal
+ * mode of operation.
+ *
+ * This routine is called on one CPU with interrupts disabled.
  */
-
 static void platform_leave(int platform_mode)
 {
        if (platform_mode && hibernation_ops)
@@ -159,10 +162,14 @@ static void platform_leave(int platform_mode)
 }
 
 /**
- *     platform_finish - switch the machine to the normal mode of operation
- *     using the platform driver (must be called after platform_prepare())
+ * platform_finish - Call platform to switch the system to the working state.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Use the platform driver to switch the machine to the normal mode of
+ * operation.
+ *
+ * This routine must be called after platform_prepare().
  */
-
 static void platform_finish(int platform_mode)
 {
        if (platform_mode && hibernation_ops)
@@ -170,11 +177,15 @@ static void platform_finish(int platform_mode)
 }
 
 /**
- *     platform_pre_restore - prepare the platform for the restoration from a
- *     hibernation image.  If the restore fails after this function has been
- *     called, platform_restore_cleanup() must be called.
+ * platform_pre_restore - Prepare for hibernate image restoration.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Use the platform driver to prepare the system for resume from a hibernation
+ * image.
+ *
+ * If the restore fails after this function has been called,
+ * platform_restore_cleanup() must be called.
  */
-
 static int platform_pre_restore(int platform_mode)
 {
        return (platform_mode && hibernation_ops) ?
@@ -182,12 +193,16 @@ static int platform_pre_restore(int platform_mode)
 }
 
 /**
- *     platform_restore_cleanup - switch the platform to the normal mode of
- *     operation after a failing restore.  If platform_pre_restore() has been
- *     called before the failing restore, this function must be called too,
- *     regardless of the result of platform_pre_restore().
+ * platform_restore_cleanup - Switch to the working state after failing restore.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Use the platform driver to switch the system to the normal mode of operation
+ * after a failing restore.
+ *
+ * If platform_pre_restore() has been called before the failing restore, this
+ * function must be called too, regardless of the result of
+ * platform_pre_restore().
  */
-
 static void platform_restore_cleanup(int platform_mode)
 {
        if (platform_mode && hibernation_ops)
@@ -195,10 +210,9 @@ static void platform_restore_cleanup(int platform_mode)
 }
 
 /**
- *     platform_recover - recover the platform from a failure to suspend
- *     devices.
+ * platform_recover - Recover from a failure to suspend devices.
+ * @platform_mode: Whether or not to use the platform driver.
  */
-
 static void platform_recover(int platform_mode)
 {
        if (platform_mode && hibernation_ops && hibernation_ops->recover)
@@ -206,13 +220,12 @@ static void platform_recover(int platform_mode)
 }
 
 /**
- *     swsusp_show_speed - print the time elapsed between two events.
- *     @start: Starting event.
- *     @stop: Final event.
- *     @nr_pages -     number of pages processed between @start and @stop
- *     @msg -          introductory message to print
+ * swsusp_show_speed - Print time elapsed between two events during hibernation.
+ * @start: Starting event.
+ * @stop: Final event.
+ * @nr_pages: Number of memory pages processed between @start and @stop.
+ * @msg: Additional diagnostic message to print.
  */
-
 void swsusp_show_speed(struct timeval *start, struct timeval *stop,
                        unsigned nr_pages, char *msg)
 {
@@ -235,25 +248,18 @@ void swsusp_show_speed(struct timeval *start, struct timeval *stop,
 }
 
 /**
- *     create_image - freeze devices that need to be frozen with interrupts
- *     off, create the hibernation image and thaw those devices.  Control
- *     reappears in this routine after a restore.
+ * create_image - Create a hibernation image.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Execute device drivers' .freeze_noirq() callbacks, create a hibernation image
+ * and execute the drivers' .thaw_noirq() callbacks.
+ *
+ * Control reappears in this routine after the subsequent restore.
  */
-
 static int create_image(int platform_mode)
 {
        int error;
 
-       error = arch_prepare_suspend();
-       if (error)
-               return error;
-
-       /* At this point, dpm_suspend_start() has been called, but *not*
-        * dpm_suspend_noirq(). We *must* call dpm_suspend_noirq() now.
-        * Otherwise, drivers for some devices (e.g. interrupt controllers)
-        * become desynchronized with the actual state of the hardware
-        * at resume time, and evil weirdness ensues.
-        */
        error = dpm_suspend_noirq(PMSG_FREEZE);
        if (error) {
                printk(KERN_ERR "PM: Some devices failed to power down, "
@@ -297,9 +303,6 @@ static int create_image(int platform_mode)
 
  Power_up:
        syscore_resume();
-       /* NOTE:  dpm_resume_noirq() is just a resume() for devices
-        * that suspended with irqs off ... no overall powerup.
-        */
 
  Enable_irqs:
        local_irq_enable();
@@ -317,14 +320,11 @@ static int create_image(int platform_mode)
 }
 
 /**
- *     hibernation_snapshot - quiesce devices and create the hibernation
- *     snapshot image.
- *     @platform_mode - if set, use the platform driver, if available, to
- *                      prepare the platform firmware for the power transition.
+ * hibernation_snapshot - Quiesce devices and create a hibernation image.
+ * @platform_mode: If set, use platform driver to prepare for the transition.
  *
- *     Must be called with pm_mutex held
+ * This routine must be called with pm_mutex held.
  */
-
 int hibernation_snapshot(int platform_mode)
 {
        pm_message_t msg = PMSG_RECOVER;
@@ -384,13 +384,14 @@ int hibernation_snapshot(int platform_mode)
 }
 
 /**
- *     resume_target_kernel - prepare devices that need to be suspended with
- *     interrupts off, restore the contents of highmem that have not been
- *     restored yet from the image and run the low level code that will restore
- *     the remaining contents of memory and switch to the just restored target
- *     kernel.
+ * resume_target_kernel - Restore system state from a hibernation image.
+ * @platform_mode: Whether or not to use the platform driver.
+ *
+ * Execute device drivers' .freeze_noirq() callbacks, restore the contents of
+ * highmem that have not been restored yet from the image and run the low-level
+ * code that will restore the remaining contents of memory and switch to the
+ * just restored target kernel.
  */
-
 static int resume_target_kernel(bool platform_mode)
 {
        int error;
@@ -416,24 +417,26 @@ static int resume_target_kernel(bool platform_mode)
        if (error)
                goto Enable_irqs;
 
-       /* We'll ignore saved state, but this gets preempt count (etc) right */
        save_processor_state();
        error = restore_highmem();
        if (!error) {
                error = swsusp_arch_resume();
                /*
                 * The code below is only ever reached in case of a failure.
-                * Otherwise execution continues at place where
-                * swsusp_arch_suspend() was called
+                * Otherwise, execution continues at the place where
+                * swsusp_arch_suspend() was called.
                 */
                BUG_ON(!error);
-               /* This call to restore_highmem() undos the previous one */
+               /*
+                * This call to restore_highmem() reverts the changes made by
+                * the previous one.
+                */
                restore_highmem();
        }
        /*
         * The only reason why swsusp_arch_resume() can fail is memory being
         * very tight, so we have to free it as soon as we can to avoid
-        * subsequent failures
+        * subsequent failures.
         */
        swsusp_free();
        restore_processor_state();
@@ -456,14 +459,12 @@ static int resume_target_kernel(bool platform_mode)
 }
 
 /**
- *     hibernation_restore - quiesce devices and restore the hibernation
- *     snapshot image.  If successful, control returns in hibernation_snaphot()
- *     @platform_mode - if set, use the platform driver, if available, to
- *                      prepare the platform firmware for the transition.
+ * hibernation_restore - Quiesce devices and restore from a hibernation image.
+ * @platform_mode: If set, use platform driver to prepare for the transition.
  *
- *     Must be called with pm_mutex held
+ * This routine must be called with pm_mutex held.  If it is successful, control
+ * reappears in the restored target kernel in hibernation_snapshot().
  */
-
 int hibernation_restore(int platform_mode)
 {
        int error;
@@ -483,10 +484,8 @@ int hibernation_restore(int platform_mode)
 }
 
 /**
- *     hibernation_platform_enter - enter the hibernation state using the
- *     platform driver (if available)
+ * hibernation_platform_enter - Power off the system using the platform driver.
  */
-
 int hibernation_platform_enter(void)
 {
        int error;
@@ -557,12 +556,12 @@ int hibernation_platform_enter(void)
 }
 
 /**
- *     power_down - Shut the machine down for hibernation.
+ * power_down - Shut the machine down for hibernation.
  *
- *     Use the platform driver, if configured so; otherwise try
- *     to power off or reboot.
+ * Use the platform driver, if configured, to put the system into the sleep
+ * state corresponding to hibernation, or try to power it off or reboot,
+ * depending on the value of hibernation_mode.
  */
-
 static void power_down(void)
 {
        switch (hibernation_mode) {
@@ -599,9 +598,8 @@ static int prepare_processes(void)
 }
 
 /**
- *     hibernate - The granpappy of the built-in hibernation management
+ * hibernate - Carry out system hibernation, including saving the image.
  */
-
 int hibernate(void)
 {
        int error;
@@ -679,17 +677,20 @@ int hibernate(void)
 
 
 /**
- *     software_resume - Resume from a saved image.
+ * software_resume - Resume from a saved hibernation image.
  *
- *     Called as a late_initcall (so all devices are discovered and
- *     initialized), we call swsusp to see if we have a saved image or not.
- *     If so, we quiesce devices, the restore the saved image. We will
- *     return above (in hibernate() ) if everything goes well.
- *     Otherwise, we fail gracefully and return to the normally
- *     scheduled program.
+ * This routine is called as a late initcall, when all devices have been
+ * discovered and initialized already.
  *
+ * The image reading code is called to see if there is a hibernation image
+ * available for reading.  If that is the case, devices are quiesced and the
+ * contents of memory are restored from the saved image.
+ *
+ * If this is successful, control reappears in the restored target kernel in
+ * hibernation_snapshot() which returns to hibernate().  Otherwise, the routine
+ * attempts to recover gracefully and make the kernel return to the normal mode
+ * of operation.
  */
-
 static int software_resume(void)
 {
        int error;
@@ -819,21 +820,17 @@ static const char * const hibernation_modes[] = {
        [HIBERNATION_TESTPROC]  = "testproc",
 };
 
-/**
- *     disk - Control hibernation mode
- *
- *     Suspend-to-disk can be handled in several ways. We have a few options
- *     for putting the system to sleep - using the platform driver (e.g. ACPI
- *     or other hibernation_ops), powering off the system or rebooting the
- *     system (for testing) as well as the two test modes.
+/*
+ * /sys/power/disk - Control hibernation mode.
  *
- *     The system can support 'platform', and that is known a priori (and
- *     encoded by the presence of hibernation_ops). However, the user may
- *     choose 'shutdown' or 'reboot' as alternatives, as well as one fo the
- *     test modes, 'test' or 'testproc'.
+ * Hibernation can be handled in several ways.  There are a few different ways
+ * to put the system into the sleep state: using the platform driver (e.g. ACPI
+ * or other hibernation_ops), powering it off or rebooting it (for testing
+ * mostly), or using one of the two available test modes.
  *
- *     show() will display what the mode is currently set to.
- *     store() will accept one of
+ * The sysfs file /sys/power/disk provides an interface for selecting the
+ * hibernation mode to use.  Reading from this file causes the available modes
+ * to be printed.  There are 5 modes that can be supported:
  *
  *     'platform'
  *     'shutdown'
@@ -841,8 +838,14 @@ static const char * const hibernation_modes[] = {
  *     'test'
  *     'testproc'
  *
- *     It will only change to 'platform' if the system
- *     supports it (as determined by having hibernation_ops).
+ * If a platform hibernation driver is in use, 'platform' will be supported
+ * and will be used by default.  Otherwise, 'shutdown' will be used by default.
+ * The selected option (i.e. the one corresponding to the current value of
+ * hibernation_mode) is enclosed in square brackets.
+ *
+ * To select a given hibernation mode it is necessary to write the mode's
+ * string representation (as returned by reading from /sys/power/disk) back
+ * into /sys/power/disk.
  */
 
 static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
@@ -875,7 +878,6 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
        return buf-start;
 }
 
-
 static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
                          const char *buf, size_t n)
 {
index 14c9f87b9fc95d8eeb545b7fe29cccc171c9df47..961b389fe52fec5324910d5b85f6eb3b6749a43d 100644 (file)
@@ -303,14 +303,12 @@ static void profile_discard_flip_buffers(void)
        mutex_unlock(&profile_flip_mutex);
 }
 
-void profile_hits(int type, void *__pc, unsigned int nr_hits)
+static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
 {
        unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
        int i, j, cpu;
        struct profile_hit *hits;
 
-       if (prof_on != type || !prof_buffer)
-               return;
        pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
        i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
        secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
@@ -417,16 +415,20 @@ out_free:
 #define profile_discard_flip_buffers() do { } while (0)
 #define profile_cpu_callback           NULL
 
-void profile_hits(int type, void *__pc, unsigned int nr_hits)
+static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
 {
        unsigned long pc;
-
-       if (prof_on != type || !prof_buffer)
-               return;
        pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
        atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
 }
 #endif /* !CONFIG_SMP */
+
+void profile_hits(int type, void *__pc, unsigned int nr_hits)
+{
+       if (prof_on != type || !prof_buffer)
+               return;
+       do_profile_hits(type, __pc, nr_hits);
+}
 EXPORT_SYMBOL_GPL(profile_hits);
 
 void profile_tick(int type)
index 2d12893b8b0f0788dacb310f988cefd7d6d15440..5e43e9dc65d1c197aa9b085a4ccf546551570177 100644 (file)
@@ -8764,42 +8764,10 @@ cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
        return 0;
 }
 
-static int
-cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-                     struct task_struct *tsk, bool threadgroup)
-{
-       int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
-       if (retval)
-               return retval;
-       if (threadgroup) {
-               struct task_struct *c;
-               rcu_read_lock();
-               list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
-                       retval = cpu_cgroup_can_attach_task(cgrp, c);
-                       if (retval) {
-                               rcu_read_unlock();
-                               return retval;
-                       }
-               }
-               rcu_read_unlock();
-       }
-       return 0;
-}
-
 static void
-cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-                 struct cgroup *old_cont, struct task_struct *tsk,
-                 bool threadgroup)
+cpu_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
        sched_move_task(tsk);
-       if (threadgroup) {
-               struct task_struct *c;
-               rcu_read_lock();
-               list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
-                       sched_move_task(c);
-               }
-               rcu_read_unlock();
-       }
 }
 
 static void
@@ -8887,8 +8855,8 @@ struct cgroup_subsys cpu_cgroup_subsys = {
        .name           = "cpu",
        .create         = cpu_cgroup_create,
        .destroy        = cpu_cgroup_destroy,
-       .can_attach     = cpu_cgroup_can_attach,
-       .attach         = cpu_cgroup_attach,
+       .can_attach_task = cpu_cgroup_can_attach_task,
+       .attach_task    = cpu_cgroup_attach_task,
        .exit           = cpu_cgroup_exit,
        .populate       = cpu_cgroup_populate,
        .subsys_id      = cpu_cgroup_subsys_id,
index 9c10e38fc609b86605549eba4ce02b1b1cb09de1..830181cc7a83b3a7fa5d6898d6b83461a118fe92 100644 (file)
@@ -19,16 +19,6 @@ config RATIONAL
 config GENERIC_FIND_FIRST_BIT
        bool
 
-config GENERIC_FIND_NEXT_BIT
-       bool
-
-config GENERIC_FIND_BIT_LE
-       bool
-
-config GENERIC_FIND_LAST_BIT
-       bool
-       default y
-
 config CRC_CCITT
        tristate "CRC-CCITT functions"
        help
index 4b49a249064b5f747ebcec353c610536237e604a..6b597fdb1898c9a132dde2a343e9ec8fa2a5f1bf 100644 (file)
@@ -12,7 +12,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
         idr.o int_sqrt.o extable.o prio_tree.o \
         sha1.o irq_regs.o reciprocal_div.o argv_split.o \
         proportions.o prio_heap.o ratelimit.o show_mem.o \
-        is_single_threaded.o plist.o decompress.o
+        is_single_threaded.o plist.o decompress.o find_next_bit.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -22,7 +22,7 @@ lib-y += kobject.o kref.o klist.o
 obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
         bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
         string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \
-        bsearch.o
+        bsearch.o find_last_bit.o
 obj-y += kstrtox.o
 obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o
 
@@ -39,10 +39,6 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
-lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
-lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
-lib-$(CONFIG_GENERIC_FIND_BIT_LE) += find_next_bit.o
-obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
 
 CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
index 5d202e36bdd8b9e2d3e36d61d4733f11e2c4f147..d903959ad69598732ce47eba3cc17afbd45b0087 100644 (file)
@@ -15,6 +15,8 @@
 #include <asm/types.h>
 #include <asm/byteorder.h>
 
+#ifndef find_last_bit
+
 unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
 {
        unsigned long words;
@@ -43,3 +45,5 @@ found:
        return size;
 }
 EXPORT_SYMBOL(find_last_bit);
+
+#endif
index b0a8767282bf674fe980f17b5d192103e5ffc4f5..4bd75a73ba0041c0e4607e4c292c10f37ff343a2 100644 (file)
@@ -16,7 +16,7 @@
 
 #define BITOP_WORD(nr)         ((nr) / BITS_PER_LONG)
 
-#ifdef CONFIG_GENERIC_FIND_NEXT_BIT
+#ifndef find_next_bit
 /*
  * Find the next set bit in a memory region.
  */
@@ -59,7 +59,9 @@ found_middle:
        return result + __ffs(tmp);
 }
 EXPORT_SYMBOL(find_next_bit);
+#endif
 
+#ifndef find_next_zero_bit
 /*
  * This implementation of find_{first,next}_zero_bit was stolen from
  * Linus' asm-alpha/bitops.h.
@@ -103,9 +105,9 @@ found_middle:
        return result + ffz(tmp);
 }
 EXPORT_SYMBOL(find_next_zero_bit);
-#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
+#endif
 
-#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
+#ifndef find_first_bit
 /*
  * Find the first set bit in a memory region.
  */
@@ -131,7 +133,9 @@ found:
        return result + __ffs(tmp);
 }
 EXPORT_SYMBOL(find_first_bit);
+#endif
 
+#ifndef find_first_zero_bit
 /*
  * Find the first cleared bit in a memory region.
  */
@@ -157,10 +161,9 @@ found:
        return result + ffz(tmp);
 }
 EXPORT_SYMBOL(find_first_zero_bit);
-#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
+#endif
 
 #ifdef __BIG_ENDIAN
-#ifdef CONFIG_GENERIC_FIND_BIT_LE
 
 /* include/linux/byteorder does not support "unsigned long" type */
 static inline unsigned long ext2_swabp(const unsigned long * x)
@@ -186,6 +189,7 @@ static inline unsigned long ext2_swab(const unsigned long y)
 #endif
 }
 
+#ifndef find_next_zero_bit_le
 unsigned long find_next_zero_bit_le(const void *addr, unsigned
                long size, unsigned long offset)
 {
@@ -229,7 +233,9 @@ found_middle_swap:
        return result + ffz(ext2_swab(tmp));
 }
 EXPORT_SYMBOL(find_next_zero_bit_le);
+#endif
 
+#ifndef find_next_bit_le
 unsigned long find_next_bit_le(const void *addr, unsigned
                long size, unsigned long offset)
 {
@@ -274,6 +280,6 @@ found_middle_swap:
        return result + __ffs(ext2_swab(tmp));
 }
 EXPORT_SYMBOL(find_next_bit_le);
+#endif
 
-#endif /* CONFIG_GENERIC_FIND_BIT_LE */
 #endif /* __BIG_ENDIAN */
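These #ifndef guards replace the old CONFIG_GENERIC_FIND_* switches: an architecture that carries an optimized routine now suppresses the generic one simply by defining the corresponding name as a macro in its <asm/bitops.h>.  A hypothetical fragment (the arch_ names are illustrative only, not taken from any real port):

	/* Hypothetical <asm/bitops.h> snippet.  With the macro defined, the
	 * #ifndef find_next_bit guard above compiles the generic copy out. */
	extern unsigned long arch_find_next_bit(const unsigned long *addr,
						unsigned long size,
						unsigned long offset);
	#define find_next_bit(addr, size, offset) \
		arch_find_next_bit(addr, size, offset)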
index cab7621f98aa1130f99de9b9949851e56b4cdc4d..9b8b89458c4ce4a8995afd9448f000c6e73335a7 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/stddef.h>
 #include <linux/module.h>
+#include <linux/reciprocal_div.h>
 
 struct flex_array_part {
        char elements[FLEX_ARRAY_PART_SIZE];
@@ -70,15 +71,15 @@ static inline int elements_fit_in_base(struct flex_array *fa)
  * Element size | Objects | Objects |
  * PAGE_SIZE=4k |  32-bit |  64-bit |
  * ---------------------------------|
- *      1 bytes | 4186112 | 2093056 |
- *      2 bytes | 2093056 | 1046528 |
- *      3 bytes | 1395030 |  697515 |
- *      4 bytes | 1046528 |  523264 |
- *     32 bytes |  130816 |   65408 |
- *     33 bytes |  126728 |   63364 |
- *   2048 bytes |    2044 |    1022 |
- *   2049 bytes |    1022 |     511 |
- *       void * | 1046528 |  261632 |
+ *      1 bytes | 4177920 | 2088960 |
+ *      2 bytes | 2088960 | 1044480 |
+ *      3 bytes | 1392300 |  696150 |
+ *      4 bytes | 1044480 |  522240 |
+ *     32 bytes |  130560 |   65408 |
+ *     33 bytes |  126480 |   63240 |
+ *   2048 bytes |    2040 |    1020 |
+ *   2049 bytes |    1020 |     510 |
+ *       void * | 1044480 |  261120 |
  *
  * Since 64-bit pointers are twice the size, we lose half the
  * capacity in the base structure.  Also note that no effort is made
@@ -88,11 +89,15 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
                                        gfp_t flags)
 {
        struct flex_array *ret;
+       int elems_per_part = 0;
+       int reciprocal_elems = 0;
        int max_size = 0;
 
-       if (element_size)
-               max_size = FLEX_ARRAY_NR_BASE_PTRS *
-                          FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
+       if (element_size) {
+               elems_per_part = FLEX_ARRAY_ELEMENTS_PER_PART(element_size);
+               reciprocal_elems = reciprocal_value(elems_per_part);
+               max_size = FLEX_ARRAY_NR_BASE_PTRS * elems_per_part;
+       }
 
        /* max_size will end up 0 if element_size > PAGE_SIZE */
        if (total > max_size)
@@ -102,6 +107,8 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total,
                return NULL;
        ret->element_size = element_size;
        ret->total_nr_elements = total;
+       ret->elems_per_part = elems_per_part;
+       ret->reciprocal_elems = reciprocal_elems;
        if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO))
                memset(&ret->parts[0], FLEX_ARRAY_FREE,
                                                FLEX_ARRAY_BASE_BYTES_LEFT);
@@ -112,7 +119,7 @@ EXPORT_SYMBOL(flex_array_alloc);
 static int fa_element_to_part_nr(struct flex_array *fa,
                                        unsigned int element_nr)
 {
-       return element_nr / FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size);
+       return reciprocal_divide(element_nr, fa->reciprocal_elems);
 }
 
 /**
@@ -141,12 +148,12 @@ void flex_array_free(struct flex_array *fa)
 EXPORT_SYMBOL(flex_array_free);
 
 static unsigned int index_inside_part(struct flex_array *fa,
-                                       unsigned int element_nr)
+                                       unsigned int element_nr,
+                                       unsigned int part_nr)
 {
        unsigned int part_offset;
 
-       part_offset = element_nr %
-                               FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size);
+       part_offset = element_nr - part_nr * fa->elems_per_part;
        return part_offset * fa->element_size;
 }
 
@@ -186,7 +193,7 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags)
 int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
                        gfp_t flags)
 {
-       int part_nr;
+       int part_nr = 0;
        struct flex_array_part *part;
        void *dst;
 
@@ -202,7 +209,7 @@ int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
                if (!part)
                        return -ENOMEM;
        }
-       dst = &part->elements[index_inside_part(fa, element_nr)];
+       dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
        memcpy(dst, src, fa->element_size);
        return 0;
 }
@@ -217,7 +224,7 @@ EXPORT_SYMBOL(flex_array_put);
  */
 int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
 {
-       int part_nr;
+       int part_nr = 0;
        struct flex_array_part *part;
        void *dst;
 
@@ -233,7 +240,7 @@ int flex_array_clear(struct flex_array *fa, unsigned int element_nr)
                if (!part)
                        return -EINVAL;
        }
-       dst = &part->elements[index_inside_part(fa, element_nr)];
+       dst = &part->elements[index_inside_part(fa, element_nr, part_nr)];
        memset(dst, FLEX_ARRAY_FREE, fa->element_size);
        return 0;
 }
@@ -302,7 +309,7 @@ EXPORT_SYMBOL(flex_array_prealloc);
  */
 void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
 {
-       int part_nr;
+       int part_nr = 0;
        struct flex_array_part *part;
 
        if (!fa->element_size)
@@ -317,7 +324,7 @@ void *flex_array_get(struct flex_array *fa, unsigned int element_nr)
                if (!part)
                        return NULL;
        }
-       return &part->elements[index_inside_part(fa, element_nr)];
+       return &part->elements[index_inside_part(fa, element_nr, part_nr)];
 }
 EXPORT_SYMBOL(flex_array_get);
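fa_element_to_part_nr() above replaces the per-lookup division with reciprocal_divide(), using a value precomputed once by reciprocal_value() when the flex_array is allocated.  The underlying multiply-and-shift trick, shown as a stand-alone user-space sketch (not the kernel's reciprocal_div.h itself):

	#include <stdint.h>
	#include <stdio.h>

	/* Precompute roughly 2^32 / k; dividing by k then becomes one
	 * multiply and one shift instead of a hardware divide. */
	static uint32_t reciprocal_value(uint32_t k)
	{
		return (uint32_t)(((1ULL << 32) + k - 1) / k);
	}

	static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
	{
		return (uint32_t)(((uint64_t)a * r) >> 32);
	}

	int main(void)
	{
		uint32_t elems_per_part = 113;	/* e.g. PAGE_SIZE / element_size */
		uint32_t r = reciprocal_value(elems_per_part);
		uint32_t element_nr = 100000;

		/* both lines of output are 884 */
		printf("%u\n%u\n", element_nr / elems_per_part,
		       reciprocal_divide(element_nr, r));
		return 0;
	}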
 
index e9c0c61f2ddd0881bbb9dee37d3384dae0146ece..8ca47a5ee9c8529262c7d7e5a3224e1c2c36d27b 100644 (file)
@@ -347,3 +347,26 @@ config NEED_PER_CPU_KM
        depends on !SMP
        bool
        default y
+
+config CLEANCACHE
+       bool "Enable cleancache driver to cache clean pages if tmem is present"
+       default n
+       help
+         Cleancache can be thought of as a page-granularity victim cache
+         for clean pages that the kernel's pageframe replacement algorithm
+         (PFRA) would like to keep around, but can't since there isn't enough
+         memory.  So when the PFRA "evicts" a page, it first attempts to use
+         cleancache code to put the data contained in that page into
+         "transcendent memory", memory that is not directly accessible or
+         addressable by the kernel and is of unknown and possibly
+         time-varying size.  And when a cleancache-enabled
+         filesystem wishes to access a page in a file on disk, it first
+         checks cleancache to see if it already contains it; if it does,
+         the page is copied into the kernel and a disk access is avoided.
+         When a transcendent memory driver is available (such as zcache or
+         Xen transcendent memory), a significant I/O reduction
+         may be achieved.  When none is available, all cleancache calls
+         are reduced to a single pointer-compare-against-NULL resulting
+         in a negligible performance hit.
+
+         If unsure, say Y to enable cleancache
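On the filesystem side, opting in to the behaviour described in this help text amounts to a single call at mount time; the get/put/flush hooks then run from the generic page cache code (see the mm/filemap.c hunk further down).  A sketch for a hypothetical filesystem, assuming the cleancache_init_fs() wrapper exported through <linux/cleancache.h>:

	#include <linux/cleancache.h>

	static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
	{
		/* ... usual superblock setup ... */
		cleancache_init_fs(sb);		/* obtain a per-fs cleancache pool id */
		return 0;
	}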
index 42a8326c3e3da1f9fbedb0e0ee26d4cac47d1d85..836e4163c1bfd91c5742c9f5227fc105d79a3b76 100644 (file)
@@ -49,3 +49,4 @@ obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
 obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o
 obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
 obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
+obj-$(CONFIG_CLEANCACHE) += cleancache.o
diff --git a/mm/cleancache.c b/mm/cleancache.c
new file mode 100644 (file)
index 0000000..bcaae4c
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+ * Cleancache frontend
+ *
+ * This code provides the generic "frontend" layer to call a matching
+ * "backend" driver implementation of cleancache.  See
+ * Documentation/vm/cleancache.txt for more information.
+ *
+ * Copyright (C) 2009-2010 Oracle Corp. All rights reserved.
+ * Author: Dan Magenheimer
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/exportfs.h>
+#include <linux/mm.h>
+#include <linux/cleancache.h>
+
+/*
+ * This global enablement flag may be read thousands of times per second
+ * by cleancache_get/put/flush even on systems where cleancache_ops
+ * is not claimed (e.g. cleancache is config'ed on but remains
+ * disabled), so it is preferred to the slower alternative: a function
+ * call that checks a non-global.
+ */
+int cleancache_enabled;
+EXPORT_SYMBOL(cleancache_enabled);
+
+/*
+ * cleancache_ops is set by cleancache_ops_register to contain the pointers
+ * to the cleancache "backend" implementation functions.
+ */
+static struct cleancache_ops cleancache_ops;
+
+/* useful stats available in /sys/kernel/mm/cleancache */
+static unsigned long cleancache_succ_gets;
+static unsigned long cleancache_failed_gets;
+static unsigned long cleancache_puts;
+static unsigned long cleancache_flushes;
+
+/*
+ * register operations for cleancache, returning the previous ops, thus
+ * allowing detection of multiple backends and possible nesting
+ */
+struct cleancache_ops cleancache_register_ops(struct cleancache_ops *ops)
+{
+       struct cleancache_ops old = cleancache_ops;
+
+       cleancache_ops = *ops;
+       cleancache_enabled = 1;
+       return old;
+}
+EXPORT_SYMBOL(cleancache_register_ops);
+
+/* Called by a cleancache-enabled filesystem at time of mount */
+void __cleancache_init_fs(struct super_block *sb)
+{
+       sb->cleancache_poolid = (*cleancache_ops.init_fs)(PAGE_SIZE);
+}
+EXPORT_SYMBOL(__cleancache_init_fs);
+
+/* Called by a cleancache-enabled clustered filesystem at time of mount */
+void __cleancache_init_shared_fs(char *uuid, struct super_block *sb)
+{
+       sb->cleancache_poolid =
+               (*cleancache_ops.init_shared_fs)(uuid, PAGE_SIZE);
+}
+EXPORT_SYMBOL(__cleancache_init_shared_fs);
+
+/*
+ * If the filesystem uses exportable filehandles, use the filehandle as
+ * the key, else use the inode number.
+ */
+static int cleancache_get_key(struct inode *inode,
+                             struct cleancache_filekey *key)
+{
+       int (*fhfn)(struct dentry *, __u32 *fh, int *, int);
+       int len = 0, maxlen = CLEANCACHE_KEY_MAX;
+       struct super_block *sb = inode->i_sb;
+
+       key->u.ino = inode->i_ino;
+       if (sb->s_export_op != NULL) {
+               fhfn = sb->s_export_op->encode_fh;
+               if  (fhfn) {
+                       struct dentry d;
+                       d.d_inode = inode;
+                       len = (*fhfn)(&d, &key->u.fh[0], &maxlen, 0);
+                       if (len <= 0 || len == 255)
+                               return -1;
+                       if (maxlen > CLEANCACHE_KEY_MAX)
+                               return -1;
+               }
+       }
+       return 0;
+}
+
+/*
+ * "Get" data from cleancache associated with the poolid/inode/index
+ * that were specified when the data was put to cleancache and, if
+ * successful, use it to fill the specified page with data and return 0.
+ * If the get fails, the pageframe is left unchanged and -1 is returned.
+ * Page must be locked by caller.
+ */
+int __cleancache_get_page(struct page *page)
+{
+       int ret = -1;
+       int pool_id;
+       struct cleancache_filekey key = { .u.key = { 0 } };
+
+       VM_BUG_ON(!PageLocked(page));
+       pool_id = page->mapping->host->i_sb->cleancache_poolid;
+       if (pool_id < 0)
+               goto out;
+
+       if (cleancache_get_key(page->mapping->host, &key) < 0)
+               goto out;
+
+       ret = (*cleancache_ops.get_page)(pool_id, key, page->index, page);
+       if (ret == 0)
+               cleancache_succ_gets++;
+       else
+               cleancache_failed_gets++;
+out:
+       return ret;
+}
+EXPORT_SYMBOL(__cleancache_get_page);
+
+/*
+ * "Put" data from a page to cleancache and associate it with the
+ * (previously-obtained per-filesystem) poolid and the page's
+ * inode and page index.  Page must be locked.  Note that a put_page
+ * always "succeeds", though a subsequent get_page may succeed or fail.
+ */
+void __cleancache_put_page(struct page *page)
+{
+       int pool_id;
+       struct cleancache_filekey key = { .u.key = { 0 } };
+
+       VM_BUG_ON(!PageLocked(page));
+       pool_id = page->mapping->host->i_sb->cleancache_poolid;
+       if (pool_id >= 0 &&
+             cleancache_get_key(page->mapping->host, &key) >= 0) {
+               (*cleancache_ops.put_page)(pool_id, key, page->index, page);
+               cleancache_puts++;
+       }
+}
+EXPORT_SYMBOL(__cleancache_put_page);
+
+/*
+ * Flush any data from cleancache associated with the poolid and the
+ * page's inode and page index so that a subsequent "get" will fail.
+ */
+void __cleancache_flush_page(struct address_space *mapping, struct page *page)
+{
+       /* careful... page->mapping is NULL sometimes when this is called */
+       int pool_id = mapping->host->i_sb->cleancache_poolid;
+       struct cleancache_filekey key = { .u.key = { 0 } };
+
+       if (pool_id >= 0) {
+               VM_BUG_ON(!PageLocked(page));
+               if (cleancache_get_key(mapping->host, &key) >= 0) {
+                       (*cleancache_ops.flush_page)(pool_id, key, page->index);
+                       cleancache_flushes++;
+               }
+       }
+}
+EXPORT_SYMBOL(__cleancache_flush_page);
+
+/*
+ * Flush all data from cleancache associated with the poolid and the
+ * mapping's inode so that all subsequent gets to this poolid/inode
+ * will fail.
+ */
+void __cleancache_flush_inode(struct address_space *mapping)
+{
+       int pool_id = mapping->host->i_sb->cleancache_poolid;
+       struct cleancache_filekey key = { .u.key = { 0 } };
+
+       if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0)
+               (*cleancache_ops.flush_inode)(pool_id, key);
+}
+EXPORT_SYMBOL(__cleancache_flush_inode);
+
+/*
+ * Called by any cleancache-enabled filesystem at time of unmount;
+ * note that pool_id is surrendered and may be returned by a subsequent
+ * cleancache_init_fs or cleancache_init_shared_fs
+ */
+void __cleancache_flush_fs(struct super_block *sb)
+{
+       if (sb->cleancache_poolid >= 0) {
+               int old_poolid = sb->cleancache_poolid;
+               sb->cleancache_poolid = -1;
+               (*cleancache_ops.flush_fs)(old_poolid);
+       }
+}
+EXPORT_SYMBOL(__cleancache_flush_fs);
+
+#ifdef CONFIG_SYSFS
+
+/* see Documentation/ABI/xxx/sysfs-kernel-mm-cleancache */
+
+#define CLEANCACHE_SYSFS_RO(_name) \
+       static ssize_t cleancache_##_name##_show(struct kobject *kobj, \
+                               struct kobj_attribute *attr, char *buf) \
+       { \
+               return sprintf(buf, "%lu\n", cleancache_##_name); \
+       } \
+       static struct kobj_attribute cleancache_##_name##_attr = { \
+               .attr = { .name = __stringify(_name), .mode = 0444 }, \
+               .show = cleancache_##_name##_show, \
+       }
+
+CLEANCACHE_SYSFS_RO(succ_gets);
+CLEANCACHE_SYSFS_RO(failed_gets);
+CLEANCACHE_SYSFS_RO(puts);
+CLEANCACHE_SYSFS_RO(flushes);
+
+static struct attribute *cleancache_attrs[] = {
+       &cleancache_succ_gets_attr.attr,
+       &cleancache_failed_gets_attr.attr,
+       &cleancache_puts_attr.attr,
+       &cleancache_flushes_attr.attr,
+       NULL,
+};
+
+static struct attribute_group cleancache_attr_group = {
+       .attrs = cleancache_attrs,
+       .name = "cleancache",
+};
+
+#endif /* CONFIG_SYSFS */
+
+static int __init init_cleancache(void)
+{
+#ifdef CONFIG_SYSFS
+       int err;
+
+       err = sysfs_create_group(mm_kobj, &cleancache_attr_group);
+#endif /* CONFIG_SYSFS */
+       return 0;
+}
+module_init(init_cleancache)
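A backend (zcache, Xen tmem and the like) claims this frontend by filling in a struct cleancache_ops and handing it to cleancache_register_ops().  A skeletal, illustrative sketch; the callback signatures below are inferred from the call sites in mm/cleancache.c above, and every name prefixed example_ is hypothetical:

	#include <linux/cleancache.h>
	#include <linux/module.h>

	static int example_init_fs(size_t pagesize)
	{
		return 0;			/* hand out a pool id, or negative on failure */
	}

	static int example_init_shared_fs(char *uuid, size_t pagesize)
	{
		return 0;
	}

	static int example_get_page(int pool_id, struct cleancache_filekey key,
				    pgoff_t index, struct page *page)
	{
		return -1;			/* nothing cached for this key yet */
	}

	static void example_put_page(int pool_id, struct cleancache_filekey key,
				     pgoff_t index, struct page *page)
	{
		/* copy the page into backend-private storage */
	}

	static void example_flush_page(int pool_id, struct cleancache_filekey key,
				       pgoff_t index)
	{
	}

	static void example_flush_inode(int pool_id, struct cleancache_filekey key)
	{
	}

	static void example_flush_fs(int pool_id)
	{
	}

	static struct cleancache_ops example_ops = {
		.init_fs	= example_init_fs,
		.init_shared_fs	= example_init_shared_fs,
		.get_page	= example_get_page,
		.put_page	= example_put_page,
		.flush_page	= example_flush_page,
		.flush_inode	= example_flush_inode,
		.flush_fs	= example_flush_fs,
	};

	static int __init example_backend_init(void)
	{
		cleancache_register_ops(&example_ops);	/* previous ops are returned */
		return 0;
	}
	module_init(example_backend_init);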
index 68e782b3d3de0c0f1da75750dd57374cf1a03e89..bcdc393b6580a27820ff5252e5cb3ab154117a4f 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
 #include <linux/memcontrol.h>
 #include <linux/mm_inline.h> /* for page_is_file_cache() */
+#include <linux/cleancache.h>
 #include "internal.h"
 
 /*
@@ -118,6 +119,16 @@ void __delete_from_page_cache(struct page *page)
 {
        struct address_space *mapping = page->mapping;
 
+       /*
+        * if we're uptodate, put the page into the cleancache; otherwise
+        * invalidate any existing cleancache entries.  We can't leave
+        * stale data around in the cleancache once our page is gone
+        */
+       if (PageUptodate(page) && PageMappedToDisk(page))
+               cleancache_put_page(page);
+       else
+               cleancache_flush_page(mapping, page);
+
        radix_tree_delete(&mapping->page_tree, page->index);
        page->mapping = NULL;
        mapping->nrpages--;
@@ -1650,6 +1661,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
                /* No page in the page cache at all */
                do_sync_mmap_readahead(vma, ra, file, offset);
                count_vm_event(PGMAJFAULT);
+               mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
                ret = VM_FAULT_MAJOR;
 retry_find:
                page = find_get_page(mapping, offset);
index 7f4123056e0603ce3bae84c3fea05a7444160dc8..b8e0e2d468afb276816174bcb76a46667d74831b 100644 (file)
@@ -224,7 +224,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
                /*
                 * drop PG_Mlocked flag for over-mapped range
                 */
-               unsigned int saved_flags = vma->vm_flags;
+               vm_flags_t saved_flags = vma->vm_flags;
                munlock_vma_pages_range(vma, start, start + size);
                vma->vm_flags = saved_flags;
        }
index 5fd68b95c671bafd2687ab696e04a0ae73a17012..f33bb319b73f093ce38e828a265b24d9c18e0525 100644 (file)
@@ -2833,7 +2833,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
 int hugetlb_reserve_pages(struct inode *inode,
                                        long from, long to,
                                        struct vm_area_struct *vma,
-                                       int acctflag)
+                                       vm_flags_t vm_flags)
 {
        long ret, chg;
        struct hstate *h = hstate_inode(inode);
@@ -2843,7 +2843,7 @@ int hugetlb_reserve_pages(struct inode *inode,
         * attempt will be made for VM_NORESERVE to allocate a page
         * and filesystem quota without using reserves
         */
-       if (acctflag & VM_NORESERVE)
+       if (vm_flags & VM_NORESERVE)
                return 0;
 
        /*
index d5fd3dcd3f2e1f38278a73d8131cd80d7f6adc6d..bd9052a5d3ad74aa2db023c953f0b498e5ab7dd0 100644 (file)
@@ -94,6 +94,8 @@ enum mem_cgroup_events_index {
        MEM_CGROUP_EVENTS_PGPGIN,       /* # of pages paged in */
        MEM_CGROUP_EVENTS_PGPGOUT,      /* # of pages paged out */
        MEM_CGROUP_EVENTS_COUNT,        /* # of pages paged in/out */
+       MEM_CGROUP_EVENTS_PGFAULT,      /* # of page-faults */
+       MEM_CGROUP_EVENTS_PGMAJFAULT,   /* # of major page-faults */
        MEM_CGROUP_EVENTS_NSTATS,
 };
 /*
@@ -231,6 +233,11 @@ struct mem_cgroup {
         * reclaimed from.
         */
        int last_scanned_child;
+       int last_scanned_node;
+#if MAX_NUMNODES > 1
+       nodemask_t      scan_nodes;
+       unsigned long   next_scan_node_update;
+#endif
        /*
         * Should the accounting and control be hierarchical, per subtree?
         */
@@ -585,6 +592,16 @@ static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
        this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
 }
 
+void mem_cgroup_pgfault(struct mem_cgroup *mem, int val)
+{
+       this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val);
+}
+
+void mem_cgroup_pgmajfault(struct mem_cgroup *mem, int val)
+{
+       this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val);
+}
+
 static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem,
                                            enum mem_cgroup_events_index idx)
 {
@@ -624,18 +641,27 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
        preempt_enable();
 }
 
+static unsigned long
+mem_cgroup_get_zonestat_node(struct mem_cgroup *mem, int nid, enum lru_list idx)
+{
+       struct mem_cgroup_per_zone *mz;
+       u64 total = 0;
+       int zid;
+
+       for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+               mz = mem_cgroup_zoneinfo(mem, nid, zid);
+               total += MEM_CGROUP_ZSTAT(mz, idx);
+       }
+       return total;
+}
 static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
                                        enum lru_list idx)
 {
-       int nid, zid;
-       struct mem_cgroup_per_zone *mz;
+       int nid;
        u64 total = 0;
 
        for_each_online_node(nid)
-               for (zid = 0; zid < MAX_NR_ZONES; zid++) {
-                       mz = mem_cgroup_zoneinfo(mem, nid, zid);
-                       total += MEM_CGROUP_ZSTAT(mz, idx);
-               }
+               total += mem_cgroup_get_zonestat_node(mem, nid, idx);
        return total;
 }
 
@@ -813,6 +839,33 @@ static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
        return (mem == root_mem_cgroup);
 }
 
+void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
+{
+       struct mem_cgroup *mem;
+
+       if (!mm)
+               return;
+
+       rcu_read_lock();
+       mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+       if (unlikely(!mem))
+               goto out;
+
+       switch (idx) {
+       case PGMAJFAULT:
+               mem_cgroup_pgmajfault(mem, 1);
+               break;
+       case PGFAULT:
+               mem_cgroup_pgfault(mem, 1);
+               break;
+       default:
+               BUG();
+       }
+out:
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL(mem_cgroup_count_vm_event);
+
 /*
  * Following LRU functions are allowed to be used without PCG_LOCK.
  * Operations are called by routine of global LRU independently from memcg.
@@ -1064,9 +1117,9 @@ int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
        return (active > inactive);
 }
 
-unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
-                                      struct zone *zone,
-                                      enum lru_list lru)
+unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
+                                               struct zone *zone,
+                                               enum lru_list lru)
 {
        int nid = zone_to_nid(zone);
        int zid = zone_idx(zone);
@@ -1075,6 +1128,93 @@ unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
        return MEM_CGROUP_ZSTAT(mz, lru);
 }
 
+#ifdef CONFIG_NUMA
+static unsigned long mem_cgroup_node_nr_file_lru_pages(struct mem_cgroup *memcg,
+                                                       int nid)
+{
+       unsigned long ret;
+
+       ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_FILE) +
+               mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_FILE);
+
+       return ret;
+}
+
+static unsigned long mem_cgroup_nr_file_lru_pages(struct mem_cgroup *memcg)
+{
+       u64 total = 0;
+       int nid;
+
+       for_each_node_state(nid, N_HIGH_MEMORY)
+               total += mem_cgroup_node_nr_file_lru_pages(memcg, nid);
+
+       return total;
+}
+
+static unsigned long mem_cgroup_node_nr_anon_lru_pages(struct mem_cgroup *memcg,
+                                                       int nid)
+{
+       unsigned long ret;
+
+       ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_ANON) +
+               mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_ANON);
+
+       return ret;
+}
+
+static unsigned long mem_cgroup_nr_anon_lru_pages(struct mem_cgroup *memcg)
+{
+       u64 total = 0;
+       int nid;
+
+       for_each_node_state(nid, N_HIGH_MEMORY)
+               total += mem_cgroup_node_nr_anon_lru_pages(memcg, nid);
+
+       return total;
+}
+
+static unsigned long
+mem_cgroup_node_nr_unevictable_lru_pages(struct mem_cgroup *memcg, int nid)
+{
+       return mem_cgroup_get_zonestat_node(memcg, nid, LRU_UNEVICTABLE);
+}
+
+static unsigned long
+mem_cgroup_nr_unevictable_lru_pages(struct mem_cgroup *memcg)
+{
+       u64 total = 0;
+       int nid;
+
+       for_each_node_state(nid, N_HIGH_MEMORY)
+               total += mem_cgroup_node_nr_unevictable_lru_pages(memcg, nid);
+
+       return total;
+}
+
+static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
+                                                       int nid)
+{
+       enum lru_list l;
+       u64 total = 0;
+
+       for_each_lru(l)
+               total += mem_cgroup_get_zonestat_node(memcg, nid, l);
+
+       return total;
+}
+
+static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg)
+{
+       u64 total = 0;
+       int nid;
+
+       for_each_node_state(nid, N_HIGH_MEMORY)
+               total += mem_cgroup_node_nr_lru_pages(memcg, nid);
+
+       return total;
+}
+#endif /* CONFIG_NUMA */
+
 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
                                                      struct zone *zone)
 {
@@ -1418,6 +1558,81 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
        return ret;
 }
 
+#if MAX_NUMNODES > 1
+
+/*
+ * Always updating the nodemask is not very good - even if we have an empty
+ * list or the wrong list here, we can start from some node and traverse all
+ * nodes based on the zonelist. So update the list loosely once per 10 secs.
+ *
+ */
+static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
+{
+       int nid;
+
+       if (time_after(mem->next_scan_node_update, jiffies))
+               return;
+
+       mem->next_scan_node_update = jiffies + 10*HZ;
+       /* make a nodemask where this memcg uses memory from */
+       mem->scan_nodes = node_states[N_HIGH_MEMORY];
+
+       for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) {
+
+               if (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_FILE) ||
+                   mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_FILE))
+                       continue;
+
+               if (total_swap_pages &&
+                   (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_ANON) ||
+                    mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_ANON)))
+                       continue;
+               node_clear(nid, mem->scan_nodes);
+       }
+}
+
+/*
+ * Selecting a node where we start reclaim from. Because what we need is just
+ * reducing the usage counter, starting from anywhere is OK. Considering
+ * memory reclaim from current node, there are pros. and cons.
+ *
+ * Freeing memory from current node means freeing memory from a node which
+ * we'll use or we've used. So, it may make LRU bad. And if several threads
+ * hit limits, it will see a contention on a node. But freeing from remote
+ * node means more costs for memory reclaim because of memory latency.
+ *
+ * Now, we use round-robin. Better algorithm is welcomed.
+ */
+int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
+{
+       int node;
+
+       mem_cgroup_may_update_nodemask(mem);
+       node = mem->last_scanned_node;
+
+       node = next_node(node, mem->scan_nodes);
+       if (node == MAX_NUMNODES)
+               node = first_node(mem->scan_nodes);
+       /*
+        * We call this when we hit limit, not when pages are added to LRU.
+        * No LRU may hold pages because all pages are UNEVICTABLE or
+        * memcg is too small and all pages are not on LRU. In that case,
+        * we use the current node.
+        */
+       if (unlikely(node == MAX_NUMNODES))
+               node = numa_node_id();
+
+       mem->last_scanned_node = node;
+       return node;
+}
+
+#else
+int mem_cgroup_select_victim_node(struct mem_cgroup *mem)
+{
+       return 0;
+}
+#endif
+
 /*
  * Scan the hierarchy if needed to reclaim memory. We remember the last child
  * we reclaimed from, so that we don't end up penalizing one child extensively
@@ -1433,7 +1648,8 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem)
 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
                                                struct zone *zone,
                                                gfp_t gfp_mask,
-                                               unsigned long reclaim_options)
+                                               unsigned long reclaim_options,
+                                               unsigned long *total_scanned)
 {
        struct mem_cgroup *victim;
        int ret, total = 0;
@@ -1442,6 +1658,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
        bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
        bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
        unsigned long excess;
+       unsigned long nr_scanned;
 
        excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
 
@@ -1484,10 +1701,12 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
                        continue;
                }
                /* we use swappiness of local cgroup */
-               if (check_soft)
+               if (check_soft) {
                        ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
-                               noswap, get_swappiness(victim), zone);
-               else
+                               noswap, get_swappiness(victim), zone,
+                               &nr_scanned);
+                       *total_scanned += nr_scanned;
+               } else
                        ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
                                                noswap, get_swappiness(victim));
                css_put(&victim->css);
@@ -1503,7 +1722,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
                        if (!res_counter_soft_limit_excess(&root_mem->res))
                                return total;
                } else if (mem_cgroup_margin(root_mem))
-                       return 1 + total;
+                       return total;
        }
        return total;
 }
@@ -1928,7 +2147,7 @@ static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
                return CHARGE_WOULDBLOCK;
 
        ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
-                                             gfp_mask, flags);
+                                             gfp_mask, flags, NULL);
        if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
                return CHARGE_RETRY;
        /*
@@ -3211,7 +3430,8 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
                        break;
 
                mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
-                                               MEM_CGROUP_RECLAIM_SHRINK);
+                                               MEM_CGROUP_RECLAIM_SHRINK,
+                                               NULL);
                curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
                /* Usage is reduced ? */
                if (curusage >= oldusage)
@@ -3271,7 +3491,8 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
 
                mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
                                                MEM_CGROUP_RECLAIM_NOSWAP |
-                                               MEM_CGROUP_RECLAIM_SHRINK);
+                                               MEM_CGROUP_RECLAIM_SHRINK,
+                                               NULL);
                curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
                /* Usage is reduced ? */
                if (curusage >= oldusage)
@@ -3285,7 +3506,8 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
 }
 
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
-                                           gfp_t gfp_mask)
+                                           gfp_t gfp_mask,
+                                           unsigned long *total_scanned)
 {
        unsigned long nr_reclaimed = 0;
        struct mem_cgroup_per_zone *mz, *next_mz = NULL;
@@ -3293,6 +3515,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
        int loop = 0;
        struct mem_cgroup_tree_per_zone *mctz;
        unsigned long long excess;
+       unsigned long nr_scanned;
 
        if (order > 0)
                return 0;
@@ -3311,10 +3534,13 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                if (!mz)
                        break;
 
+               nr_scanned = 0;
                reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
                                                gfp_mask,
-                                               MEM_CGROUP_RECLAIM_SOFT);
+                                               MEM_CGROUP_RECLAIM_SOFT,
+                                               &nr_scanned);
                nr_reclaimed += reclaimed;
+               *total_scanned += nr_scanned;
                spin_lock(&mctz->lock);
 
                /*
@@ -3337,10 +3563,9 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                 */
                                next_mz =
                                __mem_cgroup_largest_soft_limit_node(mctz);
-                               if (next_mz == mz) {
+                               if (next_mz == mz)
                                        css_put(&next_mz->mem->css);
-                                       next_mz = NULL;
-                               } else /* next_mz == NULL or other memcg */
+                               else /* next_mz == NULL or other memcg */
                                        break;
                        } while (1);
                }
@@ -3772,6 +3997,8 @@ enum {
        MCS_PGPGIN,
        MCS_PGPGOUT,
        MCS_SWAP,
+       MCS_PGFAULT,
+       MCS_PGMAJFAULT,
        MCS_INACTIVE_ANON,
        MCS_ACTIVE_ANON,
        MCS_INACTIVE_FILE,
@@ -3794,6 +4021,8 @@ struct {
        {"pgpgin", "total_pgpgin"},
        {"pgpgout", "total_pgpgout"},
        {"swap", "total_swap"},
+       {"pgfault", "total_pgfault"},
+       {"pgmajfault", "total_pgmajfault"},
        {"inactive_anon", "total_inactive_anon"},
        {"active_anon", "total_active_anon"},
        {"inactive_file", "total_inactive_file"},
@@ -3822,6 +4051,10 @@ mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
                val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
                s->stat[MCS_SWAP] += val * PAGE_SIZE;
        }
+       val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGFAULT);
+       s->stat[MCS_PGFAULT] += val;
+       val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGMAJFAULT);
+       s->stat[MCS_PGMAJFAULT] += val;
 
        /* per zone stat */
        val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
@@ -3845,6 +4078,51 @@ mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
                mem_cgroup_get_local_stat(iter, s);
 }
 
+#ifdef CONFIG_NUMA
+static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
+{
+       int nid;
+       unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
+       unsigned long node_nr;
+       struct cgroup *cont = m->private;
+       struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
+
+       total_nr = mem_cgroup_nr_lru_pages(mem_cont);
+       seq_printf(m, "total=%lu", total_nr);
+       for_each_node_state(nid, N_HIGH_MEMORY) {
+               node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid);
+               seq_printf(m, " N%d=%lu", nid, node_nr);
+       }
+       seq_putc(m, '\n');
+
+       file_nr = mem_cgroup_nr_file_lru_pages(mem_cont);
+       seq_printf(m, "file=%lu", file_nr);
+       for_each_node_state(nid, N_HIGH_MEMORY) {
+               node_nr = mem_cgroup_node_nr_file_lru_pages(mem_cont, nid);
+               seq_printf(m, " N%d=%lu", nid, node_nr);
+       }
+       seq_putc(m, '\n');
+
+       anon_nr = mem_cgroup_nr_anon_lru_pages(mem_cont);
+       seq_printf(m, "anon=%lu", anon_nr);
+       for_each_node_state(nid, N_HIGH_MEMORY) {
+               node_nr = mem_cgroup_node_nr_anon_lru_pages(mem_cont, nid);
+               seq_printf(m, " N%d=%lu", nid, node_nr);
+       }
+       seq_putc(m, '\n');
+
+       unevictable_nr = mem_cgroup_nr_unevictable_lru_pages(mem_cont);
+       seq_printf(m, "unevictable=%lu", unevictable_nr);
+       for_each_node_state(nid, N_HIGH_MEMORY) {
+               node_nr = mem_cgroup_node_nr_unevictable_lru_pages(mem_cont,
+                                                                       nid);
+               seq_printf(m, " N%d=%lu", nid, node_nr);
+       }
+       seq_putc(m, '\n');
+       return 0;
+}
+#endif /* CONFIG_NUMA */
+
 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
                                 struct cgroup_map_cb *cb)
 {
@@ -3855,6 +4133,7 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
        memset(&mystat, 0, sizeof(mystat));
        mem_cgroup_get_local_stat(mem_cont, &mystat);
 
+
        for (i = 0; i < NR_MCS_STAT; i++) {
                if (i == MCS_SWAP && !do_swap_account)
                        continue;
@@ -4278,6 +4557,22 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
        return 0;
 }
 
+#ifdef CONFIG_NUMA
+static const struct file_operations mem_control_numa_stat_file_operations = {
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+
+static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
+{
+       struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
+
+       file->f_op = &mem_control_numa_stat_file_operations;
+       return single_open(file, mem_control_numa_stat_show, cont);
+}
+#endif /* CONFIG_NUMA */
+
 static struct cftype mem_cgroup_files[] = {
        {
                .name = "usage_in_bytes",
@@ -4341,6 +4636,12 @@ static struct cftype mem_cgroup_files[] = {
                .unregister_event = mem_cgroup_oom_unregister_event,
                .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
        },
+#ifdef CONFIG_NUMA
+       {
+               .name = "numa_stat",
+               .open = mem_control_numa_stat_open,
+       },
+#endif
 };
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@ -4596,6 +4897,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
                res_counter_init(&mem->memsw, NULL);
        }
        mem->last_scanned_child = 0;
+       mem->last_scanned_node = MAX_NUMNODES;
        INIT_LIST_HEAD(&mem->oom_notify);
 
        if (parent)
@@ -4953,8 +5255,7 @@ static void mem_cgroup_clear_mc(void)
 
 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
                                struct cgroup *cgroup,
-                               struct task_struct *p,
-                               bool threadgroup)
+                               struct task_struct *p)
 {
        int ret = 0;
        struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);
@@ -4993,8 +5294,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
 
 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
                                struct cgroup *cgroup,
-                               struct task_struct *p,
-                               bool threadgroup)
+                               struct task_struct *p)
 {
        mem_cgroup_clear_mc();
 }
@@ -5112,8 +5412,7 @@ retry:
 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
                                struct cgroup *cont,
                                struct cgroup *old_cont,
-                               struct task_struct *p,
-                               bool threadgroup)
+                               struct task_struct *p)
 {
        struct mm_struct *mm;
 
@@ -5131,22 +5430,19 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
 #else  /* !CONFIG_MMU */
 static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
                                struct cgroup *cgroup,
-                               struct task_struct *p,
-                               bool threadgroup)
+                               struct task_struct *p)
 {
        return 0;
 }
 static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
                                struct cgroup *cgroup,
-                               struct task_struct *p,
-                               bool threadgroup)
+                               struct task_struct *p)
 {
 }
 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
                                struct cgroup *cont,
                                struct cgroup *old_cont,
-                               struct task_struct *p,
-                               bool threadgroup)
+                               struct task_struct *p)
 {
 }
 #endif
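
The memcontrol.c hunks above add a per-memcg memory.numa_stat file; its seq_printf calls emit one line per LRU class ("total", "file", "anon", "unevictable"), each followed by per-node counts. A minimal userspace sketch for reading it follows; the cgroup mount path is an assumption, not something fixed by the patch.

#include <stdio.h>

int main(void)
{
	/* hypothetical mount point; depends on where the memory cgroup is mounted */
	FILE *f = fopen("/cgroup/memory/memory.numa_stat", "r");
	char line[1024];

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* lines look like: total=<pages> N0=<pages> N1=<pages> ... */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
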
index b73f677f0bb1dac6dc18e8b14765da0167cc3f11..6953d3926e01e8ad1370304064ec2a0e94ad1ebb 100644 (file)
@@ -730,7 +730,7 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
        add_taint(TAINT_BAD_PAGE);
 }
 
-static inline int is_cow_mapping(unsigned int flags)
+static inline int is_cow_mapping(vm_flags_t flags)
 {
        return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 }
@@ -2874,6 +2874,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                /* Had to read the page from swap area: Major fault */
                ret = VM_FAULT_MAJOR;
                count_vm_event(PGMAJFAULT);
+               mem_cgroup_count_vm_event(mm, PGMAJFAULT);
        } else if (PageHWPoison(page)) {
                /*
                 * hwpoisoned dirty swapcache pages are kept for killing
@@ -3413,6 +3414,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        __set_current_state(TASK_RUNNING);
 
        count_vm_event(PGFAULT);
+       mem_cgroup_count_vm_event(mm, PGFAULT);
 
        /* do counter updates before entering really critical section. */
        check_sync_rss_stat(current);
index 516b2c2ddd5a55b244a89af33eeb3b6f3b88d410..048260c4e02ea7ced2692cc05fba67c9798c9145 100644 (file)
@@ -307,13 +307,13 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
  * For vmas that pass the filters, merge/split as appropriate.
  */
 static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
-       unsigned long start, unsigned long end, unsigned int newflags)
+       unsigned long start, unsigned long end, vm_flags_t newflags)
 {
        struct mm_struct *mm = vma->vm_mm;
        pgoff_t pgoff;
        int nr_pages;
        int ret = 0;
-       int lock = newflags & VM_LOCKED;
+       int lock = !!(newflags & VM_LOCKED);
 
        if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
            is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
@@ -385,7 +385,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
                prev = vma;
 
        for (nstart = start ; ; ) {
-               unsigned int newflags;
+               vm_flags_t newflags;
 
                /* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
 
@@ -524,7 +524,7 @@ static int do_mlockall(int flags)
                goto out;
 
        for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
-               unsigned int newflags;
+               vm_flags_t newflags;
 
                newflags = vma->vm_flags | VM_LOCKED;
                if (!(flags & MCL_CURRENT))
index ac2631b7477f7b773431df0375ee59075b6e1e20..bbdc9af5e1177108894d881126761fa71ef9cada 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -960,7 +960,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 {
        struct mm_struct * mm = current->mm;
        struct inode *inode;
-       unsigned int vm_flags;
+       vm_flags_t vm_flags;
        int error;
        unsigned long reqprot = prot;
 
@@ -1165,7 +1165,7 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
  */
 int vma_wants_writenotify(struct vm_area_struct *vma)
 {
-       unsigned int vm_flags = vma->vm_flags;
+       vm_flags_t vm_flags = vma->vm_flags;
 
        /* If it was private or non-writable, the write bit is already clear */
        if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
@@ -1193,7 +1193,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
  * We account for memory if it's a private writeable mapping,
  * not hugepages and VM_NORESERVE wasn't set.
  */
-static inline int accountable_mapping(struct file *file, unsigned int vm_flags)
+static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
 {
        /*
         * hugetlb has its own accounting separate from the core VM
@@ -1207,7 +1207,7 @@ static inline int accountable_mapping(struct file *file, unsigned int vm_flags)
 
 unsigned long mmap_region(struct file *file, unsigned long addr,
                          unsigned long len, unsigned long flags,
-                         unsigned int vm_flags, unsigned long pgoff)
+                         vm_flags_t vm_flags, unsigned long pgoff)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev;
index 2a00f17c3bf453efa0460d1a9b92171b4f92e2b7..a4e1db3f19812e8fc900e4b22fd778e0f959324e 100644 (file)
@@ -4323,10 +4323,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                zone->zone_pgdat = pgdat;
 
                zone_pcp_init(zone);
-               for_each_lru(l) {
+               for_each_lru(l)
                        INIT_LIST_HEAD(&zone->lru[l].list);
-                       zone->reclaim_stat.nr_saved_scan[l] = 0;
-               }
                zone->reclaim_stat.recent_rotated[0] = 0;
                zone->reclaim_stat.recent_rotated[1] = 0;
                zone->reclaim_stat.recent_scanned[0] = 0;
index 2daadc322ba61678cfc9120e3b65c551b5b96d2d..74ccff61d1bea8d6e258170280ea5b20523dad23 100644 (file)
@@ -130,7 +130,7 @@ struct page *lookup_cgroup_page(struct page_cgroup *pc)
        return page;
 }
 
-static void *__init_refok alloc_page_cgroup(size_t size, int nid)
+static void *__meminit alloc_page_cgroup(size_t size, int nid)
 {
        void *addr = NULL;
 
@@ -162,7 +162,7 @@ static void free_page_cgroup(void *addr)
 }
 #endif
 
-static int __init_refok init_section_page_cgroup(unsigned long pfn)
+static int __meminit init_section_page_cgroup(unsigned long pfn)
 {
        struct page_cgroup *base, *pc;
        struct mem_section *section;
@@ -475,7 +475,7 @@ int swap_cgroup_swapon(int type, unsigned long max_pages)
        if (!do_swap_account)
                return 0;
 
-       length = ((max_pages/SC_PER_PAGE) + 1);
+       length = DIV_ROUND_UP(max_pages, SC_PER_PAGE);
        array_size = length * sizeof(void *);
 
        array = vmalloc(array_size);
@@ -492,8 +492,8 @@ int swap_cgroup_swapon(int type, unsigned long max_pages)
                /* memory shortage */
                ctrl->map = NULL;
                ctrl->length = 0;
-               vfree(array);
                mutex_unlock(&swap_cgroup_mutex);
+               vfree(array);
                goto nomem;
        }
        mutex_unlock(&swap_cgroup_mutex);
@@ -508,7 +508,8 @@ nomem:
 
 void swap_cgroup_swapoff(int type)
 {
-       int i;
+       struct page **map;
+       unsigned long i, length;
        struct swap_cgroup_ctrl *ctrl;
 
        if (!do_swap_account)
@@ -516,17 +517,20 @@ void swap_cgroup_swapoff(int type)
 
        mutex_lock(&swap_cgroup_mutex);
        ctrl = &swap_cgroup_ctrl[type];
-       if (ctrl->map) {
-               for (i = 0; i < ctrl->length; i++) {
-                       struct page *page = ctrl->map[i];
+       map = ctrl->map;
+       length = ctrl->length;
+       ctrl->map = NULL;
+       ctrl->length = 0;
+       mutex_unlock(&swap_cgroup_mutex);
+
+       if (map) {
+               for (i = 0; i < length; i++) {
+                       struct page *page = map[i];
                        if (page)
                                __free_page(page);
                }
-               vfree(ctrl->map);
-               ctrl->map = NULL;
-               ctrl->length = 0;
+               vfree(map);
        }
-       mutex_unlock(&swap_cgroup_mutex);
 }
 
 #endif
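
The swap_cgroup_swapon() hunk above replaces the open-coded (max_pages/SC_PER_PAGE) + 1 with DIV_ROUND_UP(max_pages, SC_PER_PAGE), which stops allocating an extra page when max_pages is an exact multiple of SC_PER_PAGE. A standalone sketch of the arithmetic; the sc_per_page value below is a placeholder, not the kernel's actual constant.

#include <assert.h>

/* DIV_ROUND_UP as defined in include/linux/kernel.h */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long sc_per_page = 1024;	/* placeholder for SC_PER_PAGE */

	/* exact multiple: the old expression over-allocates one page */
	assert((2 * sc_per_page / sc_per_page) + 1 == 3);
	assert(DIV_ROUND_UP(2 * sc_per_page, sc_per_page) == 2);

	/* non-multiple: both give the same result */
	assert((2 * sc_per_page + 1) / sc_per_page + 1 == 3);
	assert(DIV_ROUND_UP(2 * sc_per_page + 1, sc_per_page) == 3);
	return 0;
}
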
index 69edb45a9f2830d0919a313e977d35f6a94efdd7..1acfb2687bfa144da8fa9b6b2f6c815663d44145 100644 (file)
@@ -1305,12 +1305,10 @@ repeat:
                swappage = lookup_swap_cache(swap);
                if (!swappage) {
                        shmem_swp_unmap(entry);
+                       spin_unlock(&info->lock);
                        /* here we actually do the io */
-                       if (type && !(*type & VM_FAULT_MAJOR)) {
-                               __count_vm_event(PGMAJFAULT);
+                       if (type)
                                *type |= VM_FAULT_MAJOR;
-                       }
-                       spin_unlock(&info->lock);
                        swappage = shmem_swapin(swap, gfp, info, idx);
                        if (!swappage) {
                                spin_lock(&info->lock);
@@ -1549,7 +1547,10 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
        if (error)
                return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
-
+       if (ret & VM_FAULT_MAJOR) {
+               count_vm_event(PGMAJFAULT);
+               mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+       }
        return ret | VM_FAULT_LOCKED;
 }
 
index a9566752913596152022f5d22983cb4f2cb4d2cf..3a29a6180212d7bcf25ab0d6fd97acdb16fc8f42 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/buffer_head.h> /* grr. try_to_release_page,
                                   do_invalidatepage */
+#include <linux/cleancache.h>
 #include "internal.h"
 
 
@@ -51,6 +52,7 @@ void do_invalidatepage(struct page *page, unsigned long offset)
 static inline void truncate_partial_page(struct page *page, unsigned partial)
 {
        zero_user_segment(page, partial, PAGE_CACHE_SIZE);
+       cleancache_flush_page(page->mapping, page);
        if (page_has_private(page))
                do_invalidatepage(page, partial);
 }
@@ -214,6 +216,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
        pgoff_t next;
        int i;
 
+       cleancache_flush_inode(mapping);
        if (mapping->nrpages == 0)
                return;
 
@@ -291,6 +294,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
                pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
        }
+       cleancache_flush_inode(mapping);
 }
 EXPORT_SYMBOL(truncate_inode_pages_range);
 
@@ -440,6 +444,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
        int did_range_unmap = 0;
        int wrapped = 0;
 
+       cleancache_flush_inode(mapping);
        pagevec_init(&pvec, 0);
        next = start;
        while (next <= end && !wrapped &&
@@ -498,6 +503,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                mem_cgroup_uncharge_end();
                cond_resched();
        }
+       cleancache_flush_inode(mapping);
        return ret;
 }
 EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
index b5ccf3158d82440525eecceb66e0f291e7cd2795..1d34d75366a7d6902d3dd826bdcab42848c9503c 100644 (file)
@@ -2153,10 +2153,6 @@ struct vm_struct *alloc_vm_area(size_t size)
                return NULL;
        }
 
-       /* Make sure the pagetables are constructed in process kernel
-          mappings */
-       vmalloc_sync_all();
-
        return area;
 }
 EXPORT_SYMBOL_GPL(alloc_vm_area);
index 7e0116150dc73a02b762cb8939c5db34688aa194..faa0a088f9cc83a5a4cd7d05e40924144c795c19 100644 (file)
@@ -173,7 +173,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
                                struct scan_control *sc, enum lru_list lru)
 {
        if (!scanning_global_lru(sc))
-               return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
+               return mem_cgroup_zone_nr_lru_pages(sc->mem_cgroup, zone, lru);
 
        return zone_page_state(zone, NR_LRU_BASE + lru);
 }
@@ -1717,26 +1717,6 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
        return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
 }
 
-/*
- * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
- * until we collected @swap_cluster_max pages to scan.
- */
-static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
-                                      unsigned long *nr_saved_scan)
-{
-       unsigned long nr;
-
-       *nr_saved_scan += nr_to_scan;
-       nr = *nr_saved_scan;
-
-       if (nr >= SWAP_CLUSTER_MAX)
-               *nr_saved_scan = 0;
-       else
-               nr = 0;
-
-       return nr;
-}
-
 /*
  * Determine how aggressively the anon and file LRU lists should be
  * scanned.  The relative value of each set of LRU lists is determined
@@ -1755,6 +1735,22 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
        u64 fraction[2], denominator;
        enum lru_list l;
        int noswap = 0;
+       int force_scan = 0;
+
+
+       anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
+               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+       file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
+               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+
+       if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) {
+               /* kswapd does zone balancing and needs to scan this zone */
+               if (scanning_global_lru(sc) && current_is_kswapd())
+                       force_scan = 1;
+               /* memcg may have a small limit and needs to avoid priority drops */
+               if (!scanning_global_lru(sc))
+                       force_scan = 1;
+       }
 
        /* If we have no swap space, do not bother scanning anon pages. */
        if (!sc->may_swap || (nr_swap_pages <= 0)) {
@@ -1765,11 +1761,6 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
                goto out;
        }
 
-       anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
-               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
-       file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
-               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
-
        if (scanning_global_lru(sc)) {
                free  = zone_page_state(zone, NR_FREE_PAGES);
                /* If we have very few page cache pages,
@@ -1836,8 +1827,23 @@ out:
                        scan >>= priority;
                        scan = div64_u64(scan * fraction[file], denominator);
                }
-               nr[l] = nr_scan_try_batch(scan,
-                                         &reclaim_stat->nr_saved_scan[l]);
+
+               /*
+                * If the zone or the memcg is small, nr[l] can be 0.  That
+                * means no scanning at this priority and a priority drop.
+                * Global direct reclaim can visit the next zone and tends not
+                * to have problems.  Global kswapd does zone balancing and
+                * needs to scan a small amount anyway.  With memcg, a
+                * priority drop can cause large latency, so it is better to
+                * scan a small amount.  See force_scan above.
+                */
+               if (!scan && force_scan) {
+                       if (file)
+                               scan = SWAP_CLUSTER_MAX;
+                       else if (!noswap)
+                               scan = SWAP_CLUSTER_MAX;
+               }
+               nr[l] = scan;
        }
 }
 
@@ -1977,11 +1983,14 @@ restart:
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static void shrink_zones(int priority, struct zonelist *zonelist,
+static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
        struct zoneref *z;
        struct zone *zone;
+       unsigned long nr_soft_reclaimed;
+       unsigned long nr_soft_scanned;
+       unsigned long total_scanned = 0;
 
        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                        gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -1998,8 +2007,17 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
                                continue;       /* Let kswapd poll it */
                }
 
+               nr_soft_scanned = 0;
+               nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+                                                       sc->order, sc->gfp_mask,
+                                                       &nr_soft_scanned);
+               sc->nr_reclaimed += nr_soft_reclaimed;
+               total_scanned += nr_soft_scanned;
+
                shrink_zone(priority, zone, sc);
        }
+
+       return total_scanned;
 }
 
 static bool zone_reclaimable(struct zone *zone)
@@ -2064,7 +2082,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                sc->nr_scanned = 0;
                if (!priority)
                        disable_swap_token();
-               shrink_zones(priority, zonelist, sc);
+               total_scanned += shrink_zones(priority, zonelist, sc);
                /*
                 * Don't shrink slabs when reclaiming memory from
                 * over limit cgroups
@@ -2171,9 +2189,11 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                                                gfp_t gfp_mask, bool noswap,
                                                unsigned int swappiness,
-                                               struct zone *zone)
+                                               struct zone *zone,
+                                               unsigned long *nr_scanned)
 {
        struct scan_control sc = {
+               .nr_scanned = 0,
                .nr_to_reclaim = SWAP_CLUSTER_MAX,
                .may_writepage = !laptop_mode,
                .may_unmap = 1,
@@ -2182,6 +2202,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
                .order = 0,
                .mem_cgroup = mem,
        };
+
        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
 
@@ -2200,6 +2221,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 
        trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
 
+       *nr_scanned = sc.nr_scanned;
        return sc.nr_reclaimed;
 }
 
@@ -2210,6 +2232,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 {
        struct zonelist *zonelist;
        unsigned long nr_reclaimed;
+       int nid;
        struct scan_control sc = {
                .may_writepage = !laptop_mode,
                .may_unmap = 1,
@@ -2226,7 +2249,14 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                .gfp_mask = sc.gfp_mask,
        };
 
-       zonelist = NODE_DATA(numa_node_id())->node_zonelists;
+       /*
+        * Unlike direct reclaim via alloc_pages(), memcg's reclaim does not
+        * care which node the pages come from, so the node where we start
+        * the scan does not need to be the current node.
+        */
+       nid = mem_cgroup_select_victim_node(mem_cont);
+
+       zonelist = NODE_DATA(nid)->node_zonelists;
 
        trace_mm_vmscan_memcg_reclaim_begin(0,
                                            sc.may_writepage,
@@ -2347,6 +2377,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
        int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
        unsigned long total_scanned;
        struct reclaim_state *reclaim_state = current->reclaim_state;
+       unsigned long nr_soft_reclaimed;
+       unsigned long nr_soft_scanned;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
                .may_unmap = 1,
@@ -2439,11 +2471,15 @@ loop_again:
 
                        sc.nr_scanned = 0;
 
+                       nr_soft_scanned = 0;
                        /*
                         * Call soft limit reclaim before calling shrink_zone.
-                        * For now we ignore the return value
                         */
-                       mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask);
+                       nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
+                                                       order, sc.gfp_mask,
+                                                       &nr_soft_scanned);
+                       sc.nr_reclaimed += nr_soft_reclaimed;
+                       total_scanned += nr_soft_scanned;
 
                        /*
                         * We put equal pressure on every zone, unless
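
The get_scan_count() change above only forces a SWAP_CLUSTER_MAX scan when the LRU share left after the priority shift is already tiny, i.e. when ((anon + file) >> priority) < SWAP_CLUSTER_MAX. A small standalone sketch of that trigger; the LRU size below is a made-up value for illustration.

#include <stdio.h>

#define SWAP_CLUSTER_MAX	32UL	/* as in include/linux/swap.h */

int main(void)
{
	unsigned long lru_pages = 4000;	/* hypothetical small memcg */
	int priority;

	/* walk down from DEF_PRIORITY (12), as the reclaim loop does */
	for (priority = 12; priority >= 0; priority--)
		printf("priority %2d: share=%4lu force_scan=%d\n",
		       priority, lru_pages >> priority,
		       (lru_pages >> priority) < SWAP_CLUSTER_MAX);
	return 0;
}
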
index b2274d1fd605b90cdeeb14a72cc909acd9e55001..c7a581a96894c7193ff75c48573976aad6133255 100644 (file)
@@ -46,8 +46,6 @@ int vlan_net_id __read_mostly;
 
 const char vlan_fullname[] = "802.1Q VLAN Support";
 const char vlan_version[] = DRV_VERSION;
-static const char vlan_copyright[] = "Ben Greear <greearb@candelatech.com>";
-static const char vlan_buggyright[] = "David S. Miller <davem@redhat.com>";
 
 /* End of global variables definitions. */
 
@@ -673,8 +671,7 @@ static int __init vlan_proto_init(void)
 {
        int err;
 
-       pr_info("%s v%s %s\n", vlan_fullname, vlan_version, vlan_copyright);
-       pr_info("All bugs added by %s\n", vlan_buggyright);
+       pr_info("%s v%s\n", vlan_fullname, vlan_version);
 
        err = register_pernet_subsys(&vlan_net_ops);
        if (err < 0)
index 844a7a5607e3d2ee985f80b18577ac2b6ff4be62..159c50f1c6bf672c24b0d13f8e4f52dd40a18589 100644 (file)
@@ -589,7 +589,8 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
                return -ENOMEM;
 
        /* Create the RDMA CM ID */
-       rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP);
+       rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP,
+                                    IB_QPT_RC);
        if (IS_ERR(rdma->cm_id))
                goto error;
 
index f7fa67c78766f59c0b107ece600348f401a40d57..f49da5814bc3c9828829ac2be54ed58b2fbc82a9 100644 (file)
@@ -59,6 +59,14 @@ static ssize_t show_atmaddress(struct device *cdev,
        return pos - buf;
 }
 
+static ssize_t show_atmindex(struct device *cdev,
+                            struct device_attribute *attr, char *buf)
+{
+       struct atm_dev *adev = to_atm_dev(cdev);
+
+       return sprintf(buf, "%d\n", adev->number);
+}
+
 static ssize_t show_carrier(struct device *cdev,
                            struct device_attribute *attr, char *buf)
 {
@@ -99,6 +107,7 @@ static ssize_t show_link_rate(struct device *cdev,
 
 static DEVICE_ATTR(address, S_IRUGO, show_address, NULL);
 static DEVICE_ATTR(atmaddress, S_IRUGO, show_atmaddress, NULL);
+static DEVICE_ATTR(atmindex, S_IRUGO, show_atmindex, NULL);
 static DEVICE_ATTR(carrier, S_IRUGO, show_carrier, NULL);
 static DEVICE_ATTR(type, S_IRUGO, show_type, NULL);
 static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL);
@@ -106,6 +115,7 @@ static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL);
 static struct device_attribute *atm_attrs[] = {
        &dev_attr_atmaddress,
        &dev_attr_address,
+       &dev_attr_atmindex,
        &dev_attr_carrier,
        &dev_attr_type,
        &dev_attr_link_rate,
index 25073b6ef4746cea1197e52f64bba908869d7b29..ba48daa68c1f19e19427ac96c64b161f271c3e57 100644 (file)
@@ -1171,7 +1171,7 @@ static int __init lane_module_init(void)
 #endif
 
        register_atm_ioctl(&lane_ioctl_ops);
-       pr_info("lec.c: " __DATE__ " " __TIME__ " initialized\n");
+       pr_info("lec.c: initialized\n");
        return 0;
 }
 
index 644cdf07164211b007d10e40918486497efe93c5..3ccca42e6f90ad476706770b78cce263c736f02e 100644 (file)
@@ -1482,7 +1482,7 @@ static __init int atm_mpoa_init(void)
        if (mpc_proc_init() != 0)
                pr_info("failed to initialize /proc/mpoa\n");
 
-       pr_info("mpc.c: " __DATE__ " " __TIME__ " initialized\n");
+       pr_info("mpc.c: initialized\n");
 
        return 0;
 }
index 1a92b369c8202c8706c42452950594233882ca06..2b5ca1a0054da4aec5f6a17064e1040c4a107dda 100644 (file)
@@ -1883,14 +1883,13 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
        struct xt_target *wt;
        void *dst = NULL;
        int off, pad = 0;
-       unsigned int size_kern, entry_offset, match_size = mwt->match_size;
+       unsigned int size_kern, match_size = mwt->match_size;
 
        strlcpy(name, mwt->u.name, sizeof(name));
 
        if (state->buf_kern_start)
                dst = state->buf_kern_start + state->buf_kern_offset;
 
-       entry_offset = (unsigned char *) mwt - base;
        switch (compat_mwt) {
        case EBT_COMPAT_MATCH:
                match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
@@ -1933,6 +1932,9 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
                size_kern = wt->targetsize;
                module_put(wt->me);
                break;
+
+       default:
+               return -EINVAL;
        }
 
        state->buf_kern_offset += match_size + off;
index f4265cc9c3fbee335f16f467ac7f0a197e43977c..0016f73396995e2ca21275d6bfc17ed8374e7f3c 100644 (file)
@@ -204,12 +204,11 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
 
        hlist_for_each_entry_rcu(r, n, rx_list, list) {
                char *fmt = (r->can_id & CAN_EFF_FLAG)?
-                       "   %-5s  %08X  %08x  %08x  %08x  %8ld  %s\n" :
-                       "   %-5s     %03X    %08x  %08lx  %08lx  %8ld  %s\n";
+                       "   %-5s  %08x  %08x  %pK  %pK  %8ld  %s\n" :
+                       "   %-5s     %03x    %08x  %pK  %pK  %8ld  %s\n";
 
                seq_printf(m, fmt, DNAME(dev), r->can_id, r->mask,
-                               (unsigned long)r->func, (unsigned long)r->data,
-                               r->matches, r->ident);
+                               r->func, r->data, r->matches, r->ident);
        }
 }
 
index 84e7304532e69f7ba1cc32fc47a4bdf8b7f5fc87..fd14116ad7f0626aca22412e3a02f1f0b9830d7d 100644 (file)
@@ -233,6 +233,29 @@ static int ethtool_set_feature_compat(struct net_device *dev,
        return 1;
 }
 
+static int ethtool_set_flags_compat(struct net_device *dev,
+       int (*legacy_set)(struct net_device *, u32),
+       struct ethtool_set_features_block *features, u32 mask)
+{
+       u32 value;
+
+       if (!legacy_set)
+               return 0;
+
+       if (!(features[0].valid & mask))
+               return 0;
+
+       value = dev->features & ~features[0].valid;
+       value |= features[0].requested;
+
+       features[0].valid &= ~mask;
+
+       if (legacy_set(dev, value & mask) < 0)
+               netdev_info(dev, "Legacy flags change failed\n");
+
+       return 1;
+}
+
 static int ethtool_set_features_compat(struct net_device *dev,
        struct ethtool_set_features_block *features)
 {
@@ -249,7 +272,7 @@ static int ethtool_set_features_compat(struct net_device *dev,
                features, NETIF_F_ALL_TSO);
        compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum,
                features, NETIF_F_RXCSUM);
-       compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_flags,
+       compat |= ethtool_set_flags_compat(dev, dev->ethtool_ops->set_flags,
                features, flags_dup_features);
 
        return compat;
index 0e3622f1dcb1eac6b3eea810e32806edf669d497..36f975fa87cb4975a01e4105c5cc3b0f9d905367 100644 (file)
@@ -38,6 +38,7 @@
 #include <asm/unaligned.h>
 #include <linux/filter.h>
 #include <linux/reciprocal_div.h>
+#include <linux/ratelimit.h>
 
 /* No hurry in this branch */
 static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
index a829e3f60aeb2812a9a4a92e3d5accf3ce128b77..77a65f031488b3f222e13d1f071a2d3f44218c70 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <net/ip.h>
 #include <net/sock.h>
+#include <net/net_ratelimit.h>
 
 #ifdef CONFIG_RPS
 static int rps_sock_flow_sysctl(ctl_table *table, int write,
index 2012bc797f9c32b9a9a2af81fc08768177d86377..386e263f60669a3a25be7e6d8a4361d7ce93161e 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/ratelimit.h>
 
 #include <net/sock.h>
+#include <net/net_ratelimit.h>
 
 #include <asm/byteorder.h>
 #include <asm/system.h>
index 61fac4cabc7825f591c5b94dd7520c095d9e7f3f..c14d88ad348d3365a11685a59c453d54c4db5332 100644 (file)
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(inet_csk_timer_bug_msg);
  * This struct holds the first and last local port number.
  */
 struct local_ports sysctl_local_ports __read_mostly = {
-       .lock = SEQLOCK_UNLOCKED,
+       .lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock),
        .range = { 32768, 61000 },
 };
 
index 9df4e635fb5fe5d4a0c92526ad68b845094aa39d..ce616d92cc544dd0a8e3ffb0974aff825a5939e9 100644 (file)
@@ -154,11 +154,9 @@ void __init inet_initpeers(void)
 /* Called with or without local BH being disabled. */
 static void unlink_from_unused(struct inet_peer *p)
 {
-       if (!list_empty(&p->unused)) {
-               spin_lock_bh(&unused_peers.lock);
-               list_del_init(&p->unused);
-               spin_unlock_bh(&unused_peers.lock);
-       }
+       spin_lock_bh(&unused_peers.lock);
+       list_del_init(&p->unused);
+       spin_unlock_bh(&unused_peers.lock);
 }
 
 static int addr_compare(const struct inetpeer_addr *a,
@@ -205,6 +203,20 @@ static int addr_compare(const struct inetpeer_addr *a,
        u;                                                      \
 })
 
+static bool atomic_add_unless_return(atomic_t *ptr, int a, int u, int *newv)
+{
+       int cur, old = atomic_read(ptr);
+
+       while (old != u) {
+               *newv = old + a;
+               cur = atomic_cmpxchg(ptr, old, *newv);
+               if (cur == old)
+                       return true;
+               old = cur;
+       }
+       return false;
+}
+
 /*
  * Called with rcu_read_lock()
  * Because we hold no lock against a writer, its quite possible we fall
@@ -213,7 +225,8 @@ static int addr_compare(const struct inetpeer_addr *a,
  * We exit from this function if number of links exceeds PEER_MAXDEPTH
  */
 static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
-                                   struct inet_peer_base *base)
+                                   struct inet_peer_base *base,
+                                   int *newrefcnt)
 {
        struct inet_peer *u = rcu_dereference(base->root);
        int count = 0;
@@ -226,7 +239,7 @@ static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
                         * distinction between an unused entry (refcnt=0) and
                         * a freed one.
                         */
-                       if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1)))
+                       if (!atomic_add_unless_return(&u->refcnt, 1, -1, newrefcnt))
                                u = NULL;
                        return u;
                }
@@ -465,22 +478,23 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
        struct inet_peer_base *base = family_to_base(daddr->family);
        struct inet_peer *p;
        unsigned int sequence;
-       int invalidated;
+       int invalidated, newrefcnt = 0;
 
        /* Look up for the address quickly, lockless.
         * Because of a concurrent writer, we might not find an existing entry.
         */
        rcu_read_lock();
        sequence = read_seqbegin(&base->lock);
-       p = lookup_rcu(daddr, base);
+       p = lookup_rcu(daddr, base, &newrefcnt);
        invalidated = read_seqretry(&base->lock, sequence);
        rcu_read_unlock();
 
        if (p) {
-               /* The existing node has been found.
+found:         /* The existing node has been found.
                 * Remove the entry from unused list if it was there.
                 */
-               unlink_from_unused(p);
+               if (newrefcnt == 1)
+                       unlink_from_unused(p);
                return p;
        }
 
@@ -494,11 +508,9 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
        write_seqlock_bh(&base->lock);
        p = lookup(daddr, stack, base);
        if (p != peer_avl_empty) {
-               atomic_inc(&p->refcnt);
+               newrefcnt = atomic_inc_return(&p->refcnt);
                write_sequnlock_bh(&base->lock);
-               /* Remove the entry from unused list if it was there. */
-               unlink_from_unused(p);
-               return p;
+               goto found;
        }
        p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
        if (p) {
index a15c0152495922893cdd43b605ebb936e668d40d..7f9124914b139b062c71ccc3d526433dc9109ef4 100644 (file)
@@ -54,7 +54,7 @@
 #include <asm/atomic.h>
 #include <asm/ebcdic.h>
 #include <asm/io.h>
-#include <asm/s390_ext.h>
+#include <asm/irq.h>
 #include <asm/smp.h>
 
 /*
index 72d1ac611fdc8e70124c6fca29686ffa416e9cd3..8041befc65553588eda40e4a41e6b9e440a21367 100644 (file)
@@ -815,7 +815,7 @@ ip_set_flush(struct sock *ctnl, struct sk_buff *skb,
        ip_set_id_t i;
 
        if (unlikely(protocol_failed(attr)))
-               return -EPROTO;
+               return -IPSET_ERR_PROTOCOL;
 
        if (!attr[IPSET_ATTR_SETNAME]) {
                for (i = 0; i < ip_set_max; i++)
index 6b5dd6ddaae999b7153e68506be8cd26b59ceed6..af63553fa332e06bf0562a6f1630e33e30ca889c 100644 (file)
@@ -411,25 +411,35 @@ static struct ip_vs_app ip_vs_ftp = {
 static int __net_init __ip_vs_ftp_init(struct net *net)
 {
        int i, ret;
-       struct ip_vs_app *app = &ip_vs_ftp;
+       struct ip_vs_app *app;
+       struct netns_ipvs *ipvs = net_ipvs(net);
+
+       app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL);
+       if (!app)
+               return -ENOMEM;
+       INIT_LIST_HEAD(&app->a_list);
+       INIT_LIST_HEAD(&app->incs_list);
+       ipvs->ftp_app = app;
 
        ret = register_ip_vs_app(net, app);
        if (ret)
-               return ret;
+               goto err_exit;
 
        for (i=0; i<IP_VS_APP_MAX_PORTS; i++) {
                if (!ports[i])
                        continue;
                ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]);
                if (ret)
-                       break;
+                       goto err_unreg;
                pr_info("%s: loaded support on port[%d] = %d\n",
                        app->name, i, ports[i]);
        }
+       return 0;
 
-       if (ret)
-               unregister_ip_vs_app(net, app);
-
+err_unreg:
+       unregister_ip_vs_app(net, app);
+err_exit:
+       kfree(ipvs->ftp_app);
        return ret;
 }
 /*
@@ -437,9 +447,10 @@ static int __net_init __ip_vs_ftp_init(struct net *net)
  */
 static void __ip_vs_ftp_exit(struct net *net)
 {
-       struct ip_vs_app *app = &ip_vs_ftp;
+       struct netns_ipvs *ipvs = net_ipvs(net);
 
-       unregister_ip_vs_app(net, app);
+       unregister_ip_vs_app(net, ipvs->ftp_app);
+       kfree(ipvs->ftp_app);
 }
 
 static struct pernet_operations ip_vs_ftp_ops = {
index cce19f95c62467471bcc4b752b02b1d6205b14f2..3b83086bcc3045a2231dab3d84753dd0af7bed51 100644 (file)
@@ -325,7 +325,7 @@ static int rds_ib_laddr_check(__be32 addr)
        /* Create a CMA ID and try to bind it. This catches both
         * IB and iWARP capable NICs.
         */
-       cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP);
+       cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(cm_id))
                return PTR_ERR(cm_id);
 
index ee369d201a65891b8f8e3b562bf738067665df2f..fd453dd5124bed89d16fca1a8d198275d64d7053 100644 (file)
@@ -587,7 +587,7 @@ int rds_ib_conn_connect(struct rds_connection *conn)
        /* XXX I wonder what affect the port space has */
        /* delegate cm event handler to rdma_transport */
        ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
-                                    RDMA_PS_TCP);
+                                    RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(ic->i_cm_id)) {
                ret = PTR_ERR(ic->i_cm_id);
                ic->i_cm_id = NULL;
index 5a9676fe594f412215e75c2a4dc44fe3e599d58f..f7474844f096e50b9d340055c32c9a81f9a5c96a 100644 (file)
@@ -226,7 +226,7 @@ static int rds_iw_laddr_check(__be32 addr)
        /* Create a CMA ID and try to bind it. This catches both
         * IB and iWARP capable NICs.
         */
-       cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP);
+       cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(cm_id))
                return PTR_ERR(cm_id);
 
index 3a60a15d1b4a96deb7bd5636e8cab8244e64902e..c12db66f24c738fb83d1f7999b1784fd4ad807fd 100644 (file)
@@ -522,7 +522,7 @@ int rds_iw_conn_connect(struct rds_connection *conn)
        /* XXX I wonder what affect the port space has */
        /* delegate cm event handler to rdma_transport */
        ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn,
-                                    RDMA_PS_TCP);
+                                    RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(ic->i_cm_id)) {
                ret = PTR_ERR(ic->i_cm_id);
                ic->i_cm_id = NULL;
index 4195a0539829a85ebc2485d060172493a4817fe7..f8760e1b6688b4d68247e1f0f5a45a11f55702f1 100644 (file)
@@ -158,7 +158,8 @@ static int rds_rdma_listen_init(void)
        struct rdma_cm_id *cm_id;
        int ret;
 
-       cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP);
+       cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP,
+                              IB_QPT_RC);
        if (IS_ERR(cm_id)) {
                ret = PTR_ERR(cm_id);
                printk(KERN_ERR "RDS/RDMA: failed to setup listener, "
index 6c014dd3a20bf235c5ccb32408ed7f2c726406be..c3c232a88d94daa8a9facb1c9cbd3a3025d0354e 100644 (file)
@@ -695,7 +695,8 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
                return ERR_PTR(-ENOMEM);
        xprt = &cma_xprt->sc_xprt;
 
-       listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP);
+       listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP,
+                                  IB_QPT_RC);
        if (IS_ERR(listen_id)) {
                ret = PTR_ERR(listen_id);
                dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
index d4297dc43dc4c834ecfa457f36f221385353ecf2..80f8da344df538c5b6ef054e3caddc617a9a49ec 100644 (file)
@@ -387,7 +387,7 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt,
 
        init_completion(&ia->ri_done);
 
-       id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP);
+       id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(id)) {
                rc = PTR_ERR(id);
                dprintk("RPC:       %s: rdma_create_id() failed %i\n",
index a936315ba2c87340f3a38d38c68bc599907a47d0..4d020ecb75242db1372be5cf09df4d705b6c7592 100644 (file)
@@ -1,2 +1,2 @@
-Please see Documentation/SELinux.txt for information on
+Please see Documentation/security/SELinux.txt for information on
 installing a dummy SELinux policy.
index 06d764ccbbe5533f3e77a7d6b00a6bb3c7d3614b..94de6b4907c8af064ed20743148f40b7d946b12e 100644 (file)
@@ -194,7 +194,7 @@ void aa_dfa_free_kref(struct kref *kref)
  * @flags: flags controlling what type of accept tables are acceptable
  *
  * Unpack a dfa that has been serialized.  To find information on the dfa
- * format look in Documentation/apparmor.txt
+ * format look in Documentation/security/apparmor.txt
  * Assumes the dfa @blob stream has been aligned on a 8 byte boundary
  *
  * Returns: an unpacked dfa ready for matching or ERR_PTR on failure
index e33aaf7e5744fcec9fd65d0e5974d13303fff506..d6d9a57b56525e5c7ca9df569f19d3da7d84feba 100644 (file)
@@ -12,8 +12,8 @@
  * published by the Free Software Foundation, version 2 of the
  * License.
  *
- * AppArmor uses a serialized binary format for loading policy.
- * To find policy format documentation look in Documentation/apparmor.txt
+ * AppArmor uses a serialized binary format for loading policy. To find
+ * policy format documentation look in Documentation/security/apparmor.txt
  * All policy is validated before it is used.
  */
 
index 8d9c48f13774b8d35d7bbb8444aafb4681c2d394..cd1f779fa51d38eafc4f75bac0649e592703a7fb 100644 (file)
@@ -62,8 +62,7 @@ static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
 struct cgroup_subsys devices_subsys;
 
 static int devcgroup_can_attach(struct cgroup_subsys *ss,
-               struct cgroup *new_cgroup, struct task_struct *task,
-               bool threadgroup)
+               struct cgroup *new_cgroup, struct task_struct *task)
 {
        if (current != task && !capable(CAP_SYS_ADMIN))
                        return -EPERM;
index 69907a58a6837d038675e55b6abede4176b92daf..b1cba5bf0a5e3d092b57318a242014e915af04ff 100644 (file)
@@ -8,7 +8,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation, version 2 of the License.
  *
- * See Documentation/keys-trusted-encrypted.txt
+ * See Documentation/security/keys-trusted-encrypted.txt
  */
 
 #include <linux/uaccess.h>
index 6c0480db8885b6393b8f4bfea75b8d35ce3015d3..a3063eb3dc232a16eab45161e28f0c88fc642841 100644 (file)
@@ -847,6 +847,7 @@ void key_replace_session_keyring(void)
        new-> sgid      = old-> sgid;
        new->fsgid      = old->fsgid;
        new->user       = get_uid(old->user);
+       new->user_ns    = new->user->user_ns;
        new->group_info = get_group_info(old->group_info);
 
        new->securebits = old->securebits;
index b18a71745901811120e51e6b33624b9e77a98891..d31862e0aa1c00f415d447e12e780482e2bdbca5 100644 (file)
@@ -8,7 +8,7 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  *
- * See Documentation/keys-request-key.txt
+ * See Documentation/security/keys-request-key.txt
  */
 
 #include <linux/module.h>
index f6337c9082ebf6eae8cc0292c6042e5a6350f70f..6cff37529b80210c3bc2b789c44662bb56a33f52 100644 (file)
@@ -8,7 +8,7 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  *
- * See Documentation/keys-request-key.txt
+ * See Documentation/security/keys-request-key.txt
  */
 
 #include <linux/module.h>
index c99b9368368c3d6e2193e81abaf5ab417d34557b..0c33e2ea1f3c34879d44ca57044822fefc657695 100644 (file)
@@ -8,7 +8,7 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation, version 2 of the License.
  *
- * See Documentation/keys-trusted-encrypted.txt
+ * See Documentation/security/keys-trusted-encrypted.txt
  */
 
 #include <linux/uaccess.h>
index fcb89cb0f2235b5801004eba9ada05738d858f58..d515b2128a4ef6f631f4113cf0fcf3575fea3e6d 100644 (file)
@@ -752,10 +752,9 @@ int avc_ss_reset(u32 seqno)
 int avc_has_perm_noaudit(u32 ssid, u32 tsid,
                         u16 tclass, u32 requested,
                         unsigned flags,
-                        struct av_decision *in_avd)
+                        struct av_decision *avd)
 {
        struct avc_node *node;
-       struct av_decision avd_entry, *avd;
        int rc = 0;
        u32 denied;
 
@@ -766,18 +765,11 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
        node = avc_lookup(ssid, tsid, tclass);
        if (unlikely(!node)) {
                rcu_read_unlock();
-
-               if (in_avd)
-                       avd = in_avd;
-               else
-                       avd = &avd_entry;
-
                security_compute_av(ssid, tsid, tclass, avd);
                rcu_read_lock();
                node = avc_insert(ssid, tsid, tclass, avd);
        } else {
-               if (in_avd)
-                       memcpy(in_avd, &node->ae.avd, sizeof(*in_avd));
+               memcpy(avd, &node->ae.avd, sizeof(*avd));
                avd = &node->ae.avd;
        }
 
index c3e4b52699f4ef6ad8734391c1ef01c53f06a597..973e00e34fa9f18ff95d7920a18edbeb5987a5ba 100644 (file)
@@ -2217,10 +2217,11 @@ out_unlock:
                goto out;
        }
        for (i = 0, j = 0; i < mynel; i++) {
+               struct av_decision dummy_avd;
                rc = avc_has_perm_noaudit(fromsid, mysids[i],
                                          SECCLASS_PROCESS, /* kernel value */
                                          PROCESS__TRANSITION, AVC_STRICT,
-                                         NULL);
+                                         &dummy_avd);
                if (!rc)
                        mysids2[j++] = mysids[i];
                cond_resched();
index 5d98194bcad55b6963037d95d35743909517018a..f8c5be46451058ba5a7f25d9a81195fd02643f92 100644 (file)
@@ -704,13 +704,12 @@ static int snd_ctl_elem_list(struct snd_card *card,
        struct snd_ctl_elem_list list;
        struct snd_kcontrol *kctl;
        struct snd_ctl_elem_id *dst, *id;
-       unsigned int offset, space, first, jidx;
+       unsigned int offset, space, jidx;
        
        if (copy_from_user(&list, _list, sizeof(list)))
                return -EFAULT;
        offset = list.offset;
        space = list.space;
-       first = 0;
        /* try limit maximum space */
        if (space > 16384)
                return -ENOMEM;
index 30ecad41403c1becb741f7312ca247eb96054b8c..2c041bb36ab3c177e032e16e39ee47777dce7f48 100644 (file)
@@ -342,7 +342,6 @@ static const struct file_operations snd_shutdown_f_ops =
 int snd_card_disconnect(struct snd_card *card)
 {
        struct snd_monitor_file *mfile;
-       struct file *file;
        int err;
 
        if (!card)
@@ -366,8 +365,6 @@ int snd_card_disconnect(struct snd_card *card)
        
        spin_lock(&card->files_lock);
        list_for_each_entry(mfile, &card->files_list, list) {
-               file = mfile->file;
-
                /* it's critical part, use endless loop */
                /* we have no room to fail */
                mfile->disconnected_f_op = mfile->file->f_op;
index 13b3f6f49fae3c8624b8d47e44b24268be211b3c..2045697f449d3f5ebe347572dc2dfba66b64568a 100644 (file)
@@ -90,11 +90,8 @@ static snd_pcm_sframes_t linear_transfer(struct snd_pcm_plugin *plugin,
                               struct snd_pcm_plugin_channel *dst_channels,
                               snd_pcm_uframes_t frames)
 {
-       struct linear_priv *data;
-
        if (snd_BUG_ON(!plugin || !src_channels || !dst_channels))
                return -ENXIO;
-       data = (struct linear_priv *)plugin->extra_data;
        if (frames == 0)
                return 0;
 #ifdef CONFIG_SND_DEBUG
index abfeff1611ce271eabe0bd98a1b75f613d626f96..f1341308bedaa585311896d3b62dcca8b86395cb 100644 (file)
@@ -1756,8 +1756,18 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
        wait_queue_t wait;
        int err = 0;
        snd_pcm_uframes_t avail = 0;
-       long tout;
-
+       long wait_time, tout;
+
+       if (runtime->no_period_wakeup)
+               wait_time = MAX_SCHEDULE_TIMEOUT;
+       else {
+               wait_time = 10;
+               if (runtime->rate) {
+                       long t = runtime->period_size * 2 / runtime->rate;
+                       wait_time = max(t, wait_time);
+               }
+               wait_time = msecs_to_jiffies(wait_time * 1000);
+       }
        init_waitqueue_entry(&wait, current);
        add_wait_queue(&runtime->tsleep, &wait);
        for (;;) {
@@ -1765,9 +1775,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
                        err = -ERESTARTSYS;
                        break;
                }
-               set_current_state(TASK_INTERRUPTIBLE);
                snd_pcm_stream_unlock_irq(substream);
-               tout = schedule_timeout(msecs_to_jiffies(10000));
+               tout = schedule_timeout_interruptible(wait_time);
                snd_pcm_stream_lock_irq(substream);
                switch (runtime->status->state) {
                case SNDRV_PCM_STATE_SUSPENDED:
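
The timeout rework above (and the matching change to snd_pcm_drain() in the next file) keeps the old 10-second floor but never waits less than two periods, so streams with very long periods, or with period wakeups disabled, no longer hit spurious timeouts. A small sketch of the computation with illustrative numbers.

#include <stdio.h>

int main(void)
{
	long rate = 48000;			/* frames per second */
	long period_size = 48000 * 30;		/* a deliberately huge period */
	long wait_time = 10;			/* seconds, the old fixed timeout */
	long t = period_size * 2 / rate;	/* two periods, in seconds */

	if (t > wait_time)
		wait_time = t;
	printf("timeout = %ld seconds\n", wait_time);	/* prints 60 */
	return 0;
}
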
index 1a07750f3836ae93826fa20d5c5f7a56511bb3c3..1c6be91dfb9887ea1b3634735858e27757c42e21 100644 (file)
@@ -1481,11 +1481,20 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
                        break; /* all drained */
                init_waitqueue_entry(&wait, current);
                add_wait_queue(&to_check->sleep, &wait);
-               set_current_state(TASK_INTERRUPTIBLE);
                snd_pcm_stream_unlock_irq(substream);
                up_read(&snd_pcm_link_rwsem);
                snd_power_unlock(card);
-               tout = schedule_timeout(10 * HZ);
+               if (runtime->no_period_wakeup)
+                       tout = MAX_SCHEDULE_TIMEOUT;
+               else {
+                       tout = 10;
+                       if (runtime->rate) {
+                               long t = runtime->period_size * 2 / runtime->rate;
+                               tout = max(t, tout);
+                       }
+                       tout = msecs_to_jiffies(tout * 1000);
+               }
+               tout = schedule_timeout_interruptible(tout);
                snd_power_lock(card);
                down_read(&snd_pcm_link_rwsem);
                snd_pcm_stream_lock_irq(substream);
@@ -1518,13 +1527,11 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
 static int snd_pcm_drop(struct snd_pcm_substream *substream)
 {
        struct snd_pcm_runtime *runtime;
-       struct snd_card *card;
        int result = 0;
        
        if (PCM_RUNTIME_CHECK(substream))
                return -ENXIO;
        runtime = substream->runtime;
-       card = substream->pcm->card;
 
        if (runtime->status->state == SNDRV_PCM_STATE_OPEN ||
            runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED ||
@@ -2056,7 +2063,6 @@ static int snd_pcm_open_file(struct file *file,
 {
        struct snd_pcm_file *pcm_file;
        struct snd_pcm_substream *substream;
-       struct snd_pcm_str *str;
        int err;
 
        if (rpcm_file)
@@ -2073,7 +2079,6 @@ static int snd_pcm_open_file(struct file *file,
        }
        pcm_file->substream = substream;
        if (substream->ref_count == 1) {
-               str = substream->pstr;
                substream->file = pcm_file;
                substream->pcm_release = pcm_release_private;
        }
@@ -3015,11 +3020,9 @@ static const struct vm_operations_struct snd_pcm_vm_ops_status =
 static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file,
                               struct vm_area_struct *area)
 {
-       struct snd_pcm_runtime *runtime;
        long size;
        if (!(area->vm_flags & VM_READ))
                return -EINVAL;
-       runtime = substream->runtime;
        size = area->vm_end - area->vm_start;
        if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status)))
                return -EINVAL;
@@ -3054,11 +3057,9 @@ static const struct vm_operations_struct snd_pcm_vm_ops_control =
 static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file,
                                struct vm_area_struct *area)
 {
-       struct snd_pcm_runtime *runtime;
        long size;
        if (!(area->vm_flags & VM_READ))
                return -EINVAL;
-       runtime = substream->runtime;
        size = area->vm_end - area->vm_start;
        if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)))
                return -EINVAL;
index e7a8e9e4edb2063760607f11afcd6187f94a92b4..f9077361c119d1a4a5eed6135a3d2e4042c812e5 100644 (file)
@@ -467,13 +467,11 @@ int snd_seq_queue_timer_open(int queueid)
 int snd_seq_queue_timer_close(int queueid)
 {
        struct snd_seq_queue *queue;
-       struct snd_seq_timer *tmr;
        int result = 0;
 
        queue = queueptr(queueid);
        if (queue == NULL)
                return -EINVAL;
-       tmr = queue->timer;
        snd_seq_timer_close(queue);
        queuefree(queue);
        return result;
index 8edd998509f72057143a75715ce19c17e4896354..45b4a8d70e085a36849f32d971e0d987b6ded638 100644 (file)
@@ -4719,7 +4719,7 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec,
                           cfg->dig_out_pins[0], cfg->dig_out_pins[1]);
        snd_printd("   inputs:");
        for (i = 0; i < cfg->num_inputs; i++) {
-               snd_printdd(" %s=0x%x",
+               snd_printd(" %s=0x%x",
                            hda_get_autocfg_input_label(codec, cfg, i),
                            cfg->inputs[i].pin);
        }
index 74b0560289c00630aca0ba952d8c80a0316e7fd8..b05f7be9dc1b154017838b4fda19486014ad6f47 100644 (file)
@@ -312,23 +312,6 @@ out_fail:
        return -EINVAL;
 }
 
-static int hdmi_eld_valid(struct hda_codec *codec, hda_nid_t nid)
-{
-       int eldv;
-       int present;
-
-       present = snd_hda_pin_sense(codec, nid);
-       eldv    = (present & AC_PINSENSE_ELDV);
-       present = (present & AC_PINSENSE_PRESENCE);
-
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-       printk(KERN_INFO "HDMI: sink_present = %d, eld_valid = %d\n",
-                       !!present, !!eldv);
-#endif
-
-       return eldv && present;
-}
-
 int snd_hdmi_get_eld_size(struct hda_codec *codec, hda_nid_t nid)
 {
        return snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_HDMI_DIP_SIZE,
@@ -343,7 +326,7 @@ int snd_hdmi_get_eld(struct hdmi_eld *eld,
        int size;
        unsigned char *buf;
 
-       if (!hdmi_eld_valid(codec, nid))
+       if (!eld->eld_valid)
                return -ENOENT;
 
        size = snd_hdmi_get_eld_size(codec, nid);
@@ -477,6 +460,8 @@ static void hdmi_print_eld_info(struct snd_info_entry *entry,
 
        snd_iprintf(buffer, "monitor_present\t\t%d\n", e->monitor_present);
        snd_iprintf(buffer, "eld_valid\t\t%d\n", e->eld_valid);
+       if (!e->eld_valid)
+               return;
        snd_iprintf(buffer, "monitor_name\t\t%s\n", e->monitor_name);
        snd_iprintf(buffer, "connection_type\t\t%s\n",
                                eld_connection_type_names[e->conn_type]);
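With hdmi_eld_valid() removed above, snd_hdmi_get_eld() now relies on the eld_valid flag cached in struct hdmi_eld, and the proc dump stops after the monitor_present/eld_valid lines when no valid ELD is cached; the flag itself is refreshed by hdmi_present_sense() in the patch_hdmi.c hunks further down. A small standalone sketch of how those two flags are derived from one pin-sense read (illustrative only; the bit masks are stand-ins for the real AC_PINSENSE_* definitions):

#include <stdio.h>

/* Stand-in masks for AC_PINSENSE_PRESENCE / AC_PINSENSE_ELDV. */
#define PIN_PRESENCE    (1u << 31)
#define PIN_ELDV        (1u << 30)

struct eld_state {
        int monitor_present;
        int eld_valid;
};

int main(void)
{
        unsigned int sense = PIN_PRESENCE | PIN_ELDV;   /* hypothetical reading */
        struct eld_state eld = { 0, 0 };

        eld.monitor_present = !!(sense & PIN_PRESENCE);
        /* the ELD is only trusted while a sink is actually present */
        eld.eld_valid = eld.monitor_present ? !!(sense & PIN_ELDV) : 0;

        printf("monitor_present=%d eld_valid=%d\n",
               eld.monitor_present, eld.eld_valid);
        return 0;
}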
index 43a036716d259af107b0b8b7cad7eb05768117aa..486f6deb3eee952ab810750ecee652c23acc2b11 100644 (file)
@@ -391,6 +391,7 @@ struct azx {
 
        /* chip type specific */
        int driver_type;
+       unsigned int driver_caps;
        int playback_streams;
        int playback_index_offset;
        int capture_streams;
@@ -464,6 +465,34 @@ enum {
        AZX_NUM_DRIVERS, /* keep this as last entry */
 };
 
+/* driver quirks (capabilities) */
+/* bits 0-7 are used for indicating driver type */
+#define AZX_DCAPS_NO_TCSEL     (1 << 8)        /* No Intel TCSEL bit */
+#define AZX_DCAPS_NO_MSI       (1 << 9)        /* No MSI support */
+#define AZX_DCAPS_ATI_SNOOP    (1 << 10)       /* ATI snoop enable */
+#define AZX_DCAPS_NVIDIA_SNOOP (1 << 11)       /* Nvidia snoop enable */
+#define AZX_DCAPS_SCH_SNOOP    (1 << 12)       /* SCH/PCH snoop enable */
+#define AZX_DCAPS_RIRB_DELAY   (1 << 13)       /* Long delay in read loop */
+#define AZX_DCAPS_RIRB_PRE_DELAY (1 << 14)     /* Put a delay before read */
+#define AZX_DCAPS_CTX_WORKAROUND (1 << 15)     /* X-Fi workaround */
+#define AZX_DCAPS_POSFIX_LPIB  (1 << 16)       /* Use LPIB as default */
+#define AZX_DCAPS_POSFIX_VIA   (1 << 17)       /* Use VIACOMBO as default */
+#define AZX_DCAPS_NO_64BIT     (1 << 18)       /* No 64bit address */
+#define AZX_DCAPS_SYNC_WRITE   (1 << 19)       /* sync each cmd write */
+
+/* quirks for ATI SB / AMD Hudson */
+#define AZX_DCAPS_PRESET_ATI_SB \
+       (AZX_DCAPS_ATI_SNOOP | AZX_DCAPS_NO_TCSEL | \
+        AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB)
+
+/* quirks for ATI/AMD HDMI */
+#define AZX_DCAPS_PRESET_ATI_HDMI \
+       (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB)
+
+/* quirks for Nvidia */
+#define AZX_DCAPS_PRESET_NVIDIA \
+       (AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI)
+
 static char *driver_short_names[] __devinitdata = {
        [AZX_DRIVER_ICH] = "HDA Intel",
        [AZX_DRIVER_PCH] = "HDA Intel PCH",
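The capability bits introduced above are ORed into the PCI table's .driver_data together with the AZX_DRIVER_* type, and azx_create() later splits them apart again (chip->driver_type = driver_caps & 0xff), so every old switch on driver_type in this file becomes a flag test. A standalone sketch of the packing and testing (illustrative only; the AZX_DRIVER_ATI value is a hypothetical stand-in for the enum, the flag values match the defines above):

#include <stdio.h>

/* Capability bits as defined in the hunk above */
#define AZX_DCAPS_NO_TCSEL      (1 << 8)
#define AZX_DCAPS_ATI_SNOOP     (1 << 10)
#define AZX_DCAPS_POSFIX_LPIB   (1 << 16)
#define AZX_DCAPS_SYNC_WRITE    (1 << 19)
#define AZX_DCAPS_PRESET_ATI_SB \
        (AZX_DCAPS_ATI_SNOOP | AZX_DCAPS_NO_TCSEL | \
         AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB)

#define AZX_DRIVER_ATI  3       /* hypothetical enum value for this sketch */

int main(void)
{
        /* what the PCI table stores in .driver_data */
        unsigned int driver_data = AZX_DRIVER_ATI | AZX_DCAPS_PRESET_ATI_SB;

        /* what azx_create() recovers from it */
        unsigned int driver_caps = driver_data;
        int driver_type = driver_caps & 0xff;

        printf("type=%d ati_snoop=%d no_tcsel=%d\n",
               driver_type,
               !!(driver_caps & AZX_DCAPS_ATI_SNOOP),
               !!(driver_caps & AZX_DCAPS_NO_TCSEL));
        return 0;
}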
@@ -566,7 +595,7 @@ static void azx_init_cmd_io(struct azx *chip)
        /* reset the rirb hw write pointer */
        azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST);
        /* set N=1, get RIRB response interrupt for new entry */
-       if (chip->driver_type == AZX_DRIVER_CTX)
+       if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
                azx_writew(chip, RINTCNT, 0xc0);
        else
                azx_writew(chip, RINTCNT, 1);
@@ -1056,19 +1085,24 @@ static void azx_init_pci(struct azx *chip)
         * codecs.
         * The PCI register TCSEL is defined in the Intel manuals.
         */
-       if (chip->driver_type != AZX_DRIVER_ATI &&
-           chip->driver_type != AZX_DRIVER_ATIHDMI)
+       if (!(chip->driver_caps & AZX_DCAPS_NO_TCSEL)) {
+               snd_printdd(SFX "Clearing TCSEL\n");
                update_pci_byte(chip->pci, ICH6_PCIREG_TCSEL, 0x07, 0);
+       }
 
-       switch (chip->driver_type) {
-       case AZX_DRIVER_ATI:
-               /* For ATI SB450 azalia HD audio, we need to enable snoop */
+       /* For ATI SB450/600/700/800/900 and AMD Hudson azalia HD audio,
+        * we need to enable snoop.
+        */
+       if (chip->driver_caps & AZX_DCAPS_ATI_SNOOP) {
+               snd_printdd(SFX "Enabling ATI snoop\n");
                update_pci_byte(chip->pci,
                                ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR, 
                                0x07, ATI_SB450_HDAUDIO_ENABLE_SNOOP);
-               break;
-       case AZX_DRIVER_NVIDIA:
-               /* For NVIDIA HDA, enable snoop */
+       }
+
+       /* For NVIDIA HDA, enable snoop */
+       if (chip->driver_caps & AZX_DCAPS_NVIDIA_SNOOP) {
+               snd_printdd(SFX "Enabling Nvidia snoop\n");
                update_pci_byte(chip->pci,
                                NVIDIA_HDA_TRANSREG_ADDR,
                                0x0f, NVIDIA_HDA_ENABLE_COHBITS);
@@ -1078,9 +1112,10 @@ static void azx_init_pci(struct azx *chip)
                update_pci_byte(chip->pci,
                                NVIDIA_HDA_OSTRM_COH,
                                0x01, NVIDIA_HDA_ENABLE_COHBIT);
-               break;
-       case AZX_DRIVER_SCH:
-       case AZX_DRIVER_PCH:
+       }
+
+       /* Enable SCH/PCH snoop if needed */
+       if (chip->driver_caps & AZX_DCAPS_SCH_SNOOP) {
                pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop);
                if (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) {
                        pci_write_config_word(chip->pci, INTEL_SCH_HDA_DEVC,
@@ -1091,14 +1126,6 @@ static void azx_init_pci(struct azx *chip)
                                (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP)
                                ? "Failed" : "OK");
                }
-               break;
-       default:
-               /* AMD Hudson needs the similar snoop, as it seems... */
-               if (chip->pci->vendor == PCI_VENDOR_ID_AMD)
-                       update_pci_byte(chip->pci,
-                               ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR,
-                               0x07, ATI_SB450_HDAUDIO_ENABLE_SNOOP);
-               break;
         }
 }
 
@@ -1152,7 +1179,7 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
        status = azx_readb(chip, RIRBSTS);
        if (status & RIRB_INT_MASK) {
                if (status & RIRB_INT_RESPONSE) {
-                       if (chip->driver_type == AZX_DRIVER_CTX)
+                       if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
                                udelay(80);
                        azx_update_rirb(chip);
                }
@@ -1421,8 +1448,10 @@ static int __devinit azx_codec_create(struct azx *chip, const char *model)
        if (err < 0)
                return err;
 
-       if (chip->driver_type == AZX_DRIVER_NVIDIA)
+       if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
+               snd_printd(SFX "Enable delay in RIRB handling\n");
                chip->bus->needs_damn_long_delay = 1;
+       }
 
        codecs = 0;
        max_slots = azx_max_codecs[chip->driver_type];
@@ -1457,9 +1486,8 @@ static int __devinit azx_codec_create(struct azx *chip, const char *model)
         * sequence like the pin-detection.  It seems that forcing the synced
         * access works around the stall.  Grrr...
         */
-       if (chip->pci->vendor == PCI_VENDOR_ID_AMD ||
-           chip->pci->vendor == PCI_VENDOR_ID_ATI) {
-               snd_printk(KERN_INFO SFX "Enable sync_write for AMD chipset\n");
+       if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) {
+               snd_printd(SFX "Enable sync_write for stable communication\n");
                chip->bus->sync_write = 1;
                chip->bus->allow_bus_reset = 1;
        }
@@ -1720,7 +1748,7 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream)
 
        stream_tag = azx_dev->stream_tag;
        /* CA-IBG chips need the playback stream starting from 1 */
-       if (chip->driver_type == AZX_DRIVER_CTX &&
+       if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) &&
            stream_tag > chip->capture_streams)
                stream_tag -= chip->capture_streams;
        return snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag,
@@ -2365,20 +2393,14 @@ static int __devinit check_position_fix(struct azx *chip, int fix)
        }
 
        /* Check VIA/ATI HD Audio Controller exist */
-       switch (chip->driver_type) {
-       case AZX_DRIVER_VIA:
-               /* Use link position directly, avoid any transfer problem. */
+       if (chip->driver_caps & AZX_DCAPS_POSFIX_VIA) {
+               snd_printd(SFX "Using VIACOMBO position fix\n");
                return POS_FIX_VIACOMBO;
-       case AZX_DRIVER_ATI:
-               /* ATI chipsets don't work well with position-buffer */
+       }
+       if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) {
+               snd_printd(SFX "Using LPIB position fix\n");
                return POS_FIX_LPIB;
-       case AZX_DRIVER_GENERIC:
-               /* AMD chipsets also don't work with position-buffer */
-               if (chip->pci->vendor == PCI_VENDOR_ID_AMD)
-                       return POS_FIX_LPIB;
-               break;
        }
-
        return POS_FIX_AUTO;
 }
 
@@ -2460,8 +2482,8 @@ static void __devinit check_msi(struct azx *chip)
        }
 
        /* NVidia chipsets seem to cause troubles with MSI */
-       if (chip->driver_type == AZX_DRIVER_NVIDIA) {
-               printk(KERN_INFO "hda_intel: Disable MSI for Nvidia chipset\n");
+       if (chip->driver_caps & AZX_DCAPS_NO_MSI) {
+               printk(KERN_INFO "hda_intel: Disabling MSI\n");
                chip->msi = 0;
        }
 }
@@ -2471,7 +2493,7 @@ static void __devinit check_msi(struct azx *chip)
  * constructor
  */
 static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
-                               int dev, int driver_type,
+                               int dev, unsigned int driver_caps,
                                struct azx **rchip)
 {
        struct azx *chip;
@@ -2499,7 +2521,8 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
        chip->card = card;
        chip->pci = pci;
        chip->irq = -1;
-       chip->driver_type = driver_type;
+       chip->driver_caps = driver_caps;
+       chip->driver_type = driver_caps & 0xff;
        check_msi(chip);
        chip->dev_index = dev;
        INIT_WORK(&chip->irq_pending_work, azx_irq_pending_work);
@@ -2563,8 +2586,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
        snd_printdd(SFX "chipset global capabilities = 0x%x\n", gcap);
 
        /* disable SB600 64bit support for safety */
-       if ((chip->driver_type == AZX_DRIVER_ATI) ||
-           (chip->driver_type == AZX_DRIVER_ATIHDMI)) {
+       if (chip->pci->vendor == PCI_VENDOR_ID_ATI) {
                struct pci_dev *p_smbus;
                p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
                                         PCI_DEVICE_ID_ATI_SBX00_SMBUS,
@@ -2574,19 +2596,13 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
                                gcap &= ~ICH6_GCAP_64OK;
                        pci_dev_put(p_smbus);
                }
-       } else {
-               /* FIXME: not sure whether this is really needed, but
-                * Hudson isn't stable enough for allowing everything...
-                * let's check later again.
-                */
-               if (chip->pci->vendor == PCI_VENDOR_ID_AMD)
-                       gcap &= ~ICH6_GCAP_64OK;
        }
 
-       /* disable 64bit DMA address for Teradici */
-       /* it does not work with device 6549:1200 subsys e4a2:040b */
-       if (chip->driver_type == AZX_DRIVER_TERA)
+       /* disable 64bit DMA address on some devices */
+       if (chip->driver_caps & AZX_DCAPS_NO_64BIT) {
+               snd_printd(SFX "Disabling 64bit DMA\n");
                gcap &= ~ICH6_GCAP_64OK;
+       }
 
        /* allow 64bit DMA address if supported by H/W */
        if ((gcap & ICH6_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
@@ -2788,38 +2804,62 @@ static void __devexit azx_remove(struct pci_dev *pci)
 /* PCI IDs */
 static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        /* CPT */
-       { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH },
+       { PCI_DEVICE(0x8086, 0x1c20),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP },
        /* PBG */
-       { PCI_DEVICE(0x8086, 0x1d20), .driver_data = AZX_DRIVER_PCH },
+       { PCI_DEVICE(0x8086, 0x1d20),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP },
        /* Panther Point */
-       { PCI_DEVICE(0x8086, 0x1e20), .driver_data = AZX_DRIVER_PCH },
+       { PCI_DEVICE(0x8086, 0x1e20),
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP },
        /* SCH */
-       { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH },
+       { PCI_DEVICE(0x8086, 0x811b),
+         .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP },
        /* Generic Intel */
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID),
          .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
          .class_mask = 0xffffff,
          .driver_data = AZX_DRIVER_ICH },
-       /* ATI SB 450/600 */
-       { PCI_DEVICE(0x1002, 0x437b), .driver_data = AZX_DRIVER_ATI },
-       { PCI_DEVICE(0x1002, 0x4383), .driver_data = AZX_DRIVER_ATI },
+       /* ATI SB 450/600/700/800/900 */
+       { PCI_DEVICE(0x1002, 0x437b),
+         .driver_data = AZX_DRIVER_ATI | AZX_DCAPS_PRESET_ATI_SB },
+       { PCI_DEVICE(0x1002, 0x4383),
+         .driver_data = AZX_DRIVER_ATI | AZX_DCAPS_PRESET_ATI_SB },
+       /* AMD Hudson */
+       { PCI_DEVICE(0x1022, 0x780d),
+         .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
        /* ATI HDMI */
-       { PCI_DEVICE(0x1002, 0x793b), .driver_data = AZX_DRIVER_ATIHDMI },
-       { PCI_DEVICE(0x1002, 0x7919), .driver_data = AZX_DRIVER_ATIHDMI },
-       { PCI_DEVICE(0x1002, 0x960f), .driver_data = AZX_DRIVER_ATIHDMI },
-       { PCI_DEVICE(0x1002, 0x970f), .driver_data = AZX_DRIVER_ATIHDMI },
-       { PCI_DEVICE(0x1002, 0xaa00), .driver_data = AZX_DRIVER_ATIHDMI },
-       { PCI_DEVICE(0x1002, 0xaa08), .driver_data = AZX_DRIVER_ATIHDMI },
-       { PCI_DEVICE(0x1002, 0xaa10), .driver_data = AZX_DRIVER_ATIHDMI },
-       { PCI_DEVICE(0x1002, 0xaa18), .driver_data = AZX_DRIVER_ATIHDMI },
-       { PCI_DEVICE(0x1002, 0xaa20), .driver_data = AZX_DRIVER_ATIHDMI },
-       { PCI_DEVICE(0x1002, 0xaa28), .driver_data = AZX_DRIVER_ATIHDMI },
-       { PCI_DEVICE(0x1002, 0xaa30), .driver_data = AZX_DRIVER_ATIHDMI },
-       { PCI_DEVICE(0x1002, 0xaa38), .driver_data = AZX_DRIVER_ATIHDMI },
-       { PCI_DEVICE(0x1002, 0xaa40), .driver_data = AZX_DRIVER_ATIHDMI },
-       { PCI_DEVICE(0x1002, 0xaa48), .driver_data = AZX_DRIVER_ATIHDMI },
+       { PCI_DEVICE(0x1002, 0x793b),
+         .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+       { PCI_DEVICE(0x1002, 0x7919),
+         .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+       { PCI_DEVICE(0x1002, 0x960f),
+         .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+       { PCI_DEVICE(0x1002, 0x970f),
+         .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+       { PCI_DEVICE(0x1002, 0xaa00),
+         .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+       { PCI_DEVICE(0x1002, 0xaa08),
+         .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+       { PCI_DEVICE(0x1002, 0xaa10),
+         .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+       { PCI_DEVICE(0x1002, 0xaa18),
+         .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+       { PCI_DEVICE(0x1002, 0xaa20),
+         .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+       { PCI_DEVICE(0x1002, 0xaa28),
+         .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+       { PCI_DEVICE(0x1002, 0xaa30),
+         .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+       { PCI_DEVICE(0x1002, 0xaa38),
+         .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+       { PCI_DEVICE(0x1002, 0xaa40),
+         .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
+       { PCI_DEVICE(0x1002, 0xaa48),
+         .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
        /* VIA VT8251/VT8237A */
-       { PCI_DEVICE(0x1106, 0x3288), .driver_data = AZX_DRIVER_VIA },
+       { PCI_DEVICE(0x1106, 0x3288),
+         .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
        /* SIS966 */
        { PCI_DEVICE(0x1039, 0x7502), .driver_data = AZX_DRIVER_SIS },
        /* ULI M5461 */
@@ -2828,9 +2868,10 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
          .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
          .class_mask = 0xffffff,
-         .driver_data = AZX_DRIVER_NVIDIA },
+         .driver_data = AZX_DRIVER_NVIDIA | AZX_DCAPS_PRESET_NVIDIA },
        /* Teradici */
-       { PCI_DEVICE(0x6549, 0x1200), .driver_data = AZX_DRIVER_TERA },
+       { PCI_DEVICE(0x6549, 0x1200),
+         .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT },
        /* Creative X-Fi (CA0110-IBG) */
 #if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE)
        /* the following entry conflicts with snd-ctxfi driver,
@@ -2840,10 +2881,13 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        { PCI_DEVICE(PCI_VENDOR_ID_CREATIVE, PCI_ANY_ID),
          .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
          .class_mask = 0xffffff,
-         .driver_data = AZX_DRIVER_CTX },
+         .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
+         AZX_DCAPS_RIRB_PRE_DELAY },
 #else
        /* this entry seems still valid -- i.e. without emu20kx chip */
-       { PCI_DEVICE(0x1102, 0x0009), .driver_data = AZX_DRIVER_CTX },
+       { PCI_DEVICE(0x1102, 0x0009),
+         .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
+         AZX_DCAPS_RIRB_PRE_DELAY },
 #endif
        /* Vortex86MX */
        { PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC },
@@ -2853,11 +2897,11 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
        { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_ANY_ID),
          .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
          .class_mask = 0xffffff,
-         .driver_data = AZX_DRIVER_GENERIC },
+         .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_ANY_ID),
          .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
          .class_mask = 0xffffff,
-         .driver_data = AZX_DRIVER_GENERIC },
+         .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI },
        { 0, }
 };
 MODULE_DEVICE_TABLE(pci, azx_ids);
index f1b3875c57dff4a73a27ad7b97817eaa1c119af1..696ac2590307d7d46d8733a08cdef40cead16de7 100644 (file)
@@ -3159,6 +3159,7 @@ static const struct snd_pci_quirk ad1988_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x81ec, "Asus P5B-DLX", AD1988_6STACK_DIG),
        SND_PCI_QUIRK(0x1043, 0x81f6, "Asus M2N-SLI", AD1988_6STACK_DIG),
        SND_PCI_QUIRK(0x1043, 0x8277, "Asus P5K-E/WIFI-AP", AD1988_6STACK_DIG),
+       SND_PCI_QUIRK(0x1043, 0x82c0, "Asus M3N-HT Deluxe", AD1988_6STACK_DIG),
        SND_PCI_QUIRK(0x1043, 0x8311, "Asus P5Q-Premium/Pro", AD1988_6STACK_DIG),
        {}
 };
index 4f37477d3c71b5238b049275987cca2656df0107..3e6b9a8539c2ff4bb900b62bddaa2f5d4b008a2d 100644 (file)
@@ -3098,7 +3098,9 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
        SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
        SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
+       SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS),
        SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
+       SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G565", CXT5066_AUTO),
        SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */
        {}
 };
@@ -3433,7 +3435,9 @@ static void cx_auto_parse_output(struct hda_codec *codec)
                        break;
                }
        }
-       if (spec->auto_mute && cfg->line_out_pins[0] &&
+       if (spec->auto_mute &&
+           cfg->line_out_pins[0] &&
+           cfg->line_out_type != AUTO_PIN_SPEAKER_OUT &&
            cfg->line_out_pins[0] != cfg->hp_pins[0] &&
            cfg->line_out_pins[0] != cfg->speaker_pins[0]) {
                for (i = 0; i < cfg->line_outs; i++) {
@@ -3481,25 +3485,32 @@ static void cx_auto_update_speakers(struct hda_codec *codec)
 {
        struct conexant_spec *spec = codec->spec;
        struct auto_pin_cfg *cfg = &spec->autocfg;
-       int on;
+       int on = 1;
 
-       if (!spec->auto_mute)
-               on = 0;
-       else
-               on = spec->hp_present | spec->line_present;
+       /* turn on HP EAPD when HP jacks are present */
+       if (spec->auto_mute)
+               on = spec->hp_present;
        cx_auto_turn_eapd(codec, cfg->hp_outs, cfg->hp_pins, on);
-       do_automute(codec, cfg->speaker_outs, cfg->speaker_pins, !on);
+       /* mute speakers in auto-mode if HP or LO jacks are plugged */
+       if (spec->auto_mute)
+               on = !(spec->hp_present ||
+                      (spec->detect_line && spec->line_present));
+       do_automute(codec, cfg->speaker_outs, cfg->speaker_pins, on);
 
        /* toggle line-out mutes if needed, too */
        /* if LO is a copy of either HP or Speaker, don't need to handle it */
        if (cfg->line_out_pins[0] == cfg->hp_pins[0] ||
            cfg->line_out_pins[0] == cfg->speaker_pins[0])
                return;
-       if (!spec->automute_lines || !spec->auto_mute)
-               on = 0;
-       else
-               on = spec->hp_present;
-       do_automute(codec, cfg->line_outs, cfg->line_out_pins, !on);
+       if (spec->auto_mute) {
+               /* mute LO in auto-mode when HP jack is present */
+               if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT ||
+                   spec->automute_lines)
+                       on = !spec->hp_present;
+               else
+                       on = 1;
+       }
+       do_automute(codec, cfg->line_outs, cfg->line_out_pins, on);
 }
 
 static void cx_auto_hp_automute(struct hda_codec *codec)
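The rewritten cx_auto_update_speakers() above separates three decisions: HP EAPD follows headphone-jack presence, speakers are cut when a headphone jack (or, with detect_line set, a line jack) is plugged, and distinct line-out pins are cut on headphone presence only when they serve as speaker outputs or automute_lines is set. A standalone sketch of that decision logic, reading 'on' as 'output enabled' (illustrative only; the jack states are hypothetical, and the function returns early when the line-out pins duplicate the HP or speaker pins):

#include <stdio.h>

int main(void)
{
        int auto_mute = 1, hp_present = 1, line_present = 0;
        int detect_line = 1, automute_lines = 0, lo_is_speaker_type = 0;
        int on;

        /* HP EAPD follows headphone presence in auto-mute mode */
        on = auto_mute ? hp_present : 1;
        printf("hp eapd on   = %d\n", on);

        /* speakers are cut when HP (or detected line) jacks are plugged */
        on = auto_mute ? !(hp_present || (detect_line && line_present)) : 1;
        printf("speakers on  = %d\n", on);

        /* distinct line-outs follow HP presence only if they act as
         * speaker outs or automute_lines is set; otherwise stay on */
        if (auto_mute)
                on = (lo_is_speaker_type || automute_lines) ? !hp_present : 1;
        else
                on = 1;
        printf("line outs on = %d\n", on);
        return 0;
}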
@@ -3696,13 +3707,14 @@ static int cx_auto_mux_enum_update(struct hda_codec *codec,
 {
        struct conexant_spec *spec = codec->spec;
        hda_nid_t adc;
+       int changed = 1;
 
        if (!imux->num_items)
                return 0;
        if (idx >= imux->num_items)
                idx = imux->num_items - 1;
        if (spec->cur_mux[0] == idx)
-               return 0;
+               changed = 0;
        adc = spec->imux_info[idx].adc;
        select_input_connection(codec, spec->imux_info[idx].adc,
                                spec->imux_info[idx].pin);
@@ -3715,7 +3727,7 @@ static int cx_auto_mux_enum_update(struct hda_codec *codec,
                                           spec->cur_adc_format);
        }
        spec->cur_mux[0] = idx;
-       return 1;
+       return changed;
 }
 
 static int cx_auto_mux_enum_put(struct snd_kcontrol *kcontrol,
@@ -3789,7 +3801,7 @@ static void cx_auto_check_auto_mic(struct hda_codec *codec)
        int pset[INPUT_PIN_ATTR_NORMAL + 1];
        int i;
 
-       for (i = 0; i < INPUT_PIN_ATTR_NORMAL; i++)
+       for (i = 0; i < ARRAY_SIZE(pset); i++)
                pset[i] = -1;
        for (i = 0; i < spec->private_imux.num_items; i++) {
                hda_nid_t pin = spec->imux_info[i].pin;
index 322901873222c446f6570c8139671697fd291e23..bd0ae697f9c4118ae5947e1f6015d41cff3638ea 100644 (file)
@@ -48,8 +48,8 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
  *
  * The HDA correspondence of pipes/ports are converter/pin nodes.
  */
-#define MAX_HDMI_CVTS  3
-#define MAX_HDMI_PINS  3
+#define MAX_HDMI_CVTS  4
+#define MAX_HDMI_PINS  4
 
 struct hdmi_spec {
        int num_cvts;
@@ -78,10 +78,6 @@ struct hdmi_spec {
         */
        struct hda_multi_out multiout;
        const struct hda_pcm_stream *pcm_playback;
-
-       /* misc flags */
-       /* PD bit indicates only the update, not the current state */
-       unsigned int old_pin_detect:1;
 };
 
 
@@ -300,13 +296,6 @@ static int hda_node_index(hda_nid_t *nids, hda_nid_t nid)
        return -EINVAL;
 }
 
-static void hdmi_get_show_eld(struct hda_codec *codec, hda_nid_t pin_nid,
-                             struct hdmi_eld *eld)
-{
-       if (!snd_hdmi_get_eld(eld, codec, pin_nid))
-               snd_hdmi_show_eld(eld);
-}
-
 #ifdef BE_PARANOID
 static void hdmi_get_dip_index(struct hda_codec *codec, hda_nid_t pin_nid,
                                int *packet_index, int *byte_index)
@@ -694,35 +683,20 @@ static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid,
 static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
 {
        struct hdmi_spec *spec = codec->spec;
-       int tag = res >> AC_UNSOL_RES_TAG_SHIFT;
-       int pind = !!(res & AC_UNSOL_RES_PD);
+       int pin_nid = res >> AC_UNSOL_RES_TAG_SHIFT;
+       int pd = !!(res & AC_UNSOL_RES_PD);
        int eldv = !!(res & AC_UNSOL_RES_ELDV);
        int index;
 
        printk(KERN_INFO
                "HDMI hot plug event: Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
-               tag, pind, eldv);
+               pin_nid, pd, eldv);
 
-       index = hda_node_index(spec->pin, tag);
+       index = hda_node_index(spec->pin, pin_nid);
        if (index < 0)
                return;
 
-       if (spec->old_pin_detect) {
-               if (pind)
-                       hdmi_present_sense(codec, tag, &spec->sink_eld[index]);
-               pind = spec->sink_eld[index].monitor_present;
-       }
-
-       spec->sink_eld[index].monitor_present = pind;
-       spec->sink_eld[index].eld_valid = eldv;
-
-       if (pind && eldv) {
-               hdmi_get_show_eld(codec, spec->pin[index],
-                                 &spec->sink_eld[index]);
-               /* TODO: do real things about ELD */
-       }
-
-       snd_hda_input_jack_report(codec, tag);
+       hdmi_present_sense(codec, pin_nid, &spec->sink_eld[index]);
 }
 
 static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -903,13 +877,33 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, hda_nid_t pin_nid)
 static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid,
                               struct hdmi_eld *eld)
 {
+       /*
+        * Always execute a GetPinSense verb here, even when called from
+        * hdmi_intrinsic_event; for some NVIDIA HW, the unsolicited
+        * response's PD bit is not the real PD value, but indicates that
+        * the real PD value changed. An older version of the HD-audio
+        * specification worked this way. Hence, we just ignore the data in
+        * the unsolicited response to avoid custom WARs.
+        */
        int present = snd_hda_pin_sense(codec, pin_nid);
 
+       memset(eld, 0, sizeof(*eld));
+
        eld->monitor_present    = !!(present & AC_PINSENSE_PRESENCE);
-       eld->eld_valid          = !!(present & AC_PINSENSE_ELDV);
+       if (eld->monitor_present)
+               eld->eld_valid  = !!(present & AC_PINSENSE_ELDV);
+       else
+               eld->eld_valid  = 0;
 
-       if (present & AC_PINSENSE_ELDV)
-               hdmi_get_show_eld(codec, pin_nid, eld);
+       printk(KERN_INFO
+               "HDMI status: Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
+               pin_nid, eld->monitor_present, eld->eld_valid);
+
+       if (eld->eld_valid)
+               if (!snd_hdmi_get_eld(eld, codec, pin_nid))
+                       snd_hdmi_show_eld(eld);
+
+       snd_hda_input_jack_report(codec, pin_nid);
 }
 
 static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
@@ -927,7 +921,6 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
                                     SND_JACK_VIDEOOUT, NULL);
        if (err < 0)
                return err;
-       snd_hda_input_jack_report(codec, pin_nid);
 
        hdmi_present_sense(codec, pin_nid, &spec->sink_eld[spec->num_pins]);
 
@@ -1034,6 +1027,7 @@ static char *generic_hdmi_pcm_names[MAX_HDMI_CVTS] = {
        "HDMI 0",
        "HDMI 1",
        "HDMI 2",
+       "HDMI 3",
 };
 
 /*
@@ -1490,18 +1484,6 @@ static const struct hda_codec_ops nvhdmi_patch_ops_2ch = {
        .free = generic_hdmi_free,
 };
 
-static int patch_nvhdmi_8ch_89(struct hda_codec *codec)
-{
-       struct hdmi_spec *spec;
-       int err = patch_generic_hdmi(codec);
-
-       if (err < 0)
-               return err;
-       spec = codec->spec;
-       spec->old_pin_detect = 1;
-       return 0;
-}
-
 static int patch_nvhdmi_2ch(struct hda_codec *codec)
 {
        struct hdmi_spec *spec;
@@ -1515,7 +1497,6 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec)
        spec->multiout.num_dacs = 0;  /* no analog */
        spec->multiout.max_channels = 2;
        spec->multiout.dig_out_nid = nvhdmi_master_con_nid_7x;
-       spec->old_pin_detect = 1;
        spec->num_cvts = 1;
        spec->cvt[0] = nvhdmi_master_con_nid_7x;
        spec->pcm_playback = &nvhdmi_pcm_playback_2ch;
@@ -1658,28 +1639,28 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x10de0005, .name = "MCP77/78 HDMI",   .patch = patch_nvhdmi_8ch_7x },
 { .id = 0x10de0006, .name = "MCP77/78 HDMI",   .patch = patch_nvhdmi_8ch_7x },
 { .id = 0x10de0007, .name = "MCP79/7A HDMI",   .patch = patch_nvhdmi_8ch_7x },
-{ .id = 0x10de000a, .name = "GPU 0a HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de000b, .name = "GPU 0b HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de000c, .name = "MCP89 HDMI",      .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de000d, .name = "GPU 0d HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0010, .name = "GPU 10 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0011, .name = "GPU 11 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0012, .name = "GPU 12 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0013, .name = "GPU 13 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0014, .name = "GPU 14 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0015, .name = "GPU 15 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0016, .name = "GPU 16 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
+{ .id = 0x10de000a, .name = "GPU 0a HDMI/DP",  .patch = patch_generic_hdmi },
+{ .id = 0x10de000b, .name = "GPU 0b HDMI/DP",  .patch = patch_generic_hdmi },
+{ .id = 0x10de000c, .name = "MCP89 HDMI",      .patch = patch_generic_hdmi },
+{ .id = 0x10de000d, .name = "GPU 0d HDMI/DP",  .patch = patch_generic_hdmi },
+{ .id = 0x10de0010, .name = "GPU 10 HDMI/DP",  .patch = patch_generic_hdmi },
+{ .id = 0x10de0011, .name = "GPU 11 HDMI/DP",  .patch = patch_generic_hdmi },
+{ .id = 0x10de0012, .name = "GPU 12 HDMI/DP",  .patch = patch_generic_hdmi },
+{ .id = 0x10de0013, .name = "GPU 13 HDMI/DP",  .patch = patch_generic_hdmi },
+{ .id = 0x10de0014, .name = "GPU 14 HDMI/DP",  .patch = patch_generic_hdmi },
+{ .id = 0x10de0015, .name = "GPU 15 HDMI/DP",  .patch = patch_generic_hdmi },
+{ .id = 0x10de0016, .name = "GPU 16 HDMI/DP",  .patch = patch_generic_hdmi },
 /* 17 is known to be absent */
-{ .id = 0x10de0018, .name = "GPU 18 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0019, .name = "GPU 19 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de001a, .name = "GPU 1a HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de001b, .name = "GPU 1b HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de001c, .name = "GPU 1c HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0040, .name = "GPU 40 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0041, .name = "GPU 41 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0042, .name = "GPU 42 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0043, .name = "GPU 43 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
-{ .id = 0x10de0044, .name = "GPU 44 HDMI/DP",  .patch = patch_nvhdmi_8ch_89 },
+{ .id = 0x10de0018, .name = "GPU 18 HDMI/DP",  .patch = patch_generic_hdmi },
+{ .id = 0x10de0019, .name = "GPU 19 HDMI/DP",  .patch = patch_generic_hdmi },
+{ .id = 0x10de001a, .name = "GPU 1a HDMI/DP",  .patch = patch_generic_hdmi },
+{ .id = 0x10de001b, .name = "GPU 1b HDMI/DP",  .patch = patch_generic_hdmi },
+{ .id = 0x10de001c, .name = "GPU 1c HDMI/DP",  .patch = patch_generic_hdmi },
+{ .id = 0x10de0040, .name = "GPU 40 HDMI/DP",  .patch = patch_generic_hdmi },
+{ .id = 0x10de0041, .name = "GPU 41 HDMI/DP",  .patch = patch_generic_hdmi },
+{ .id = 0x10de0042, .name = "GPU 42 HDMI/DP",  .patch = patch_generic_hdmi },
+{ .id = 0x10de0043, .name = "GPU 43 HDMI/DP",  .patch = patch_generic_hdmi },
+{ .id = 0x10de0044, .name = "GPU 44 HDMI/DP",  .patch = patch_generic_hdmi },
 { .id = 0x10de0067, .name = "MCP67 HDMI",      .patch = patch_nvhdmi_2ch },
 { .id = 0x10de8001, .name = "MCP73 HDMI",      .patch = patch_nvhdmi_2ch },
 { .id = 0x80860054, .name = "IbexPeak HDMI",   .patch = patch_generic_hdmi },
index 28afbbf69ce00d30c76754eb6f8bb0c5adea519e..95572d290c2740a1aa146d053327028ca598d371 100644 (file)
@@ -146,7 +146,7 @@ static int at91sam9g20ek_wm8731_init(struct snd_soc_pcm_runtime *rtd)
                        "at91sam9g20ek_wm8731 "
                        ": at91sam9g20ek_wm8731_init() called\n");
 
-       ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_XTAL,
+       ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_MCLK,
                MCLK_RATE, SND_SOC_CLOCK_IN);
        if (ret < 0) {
                printk(KERN_ERR "Failed to set WM8731 SYSCLK: %d\n", ret);
index b8066ef10bb0ae65dcf3b49582fe89329e17f09c..46dbfd067f79647b7fca338eb85addd177eda966 100644 (file)
@@ -153,8 +153,7 @@ static int cq93vc_resume(struct snd_soc_codec *codec)
 
 static int cq93vc_probe(struct snd_soc_codec *codec)
 {
-       struct davinci_vc *davinci_vc =
-                       mfd_get_data(to_platform_device(codec->dev));
+       struct davinci_vc *davinci_vc = codec->dev->platform_data;
 
        davinci_vc->cq93vc.codec = codec;
        codec->control_data = davinci_vc;
index 575238d68e5eb0b8afbace12f51a14923f1d5f3b..bec788b12613add7ea8927762c23bb0572105b1a 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/pm.h>
 #include <linux/i2c.h>
 #include <linux/platform_device.h>
-#include <linux/mfd/core.h>
 #include <linux/i2c/twl.h>
 #include <linux/slab.h>
 #include <sound/core.h>
@@ -733,8 +732,7 @@ static int aif_event(struct snd_soc_dapm_widget *w,
 
 static void headset_ramp(struct snd_soc_codec *codec, int ramp)
 {
-       struct twl4030_codec_audio_data *pdata =
-                       mfd_get_data(to_platform_device(codec->dev));
+       struct twl4030_codec_audio_data *pdata = codec->dev->platform_data;
        unsigned char hs_gain, hs_pop;
        struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec);
        /* Base values for ramp delay calculation: 2^19 - 2^26 */
@@ -2299,7 +2297,7 @@ static struct snd_soc_codec_driver soc_codec_dev_twl4030 = {
 
 static int __devinit twl4030_codec_probe(struct platform_device *pdev)
 {
-       struct twl4030_codec_audio_data *pdata = mfd_get_data(pdev);
+       struct twl4030_codec_audio_data *pdata = pdev->dev.platform_data;
 
        if (!pdata) {
                dev_err(&pdev->dev, "platform_data is missing\n");
index c8a874d0d4cae5d9a0a66974ff3ac5b4887d492a..5836201834d97dcc769347ab98827841fce013ca 100644 (file)
@@ -441,8 +441,7 @@ EXPORT_SYMBOL_GPL(wl1273_get_format);
 
 static int wl1273_probe(struct snd_soc_codec *codec)
 {
-       struct wl1273_core **core =
-                       mfd_get_data(to_platform_device(codec->dev));
+       struct wl1273_core **core = codec->dev->platform_data;
        struct wl1273_priv *wl1273;
        int r;
 
index 14d0716bf0093b2a5d5b34ff5c8088302255b5dc..bcc208967917f149956985e7c8f5c290a1506aad 100644 (file)
@@ -22,7 +22,7 @@ SND_SOC_DAPM_ADC("ADC", "wm1250-ev1 Capture", SND_SOC_NOPM, 0, 0),
 SND_SOC_DAPM_DAC("DAC", "wm1250-ev1 Playback", SND_SOC_NOPM, 0, 0),
 
 SND_SOC_DAPM_INPUT("WM1250 Input"),
-SND_SOC_DAPM_INPUT("WM1250 Output"),
+SND_SOC_DAPM_OUTPUT("WM1250 Output"),
 };
 
 static const struct snd_soc_dapm_route wm1250_ev1_dapm_routes[] = {
index 736b785e375606435ab0e181f8d48634966242ff..fbee556cbf35adaba1bbca26e6659cb199590c07 100644 (file)
@@ -1378,7 +1378,7 @@ static void wm8400_probe_deferred(struct work_struct *work)
 
 static int wm8400_codec_probe(struct snd_soc_codec *codec)
 {
-       struct wm8400 *wm8400 = mfd_get_data(to_platform_device(codec->dev));
+       struct wm8400 *wm8400 = dev_get_platdata(codec->dev);
        struct wm8400_priv *priv;
        int ret;
        u16 reg;
index 6dec7cee2cb4bd5fcd0cda18c7b325284a2ae039..2dc964b55e4fa15ce23fec72fbc7c7925f8fbc1e 100644 (file)
@@ -198,7 +198,7 @@ static int wm8731_check_osc(struct snd_soc_dapm_widget *source,
 {
        struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(source->codec);
 
-       return wm8731->sysclk_type == WM8731_SYSCLK_MCLK;
+       return wm8731->sysclk_type == WM8731_SYSCLK_XTAL;
 }
 
 static const struct snd_soc_dapm_route wm8731_intercon[] = {
index ccc9bd8327948ecbc33290d17ee21d9717ba203d..a0b1a7278284ee7419ce662d0ba913391556b02e 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/gcd.h>
 #include <linux/gpio.h>
 #include <linux/i2c.h>
-#include <linux/delay.h>
 #include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
index 13e05a302a92edf15aa1f464298f7ae81a2e7579..9259f1f348999cd5963817402023e7b8d805768e 100644 (file)
@@ -205,7 +205,7 @@ static struct snd_soc_dai_driver davinci_vcif_dai = {
 
 static int davinci_vcif_probe(struct platform_device *pdev)
 {
-       struct davinci_vc *davinci_vc = mfd_get_data(pdev);
+       struct davinci_vc *davinci_vc = pdev->dev.platform_data;
        struct davinci_vcif_dev *davinci_vcif_dev;
        int ret;
 
index b5922984eac667008184a1475f1d5dc983f44e1a..99054cf1f68fe69febec8dd06528b121c8e6d9bd 100644 (file)
@@ -65,14 +65,6 @@ config SND_OMAP_SOC_OVERO
          Say Y if you want to add support for SoC audio on the
          Gumstix Overo or CompuLab CM-T35
 
-config SND_OMAP_SOC_OMAP2EVM
-       tristate "SoC Audio support for OMAP2EVM board"
-       depends on TWL4030_CORE && SND_OMAP_SOC && MACH_OMAP2EVM
-       select SND_OMAP_SOC_MCBSP
-       select SND_SOC_TWL4030
-       help
-         Say Y if you want to add support for SoC audio on the omap2evm board.
-
 config SND_OMAP_SOC_OMAP3EVM
        tristate "SoC Audio support for OMAP3EVM board"
        depends on TWL4030_CORE && SND_OMAP_SOC && MACH_OMAP3EVM
index ba9fc650db28a1e9b06641ad8ff2adb04647f87d..6c2c87eed5bb7bd39d79c41bc4596b7ec4a48417 100644 (file)
@@ -13,7 +13,6 @@ snd-soc-rx51-objs := rx51.o
 snd-soc-ams-delta-objs := ams-delta.o
 snd-soc-osk5912-objs := osk5912.o
 snd-soc-overo-objs := overo.o
-snd-soc-omap2evm-objs := omap2evm.o
 snd-soc-omap3evm-objs := omap3evm.o
 snd-soc-am3517evm-objs := am3517evm.o
 snd-soc-sdp3430-objs := sdp3430.o
diff --git a/sound/soc/omap/omap2evm.c b/sound/soc/omap/omap2evm.c
deleted file mode 100644 (file)
index 29b60d6..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * omap2evm.c  --  SoC audio machine driver for omap2evm board
- *
- * Author: Arun KS <arunks@mistralsolutions.com>
- *
- * Based on sound/soc/omap/overo.c by Steve Sakoman
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/clk.h>
-#include <linux/platform_device.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-
-#include <asm/mach-types.h>
-#include <mach/hardware.h>
-#include <mach/gpio.h>
-#include <plat/mcbsp.h>
-
-#include "omap-mcbsp.h"
-#include "omap-pcm.h"
-
-static int omap2evm_hw_params(struct snd_pcm_substream *substream,
-       struct snd_pcm_hw_params *params)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_dai *codec_dai = rtd->codec_dai;
-       struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
-       int ret;
-
-       /* Set codec DAI configuration */
-       ret = snd_soc_dai_set_fmt(codec_dai,
-                                 SND_SOC_DAIFMT_I2S |
-                                 SND_SOC_DAIFMT_NB_NF |
-                                 SND_SOC_DAIFMT_CBM_CFM);
-       if (ret < 0) {
-               printk(KERN_ERR "can't set codec DAI configuration\n");
-               return ret;
-       }
-
-       /* Set cpu DAI configuration */
-       ret = snd_soc_dai_set_fmt(cpu_dai,
-                                 SND_SOC_DAIFMT_I2S |
-                                 SND_SOC_DAIFMT_NB_NF |
-                                 SND_SOC_DAIFMT_CBM_CFM);
-       if (ret < 0) {
-               printk(KERN_ERR "can't set cpu DAI configuration\n");
-               return ret;
-       }
-
-       /* Set the codec system clock for DAC and ADC */
-       ret = snd_soc_dai_set_sysclk(codec_dai, 0, 26000000,
-                                           SND_SOC_CLOCK_IN);
-       if (ret < 0) {
-               printk(KERN_ERR "can't set codec system clock\n");
-               return ret;
-       }
-
-       return 0;
-}
-
-static struct snd_soc_ops omap2evm_ops = {
-       .hw_params = omap2evm_hw_params,
-};
-
-/* Digital audio interface glue - connects codec <--> CPU */
-static struct snd_soc_dai_link omap2evm_dai = {
-       .name = "TWL4030",
-       .stream_name = "TWL4030",
-       .cpu_dai_name = "omap-mcbsp-dai.1",
-       .codec_dai_name = "twl4030-hifi",
-       .platform_name = "omap-pcm-audio",
-       .codec_name = "twl4030-codec",
-       .ops = &omap2evm_ops,
-};
-
-/* Audio machine driver */
-static struct snd_soc_card snd_soc_omap2evm = {
-       .name = "omap2evm",
-       .dai_link = &omap2evm_dai,
-       .num_links = 1,
-};
-
-static struct platform_device *omap2evm_snd_device;
-
-static int __init omap2evm_soc_init(void)
-{
-       int ret;
-
-       if (!machine_is_omap2evm())
-               return -ENODEV;
-       printk(KERN_INFO "omap2evm SoC init\n");
-
-       omap2evm_snd_device = platform_device_alloc("soc-audio", -1);
-       if (!omap2evm_snd_device) {
-               printk(KERN_ERR "Platform device allocation failed\n");
-               return -ENOMEM;
-       }
-
-       platform_set_drvdata(omap2evm_snd_device, &snd_soc_omap2evm);
-
-       ret = platform_device_add(omap2evm_snd_device);
-       if (ret)
-               goto err1;
-
-       return 0;
-
-err1:
-       printk(KERN_ERR "Unable to add platform device\n");
-       platform_device_put(omap2evm_snd_device);
-
-       return ret;
-}
-module_init(omap2evm_soc_init);
-
-static void __exit omap2evm_soc_exit(void)
-{
-       platform_device_unregister(omap2evm_snd_device);
-}
-module_exit(omap2evm_soc_exit);
-
-MODULE_AUTHOR("Arun KS <arunks@mistralsolutions.com>");
-MODULE_DESCRIPTION("ALSA SoC omap2evm");
-MODULE_LICENSE("GPL");
index 2afabaf59491ed71ae331f827d40d04545e6b1b3..1a591f1ebfbd9431f89559be28421d8defde585c 100644 (file)
@@ -151,13 +151,13 @@ static struct snd_soc_ops raumfeld_cs4270_ops = {
        .hw_params = raumfeld_cs4270_hw_params,
 };
 
-static int raumfeld_line_suspend(struct snd_soc_card *card)
+static int raumfeld_analog_suspend(struct snd_soc_card *card)
 {
        raumfeld_enable_audio(false);
        return 0;
 }
 
-static int raumfeld_line_resume(struct snd_soc_card *card)
+static int raumfeld_analog_resume(struct snd_soc_card *card)
 {
        raumfeld_enable_audio(true);
        return 0;
@@ -225,32 +225,53 @@ static struct snd_soc_ops raumfeld_ak4104_ops = {
        .hw_params = raumfeld_ak4104_hw_params,
 };
 
-static struct snd_soc_dai_link raumfeld_dai[] = {
+#define DAI_LINK_CS4270                \
+{                                                      \
+       .name           = "CS4270",                     \
+       .stream_name    = "CS4270",                     \
+       .cpu_dai_name   = "pxa-ssp-dai.0",              \
+       .platform_name  = "pxa-pcm-audio",              \
+       .codec_dai_name = "cs4270-hifi",                \
+       .codec_name     = "cs4270-codec.0-0048",        \
+       .ops            = &raumfeld_cs4270_ops,         \
+}
+
+#define DAI_LINK_AK4104                \
+{                                                      \
+       .name           = "ak4104",                     \
+       .stream_name    = "Playback",                   \
+       .cpu_dai_name   = "pxa-ssp-dai.1",              \
+       .codec_dai_name = "ak4104-hifi",                \
+       .platform_name  = "pxa-pcm-audio",              \
+       .ops            = &raumfeld_ak4104_ops,         \
+       .codec_name     = "spi0.0",                     \
+}
+
+static struct snd_soc_dai_link snd_soc_raumfeld_connector_dai[] =
 {
-       .name           = "ak4104",
-       .stream_name    = "Playback",
-       .cpu_dai_name   = "pxa-ssp-dai.1",
-       .codec_dai_name = "ak4104-hifi",
-       .platform_name  = "pxa-pcm-audio",
-       .ops            = &raumfeld_ak4104_ops,
-       .codec_name     = "ak4104-codec.0",
-},
+       DAI_LINK_CS4270,
+       DAI_LINK_AK4104,
+};
+
+static struct snd_soc_dai_link snd_soc_raumfeld_speaker_dai[] =
 {
-       .name           = "CS4270",
-       .stream_name    = "CS4270",
-       .cpu_dai_name   = "pxa-ssp-dai.0",
-       .platform_name  = "pxa-pcm-audio",
-       .codec_dai_name = "cs4270-hifi",
-       .codec_name     = "cs4270-codec.0-0048",
-       .ops            = &raumfeld_cs4270_ops,
-},};
-
-static struct snd_soc_card snd_soc_raumfeld = {
-       .name           = "Raumfeld",
-       .dai_link       = raumfeld_dai,
-       .suspend_post   = raumfeld_line_suspend,
-       .resume_pre     = raumfeld_line_resume,
-       .num_links      = ARRAY_SIZE(raumfeld_dai),
+       DAI_LINK_CS4270,
+};
+
+static struct snd_soc_card snd_soc_raumfeld_connector = {
+       .name           = "Raumfeld Connector",
+       .dai_link       = snd_soc_raumfeld_connector_dai,
+       .num_links      = ARRAY_SIZE(snd_soc_raumfeld_connector_dai),
+       .suspend_post   = raumfeld_analog_suspend,
+       .resume_pre     = raumfeld_analog_resume,
+};
+
+static struct snd_soc_card snd_soc_raumfeld_speaker = {
+       .name           = "Raumfeld Speaker",
+       .dai_link       = snd_soc_raumfeld_speaker_dai,
+       .num_links      = ARRAY_SIZE(snd_soc_raumfeld_speaker_dai),
+       .suspend_post   = raumfeld_analog_suspend,
+       .resume_pre     = raumfeld_analog_resume,
 };
 
 static struct platform_device *raumfeld_audio_device;
@@ -271,22 +292,25 @@ static int __init raumfeld_audio_init(void)
 
        set_max9485_clk(MAX9485_MCLK_FREQ_122880);
 
-       /* Register LINE and SPDIF */
+       /* Register analog device */
        raumfeld_audio_device = platform_device_alloc("soc-audio", 0);
        if (!raumfeld_audio_device)
                return -ENOMEM;
 
-       platform_set_drvdata(raumfeld_audio_device,
-                            &snd_soc_raumfeld);
-       ret = platform_device_add(raumfeld_audio_device);
-
-       /* no S/PDIF on Speakers */
        if (machine_is_raumfeld_speaker())
+               platform_set_drvdata(raumfeld_audio_device,
+                                    &snd_soc_raumfeld_speaker);
+
+       if (machine_is_raumfeld_connector())
+               platform_set_drvdata(raumfeld_audio_device,
+                                    &snd_soc_raumfeld_connector);
+
+       ret = platform_device_add(raumfeld_audio_device);
+       if (ret < 0)
                return ret;
 
        raumfeld_enable_audio(true);
-
-       return ret;
+       return 0;
 }
 
 static void __exit raumfeld_audio_exit(void)
index bb7cd58129459cbd9f0fb729ad61d4ed7070ee18..d75043ed7fc0551f66881675df110dede0d5ccf0 100644 (file)
@@ -1306,10 +1306,6 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num)
        /* no, then find CPU DAI from registered DAIs*/
        list_for_each_entry(cpu_dai, &dai_list, list) {
                if (!strcmp(cpu_dai->name, dai_link->cpu_dai_name)) {
-
-                       if (!try_module_get(cpu_dai->dev->driver->owner))
-                               return -ENODEV;
-
                        rtd->cpu_dai = cpu_dai;
                        goto find_codec;
                }
@@ -1622,11 +1618,15 @@ static int soc_probe_dai_link(struct snd_soc_card *card, int num)
 
        /* probe the cpu_dai */
        if (!cpu_dai->probed) {
+               if (!try_module_get(cpu_dai->dev->driver->owner))
+                       return -ENODEV;
+
                if (cpu_dai->driver->probe) {
                        ret = cpu_dai->driver->probe(cpu_dai);
                        if (ret < 0) {
                                printk(KERN_ERR "asoc: failed to probe CPU DAI %s\n",
                                                cpu_dai->name);
+                               module_put(cpu_dai->dev->driver->owner);
                                return ret;
                        }
                }
index 456617e63789893c7d90f547cbc00f3c7ba168d5..999bb08cdfb143708312cd0da0e358caa077d766 100644 (file)
@@ -1110,7 +1110,7 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
        trace_snd_soc_dapm_start(card);
 
        list_for_each_entry(d, &card->dapm_list, list)
-               if (d->n_widgets)
+               if (d->n_widgets || d->codec == NULL)
                        d->dev_power = 0;
 
        /* Check which widgets we need to power and store them in
index a90662af2d6bbd7b2df46d17cb1da01ee849d29c..220c6167dd86d5ed2164db0c73b97afac1ba6ec5 100644 (file)
@@ -48,6 +48,7 @@
 #include <linux/usb/audio.h>
 #include <linux/usb/audio-v2.h>
 
+#include <sound/control.h>
 #include <sound/core.h>
 #include <sound/info.h>
 #include <sound/pcm.h>
@@ -492,14 +493,6 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
                }
        }
 
-       chip->txfr_quirk = 0;
-       err = 1; /* continue */
-       if (quirk && quirk->ifnum != QUIRK_NO_INTERFACE) {
-               /* need some special handlings */
-               if ((err = snd_usb_create_quirk(chip, intf, &usb_audio_driver, quirk)) < 0)
-                       goto __error;
-       }
-
        /*
         * For devices with more than one control interface, we assume the
         * first contains the audio controls. We might need a more specific
@@ -508,6 +501,14 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
        if (!chip->ctrl_intf)
                chip->ctrl_intf = alts;
 
+       chip->txfr_quirk = 0;
+       err = 1; /* continue */
+       if (quirk && quirk->ifnum != QUIRK_NO_INTERFACE) {
+               /* need some special handlings */
+               if ((err = snd_usb_create_quirk(chip, intf, &usb_audio_driver, quirk)) < 0)
+                       goto __error;
+       }
+
        if (err > 0) {
                /* create normal USB audio interfaces */
                if (snd_usb_create_streams(chip, ifnum) < 0 ||
index eab06edcc9b73f8a56f0ec2bac616f3a8d535073..c22fa76e363ae5699e9885cc548fdc28d25607b3 100644 (file)
@@ -86,16 +86,6 @@ struct mixer_build {
        const struct usbmix_selector_map *selector_map;
 };
 
-enum {
-       USB_MIXER_BOOLEAN,
-       USB_MIXER_INV_BOOLEAN,
-       USB_MIXER_S8,
-       USB_MIXER_U8,
-       USB_MIXER_S16,
-       USB_MIXER_U16,
-};
-
-
 /*E-mu 0202/0404/0204 eXtension Unit(XU) control*/
 enum {
        USB_XU_CLOCK_RATE               = 0xe301,
@@ -535,20 +525,21 @@ static int check_matrix_bitmap(unsigned char *bmap, int ich, int och, int num_ou
  * if failed, give up and free the control instance.
  */
 
-static int add_control_to_empty(struct mixer_build *state, struct snd_kcontrol *kctl)
+int snd_usb_mixer_add_control(struct usb_mixer_interface *mixer,
+                             struct snd_kcontrol *kctl)
 {
        struct usb_mixer_elem_info *cval = kctl->private_data;
        int err;
 
-       while (snd_ctl_find_id(state->chip->card, &kctl->id))
+       while (snd_ctl_find_id(mixer->chip->card, &kctl->id))
                kctl->id.index++;
-       if ((err = snd_ctl_add(state->chip->card, kctl)) < 0) {
+       if ((err = snd_ctl_add(mixer->chip->card, kctl)) < 0) {
                snd_printd(KERN_ERR "cannot add control (err = %d)\n", err);
                return err;
        }
        cval->elem_id = &kctl->id;
-       cval->next_id_elem = state->mixer->id_elems[cval->id];
-       state->mixer->id_elems[cval->id] = cval;
+       cval->next_id_elem = mixer->id_elems[cval->id];
+       mixer->id_elems[cval->id] = cval;
        return 0;
 }
 
@@ -984,6 +975,9 @@ static struct snd_kcontrol_new usb_feature_unit_ctl_ro = {
        .put = NULL,
 };
 
+/* This symbol is exported in order to allow the mixer quirks to
+ * hook up to the standard feature unit control mechanism */
+struct snd_kcontrol_new *snd_usb_feature_unit_ctl = &usb_feature_unit_ctl;
 
 /*
  * build a feature control
@@ -1176,7 +1170,7 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
 
        snd_printdd(KERN_INFO "[%d] FU [%s] ch = %d, val = %d/%d/%d\n",
                    cval->id, kctl->id.name, cval->channels, cval->min, cval->max, cval->res);
-       add_control_to_empty(state, kctl);
+       snd_usb_mixer_add_control(state->mixer, kctl);
 }
 
 
@@ -1340,7 +1334,7 @@ static void build_mixer_unit_ctl(struct mixer_build *state,
 
        snd_printdd(KERN_INFO "[%d] MU [%s] ch = %d, val = %d/%d\n",
                    cval->id, kctl->id.name, cval->channels, cval->min, cval->max);
-       add_control_to_empty(state, kctl);
+       snd_usb_mixer_add_control(state->mixer, kctl);
 }
 
 
@@ -1641,7 +1635,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, void *raw
 
                snd_printdd(KERN_INFO "[%d] PU [%s] ch = %d, val = %d/%d\n",
                            cval->id, kctl->id.name, cval->channels, cval->min, cval->max);
-               if ((err = add_control_to_empty(state, kctl)) < 0)
+               if ((err = snd_usb_mixer_add_control(state->mixer, kctl)) < 0)
                        return err;
        }
        return 0;
@@ -1858,7 +1852,7 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, void
 
        snd_printdd(KERN_INFO "[%d] SU [%s] items = %d\n",
                    cval->id, kctl->id.name, desc->bNrInPins);
-       if ((err = add_control_to_empty(state, kctl)) < 0)
+       if ((err = snd_usb_mixer_add_control(state->mixer, kctl)) < 0)
                return err;
 
        return 0;
index b4a2c8165e4b2b6e7870fda7b0f3a490e17e97e8..ae1a14dcfe82cf51753da7265bcdaf6ce95969f4 100644 (file)
@@ -24,7 +24,16 @@ struct usb_mixer_interface {
        u8 xonar_u1_status;
 };
 
-#define MAX_CHANNELS   10      /* max logical channels */
+#define MAX_CHANNELS   16      /* max logical channels */
+
+enum {
+       USB_MIXER_BOOLEAN,
+       USB_MIXER_INV_BOOLEAN,
+       USB_MIXER_S8,
+       USB_MIXER_U8,
+       USB_MIXER_S16,
+       USB_MIXER_U16,
+};
 
 struct usb_mixer_elem_info {
        struct usb_mixer_interface *mixer;
@@ -55,4 +64,7 @@ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
 void snd_usb_mixer_inactivate(struct usb_mixer_interface *mixer);
 int snd_usb_mixer_activate(struct usb_mixer_interface *mixer);
 
+int snd_usb_mixer_add_control(struct usb_mixer_interface *mixer,
+                             struct snd_kcontrol *kctl);
+
 #endif /* __USBMIXER_H */
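
A short note on these header changes, inferred from how the quirk code below uses
them rather than stated anywhere in this diff:

        /* The quirk below selects inputs through the element's channel bitmask,
         * with the input index running 0..15, so MAX_CHANNELS now has to cover
         * 16 logical channels.  The enum gives names to the value encodings the
         * mixer code distinguishes: booleans for switch controls, 8/16-bit
         * signed or unsigned integers for level controls. */
        cval->val_type = USB_MIXER_S16;         /* e.g. a 16-bit signed volume element */
        cval->cmask = 1 << in;                  /* one bit per logical input channel */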
index 9146cffa6ede3096b24eae718935226880530a25..3d0f4873112b17d9f5d7bfec90a87fbab2053ae6 100644 (file)
@@ -40,6 +40,8 @@
 #include "mixer_quirks.h"
 #include "helper.h"
 
+extern struct snd_kcontrol_new *snd_usb_feature_unit_ctl;
+
 /*
  * Sound Blaster remote control configuration
  *
@@ -492,6 +494,69 @@ static int snd_nativeinstruments_create_mixer(struct usb_mixer_interface *mixer,
        return err;
 }
 
+/* M-Audio Fast Track Ultra quirks */
+
+/* private_free callback */
+static void usb_mixer_elem_free(struct snd_kcontrol *kctl)
+{
+       kfree(kctl->private_data);
+       kctl->private_data = NULL;
+}
+
+static int snd_maudio_ftu_create_ctl(struct usb_mixer_interface *mixer,
+                                    int in, int out, const char *name)
+{
+       struct usb_mixer_elem_info *cval;
+       struct snd_kcontrol *kctl;
+
+       cval = kzalloc(sizeof(*cval), GFP_KERNEL);
+       if (!cval)
+               return -ENOMEM;
+
+       cval->id = 5;
+       cval->mixer = mixer;
+       cval->val_type = USB_MIXER_S16;
+       cval->channels = 1;
+       cval->control = out + 1;
+       cval->cmask = 1 << in;
+
+       kctl = snd_ctl_new1(snd_usb_feature_unit_ctl, cval);
+       if (!kctl) {
+               kfree(cval);
+               return -ENOMEM;
+       }
+
+       snprintf(kctl->id.name, sizeof(kctl->id.name), "%s", name);
+       kctl->private_free = usb_mixer_elem_free;
+       return snd_usb_mixer_add_control(mixer, kctl);
+}
+
+static int snd_maudio_ftu_create_mixer(struct usb_mixer_interface *mixer)
+{
+       char name[64];
+       int in, out, err;
+
+       for (out = 0; out < 8; out++) {
+               for (in = 0; in < 8; in++) {
+                       snprintf(name, sizeof(name),
+                                "AIn%d - Out%d Capture Volume", in  + 1, out + 1);
+                       err = snd_maudio_ftu_create_ctl(mixer, in, out, name);
+                       if (err < 0)
+                               return err;
+               }
+
+               for (in = 8; in < 16; in++) {
+                       snprintf(name, sizeof(name),
+                                "DIn%d - Out%d Playback Volume", in - 7, out + 1);
+                       err = snd_maudio_ftu_create_ctl(mixer, in, out, name);
+                       if (err < 0)
+                               return err;
+               }
+       }
+
+       return 0;
+}
+
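
To make the addressing above concrete, a worked instance of the two loops (values
read straight off the code above, not measured against hardware):

        /* in = 2, out = 4 (an analog input) yields:
         *   name          "AIn3 - Out5 Capture Volume"
         *   cval->id      5      (the unit the quirk targets)
         *   cval->control 5      (out + 1)
         *   cval->cmask   0x04   (1 << in)
         *   one S16 channel, serviced by the generic feature-unit callbacks.
         * Across 8 analog and 8 digital inputs for each of the 8 outputs the
         * loops register 128 such controls per device. */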
 void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
                               unsigned char samplerate_id)
 {
@@ -533,6 +598,11 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
                                              snd_audigy2nx_proc_read);
                break;
 
+       case USB_ID(0x0763, 0x2080): /* M-Audio Fast Track Ultra */
+       case USB_ID(0x0763, 0x2081): /* M-Audio Fast Track Ultra 8R */
+               err = snd_maudio_ftu_create_mixer(mixer);
+               break;
+
        case USB_ID(0x0b05, 0x1739):
        case USB_ID(0x0b05, 0x1743):
                err = snd_xonar_u1_controls_create(mixer);
index 78792a8900c3c575904f9d712e0bb96b224ac2a1..0b2ae8e1c02d6e62a3671de37915f69f7db482b4 100644 (file)
@@ -1988,7 +1988,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                .data = & (const struct snd_usb_audio_quirk[]) {
                        {
                                .ifnum = 0,
-                               .type = QUIRK_IGNORE_INTERFACE
+                               .type = QUIRK_AUDIO_STANDARD_MIXER,
                        },
                        {
                                .ifnum = 1,
@@ -2055,7 +2055,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                .data = & (const struct snd_usb_audio_quirk[]) {
                        {
                                .ifnum = 0,
-                               .type = QUIRK_IGNORE_INTERFACE
+                               .type = QUIRK_AUDIO_STANDARD_MIXER,
                        },
                        {
                                .ifnum = 1,
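
Both hunks above sit inside composite quirk entries (presumably the two Fast Track
Ultra devices that the mixer quirk above matches by USB id); only the interface-0
sub-quirk changes from being ignored to receiving a standard mixer. For
orientation, the rough shape of such an entry, with the parts this view elides
summarised rather than reproduced:

        .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
                .ifnum = QUIRK_ANY_INTERFACE,
                .type = QUIRK_COMPOSITE,
                .data = & (const struct snd_usb_audio_quirk[]) {
                        {
                                .ifnum = 0,
                                .type = QUIRK_AUDIO_STANDARD_MIXER,     /* was QUIRK_IGNORE_INTERFACE */
                        },
                        /* ... sub-quirks for the remaining (streaming) interfaces ... */
                        {
                                .ifnum = -1
                        }
                }
        }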
index bd13d7257240528f97cc2a0caf5b134a0b9d58ac..2e969cbb393b004af02d37419fccb080ce99e9d6 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/usb.h>
 #include <linux/usb/audio.h>
 
+#include <sound/control.h>
 #include <sound/core.h>
 #include <sound/info.h>
 #include <sound/pcm.h>
@@ -262,6 +263,20 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
        return 0;
 }
 
+/*
+ * Create a standard mixer for the specified interface.
+ */
+static int create_standard_mixer_quirk(struct snd_usb_audio *chip,
+                                      struct usb_interface *iface,
+                                      struct usb_driver *driver,
+                                      const struct snd_usb_audio_quirk *quirk)
+{
+       if (quirk->ifnum < 0)
+               return 0;
+
+       return snd_usb_create_mixer(chip, quirk->ifnum, 0);
+}
+
 /*
  * audio-interface quirks
  *
@@ -294,7 +309,8 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
                [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk,
                [QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk,
                [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk,
-               [QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk
+               [QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk,
+               [QUIRK_AUDIO_STANDARD_MIXER] = create_standard_mixer_quirk,
        };
 
        if (quirk->type < QUIRK_TYPE_COUNT) {
index 32f2a97f2f14f7494d07d5c4e84f1230aa283797..1e79986b577749ca0ebea31aa7b5c7c46ac750b1 100644 (file)
@@ -84,6 +84,7 @@ enum quirk_type {
        QUIRK_AUDIO_FIXED_ENDPOINT,
        QUIRK_AUDIO_EDIROL_UAXX,
        QUIRK_AUDIO_ALIGN_TRANSFER,
+       QUIRK_AUDIO_STANDARD_MIXER,
 
        QUIRK_TYPE_COUNT
 };
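
Putting the pieces together, the net effect for the two M-Audio devices as far as
the hunks in this series show (call sites outside these hunks are inferred, not
quoted):

        /*
         * 1. The quirk table marks interface 0 as QUIRK_AUDIO_STANDARD_MIXER.
         * 2. snd_usb_create_quirk() dispatches that entry through the
         *    quirk_funcs[] table above to create_standard_mixer_quirk(), which
         *    simply calls snd_usb_create_mixer(chip, quirk->ifnum, 0).
         * 3. During mixer setup, snd_usb_mixer_apply_create_quirk() matches
         *    USB_ID(0x0763, 0x2080/0x2081) and adds the 16x8 "AIn/DIn - Out"
         *    matrix controls via snd_maudio_ftu_create_mixer().
         */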