Merge branch 'cleanups' of git://repo.or.cz/linux-2.6/btrfs-unstable into inode_numbers
author Chris Mason <chris.mason@oracle.com>
Sun, 22 May 2011 16:33:42 +0000 (12:33 -0400)
committer Chris Mason <chris.mason@oracle.com>
Sun, 22 May 2011 16:33:42 +0000 (12:33 -0400)
Conflicts:
fs/btrfs/extent-tree.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/tree-log.c

Signed-off-by: Chris Mason <chris.mason@oracle.com>
524 files changed:
Documentation/flexible-arrays.txt
MAINTAINERS
Makefile
arch/alpha/include/asm/unistd.h
arch/alpha/kernel/systbls.S
arch/alpha/kernel/time.c
arch/arm/boot/compressed/Makefile
arch/arm/boot/compressed/head.S
arch/arm/boot/compressed/vmlinux.lds.in
arch/arm/configs/at91x40_defconfig [new file with mode: 0644]
arch/arm/include/asm/kprobes.h
arch/arm/include/asm/system.h
arch/arm/kernel/kprobes-decode.c
arch/arm/kernel/kprobes.c
arch/arm/kernel/perf_event.c
arch/arm/kernel/ptrace.c
arch/arm/kernel/signal.c
arch/arm/kernel/smp.c
arch/arm/kernel/sys_oabi-compat.c
arch/arm/mach-at91/Kconfig
arch/arm/mach-at91/board-eb01.c
arch/arm/mach-at91/include/mach/cpu.h
arch/arm/mach-davinci/Kconfig
arch/arm/mach-davinci/board-mityomapl138.c
arch/arm/mach-davinci/devices-da8xx.c
arch/arm/mach-davinci/include/mach/debug-macro.S
arch/arm/mach-davinci/include/mach/serial.h
arch/arm/mach-mx3/mach-vpr200.c
arch/arm/mach-mx5/board-mx53_loco.c
arch/arm/mach-mxs/clock-mx28.c
arch/arm/mach-omap2/clkt34xx_dpll3m2.c
arch/arm/mach-pxa/hx4700.c
arch/arm/mach-pxa/magician.c
arch/arm/mach-realview/include/mach/barriers.h
arch/arm/mach-tegra/include/mach/barriers.h
arch/arm/mm/init.c
arch/arm/mm/proc-xscale.S
arch/arm/plat-mxc/gpio.c
arch/arm/plat-mxc/ssi-fiq.S
arch/arm/plat-omap/iommu.c
arch/mips/Kconfig
arch/mips/alchemy/devboards/db1x00/board_setup.c
arch/mips/alchemy/xxs1500/init.c
arch/mips/ar7/gpio.c
arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
arch/mips/cavium-octeon/Kconfig
arch/mips/include/asm/cache.h
arch/mips/include/asm/cevt-r4k.h
arch/mips/include/asm/dma-mapping.h
arch/mips/include/asm/hugetlb.h
arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
arch/mips/jazz/jazzdma.c
arch/mips/jz4740/dma.c
arch/mips/jz4740/time.c
arch/mips/jz4740/timer.c
arch/mips/kernel/ftrace.c
arch/mips/kernel/ptrace.c
arch/mips/kernel/scall32-o32.S
arch/mips/kernel/scall64-64.S
arch/mips/kernel/scall64-n32.S
arch/mips/kernel/scall64-o32.S
arch/mips/kernel/traps.c
arch/mips/kernel/vmlinux.lds.S
arch/mips/loongson/common/env.c
arch/mips/mm/c-r4k.c
arch/mips/mm/tlbex.c
arch/mips/mti-malta/malta-init.c
arch/mips/mti-malta/malta-int.c
arch/mips/pmc-sierra/msp71xx/msp_irq_per.c
arch/mips/power/hibernate.S
arch/mips/rb532/gpio.c
arch/mips/sgi-ip22/ip22-platform.c
arch/mips/sgi-ip22/ip22-time.c
arch/mips/sgi-ip27/ip27-hubio.c
arch/mips/sgi-ip27/ip27-klnuma.c
arch/mips/sni/time.c
arch/powerpc/include/asm/8xx_immap.h
arch/powerpc/kernel/ptrace.c
arch/powerpc/platforms/83xx/suspend.c
arch/powerpc/sysdev/fsl_msi.c
arch/s390/crypto/prng.c
arch/s390/include/asm/diag.h
arch/s390/include/asm/mmu_context.h
arch/s390/kernel/diag.c
arch/s390/kernel/dis.c
arch/s390/kernel/entry.S
arch/s390/kernel/entry64.S
arch/s390/mm/cmm.c
arch/s390/mm/fault.c
arch/s390/oprofile/hwsampler.c
arch/s390/oprofile/hwsampler.h
arch/s390/oprofile/init.c
arch/sh/kernel/ptrace_32.c
arch/sparc/kernel/apc.c
arch/sparc/kernel/pci_sabre.c
arch/sparc/kernel/pci_schizo.c
arch/sparc/kernel/pmc.c
arch/sparc/kernel/smp_32.c
arch/sparc/kernel/time_32.c
arch/sparc/lib/checksum_32.S
arch/um/os-Linux/util.c
arch/x86/include/asm/apicdef.h
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/uv/uv_bau.h
arch/x86/include/asm/uv/uv_hub.h
arch/x86/include/asm/uv/uv_mmrs.h
arch/x86/include/asm/x86_init.h
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/mcheck/therm_throt.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/kprobes.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/reboot_32.S
arch/x86/kernel/x86_init.c
arch/x86/mm/init.c
arch/x86/mm/numa_64.c
arch/x86/platform/uv/tlb_uv.c
arch/x86/xen/mmu.c
block/blk-cgroup.c
block/blk-cgroup.h
block/blk-core.c
block/blk-throttle.c
block/cfq-iosched.c
drivers/ata/libahci.c
drivers/ata/libata-eh.c
drivers/atm/fore200e.c
drivers/block/DAC960.c
drivers/block/amiflop.c
drivers/block/ataflop.c
drivers/block/floppy.c
drivers/block/paride/pcd.c
drivers/block/paride/pd.c
drivers/block/paride/pf.c
drivers/block/rbd.c
drivers/block/swim.c
drivers/block/swim3.c
drivers/block/ub.c
drivers/block/xsysace.c
drivers/cdrom/cdrom.c
drivers/cdrom/gdrom.c
drivers/cdrom/viocd.c
drivers/char/hw_random/n2-drv.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/xilinx_hwicap/xilinx_hwicap.c
drivers/clk/clkdev.c
drivers/edac/ppc4xx_edac.c
drivers/firewire/ohci.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/nouveau/nouveau_mem.c
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/nouveau/nouveau_state.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_cursor.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/reg_srcs/cayman
drivers/gpu/drm/radeon/reg_srcs/evergreen
drivers/gpu/vga/vga_switcheroo.c
drivers/hwmon/twl4030-madc-hwmon.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-mpc.c
drivers/i2c/busses/i2c-parport.c
drivers/i2c/busses/i2c-pnx.c
drivers/input/touchscreen/ads7846.c
drivers/input/touchscreen/wm831x-ts.c
drivers/leds/leds-lm3530.c
drivers/media/dvb/dvb-usb/Kconfig
drivers/media/dvb/ngene/ngene-core.c
drivers/media/radio/saa7706h.c
drivers/media/radio/tef6862.c
drivers/media/rc/imon.c
drivers/media/rc/ite-cir.c
drivers/media/rc/mceusb.c
drivers/media/rc/rc-main.c
drivers/media/video/cx88/cx88-input.c
drivers/media/video/m52790.c
drivers/media/video/soc_camera.c
drivers/media/video/tda9840.c
drivers/media/video/tea6415c.c
drivers/media/video/tea6420.c
drivers/media/video/upd64031a.c
drivers/media/video/upd64083.c
drivers/media/video/v4l2-device.c
drivers/media/video/v4l2-subdev.c
drivers/message/i2o/i2o_block.c
drivers/mfd/asic3.c
drivers/mfd/omap-usb-host.c
drivers/mfd/twl4030-power.c
drivers/mmc/core/bus.c
drivers/mmc/host/omap.c
drivers/mmc/host/sdhci-of-core.c
drivers/mmc/host/sdhci-pci.c
drivers/mmc/host/sdhci.c
drivers/mmc/host/tmio_mmc_pio.c
drivers/mtd/maps/physmap_of.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/amd8111e.c
drivers/net/arm/etherh.c
drivers/net/atl1c/atl1c.h
drivers/net/atl1c/atl1c_main.c
drivers/net/benet/be.h
drivers/net/benet/be_cmds.c
drivers/net/benet/be_main.c
drivers/net/bnx2.c
drivers/net/bnx2x/bnx2x_cmn.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_3ad.h
drivers/net/can/mscan/mpc5xxx_can.c
drivers/net/can/sja1000/sja1000.c
drivers/net/can/slcan.c
drivers/net/ehea/ehea_ethtool.c
drivers/net/ehea/ehea_main.c
drivers/net/fs_enet/fs_enet-main.c
drivers/net/fs_enet/mac-fec.c
drivers/net/fs_enet/mii-fec.c
drivers/net/ftmac100.c
drivers/net/hydra.c
drivers/net/mii.c
drivers/net/ne-h8300.c
drivers/net/netconsole.c
drivers/net/pch_gbe/pch_gbe_main.c
drivers/net/r8169.c
drivers/net/sfc/mcdi.c
drivers/net/sfc/nic.c
drivers/net/sfc/nic.h
drivers/net/sfc/siena.c
drivers/net/slip.c
drivers/net/sunhme.c
drivers/net/tg3.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/ipheth.c
drivers/net/usb/smsc95xx.c
drivers/net/usb/usbnet.c
drivers/net/veth.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_ethtool.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/recv.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/iwlegacy/iwl-4965-tx.c
drivers/net/wireless/iwlegacy/iwl-core.c
drivers/net/wireless/iwlegacy/iwl-dev.h
drivers/net/wireless/iwlegacy/iwl-led.c
drivers/net/wireless/iwlegacy/iwl4965-base.c
drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
drivers/net/wireless/iwlwifi/iwl-agn-tx.c
drivers/net/wireless/libertas/cmd.c
drivers/net/zorro8390.c
drivers/pci/setup-bus.c
drivers/platform/x86/eeepc-laptop.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/rapidio/switches/idt_gen2.c
drivers/rapidio/switches/idtcps.c
drivers/rapidio/switches/tsi57x.c
drivers/rtc/rtc-davinci.c
drivers/rtc/rtc-ds1286.c
drivers/rtc/rtc-ep93xx.c
drivers/rtc/rtc-m41t80.c
drivers/rtc/rtc-max8925.c
drivers/rtc/rtc-max8998.c
drivers/rtc/rtc-mc13xxx.c
drivers/rtc/rtc-msm6242.c
drivers/rtc/rtc-mxc.c
drivers/rtc/rtc-pcap.c
drivers/rtc/rtc-rp5c01.c
drivers/rtc/rtc-s3c.c
drivers/s390/block/dasd.c
drivers/s390/block/dasd_diag.c
drivers/s390/char/sclp_cmd.c
drivers/s390/char/tape_block.c
drivers/s390/kvm/kvm_virtio.c
drivers/scsi/qlogicpti.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_scan.c
drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
drivers/staging/ft1000/ft1000-pcmcia/ft1000_proc.c
drivers/staging/gma500/Kconfig
drivers/staging/intel_sst/intelmid_v1_control.c
drivers/staging/intel_sst/intelmid_v2_control.c
drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
drivers/staging/rts_pstor/debug.h
drivers/staging/rts_pstor/ms.c
drivers/staging/rts_pstor/rtsx_chip.c
drivers/staging/rts_pstor/rtsx_scsi.c
drivers/staging/rts_pstor/sd.c
drivers/staging/rts_pstor/trace.h
drivers/staging/rts_pstor/xd.c
drivers/staging/solo6x10/Kconfig
drivers/staging/usbip/vhci_hcd.c
drivers/staging/usbip/vhci_sysfs.c
drivers/staging/wlan-ng/cfg80211.c
drivers/tty/serial/of_serial.c
drivers/usb/gadget/fsl_qe_udc.c
drivers/usb/host/ehci-omap.c
drivers/usb/host/isp1760-hcd.c
drivers/usb/host/xhci-hub.c
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/omap2430.c
drivers/video/acornfb.c
drivers/video/fbmem.c
drivers/watchdog/mpc8xxx_wdt.c
fs/block_dev.c
fs/btrfs/Makefile
fs/btrfs/acl.c
fs/btrfs/btrfs_inode.h
fs/btrfs/compression.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c [new file with mode: 0644]
fs/btrfs/delayed-inode.h [new file with mode: 0644]
fs/btrfs/dir-item.c
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/export.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/free-space-cache.h
fs/btrfs/inode-map.c
fs/btrfs/inode-map.h [new file with mode: 0644]
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/relocation.c
fs/btrfs/super.c
fs/btrfs/sysfs.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/tree-log.c
fs/btrfs/xattr.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/snap.c
fs/ceph/super.h
fs/ceph/xattr.c
fs/cifs/cifs_unicode.c
fs/cifs/connect.c
fs/cifs/sess.c
fs/configfs/dir.c
fs/fuse/dir.c
fs/hpfs/Kconfig
fs/hpfs/alloc.c
fs/hpfs/anode.c
fs/hpfs/buffer.c
fs/hpfs/dir.c
fs/hpfs/dnode.c
fs/hpfs/ea.c
fs/hpfs/file.c
fs/hpfs/hpfs.h
fs/hpfs/hpfs_fn.h
fs/hpfs/inode.c
fs/hpfs/map.c
fs/hpfs/name.c
fs/hpfs/namei.c
fs/hpfs/super.c
fs/logfs/super.c
fs/namei.c
fs/nfs/nfs4filelayout.c
fs/nfs/nfs4filelayout.h
fs/nfs/nfs4filelayoutdev.c
fs/nfs/nfs4proc.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfs/read.c
fs/nfs/write.c
fs/nilfs2/alloc.c
fs/ocfs2/cluster/heartbeat.c
fs/ocfs2/dir.c
fs/ocfs2/dlm/dlmdomain.c
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/file.c
fs/ocfs2/journal.c
fs/partitions/efi.c
fs/proc/task_mmu.c
fs/ubifs/log.c
fs/ubifs/replay.c
fs/ubifs/super.c
fs/xfs/linux-2.6/xfs_sync.c
fs/xfs/xfs_trans_ail.c
include/drm/drm_fb_helper.h
include/drm/drm_mm.h
include/drm/drm_pciids.h
include/drm/radeon_drm.h
include/linux/bootmem.h
include/linux/capability.h
include/linux/cred.h
include/linux/device.h
include/linux/fb.h
include/linux/flex_array.h
include/linux/fs.h
include/linux/ftrace_event.h
include/linux/gfp.h
include/linux/mfd/wm831x/pdata.h
include/linux/mm.h
include/linux/nfs_xdr.h
include/linux/of_device.h
include/linux/pci_ids.h
include/linux/percpu.h
include/linux/proc_fs.h
include/linux/ptrace.h
include/linux/sched.h
include/linux/usb/usbnet.h
include/net/inet_ecn.h
include/net/ip_vs.h
include/net/llc_pdu.h
include/net/xfrm.h
include/scsi/scsi_device.h
include/trace/events/gfpflags.h
init/Kconfig
kernel/capability.c
kernel/cred.c
kernel/exit.c
kernel/irq/proc.c
kernel/power/suspend.c
kernel/power/user.c
kernel/ptrace.c
kernel/time/clocksource.c
kernel/time/tick-broadcast.c
kernel/trace/trace.c
kernel/trace/trace_events.c
lib/flex_array.c
lib/vsprintf.c
lib/xz/xz_dec_lzma2.c
mm/memory.c
mm/mlock.c
mm/mmap.c
mm/page_alloc.c
mm/page_cgroup.c
mm/shmem.c
mm/slub.c
mm/swap.c
mm/vmscan.c
net/8021q/vlan.c
net/8021q/vlan_dev.c
net/9p/client.c
net/9p/protocol.c
net/9p/trans_common.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/l2cap_core.c
net/bridge/br_input.c
net/bridge/br_netfilter.c
net/bridge/netfilter/ebtables.c
net/can/bcm.c
net/can/raw.c
net/ceph/messenger.c
net/ceph/osd_client.c
net/core/dev.c
net/dccp/options.c
net/dsa/Kconfig
net/dsa/mv88e6131.c
net/ipv4/devinet.c
net/ipv4/fib_trie.c
net/ipv4/ip_fragment.c
net/ipv4/route.c
net/ipv4/tcp_cubic.c
net/ipv4/xfrm4_output.c
net/ipv4/xfrm4_state.c
net/ipv6/addrconf.c
net/ipv6/esp6.c
net/ipv6/netfilter/ip6t_REJECT.c
net/ipv6/route.c
net/ipv6/udp.c
net/ipv6/xfrm6_output.c
net/ipv6/xfrm6_state.c
net/mac80211/cfg.c
net/mac80211/debugfs_netdev.c
net/mac80211/tx.c
net/netfilter/ipvs/ip_vs_app.c
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_est.c
net/netfilter/ipvs/ip_vs_proto.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/x_tables.c
net/netfilter/xt_DSCP.c
net/netfilter/xt_conntrack.c
net/unix/af_unix.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_replay.c
net/xfrm/xfrm_user.c
security/selinux/hooks.c
security/selinux/ss/policydb.c
sound/pci/au88x0/au88x0_pcm.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_via.c
sound/soc/codecs/ssm2602.c
sound/soc/codecs/uda134x.c
sound/soc/codecs/wm8903.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/jz4740/jz4740-i2s.c
sound/soc/mid-x86/sst_platform.c
sound/soc/samsung/goni_wm8994.c
sound/soc/soc-core.c
sound/usb/format.c
sound/usb/quirks.c
tools/perf/Makefile
tools/perf/builtin-record.c
tools/perf/builtin-test.c
tools/perf/builtin-top.c
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/python.c

index cb8a3a00cc92694947cb72840ddbcb340407ce08..df904aec99044f8056ac530b9e9dc6de8f26f73e 100644 (file)
@@ -66,10 +66,10 @@ trick is to ensure that any needed memory allocations are done before
 entering atomic context, using:
 
     int flex_array_prealloc(struct flex_array *array, unsigned int start,
-                           unsigned int end, gfp_t flags);
+                           unsigned int nr_elements, gfp_t flags);
 
 This function will ensure that memory for the elements indexed in the range
-defined by start and end has been allocated.  Thereafter, a
+defined by start and nr_elements has been allocated.  Thereafter, a
 flex_array_put() call on an element in that range is guaranteed not to
 block.
 
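As context for the parameter rename above, a minimal usage sketch (not part of this commit; the helper and array names are hypothetical, and only the flex_array calls from <linux/flex_array.h> are assumed):

    #include <linux/flex_array.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    #define NR_ITEMS 128                    /* hypothetical element count */

    static struct flex_array *example_fa;   /* hypothetical array */

    static int example_setup(void)          /* hypothetical helper */
    {
            struct flex_array *fa;
            int ret;

            fa = flex_array_alloc(sizeof(unsigned long), NR_ITEMS, GFP_KERNEL);
            if (!fa)
                    return -ENOMEM;

            /*
             * Done while sleeping is still allowed: the second and third
             * arguments are now a start index and a number of elements,
             * not a start/end pair.
             */
            ret = flex_array_prealloc(fa, 0, NR_ITEMS, GFP_KERNEL);
            if (ret) {
                    flex_array_free(fa);
                    return ret;
            }

            example_fa = fa;
            return 0;
    }

    /*
     * Later, in atomic context, a put into the preallocated range is
     * guaranteed not to block:
     *
     *      flex_array_put(example_fa, i, &val, GFP_ATOMIC);
     */
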
index 2199ba1323d2b286edb2ee003beaaf4927ec512d..69f19f10314a131428a58fed89c6a9dc962409da 100644 (file)
@@ -2813,38 +2813,19 @@ F:      Documentation/gpio.txt
 F:     drivers/gpio/
 F:     include/linux/gpio*
 
+GRE DEMULTIPLEXER DRIVER
+M:     Dmitry Kozlov <xeb@mail.ru>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     net/ipv4/gre.c
+F:     include/net/gre.h
+
 GRETH 10/100/1G Ethernet MAC device driver
 M:     Kristoffer Glembo <kristoffer@gaisler.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/greth*
 
-HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER
-M:     Frank Seidel <frank@f-seidel.de>
-L:     platform-driver-x86@vger.kernel.org
-W:     http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/
-S:     Maintained
-F:     drivers/platform/x86/hdaps.c
-
-HWPOISON MEMORY FAILURE HANDLING
-M:     Andi Kleen <andi@firstfloor.org>
-L:     linux-mm@kvack.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison
-S:     Maintained
-F:     mm/memory-failure.c
-F:     mm/hwpoison-inject.c
-
-HYPERVISOR VIRTUAL CONSOLE DRIVER
-L:     linuxppc-dev@lists.ozlabs.org
-S:     Odd Fixes
-F:     drivers/tty/hvc/
-
-iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER
-M:     Peter Jones <pjones@redhat.com>
-M:     Konrad Rzeszutek Wilk <konrad@kernel.org>
-S:     Maintained
-F:     drivers/firmware/iscsi_ibft*
-
 GSPCA FINEPIX SUBDRIVER
 M:     Frank Zago <frank@zago.net>
 L:     linux-media@vger.kernel.org
@@ -2895,6 +2876,26 @@ T:       git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:     Maintained
 F:     drivers/media/video/gspca/
 
+HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER
+M:     Frank Seidel <frank@f-seidel.de>
+L:     platform-driver-x86@vger.kernel.org
+W:     http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/
+S:     Maintained
+F:     drivers/platform/x86/hdaps.c
+
+HWPOISON MEMORY FAILURE HANDLING
+M:     Andi Kleen <andi@firstfloor.org>
+L:     linux-mm@kvack.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-mce-2.6.git hwpoison
+S:     Maintained
+F:     mm/memory-failure.c
+F:     mm/hwpoison-inject.c
+
+HYPERVISOR VIRTUAL CONSOLE DRIVER
+L:     linuxppc-dev@lists.ozlabs.org
+S:     Odd Fixes
+F:     drivers/tty/hvc/
+
 HARDWARE MONITORING
 M:     Jean Delvare <khali@linux-fr.org>
 M:     Guenter Roeck <guenter.roeck@ericsson.com>
@@ -3478,6 +3479,12 @@ F:       Documentation/isapnp.txt
 F:     drivers/pnp/isapnp/
 F:     include/linux/isapnp.h
 
+iSCSI BOOT FIRMWARE TABLE (iBFT) DRIVER
+M:     Peter Jones <pjones@redhat.com>
+M:     Konrad Rzeszutek Wilk <konrad@kernel.org>
+S:     Maintained
+F:     drivers/firmware/iscsi_ibft*
+
 ISCSI
 M:     Mike Christie <michaelc@cs.wisc.edu>
 L:     open-iscsi@googlegroups.com
@@ -4989,6 +4996,13 @@ F:       Documentation/pps/
 F:     drivers/pps/
 F:     include/linux/pps*.h
 
+PPTP DRIVER
+M:     Dmitry Kozlov <xeb@mail.ru>
+L:     netdev@vger.kernel.org
+S:     Maintained
+F:     drivers/net/pptp.c
+W:     http://sourceforge.net/projects/accel-pptp
+
 PREEMPTIBLE KERNEL
 M:     Robert Love <rml@tech9.net>
 L:     kpreempt-tech@lists.sourceforge.net
@@ -6556,7 +6570,7 @@ S:        Maintained
 F:     drivers/usb/host/uhci*
 
 USB "USBNET" DRIVER FRAMEWORK
-M:     David Brownell <dbrownell@users.sourceforge.net>
+M:     Oliver Neukum <oneukum@suse.de>
 L:     netdev@vger.kernel.org
 W:     http://www.linux-usb.org/usbnet
 S:     Maintained
@@ -7024,20 +7038,6 @@ M:       "Maciej W. Rozycki" <macro@linux-mips.org>
 S:     Maintained
 F:     drivers/tty/serial/zs.*
 
-GRE DEMULTIPLEXER DRIVER
-M:     Dmitry Kozlov <xeb@mail.ru>
-L:     netdev@vger.kernel.org
-S:     Maintained
-F:     net/ipv4/gre.c
-F:     include/net/gre.h
-
-PPTP DRIVER
-M:     Dmitry Kozlov <xeb@mail.ru>
-L:     netdev@vger.kernel.org
-S:     Maintained
-F:     drivers/net/pptp.c
-W:     http://sourceforge.net/projects/accel-pptp
-
 THE REST
 M:     Linus Torvalds <torvalds@linux-foundation.org>
 L:     linux-kernel@vger.kernel.org
index 5a7a2e4f5c0b04f6a82e52954ee0d74e6d667b5c..123d858dae03826a29f562b4792c4d5161878e0b 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 39
-EXTRAVERSION = -rc5
+EXTRAVERSION =
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
index 058937bf5a77718983126f2ab3813a023cb8c1d0..b1834166922dbb8260eb4457cf2c77a8c4616e59 100644 (file)
 #define __NR_fanotify_init             494
 #define __NR_fanotify_mark             495
 #define __NR_prlimit64                 496
+#define __NR_name_to_handle_at         497
+#define __NR_open_by_handle_at         498
+#define __NR_clock_adjtime             499
+#define __NR_syncfs                    500
 
 #ifdef __KERNEL__
 
-#define NR_SYSCALLS                    497
+#define NR_SYSCALLS                    501
 
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
index a6a1de9db16fd6e57d96c4e0b1f76db70f27147f..15f999d41c75c820822fffb70a437ab0467d629e 100644 (file)
@@ -498,23 +498,27 @@ sys_call_table:
        .quad sys_ni_syscall                    /* sys_timerfd */
        .quad sys_eventfd
        .quad sys_recvmmsg
-       .quad sys_fallocate                             /* 480 */
+       .quad sys_fallocate                     /* 480 */
        .quad sys_timerfd_create
        .quad sys_timerfd_settime
        .quad sys_timerfd_gettime
        .quad sys_signalfd4
-       .quad sys_eventfd2                              /* 485 */
+       .quad sys_eventfd2                      /* 485 */
        .quad sys_epoll_create1
        .quad sys_dup3
        .quad sys_pipe2
        .quad sys_inotify_init1
-       .quad sys_preadv                                /* 490 */
+       .quad sys_preadv                        /* 490 */
        .quad sys_pwritev
        .quad sys_rt_tgsigqueueinfo
        .quad sys_perf_event_open
        .quad sys_fanotify_init
-       .quad sys_fanotify_mark                         /* 495 */
+       .quad sys_fanotify_mark                 /* 495 */
        .quad sys_prlimit64
+       .quad sys_name_to_handle_at
+       .quad sys_open_by_handle_at
+       .quad sys_clock_adjtime
+       .quad sys_syncfs                        /* 500 */
 
        .size sys_call_table, . - sys_call_table
        .type sys_call_table, @object
index 918e8e0b72ff1e3220845ea410277a83ff326517..818e74ed45dc01bbc0eaef0eb60b2ccbf8c137e9 100644 (file)
@@ -375,8 +375,7 @@ static struct clocksource clocksource_rpcc = {
 
 static inline void register_rpcc_clocksource(long cycle_freq)
 {
-       clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4);
-       clocksource_register(&clocksource_rpcc);
+       clocksource_register_hz(&clocksource_rpcc, cycle_freq);
 }
 #else /* !CONFIG_SMP */
 static inline void register_rpcc_clocksource(long cycle_freq)
index 8ebbb511c7836383aac94622d0ed00aed5c5f29a..0c6852d93506c0afef2841ea21df84ae72d907d5 100644 (file)
@@ -74,7 +74,7 @@ ZTEXTADDR     := $(CONFIG_ZBOOT_ROM_TEXT)
 ZBSSADDR       := $(CONFIG_ZBOOT_ROM_BSS)
 else
 ZTEXTADDR      := 0
-ZBSSADDR       := ALIGN(4)
+ZBSSADDR       := ALIGN(8)
 endif
 
 SEDFLAGS       = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
index adf583cd0c351196cbfb4efe450b845707c7e36c..49f5b2eaaa87e3f0ff290f666e11a868d19c77df 100644 (file)
@@ -179,15 +179,14 @@ not_angel:
                bl      cache_on
 
 restart:       adr     r0, LC0
-               ldmia   r0, {r1, r2, r3, r5, r6, r9, r11, r12}
-               ldr     sp, [r0, #32]
+               ldmia   r0, {r1, r2, r3, r6, r9, r11, r12}
+               ldr     sp, [r0, #28]
 
                /*
                 * We might be running at a different address.  We need
                 * to fix up various pointers.
                 */
                sub     r0, r0, r1              @ calculate the delta offset
-               add     r5, r5, r0              @ _start
                add     r6, r6, r0              @ _edata
 
 #ifndef CONFIG_ZBOOT_ROM
@@ -206,31 +205,40 @@ restart:  adr     r0, LC0
 /*
  * Check to see if we will overwrite ourselves.
  *   r4  = final kernel address
- *   r5  = start of this image
  *   r9  = size of decompressed image
  *   r10 = end of this image, including  bss/stack/malloc space if non XIP
  * We basically want:
- *   r4 >= r10 -> OK
- *   r4 + image length <= r5 -> OK
+ *   r4 - 16k page directory >= r10 -> OK
+ *   r4 + image length <= current position (pc) -> OK
  */
+               add     r10, r10, #16384
                cmp     r4, r10
                bhs     wont_overwrite
                add     r10, r4, r9
-               cmp     r10, r5
+   ARM(                cmp     r10, pc         )
+ THUMB(                mov     lr, pc          )
+ THUMB(                cmp     r10, lr         )
                bls     wont_overwrite
 
 /*
  * Relocate ourselves past the end of the decompressed kernel.
- *   r5  = start of this image
  *   r6  = _edata
  *   r10 = end of the decompressed kernel
  * Because we always copy ahead, we need to do it from the end and go
  * backward in case the source and destination overlap.
  */
-               /* Round up to next 256-byte boundary. */
-               add     r10, r10, #256
+               /*
+                * Bump to the next 256-byte boundary with the size of
+                * the relocation code added. This avoids overwriting
+                * ourself when the offset is small.
+                */
+               add     r10, r10, #((reloc_code_end - restart + 256) & ~255)
                bic     r10, r10, #255
 
+               /* Get start of code we want to copy and align it down. */
+               adr     r5, restart
+               bic     r5, r5, #31
+
                sub     r9, r6, r5              @ size to copy
                add     r9, r9, #31             @ rounded up to a multiple
                bic     r9, r9, #31             @ ... of 32 bytes
@@ -245,6 +253,11 @@ restart:   adr     r0, LC0
                /* Preserve offset to relocated code. */
                sub     r6, r9, r6
 
+#ifndef CONFIG_ZBOOT_ROM
+               /* cache_clean_flush may use the stack, so relocate it */
+               add     sp, sp, r6
+#endif
+
                bl      cache_clean_flush
 
                adr     r0, BSYM(restart)
@@ -333,7 +346,6 @@ not_relocated:      mov     r0, #0
 LC0:           .word   LC0                     @ r1
                .word   __bss_start             @ r2
                .word   _end                    @ r3
-               .word   _start                  @ r5
                .word   _edata                  @ r6
                .word   _image_size             @ r9
                .word   _got_start              @ r11
@@ -1062,6 +1074,7 @@ memdump:  mov     r12, r0
 #endif
 
                .ltorg
+reloc_code_end:
 
                .align
                .section ".stack", "aw", %nobits
index 5309909d72823592dc7e9f6db0ba1e73574da46d..ea80abe788449444a685339c01a128e04250ceb8 100644 (file)
@@ -54,6 +54,7 @@ SECTIONS
   .bss                 : { *(.bss) }
   _end = .;
 
+  . = ALIGN(8);                /* the stack must be 64-bit aligned */
   .stack               : { *(.stack) }
 
   .stab 0              : { *(.stab) }
diff --git a/arch/arm/configs/at91x40_defconfig b/arch/arm/configs/at91x40_defconfig
new file mode 100644 (file)
index 0000000..c55e921
--- /dev/null
@@ -0,0 +1,48 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_EMBEDDED=y
+# CONFIG_HOTPLUG is not set
+# CONFIG_ELF_CORE is not set
+# CONFIG_FUTEX is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_MMU is not set
+CONFIG_ARCH_AT91=y
+CONFIG_ARCH_AT91X40=y
+CONFIG_MACH_AT91EB01=y
+CONFIG_AT91_EARLY_USART0=y
+CONFIG_CPU_ARM7TDMI=y
+CONFIG_SET_MEM_PARAM=y
+CONFIG_DRAM_BASE=0x01000000
+CONFIG_DRAM_SIZE=0x00400000
+CONFIG_FLASH_MEM_BASE=0x01400000
+CONFIG_PROCESSOR_ID=0x14000040
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_BINFMT_FLAT=y
+# CONFIG_SUSPEND is not set
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_RAM=y
+CONFIG_MTD_ROM=y
+CONFIG_BLK_DEV_RAM=y
+# CONFIG_INPUT is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_DEVKMEM is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_EXT2_FS=y
+# CONFIG_DNOTIFY is not set
+CONFIG_ROMFS_FS=y
+# CONFIG_ENABLE_MUST_CHECK is not set
index bb8a19bd58225a3ce39bf39443cfd5b74b6409fd..e46bdd0097ebd9125f7382f76048d5fc20bbbb22 100644 (file)
@@ -39,10 +39,13 @@ typedef u32 kprobe_opcode_t;
 struct kprobe;
 typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *);
 
+typedef unsigned long (kprobe_check_cc)(unsigned long);
+
 /* Architecture specific copy of original instruction. */
 struct arch_specific_insn {
        kprobe_opcode_t         *insn;
        kprobe_insn_handler_t   *insn_handler;
+       kprobe_check_cc         *insn_check_cc;
 };
 
 struct prev_kprobe {
index 885be097769d108d9ff9c5d39dd0cd94af55fbb9..832888d0c20c263a761cc093bb96ae4977ed15bc 100644 (file)
@@ -159,7 +159,7 @@ extern unsigned int user_debug;
 #include <mach/barriers.h>
 #elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
 #define mb()           do { dsb(); outer_sync(); } while (0)
-#define rmb()          dmb()
+#define rmb()          dsb()
 #define wmb()          mb()
 #else
 #include <asm/memory.h>
index 23891317dc4be73d083f497291525feefaf015c6..15eeff6aea0e8185d9d4b7111da60afad3ea7967 100644 (file)
@@ -34,9 +34,6 @@
  *
  *   *) If the PC is written to by the instruction, the
  *      instruction must be fully simulated in software.
- *      If it is a conditional instruction, the handler
- *      will use insn[0] to copy its condition code to
- *     set r0 to 1 and insn[1] to "mov pc, lr" to return.
  *
  *   *) Otherwise, a modified form of the instruction is
  *      directly executed.  Its handler calls the
 
 #define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25)
 
+#define is_r15(insn, bitpos) (((insn) & (0xf << bitpos)) == (0xf << bitpos))
+
+/*
+ * Test if load/store instructions writeback the address register.
+ * if P (bit 24) == 0 or W (bit 21) == 1
+ */
+#define is_writeback(insn) ((insn ^ 0x01000000) & 0x01200000)
+
 #define PSR_fs (PSR_f|PSR_s)
 
 #define KPROBE_RETURN_INSTRUCTION      0xe1a0f00e      /* mov pc, lr */
-#define SET_R0_TRUE_INSTRUCTION                0xe3a00001      /* mov  r0, #1 */
-
-#define        truecc_insn(insn)       (((insn) & 0xf0000000) | \
-                                (SET_R0_TRUE_INSTRUCTION & 0x0fffffff))
 
 typedef long (insn_0arg_fn_t)(void);
 typedef long (insn_1arg_fn_t)(long);
@@ -419,14 +420,10 @@ insnslot_llret_4arg_rwflags(long r0, long r1, long r2, long r3, long *cpsr,
 
 static void __kprobes simulate_bbl(struct kprobe *p, struct pt_regs *regs)
 {
-       insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
        kprobe_opcode_t insn = p->opcode;
        long iaddr = (long)p->addr;
        int disp  = branch_displacement(insn);
 
-       if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn))
-               return;
-
        if (insn & (1 << 24))
                regs->ARM_lr = iaddr + 4;
 
@@ -446,14 +443,10 @@ static void __kprobes simulate_blx1(struct kprobe *p, struct pt_regs *regs)
 
 static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs)
 {
-       insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
        kprobe_opcode_t insn = p->opcode;
        int rm = insn & 0xf;
        long rmv = regs->uregs[rm];
 
-       if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn))
-               return;
-
        if (insn & (1 << 5))
                regs->ARM_lr = (long)p->addr + 4;
 
@@ -463,9 +456,16 @@ static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs)
                regs->ARM_cpsr |= PSR_T_BIT;
 }
 
+static void __kprobes simulate_mrs(struct kprobe *p, struct pt_regs *regs)
+{
+       kprobe_opcode_t insn = p->opcode;
+       int rd = (insn >> 12) & 0xf;
+       unsigned long mask = 0xf8ff03df; /* Mask out execution state */
+       regs->uregs[rd] = regs->ARM_cpsr & mask;
+}
+
 static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs)
 {
-       insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
        kprobe_opcode_t insn = p->opcode;
        int rn = (insn >> 16) & 0xf;
        int lbit = insn & (1 << 20);
@@ -476,9 +476,6 @@ static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs)
        int reg_bit_vector;
        int reg_count;
 
-       if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn))
-               return;
-
        reg_count = 0;
        reg_bit_vector = insn & 0xffff;
        while (reg_bit_vector) {
@@ -510,11 +507,6 @@ static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs)
 
 static void __kprobes simulate_stm1_pc(struct kprobe *p, struct pt_regs *regs)
 {
-       insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
-
-       if (!insnslot_1arg_rflags(0, regs->ARM_cpsr, i_fn))
-               return;
-
        regs->ARM_pc = (long)p->addr + str_pc_offset;
        simulate_ldm1stm1(p, regs);
        regs->ARM_pc = (long)p->addr + 4;
@@ -525,24 +517,16 @@ static void __kprobes simulate_mov_ipsp(struct kprobe *p, struct pt_regs *regs)
        regs->uregs[12] = regs->uregs[13];
 }
 
-static void __kprobes emulate_ldcstc(struct kprobe *p, struct pt_regs *regs)
-{
-       insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
-       kprobe_opcode_t insn = p->opcode;
-       int rn = (insn >> 16) & 0xf;
-       long rnv = regs->uregs[rn];
-
-       /* Save Rn in case of writeback. */
-       regs->uregs[rn] = insnslot_1arg_rflags(rnv, regs->ARM_cpsr, i_fn);
-}
-
 static void __kprobes emulate_ldrd(struct kprobe *p, struct pt_regs *regs)
 {
        insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
        kprobe_opcode_t insn = p->opcode;
+       long ppc = (long)p->addr + 8;
        int rd = (insn >> 12) & 0xf;
        int rn = (insn >> 16) & 0xf;
        int rm = insn & 0xf;  /* rm may be invalid, don't care. */
+       long rmv = (rm == 15) ? ppc : regs->uregs[rm];
+       long rnv = (rn == 15) ? ppc : regs->uregs[rn];
 
        /* Not following the C calling convention here, so need asm(). */
        __asm__ __volatile__ (
@@ -554,29 +538,36 @@ static void __kprobes emulate_ldrd(struct kprobe *p, struct pt_regs *regs)
                "str    r0, %[rn]       \n\t"   /* in case of writeback */
                "str    r2, %[rd0]      \n\t"
                "str    r3, %[rd1]      \n\t"
-               : [rn]  "+m" (regs->uregs[rn]),
+               : [rn]  "+m" (rnv),
                  [rd0] "=m" (regs->uregs[rd]),
                  [rd1] "=m" (regs->uregs[rd+1])
-               : [rm]   "m" (regs->uregs[rm]),
+               : [rm]   "m" (rmv),
                  [cpsr] "r" (regs->ARM_cpsr),
                  [i_fn] "r" (i_fn)
                : "r0", "r1", "r2", "r3", "lr", "cc"
        );
+       if (is_writeback(insn))
+               regs->uregs[rn] = rnv;
 }
 
 static void __kprobes emulate_strd(struct kprobe *p, struct pt_regs *regs)
 {
        insn_4arg_fn_t *i_fn = (insn_4arg_fn_t *)&p->ainsn.insn[0];
        kprobe_opcode_t insn = p->opcode;
+       long ppc = (long)p->addr + 8;
        int rd = (insn >> 12) & 0xf;
        int rn = (insn >> 16) & 0xf;
        int rm  = insn & 0xf;
-       long rnv = regs->uregs[rn];
-       long rmv = regs->uregs[rm];  /* rm/rmv may be invalid, don't care. */
+       long rnv = (rn == 15) ? ppc : regs->uregs[rn];
+       /* rm/rmv may be invalid, don't care. */
+       long rmv = (rm == 15) ? ppc : regs->uregs[rm];
+       long rnv_wb;
 
-       regs->uregs[rn] = insnslot_4arg_rflags(rnv, rmv, regs->uregs[rd],
+       rnv_wb = insnslot_4arg_rflags(rnv, rmv, regs->uregs[rd],
                                               regs->uregs[rd+1],
                                               regs->ARM_cpsr, i_fn);
+       if (is_writeback(insn))
+               regs->uregs[rn] = rnv_wb;
 }
 
 static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs)
@@ -630,31 +621,6 @@ static void __kprobes emulate_str(struct kprobe *p, struct pt_regs *regs)
                regs->uregs[rn] = rnv_wb;  /* Save Rn in case of writeback. */
 }
 
-static void __kprobes emulate_mrrc(struct kprobe *p, struct pt_regs *regs)
-{
-       insn_llret_0arg_fn_t *i_fn = (insn_llret_0arg_fn_t *)&p->ainsn.insn[0];
-       kprobe_opcode_t insn = p->opcode;
-       union reg_pair fnr;
-       int rd = (insn >> 12) & 0xf;
-       int rn = (insn >> 16) & 0xf;
-
-       fnr.dr = insnslot_llret_0arg_rflags(regs->ARM_cpsr, i_fn);
-       regs->uregs[rn] = fnr.r0;
-       regs->uregs[rd] = fnr.r1;
-}
-
-static void __kprobes emulate_mcrr(struct kprobe *p, struct pt_regs *regs)
-{
-       insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
-       kprobe_opcode_t insn = p->opcode;
-       int rd = (insn >> 12) & 0xf;
-       int rn = (insn >> 16) & 0xf;
-       long rnv = regs->uregs[rn];
-       long rdv = regs->uregs[rd];
-
-       insnslot_2arg_rflags(rnv, rdv, regs->ARM_cpsr, i_fn);
-}
-
 static void __kprobes emulate_sat(struct kprobe *p, struct pt_regs *regs)
 {
        insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
@@ -688,32 +654,32 @@ static void __kprobes emulate_none(struct kprobe *p, struct pt_regs *regs)
        insnslot_0arg_rflags(regs->ARM_cpsr, i_fn);
 }
 
-static void __kprobes emulate_rd12(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes emulate_nop(struct kprobe *p, struct pt_regs *regs)
 {
-       insn_0arg_fn_t *i_fn = (insn_0arg_fn_t *)&p->ainsn.insn[0];
-       kprobe_opcode_t insn = p->opcode;
-       int rd = (insn >> 12) & 0xf;
-
-       regs->uregs[rd] = insnslot_0arg_rflags(regs->ARM_cpsr, i_fn);
 }
 
-static void __kprobes emulate_ird12(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes
+emulate_rd12_modify(struct kprobe *p, struct pt_regs *regs)
 {
        insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
        kprobe_opcode_t insn = p->opcode;
-       int ird = (insn >> 12) & 0xf;
+       int rd = (insn >> 12) & 0xf;
+       long rdv = regs->uregs[rd];
 
-       insnslot_1arg_rflags(regs->uregs[ird], regs->ARM_cpsr, i_fn);
+       regs->uregs[rd] = insnslot_1arg_rflags(rdv, regs->ARM_cpsr, i_fn);
 }
 
-static void __kprobes emulate_rn16(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes
+emulate_rd12rn0_modify(struct kprobe *p, struct pt_regs *regs)
 {
-       insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
+       insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
        kprobe_opcode_t insn = p->opcode;
-       int rn = (insn >> 16) & 0xf;
+       int rd = (insn >> 12) & 0xf;
+       int rn = insn & 0xf;
+       long rdv = regs->uregs[rd];
        long rnv = regs->uregs[rn];
 
-       insnslot_1arg_rflags(rnv, regs->ARM_cpsr, i_fn);
+       regs->uregs[rd] = insnslot_2arg_rflags(rdv, rnv, regs->ARM_cpsr, i_fn);
 }
 
 static void __kprobes emulate_rd12rm0(struct kprobe *p, struct pt_regs *regs)
@@ -818,6 +784,17 @@ emulate_alu_imm_rwflags(struct kprobe *p, struct pt_regs *regs)
        regs->uregs[rd] = insnslot_1arg_rwflags(rnv, &regs->ARM_cpsr, i_fn);
 }
 
+static void __kprobes
+emulate_alu_tests_imm(struct kprobe *p, struct pt_regs *regs)
+{
+       insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
+       kprobe_opcode_t insn = p->opcode;
+       int rn = (insn >> 16) & 0xf;
+       long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn];
+
+       insnslot_1arg_rwflags(rnv, &regs->ARM_cpsr, i_fn);
+}
+
 static void __kprobes
 emulate_alu_rflags(struct kprobe *p, struct pt_regs *regs)
 {
@@ -854,14 +831,34 @@ emulate_alu_rwflags(struct kprobe *p, struct pt_regs *regs)
                insnslot_3arg_rwflags(rnv, rmv, rsv, &regs->ARM_cpsr, i_fn);
 }
 
+static void __kprobes
+emulate_alu_tests(struct kprobe *p, struct pt_regs *regs)
+{
+       insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0];
+       kprobe_opcode_t insn = p->opcode;
+       long ppc = (long)p->addr + 8;
+       int rn = (insn >> 16) & 0xf;
+       int rs = (insn >> 8) & 0xf;     /* rs/rsv may be invalid, don't care. */
+       int rm = insn & 0xf;
+       long rnv = (rn == 15) ? ppc : regs->uregs[rn];
+       long rmv = (rm == 15) ? ppc : regs->uregs[rm];
+       long rsv = regs->uregs[rs];
+
+       insnslot_3arg_rwflags(rnv, rmv, rsv, &regs->ARM_cpsr, i_fn);
+}
+
 static enum kprobe_insn __kprobes
 prep_emulate_ldr_str(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 {
-       int ibit = (insn & (1 << 26)) ? 25 : 22;
+       int not_imm = (insn & (1 << 26)) ? (insn & (1 << 25))
+                                        : (~insn & (1 << 22));
+
+       if (is_writeback(insn) && is_r15(insn, 16))
+               return INSN_REJECTED;   /* Writeback to PC */
 
        insn &= 0xfff00fff;
        insn |= 0x00001000;     /* Rn = r0, Rd = r1 */
-       if (insn & (1 << ibit)) {
+       if (not_imm) {
                insn &= ~0xf;
                insn |= 2;      /* Rm = r2 */
        }
@@ -871,20 +868,40 @@ prep_emulate_ldr_str(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 }
 
 static enum kprobe_insn __kprobes
-prep_emulate_rd12rm0(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+prep_emulate_rd12_modify(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 {
-       insn &= 0xffff0ff0;     /* Rd = r0, Rm = r0 */
+       if (is_r15(insn, 12))
+               return INSN_REJECTED;   /* Rd is PC */
+
+       insn &= 0xffff0fff;     /* Rd = r0 */
        asi->insn[0] = insn;
-       asi->insn_handler = emulate_rd12rm0;
+       asi->insn_handler = emulate_rd12_modify;
        return INSN_GOOD;
 }
 
 static enum kprobe_insn __kprobes
-prep_emulate_rd12(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+prep_emulate_rd12rn0_modify(kprobe_opcode_t insn,
+                           struct arch_specific_insn *asi)
 {
-       insn &= 0xffff0fff;     /* Rd = r0 */
+       if (is_r15(insn, 12))
+               return INSN_REJECTED;   /* Rd is PC */
+
+       insn &= 0xffff0ff0;     /* Rd = r0 */
+       insn |= 0x00000001;     /* Rn = r1 */
+       asi->insn[0] = insn;
+       asi->insn_handler = emulate_rd12rn0_modify;
+       return INSN_GOOD;
+}
+
+static enum kprobe_insn __kprobes
+prep_emulate_rd12rm0(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+{
+       if (is_r15(insn, 12))
+               return INSN_REJECTED;   /* Rd is PC */
+
+       insn &= 0xffff0ff0;     /* Rd = r0, Rm = r0 */
        asi->insn[0] = insn;
-       asi->insn_handler = emulate_rd12;
+       asi->insn_handler = emulate_rd12rm0;
        return INSN_GOOD;
 }
 
@@ -892,6 +909,9 @@ static enum kprobe_insn __kprobes
 prep_emulate_rd12rn16rm0_wflags(kprobe_opcode_t insn,
                                struct arch_specific_insn *asi)
 {
+       if (is_r15(insn, 12))
+               return INSN_REJECTED;   /* Rd is PC */
+
        insn &= 0xfff00ff0;     /* Rd = r0, Rn = r0 */
        insn |= 0x00000001;     /* Rm = r1 */
        asi->insn[0] = insn;
@@ -903,6 +923,9 @@ static enum kprobe_insn __kprobes
 prep_emulate_rd16rs8rm0_wflags(kprobe_opcode_t insn,
                               struct arch_specific_insn *asi)
 {
+       if (is_r15(insn, 16))
+               return INSN_REJECTED;   /* Rd is PC */
+
        insn &= 0xfff0f0f0;     /* Rd = r0, Rs = r0 */
        insn |= 0x00000001;     /* Rm = r1          */
        asi->insn[0] = insn;
@@ -914,6 +937,9 @@ static enum kprobe_insn __kprobes
 prep_emulate_rd16rn12rs8rm0_wflags(kprobe_opcode_t insn,
                                   struct arch_specific_insn *asi)
 {
+       if (is_r15(insn, 16))
+               return INSN_REJECTED;   /* Rd is PC */
+
        insn &= 0xfff000f0;     /* Rd = r0, Rn = r0 */
        insn |= 0x00000102;     /* Rs = r1, Rm = r2 */
        asi->insn[0] = insn;
@@ -925,6 +951,9 @@ static enum kprobe_insn __kprobes
 prep_emulate_rdhi16rdlo12rs8rm0_wflags(kprobe_opcode_t insn,
                                       struct arch_specific_insn *asi)
 {
+       if (is_r15(insn, 16) || is_r15(insn, 12))
+               return INSN_REJECTED;   /* RdHi or RdLo is PC */
+
        insn &= 0xfff000f0;     /* RdHi = r0, RdLo = r1 */
        insn |= 0x00001203;     /* Rs = r2, Rm = r3 */
        asi->insn[0] = insn;
@@ -945,20 +974,13 @@ prep_emulate_rdhi16rdlo12rs8rm0_wflags(kprobe_opcode_t insn,
 static enum kprobe_insn __kprobes
 space_1111(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 {
-       /* CPS mmod == 1 : 1111 0001 0000 xx10 xxxx xxxx xx0x xxxx */
-       /* RFE           : 1111 100x x0x1 xxxx xxxx 1010 xxxx xxxx */
-       /* SRS           : 1111 100x x1x0 1101 xxxx 0101 xxxx xxxx */
-       if ((insn & 0xfff30020) == 0xf1020000 ||
-           (insn & 0xfe500f00) == 0xf8100a00 ||
-           (insn & 0xfe5f0f00) == 0xf84d0500)
-               return INSN_REJECTED;
-
-       /* PLD : 1111 01x1 x101 xxxx xxxx xxxx xxxx xxxx : */
-       if ((insn & 0xfd700000) == 0xf4500000) {
-               insn &= 0xfff0ffff;     /* Rn = r0 */
-               asi->insn[0] = insn;
-               asi->insn_handler = emulate_rn16;
-               return INSN_GOOD;
+       /* memory hint : 1111 0100 x001 xxxx xxxx xxxx xxxx xxxx : */
+       /* PLDI        : 1111 0100 x101 xxxx xxxx xxxx xxxx xxxx : */
+       /* PLDW        : 1111 0101 x001 xxxx xxxx xxxx xxxx xxxx : */
+       /* PLD         : 1111 0101 x101 xxxx xxxx xxxx xxxx xxxx : */
+       if ((insn & 0xfe300000) == 0xf4100000) {
+               asi->insn_handler = emulate_nop;
+               return INSN_GOOD_NO_SLOT;
        }
 
        /* BLX(1) : 1111 101x xxxx xxxx xxxx xxxx xxxx xxxx : */
@@ -967,41 +989,22 @@ space_1111(kprobe_opcode_t insn, struct arch_specific_insn *asi)
                return INSN_GOOD_NO_SLOT;
        }
 
-       /* SETEND : 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */
-       /* CDP2   : 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
-       if ((insn & 0xffff00f0) == 0xf1010000 ||
-           (insn & 0xff000010) == 0xfe000000) {
-               asi->insn[0] = insn;
-               asi->insn_handler = emulate_none;
-               return INSN_GOOD;
-       }
+       /* CPS   : 1111 0001 0000 xxx0 xxxx xxxx xx0x xxxx */
+       /* SETEND: 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */
 
+       /* SRS   : 1111 100x x1x0 xxxx xxxx xxxx xxxx xxxx */
+       /* RFE   : 1111 100x x0x1 xxxx xxxx xxxx xxxx xxxx */
+
+       /* Coprocessor instructions... */
        /* MCRR2 : 1111 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */
        /* MRRC2 : 1111 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */
-       if ((insn & 0xffe00000) == 0xfc400000) {
-               insn &= 0xfff00fff;     /* Rn = r0 */
-               insn |= 0x00001000;     /* Rd = r1 */
-               asi->insn[0] = insn;
-               asi->insn_handler =
-                       (insn & (1 << 20)) ? emulate_mrrc : emulate_mcrr;
-               return INSN_GOOD;
-       }
+       /* LDC2  : 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
+       /* STC2  : 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
+       /* CDP2  : 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
+       /* MCR2  : 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
+       /* MRC2  : 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
 
-       /* LDC2 : 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
-       /* STC2 : 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
-       if ((insn & 0xfe000000) == 0xfc000000) {
-               insn &= 0xfff0ffff;      /* Rn = r0 */
-               asi->insn[0] = insn;
-               asi->insn_handler = emulate_ldcstc;
-               return INSN_GOOD;
-       }
-
-       /* MCR2 : 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
-       /* MRC2 : 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
-       insn &= 0xffff0fff;     /* Rd = r0 */
-       asi->insn[0]      = insn;
-       asi->insn_handler = (insn & (1 << 20)) ? emulate_rd12 : emulate_ird12;
-       return INSN_GOOD;
+       return INSN_REJECTED;
 }
 
 static enum kprobe_insn __kprobes
@@ -1010,19 +1013,18 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
        /* cccc 0001 0xx0 xxxx xxxx xxxx xxxx xxx0 xxxx */
        if ((insn & 0x0f900010) == 0x01000000) {
 
-               /* BXJ  : cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */
-               /* MSR  : cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */
-               if ((insn & 0x0ff000f0) == 0x01200020 ||
-                   (insn & 0x0fb000f0) == 0x01200000)
-                       return INSN_REJECTED;
-
-               /* MRS : cccc 0001 0x00 xxxx xxxx xxxx 0000 xxxx */
-               if ((insn & 0x0fb00010) == 0x01000000)
-                       return prep_emulate_rd12(insn, asi);
+               /* MRS cpsr : cccc 0001 0000 xxxx xxxx xxxx 0000 xxxx */
+               if ((insn & 0x0ff000f0) == 0x01000000) {
+                       if (is_r15(insn, 12))
+                               return INSN_REJECTED;   /* Rd is PC */
+                       asi->insn_handler = simulate_mrs;
+                       return INSN_GOOD_NO_SLOT;
+               }
 
                /* SMLALxy : cccc 0001 0100 xxxx xxxx xxxx 1xx0 xxxx */
                if ((insn & 0x0ff00090) == 0x01400080)
-                       return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi);
+                       return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn,
+                                                                       asi);
 
                /* SMULWy : cccc 0001 0010 xxxx xxxx xxxx 1x10 xxxx */
                /* SMULxy : cccc 0001 0110 xxxx xxxx xxxx 1xx0 xxxx */
@@ -1031,24 +1033,29 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
                        return prep_emulate_rd16rs8rm0_wflags(insn, asi);
 
                /* SMLAxy : cccc 0001 0000 xxxx xxxx xxxx 1xx0 xxxx : Q */
-               /* SMLAWy : cccc 0001 0010 xxxx xxxx xxxx 0x00 xxxx : Q */
-               return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
+               /* SMLAWy : cccc 0001 0010 xxxx xxxx xxxx 1x00 xxxx : Q */
+               if ((insn & 0x0ff00090) == 0x01000080 ||
+                   (insn & 0x0ff000b0) == 0x01200080)
+                       return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
+
+               /* BXJ      : cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */
+               /* MSR      : cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */
+               /* MRS spsr : cccc 0001 0100 xxxx xxxx xxxx 0000 xxxx */
 
+               /* Other instruction encodings aren't yet defined */
+               return INSN_REJECTED;
        }
 
        /* cccc 0001 0xx0 xxxx xxxx xxxx xxxx 0xx1 xxxx */
        else if ((insn & 0x0f900090) == 0x01000010) {
 
-               /* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */
-               if ((insn & 0xfff000f0) == 0xe1200070)
-                       return INSN_REJECTED;
-
                /* BLX(2) : cccc 0001 0010 xxxx xxxx xxxx 0011 xxxx */
                /* BX     : cccc 0001 0010 xxxx xxxx xxxx 0001 xxxx */
                if ((insn & 0x0ff000d0) == 0x01200010) {
-                       asi->insn[0] = truecc_insn(insn);
+                       if ((insn & 0x0ff000ff) == 0x0120003f)
+                               return INSN_REJECTED; /* BLX pc */
                        asi->insn_handler = simulate_blx2bx;
-                       return INSN_GOOD;
+                       return INSN_GOOD_NO_SLOT;
                }
 
                /* CLZ : cccc 0001 0110 xxxx xxxx xxxx 0001 xxxx */
@@ -1059,17 +1066,27 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
                /* QSUB    : cccc 0001 0010 xxxx xxxx xxxx 0101 xxxx :Q */
                /* QDADD   : cccc 0001 0100 xxxx xxxx xxxx 0101 xxxx :Q */
                /* QDSUB   : cccc 0001 0110 xxxx xxxx xxxx 0101 xxxx :Q */
-               return prep_emulate_rd12rn16rm0_wflags(insn, asi);
+               if ((insn & 0x0f9000f0) == 0x01000050)
+                       return prep_emulate_rd12rn16rm0_wflags(insn, asi);
+
+               /* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */
+               /* SMC  : cccc 0001 0110 xxxx xxxx xxxx 0111 xxxx */
+
+               /* Other instruction encodings aren't yet defined */
+               return INSN_REJECTED;
        }
 
        /* cccc 0000 xxxx xxxx xxxx xxxx xxxx 1001 xxxx */
-       else if ((insn & 0x0f000090) == 0x00000090) {
+       else if ((insn & 0x0f0000f0) == 0x00000090) {
 
                /* MUL    : cccc 0000 0000 xxxx xxxx xxxx 1001 xxxx :   */
                /* MULS   : cccc 0000 0001 xxxx xxxx xxxx 1001 xxxx :cc */
                /* MLA    : cccc 0000 0010 xxxx xxxx xxxx 1001 xxxx :   */
                /* MLAS   : cccc 0000 0011 xxxx xxxx xxxx 1001 xxxx :cc */
                /* UMAAL  : cccc 0000 0100 xxxx xxxx xxxx 1001 xxxx :   */
+               /* undef  : cccc 0000 0101 xxxx xxxx xxxx 1001 xxxx :   */
+               /* MLS    : cccc 0000 0110 xxxx xxxx xxxx 1001 xxxx :   */
+               /* undef  : cccc 0000 0111 xxxx xxxx xxxx 1001 xxxx :   */
                /* UMULL  : cccc 0000 1000 xxxx xxxx xxxx 1001 xxxx :   */
                /* UMULLS : cccc 0000 1001 xxxx xxxx xxxx 1001 xxxx :cc */
                /* UMLAL  : cccc 0000 1010 xxxx xxxx xxxx 1001 xxxx :   */
@@ -1078,13 +1095,15 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
                /* SMULLS : cccc 0000 1101 xxxx xxxx xxxx 1001 xxxx :cc */
                /* SMLAL  : cccc 0000 1110 xxxx xxxx xxxx 1001 xxxx :   */
                /* SMLALS : cccc 0000 1111 xxxx xxxx xxxx 1001 xxxx :cc */
-               if ((insn & 0x0fe000f0) == 0x00000090) {
-                      return prep_emulate_rd16rs8rm0_wflags(insn, asi);
-               } else if  ((insn & 0x0fe000f0) == 0x00200090) {
-                      return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
-               } else {
-                      return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi);
-               }
+               if ((insn & 0x00d00000) == 0x00500000)
+                       return INSN_REJECTED;
+               else if ((insn & 0x00e00000) == 0x00000000)
+                       return prep_emulate_rd16rs8rm0_wflags(insn, asi);
+               else if ((insn & 0x00a00000) == 0x00200000)
+                       return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
+               else
+                       return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn,
+                                                                       asi);
        }
 
        /* cccc 000x xxxx xxxx xxxx xxxx xxxx 1xx1 xxxx */
@@ -1092,23 +1111,45 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 
                /* SWP   : cccc 0001 0000 xxxx xxxx xxxx 1001 xxxx */
                /* SWPB  : cccc 0001 0100 xxxx xxxx xxxx 1001 xxxx */
-               /* LDRD  : cccc 000x xxx0 xxxx xxxx xxxx 1101 xxxx */
-               /* STRD  : cccc 000x xxx0 xxxx xxxx xxxx 1111 xxxx */
+               /* ???   : cccc 0001 0x01 xxxx xxxx xxxx 1001 xxxx */
+               /* ???   : cccc 0001 0x10 xxxx xxxx xxxx 1001 xxxx */
+               /* ???   : cccc 0001 0x11 xxxx xxxx xxxx 1001 xxxx */
                /* STREX : cccc 0001 1000 xxxx xxxx xxxx 1001 xxxx */
                /* LDREX : cccc 0001 1001 xxxx xxxx xxxx 1001 xxxx */
+               /* STREXD: cccc 0001 1010 xxxx xxxx xxxx 1001 xxxx */
+               /* LDREXD: cccc 0001 1011 xxxx xxxx xxxx 1001 xxxx */
+               /* STREXB: cccc 0001 1100 xxxx xxxx xxxx 1001 xxxx */
+               /* LDREXB: cccc 0001 1101 xxxx xxxx xxxx 1001 xxxx */
+               /* STREXH: cccc 0001 1110 xxxx xxxx xxxx 1001 xxxx */
+               /* LDREXH: cccc 0001 1111 xxxx xxxx xxxx 1001 xxxx */
+
+               /* LDRD  : cccc 000x xxx0 xxxx xxxx xxxx 1101 xxxx */
+               /* STRD  : cccc 000x xxx0 xxxx xxxx xxxx 1111 xxxx */
                /* LDRH  : cccc 000x xxx1 xxxx xxxx xxxx 1011 xxxx */
                /* STRH  : cccc 000x xxx0 xxxx xxxx xxxx 1011 xxxx */
                /* LDRSB : cccc 000x xxx1 xxxx xxxx xxxx 1101 xxxx */
                /* LDRSH : cccc 000x xxx1 xxxx xxxx xxxx 1111 xxxx */
-               if ((insn & 0x0fb000f0) == 0x01000090) {
-                       /* SWP/SWPB */
-                       return prep_emulate_rd12rn16rm0_wflags(insn, asi);
+               if ((insn & 0x0f0000f0) == 0x01000090) {
+                       if ((insn & 0x0fb000f0) == 0x01000090) {
+                               /* SWP/SWPB */
+                               return prep_emulate_rd12rn16rm0_wflags(insn,
+                                                                       asi);
+                       } else {
+                               /* STREX/LDREX variants and unallocaed space */
+                               return INSN_REJECTED;
+                       }
+
                } else if ((insn & 0x0e1000d0) == 0x00000d0) {
                        /* STRD/LDRD */
+                       if ((insn & 0x0000e000) == 0x0000e000)
+                               return INSN_REJECTED;   /* Rd is LR or PC */
+                       if (is_writeback(insn) && is_r15(insn, 16))
+                               return INSN_REJECTED;   /* Writeback to PC */
+
                        insn &= 0xfff00fff;
                        insn |= 0x00002000;     /* Rn = r0, Rd = r2 */
-                       if (insn & (1 << 22)) {
-                               /* I bit */
+                       if (!(insn & (1 << 22))) {
+                               /* Register index */
                                insn &= ~0xf;
                                insn |= 1;      /* Rm = r1 */
                        }
@@ -1118,6 +1159,9 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
                        return INSN_GOOD;
                }
 
+               /* LDRH/STRH/LDRSB/LDRSH */
+               if (is_r15(insn, 12))
+                       return INSN_REJECTED;   /* Rd is PC */
                return prep_emulate_ldr_str(insn, asi);
        }
 
@@ -1125,7 +1169,7 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 
        /*
         * ALU op with S bit and Rd == 15 :
-        *      cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx
+        *      cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx
         */
        if ((insn & 0x0e10f000) == 0x0010f000)
                return INSN_REJECTED;
@@ -1154,22 +1198,61 @@ space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
                insn |= 0x00000200;     /* Rs = r2 */
        }
        asi->insn[0] = insn;
-       asi->insn_handler = (insn & (1 << 20)) ?  /* S-bit */
+
+       if ((insn & 0x0f900000) == 0x01100000) {
+               /*
+                * TST : cccc 0001 0001 xxxx xxxx xxxx xxxx xxxx
+                * TEQ : cccc 0001 0011 xxxx xxxx xxxx xxxx xxxx
+                * CMP : cccc 0001 0101 xxxx xxxx xxxx xxxx xxxx
+                * CMN : cccc 0001 0111 xxxx xxxx xxxx xxxx xxxx
+                */
+               asi->insn_handler = emulate_alu_tests;
+       } else {
+               /* ALU ops which write to Rd */
+               asi->insn_handler = (insn & (1 << 20)) ?  /* S-bit */
                                emulate_alu_rwflags : emulate_alu_rflags;
+       }
        return INSN_GOOD;
 }
 
 static enum kprobe_insn __kprobes
 space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 {
+       /* MOVW  : cccc 0011 0000 xxxx xxxx xxxx xxxx xxxx */
+       /* MOVT  : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx */
+       if ((insn & 0x0fb00000) == 0x03000000)
+               return prep_emulate_rd12_modify(insn, asi);
+
+       /* hints : cccc 0011 0010 0000 xxxx xxxx xxxx xxxx */
+       if ((insn & 0x0fff0000) == 0x03200000) {
+               unsigned op2 = insn & 0x000000ff;
+               if (op2 == 0x01 || op2 == 0x04) {
+                       /* YIELD : cccc 0011 0010 0000 xxxx xxxx 0000 0001 */
+                       /* SEV   : cccc 0011 0010 0000 xxxx xxxx 0000 0100 */
+                       asi->insn[0] = insn;
+                       asi->insn_handler = emulate_none;
+                       return INSN_GOOD;
+               } else if (op2 <= 0x03) {
+                       /* NOP   : cccc 0011 0010 0000 xxxx xxxx 0000 0000 */
+                       /* WFE   : cccc 0011 0010 0000 xxxx xxxx 0000 0010 */
+                       /* WFI   : cccc 0011 0010 0000 xxxx xxxx 0000 0011 */
+                       /*
+                        * We make WFE and WFI true NOPs to avoid stalls due
+                        * to missing events whilst processing the probe.
+                        */
+                       asi->insn_handler = emulate_nop;
+                       return INSN_GOOD_NO_SLOT;
+               }
+               /* For DBG and unallocated hints it's safest to reject them */
+               return INSN_REJECTED;
+       }
+
        /*
         * MSR   : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx
-        * Undef : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx
         * ALU op with S bit and Rd == 15 :
         *         cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx
         */
        if ((insn & 0x0fb00000) == 0x03200000 ||        /* MSR */
-           (insn & 0x0ff00000) == 0x03400000 ||        /* Undef */
            (insn & 0x0e10f000) == 0x0210f000)          /* ALU s-bit, R15  */
                return INSN_REJECTED;
 
@@ -1180,10 +1263,22 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
         * *S (bit 20) updates condition codes
         * ADC/SBC/RSC reads the C flag
         */
-       insn &= 0xffff0fff;     /* Rd = r0 */
+       insn &= 0xfff00fff;     /* Rn = r0 and Rd = r0 */
        asi->insn[0] = insn;
-       asi->insn_handler = (insn & (1 << 20)) ?  /* S-bit */
+
+       if ((insn & 0x0f900000) == 0x03100000) {
+               /*
+                * TST : cccc 0011 0001 xxxx xxxx xxxx xxxx xxxx
+                * TEQ : cccc 0011 0011 xxxx xxxx xxxx xxxx xxxx
+                * CMP : cccc 0011 0101 xxxx xxxx xxxx xxxx xxxx
+                * CMN : cccc 0011 0111 xxxx xxxx xxxx xxxx xxxx
+                */
+               asi->insn_handler = emulate_alu_tests_imm;
+       } else {
+               /* ALU ops which write to Rd */
+               asi->insn_handler = (insn & (1 << 20)) ?  /* S-bit */
                        emulate_alu_imm_rwflags : emulate_alu_imm_rflags;
+       }
        return INSN_GOOD;
 }
 
@@ -1192,6 +1287,8 @@ space_cccc_0110__1(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 {
        /* SEL : cccc 0110 1000 xxxx xxxx xxxx 1011 xxxx GE: !!! */
        if ((insn & 0x0ff000f0) == 0x068000b0) {
+               if (is_r15(insn, 12))
+                       return INSN_REJECTED;   /* Rd is PC */
                insn &= 0xfff00ff0;     /* Rd = r0, Rn = r0 */
                insn |= 0x00000001;     /* Rm = r1 */
                asi->insn[0] = insn;
@@ -1205,6 +1302,8 @@ space_cccc_0110__1(kprobe_opcode_t insn, struct arch_specific_insn *asi)
        /* USAT16 : cccc 0110 1110 xxxx xxxx xxxx 0011 xxxx :Q */
        if ((insn & 0x0fa00030) == 0x06a00010 ||
            (insn & 0x0fb000f0) == 0x06a00030) {
+               if (is_r15(insn, 12))
+                       return INSN_REJECTED;   /* Rd is PC */
                insn &= 0xffff0ff0;     /* Rd = r0, Rm = r0 */
                asi->insn[0] = insn;
                asi->insn_handler = emulate_sat;
@@ -1213,57 +1312,101 @@ space_cccc_0110__1(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 
        /* REV    : cccc 0110 1011 xxxx xxxx xxxx 0011 xxxx */
        /* REV16  : cccc 0110 1011 xxxx xxxx xxxx 1011 xxxx */
+       /* RBIT   : cccc 0110 1111 xxxx xxxx xxxx 0011 xxxx */
        /* REVSH  : cccc 0110 1111 xxxx xxxx xxxx 1011 xxxx */
        if ((insn & 0x0ff00070) == 0x06b00030 ||
-           (insn & 0x0ff000f0) == 0x06f000b0)
+           (insn & 0x0ff00070) == 0x06f00030)
                return prep_emulate_rd12rm0(insn, asi);
 
+       /* ???       : cccc 0110 0000 xxxx xxxx xxxx xxx1 xxxx :   */
        /* SADD16    : cccc 0110 0001 xxxx xxxx xxxx 0001 xxxx :GE */
        /* SADDSUBX  : cccc 0110 0001 xxxx xxxx xxxx 0011 xxxx :GE */
        /* SSUBADDX  : cccc 0110 0001 xxxx xxxx xxxx 0101 xxxx :GE */
        /* SSUB16    : cccc 0110 0001 xxxx xxxx xxxx 0111 xxxx :GE */
        /* SADD8     : cccc 0110 0001 xxxx xxxx xxxx 1001 xxxx :GE */
+       /* ???       : cccc 0110 0001 xxxx xxxx xxxx 1011 xxxx :   */
+       /* ???       : cccc 0110 0001 xxxx xxxx xxxx 1101 xxxx :   */
        /* SSUB8     : cccc 0110 0001 xxxx xxxx xxxx 1111 xxxx :GE */
        /* QADD16    : cccc 0110 0010 xxxx xxxx xxxx 0001 xxxx :   */
        /* QADDSUBX  : cccc 0110 0010 xxxx xxxx xxxx 0011 xxxx :   */
        /* QSUBADDX  : cccc 0110 0010 xxxx xxxx xxxx 0101 xxxx :   */
        /* QSUB16    : cccc 0110 0010 xxxx xxxx xxxx 0111 xxxx :   */
        /* QADD8     : cccc 0110 0010 xxxx xxxx xxxx 1001 xxxx :   */
+       /* ???       : cccc 0110 0010 xxxx xxxx xxxx 1011 xxxx :   */
+       /* ???       : cccc 0110 0010 xxxx xxxx xxxx 1101 xxxx :   */
        /* QSUB8     : cccc 0110 0010 xxxx xxxx xxxx 1111 xxxx :   */
        /* SHADD16   : cccc 0110 0011 xxxx xxxx xxxx 0001 xxxx :   */
        /* SHADDSUBX : cccc 0110 0011 xxxx xxxx xxxx 0011 xxxx :   */
        /* SHSUBADDX : cccc 0110 0011 xxxx xxxx xxxx 0101 xxxx :   */
        /* SHSUB16   : cccc 0110 0011 xxxx xxxx xxxx 0111 xxxx :   */
        /* SHADD8    : cccc 0110 0011 xxxx xxxx xxxx 1001 xxxx :   */
+       /* ???       : cccc 0110 0011 xxxx xxxx xxxx 1011 xxxx :   */
+       /* ???       : cccc 0110 0011 xxxx xxxx xxxx 1101 xxxx :   */
        /* SHSUB8    : cccc 0110 0011 xxxx xxxx xxxx 1111 xxxx :   */
+       /* ???       : cccc 0110 0100 xxxx xxxx xxxx xxx1 xxxx :   */
        /* UADD16    : cccc 0110 0101 xxxx xxxx xxxx 0001 xxxx :GE */
        /* UADDSUBX  : cccc 0110 0101 xxxx xxxx xxxx 0011 xxxx :GE */
        /* USUBADDX  : cccc 0110 0101 xxxx xxxx xxxx 0101 xxxx :GE */
        /* USUB16    : cccc 0110 0101 xxxx xxxx xxxx 0111 xxxx :GE */
        /* UADD8     : cccc 0110 0101 xxxx xxxx xxxx 1001 xxxx :GE */
+       /* ???       : cccc 0110 0101 xxxx xxxx xxxx 1011 xxxx :   */
+       /* ???       : cccc 0110 0101 xxxx xxxx xxxx 1101 xxxx :   */
        /* USUB8     : cccc 0110 0101 xxxx xxxx xxxx 1111 xxxx :GE */
        /* UQADD16   : cccc 0110 0110 xxxx xxxx xxxx 0001 xxxx :   */
        /* UQADDSUBX : cccc 0110 0110 xxxx xxxx xxxx 0011 xxxx :   */
        /* UQSUBADDX : cccc 0110 0110 xxxx xxxx xxxx 0101 xxxx :   */
        /* UQSUB16   : cccc 0110 0110 xxxx xxxx xxxx 0111 xxxx :   */
        /* UQADD8    : cccc 0110 0110 xxxx xxxx xxxx 1001 xxxx :   */
+       /* ???       : cccc 0110 0110 xxxx xxxx xxxx 1011 xxxx :   */
+       /* ???       : cccc 0110 0110 xxxx xxxx xxxx 1101 xxxx :   */
        /* UQSUB8    : cccc 0110 0110 xxxx xxxx xxxx 1111 xxxx :   */
        /* UHADD16   : cccc 0110 0111 xxxx xxxx xxxx 0001 xxxx :   */
        /* UHADDSUBX : cccc 0110 0111 xxxx xxxx xxxx 0011 xxxx :   */
        /* UHSUBADDX : cccc 0110 0111 xxxx xxxx xxxx 0101 xxxx :   */
        /* UHSUB16   : cccc 0110 0111 xxxx xxxx xxxx 0111 xxxx :   */
        /* UHADD8    : cccc 0110 0111 xxxx xxxx xxxx 1001 xxxx :   */
+       /* ???       : cccc 0110 0111 xxxx xxxx xxxx 1011 xxxx :   */
+       /* ???       : cccc 0110 0111 xxxx xxxx xxxx 1101 xxxx :   */
        /* UHSUB8    : cccc 0110 0111 xxxx xxxx xxxx 1111 xxxx :   */
+       if ((insn & 0x0f800010) == 0x06000010) {
+               if ((insn & 0x00300000) == 0x00000000 ||
+                   (insn & 0x000000e0) == 0x000000a0 ||
+                   (insn & 0x000000e0) == 0x000000c0)
+                       return INSN_REJECTED;   /* Unallocated space */
+               return prep_emulate_rd12rn16rm0_wflags(insn, asi);
+       }
+
        /* PKHBT     : cccc 0110 1000 xxxx xxxx xxxx x001 xxxx :   */
        /* PKHTB     : cccc 0110 1000 xxxx xxxx xxxx x101 xxxx :   */
+       if ((insn & 0x0ff00030) == 0x06800010)
+               return prep_emulate_rd12rn16rm0_wflags(insn, asi);
+
        /* SXTAB16   : cccc 0110 1000 xxxx xxxx xxxx 0111 xxxx :   */
-       /* SXTB      : cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx :   */
+       /* SXTB16    : cccc 0110 1000 1111 xxxx xxxx 0111 xxxx :   */
+       /* ???       : cccc 0110 1001 xxxx xxxx xxxx 0111 xxxx :   */
        /* SXTAB     : cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx :   */
+       /* SXTB      : cccc 0110 1010 1111 xxxx xxxx 0111 xxxx :   */
        /* SXTAH     : cccc 0110 1011 xxxx xxxx xxxx 0111 xxxx :   */
+       /* SXTH      : cccc 0110 1011 1111 xxxx xxxx 0111 xxxx :   */
        /* UXTAB16   : cccc 0110 1100 xxxx xxxx xxxx 0111 xxxx :   */
+       /* UXTB16    : cccc 0110 1100 1111 xxxx xxxx 0111 xxxx :   */
+       /* ???       : cccc 0110 1101 xxxx xxxx xxxx 0111 xxxx :   */
        /* UXTAB     : cccc 0110 1110 xxxx xxxx xxxx 0111 xxxx :   */
+       /* UXTB      : cccc 0110 1110 1111 xxxx xxxx 0111 xxxx :   */
        /* UXTAH     : cccc 0110 1111 xxxx xxxx xxxx 0111 xxxx :   */
-       return prep_emulate_rd12rn16rm0_wflags(insn, asi);
+       /* UXTH      : cccc 0110 1111 1111 xxxx xxxx 0111 xxxx :   */
+       if ((insn & 0x0f8000f0) == 0x06800070) {
+               if ((insn & 0x00300000) == 0x00100000)
+                       return INSN_REJECTED;   /* Unallocated space */
+
+               if ((insn & 0x000f0000) == 0x000f0000)
+                       return prep_emulate_rd12rm0(insn, asi);
+               else
+                       return prep_emulate_rd12rn16rm0_wflags(insn, asi);
+       }
+
+       /* Other instruction encodings aren't yet defined */
+       return INSN_REJECTED;
 }
 
 static enum kprobe_insn __kprobes
@@ -1273,29 +1416,49 @@ space_cccc_0111__1(kprobe_opcode_t insn, struct arch_specific_insn *asi)
        if ((insn & 0x0ff000f0) == 0x03f000f0)
                return INSN_REJECTED;
 
-       /* USADA8 : cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx */
-       /* USAD8  : cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx */
-       if ((insn & 0x0ff000f0) == 0x07800010)
-                return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
-
        /* SMLALD : cccc 0111 0100 xxxx xxxx xxxx 00x1 xxxx */
        /* SMLSLD : cccc 0111 0100 xxxx xxxx xxxx 01x1 xxxx */
        if ((insn & 0x0ff00090) == 0x07400010)
                return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi);
 
        /* SMLAD  : cccc 0111 0000 xxxx xxxx xxxx 00x1 xxxx :Q */
+       /* SMUAD  : cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx :Q */
        /* SMLSD  : cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx :Q */
+       /* SMUSD  : cccc 0111 0000 xxxx 1111 xxxx 01x1 xxxx :  */
        /* SMMLA  : cccc 0111 0101 xxxx xxxx xxxx 00x1 xxxx :  */
-       /* SMMLS  : cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx :  */
+       /* SMMUL  : cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx :  */
+       /* USADA8 : cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx :  */
+       /* USAD8  : cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx :  */
        if ((insn & 0x0ff00090) == 0x07000010 ||
            (insn & 0x0ff000d0) == 0x07500010 ||
-           (insn & 0x0ff000d0) == 0x075000d0)
+           (insn & 0x0ff000f0) == 0x07800010) {
+
+               if ((insn & 0x0000f000) == 0x0000f000)
+                       return prep_emulate_rd16rs8rm0_wflags(insn, asi);
+               else
+                       return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
+       }
+
+       /* SMMLS  : cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx :  */
+       if ((insn & 0x0ff000d0) == 0x075000d0)
                return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
 
-       /* SMUSD  : cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx :  */
-       /* SMUAD  : cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx :Q */
-       /* SMMUL  : cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx :  */
-       return prep_emulate_rd16rs8rm0_wflags(insn, asi);
+       /* SBFX   : cccc 0111 101x xxxx xxxx xxxx x101 xxxx :  */
+       /* UBFX   : cccc 0111 111x xxxx xxxx xxxx x101 xxxx :  */
+       if ((insn & 0x0fa00070) == 0x07a00050)
+               return prep_emulate_rd12rm0(insn, asi);
+
+       /* BFI    : cccc 0111 110x xxxx xxxx xxxx x001 xxxx :  */
+       /* BFC    : cccc 0111 110x xxxx xxxx xxxx x001 1111 :  */
+       if ((insn & 0x0fe00070) == 0x07c00010) {
+
+               if ((insn & 0x0000000f) == 0x0000000f)
+                       return prep_emulate_rd12_modify(insn, asi);
+               else
+                       return prep_emulate_rd12rn0_modify(insn, asi);
+       }
+
+       return INSN_REJECTED;
 }
 
 static enum kprobe_insn __kprobes
@@ -1309,6 +1472,10 @@ space_cccc_01xx(kprobe_opcode_t insn, struct arch_specific_insn *asi)
        /* STRB  : cccc 01xx x1x0 xxxx xxxx xxxx xxxx xxxx */
        /* STRBT : cccc 01x0 x110 xxxx xxxx xxxx xxxx xxxx */
        /* STRT  : cccc 01x0 x010 xxxx xxxx xxxx xxxx xxxx */
+
+       if ((insn & 0x00500000) == 0x00500000 && is_r15(insn, 12))
+               return INSN_REJECTED;   /* LDRB into PC */
+
        return prep_emulate_ldr_str(insn, asi);
 }
 
@@ -1323,10 +1490,9 @@ space_cccc_100x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 
        /* LDM(1) : cccc 100x x0x1 xxxx xxxx xxxx xxxx xxxx */
        /* STM(1) : cccc 100x x0x0 xxxx xxxx xxxx xxxx xxxx */
-       asi->insn[0] = truecc_insn(insn);
        asi->insn_handler = ((insn & 0x108000) == 0x008000) ? /* STM & R15 */
                                simulate_stm1_pc : simulate_ldm1stm1;
-       return INSN_GOOD;
+       return INSN_GOOD_NO_SLOT;
 }
 
 static enum kprobe_insn __kprobes
@@ -1334,58 +1500,117 @@ space_cccc_101x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 {
        /* B  : cccc 1010 xxxx xxxx xxxx xxxx xxxx xxxx */
        /* BL : cccc 1011 xxxx xxxx xxxx xxxx xxxx xxxx */
-       asi->insn[0] = truecc_insn(insn);
        asi->insn_handler = simulate_bbl;
-       return INSN_GOOD;
+       return INSN_GOOD_NO_SLOT;
 }
 
 static enum kprobe_insn __kprobes
-space_cccc_1100_010x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+space_cccc_11xx(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 {
+       /* Coprocessor instructions... */
        /* MCRR : cccc 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */
        /* MRRC : cccc 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */
-       insn &= 0xfff00fff;
-       insn |= 0x00001000;     /* Rn = r0, Rd = r1 */
-       asi->insn[0] = insn;
-       asi->insn_handler = (insn & (1 << 20)) ? emulate_mrrc : emulate_mcrr;
-       return INSN_GOOD;
+       /* LDC  : cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
+       /* STC  : cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
+       /* CDP  : cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
+       /* MCR  : cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
+       /* MRC  : cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
+
+       /* SVC  : cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */
+
+       return INSN_REJECTED;
 }
 
-static enum kprobe_insn __kprobes
-space_cccc_110x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+static unsigned long __kprobes __check_eq(unsigned long cpsr)
 {
-       /* LDC : cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
-       /* STC : cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
-       insn &= 0xfff0ffff;     /* Rn = r0 */
-       asi->insn[0] = insn;
-       asi->insn_handler = emulate_ldcstc;
-       return INSN_GOOD;
+       return cpsr & PSR_Z_BIT;
 }
 
-static enum kprobe_insn __kprobes
-space_cccc_111x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
+static unsigned long __kprobes __check_ne(unsigned long cpsr)
 {
-       /* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */
-       /* SWI  : cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */
-       if ((insn & 0xfff000f0) == 0xe1200070 ||
-           (insn & 0x0f000000) == 0x0f000000)
-               return INSN_REJECTED;
+       return (~cpsr) & PSR_Z_BIT;
+}
 
-       /* CDP : cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
-       if ((insn & 0x0f000010) == 0x0e000000) {
-               asi->insn[0] = insn;
-               asi->insn_handler = emulate_none;
-               return INSN_GOOD;
-       }
+static unsigned long __kprobes __check_cs(unsigned long cpsr)
+{
+       return cpsr & PSR_C_BIT;
+}
 
-       /* MCR : cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
-       /* MRC : cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
-       insn &= 0xffff0fff;     /* Rd = r0 */
-       asi->insn[0] = insn;
-       asi->insn_handler = (insn & (1 << 20)) ? emulate_rd12 : emulate_ird12;
-       return INSN_GOOD;
+static unsigned long __kprobes __check_cc(unsigned long cpsr)
+{
+       return (~cpsr) & PSR_C_BIT;
+}
+
+static unsigned long __kprobes __check_mi(unsigned long cpsr)
+{
+       return cpsr & PSR_N_BIT;
+}
+
+static unsigned long __kprobes __check_pl(unsigned long cpsr)
+{
+       return (~cpsr) & PSR_N_BIT;
+}
+
+static unsigned long __kprobes __check_vs(unsigned long cpsr)
+{
+       return cpsr & PSR_V_BIT;
+}
+
+static unsigned long __kprobes __check_vc(unsigned long cpsr)
+{
+       return (~cpsr) & PSR_V_BIT;
+}
+
+static unsigned long __kprobes __check_hi(unsigned long cpsr)
+{
+       cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
+       return cpsr & PSR_C_BIT;
 }
 
+static unsigned long __kprobes __check_ls(unsigned long cpsr)
+{
+       cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
+       return (~cpsr) & PSR_C_BIT;
+}
+
+static unsigned long __kprobes __check_ge(unsigned long cpsr)
+{
+       cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+       return (~cpsr) & PSR_N_BIT;
+}
+
+static unsigned long __kprobes __check_lt(unsigned long cpsr)
+{
+       cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+       return cpsr & PSR_N_BIT;
+}
+
+static unsigned long __kprobes __check_gt(unsigned long cpsr)
+{
+       unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+       temp |= (cpsr << 1);                     /* PSR_N_BIT |= PSR_Z_BIT */
+       return (~temp) & PSR_N_BIT;
+}
+
+static unsigned long __kprobes __check_le(unsigned long cpsr)
+{
+       unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
+       temp |= (cpsr << 1);                     /* PSR_N_BIT |= PSR_Z_BIT */
+       return temp & PSR_N_BIT;
+}
+
+static unsigned long __kprobes __check_al(unsigned long cpsr)
+{
+       return true;
+}
+
+static kprobe_check_cc * const condition_checks[16] = {
+       &__check_eq, &__check_ne, &__check_cs, &__check_cc,
+       &__check_mi, &__check_pl, &__check_vs, &__check_vc,
+       &__check_hi, &__check_ls, &__check_ge, &__check_lt,
+       &__check_gt, &__check_le, &__check_al, &__check_al
+};
+
 /* Return:
  *   INSN_REJECTED     If instruction is one not allowed to kprobe,
  *   INSN_GOOD         If instruction is supported and uses instruction slot,
@@ -1401,133 +1626,45 @@ space_cccc_111x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 enum kprobe_insn __kprobes
 arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 {
+       asi->insn_check_cc = condition_checks[insn>>28];
        asi->insn[1] = KPROBE_RETURN_INSTRUCTION;
 
-       if ((insn & 0xf0000000) == 0xf0000000) {
+       if ((insn & 0xf0000000) == 0xf0000000)
 
                return space_1111(insn, asi);
 
-       } else if ((insn & 0x0e000000) == 0x00000000) {
+       else if ((insn & 0x0e000000) == 0x00000000)
 
                return space_cccc_000x(insn, asi);
 
-       } else if ((insn & 0x0e000000) == 0x02000000) {
+       else if ((insn & 0x0e000000) == 0x02000000)
 
                return space_cccc_001x(insn, asi);
 
-       } else if ((insn & 0x0f000010) == 0x06000010) {
+       else if ((insn & 0x0f000010) == 0x06000010)
 
                return space_cccc_0110__1(insn, asi);
 
-       } else if ((insn & 0x0f000010) == 0x07000010) {
+       else if ((insn & 0x0f000010) == 0x07000010)
 
                return space_cccc_0111__1(insn, asi);
 
-       } else if ((insn & 0x0c000000) == 0x04000000) {
+       else if ((insn & 0x0c000000) == 0x04000000)
 
                return space_cccc_01xx(insn, asi);
 
-       } else if ((insn & 0x0e000000) == 0x08000000) {
+       else if ((insn & 0x0e000000) == 0x08000000)
 
                return space_cccc_100x(insn, asi);
 
-       } else if ((insn & 0x0e000000) == 0x0a000000) {
+       else if ((insn & 0x0e000000) == 0x0a000000)
 
                return space_cccc_101x(insn, asi);
 
-       } else if ((insn & 0x0fe00000) == 0x0c400000) {
-
-               return space_cccc_1100_010x(insn, asi);
-
-       } else if ((insn & 0x0e000000) == 0x0c000000) {
-
-               return space_cccc_110x(insn, asi);
-
-       }
-
-       return space_cccc_111x(insn, asi);
+       return space_cccc_11xx(insn, asi);
 }
 
 void __init arm_kprobe_decode_init(void)
 {
        find_str_pc_offset();
 }
-
-
-/*
- * All ARM instructions listed below.
- *
- * Instructions and their general purpose registers are given.
- * If a particular register may not use R15, it is prefixed with a "!".
- * If marked with a "*" means the value returned by reading R15
- * is implementation defined.
- *
- * ADC/ADD/AND/BIC/CMN/CMP/EOR/MOV/MVN/ORR/RSB/RSC/SBC/SUB/TEQ
- *     TST: Rd, Rn, Rm, !Rs
- * BX: Rm
- * BLX(2): !Rm
- * BX: Rm (R15 legal, but discouraged)
- * BXJ: !Rm,
- * CLZ: !Rd, !Rm
- * CPY: Rd, Rm
- * LDC/2,STC/2 immediate offset & unindex: Rn
- * LDC/2,STC/2 immediate pre/post-indexed: !Rn
- * LDM(1/3): !Rn, register_list
- * LDM(2): !Rn, !register_list
- * LDR,STR,PLD immediate offset: Rd, Rn
- * LDR,STR,PLD register offset: Rd, Rn, !Rm
- * LDR,STR,PLD scaled register offset: Rd, !Rn, !Rm
- * LDR,STR immediate pre/post-indexed: Rd, !Rn
- * LDR,STR register pre/post-indexed: Rd, !Rn, !Rm
- * LDR,STR scaled register pre/post-indexed: Rd, !Rn, !Rm
- * LDRB,STRB immediate offset: !Rd, Rn
- * LDRB,STRB register offset: !Rd, Rn, !Rm
- * LDRB,STRB scaled register offset: !Rd, !Rn, !Rm
- * LDRB,STRB immediate pre/post-indexed: !Rd, !Rn
- * LDRB,STRB register pre/post-indexed: !Rd, !Rn, !Rm
- * LDRB,STRB scaled register pre/post-indexed: !Rd, !Rn, !Rm
- * LDRT,LDRBT,STRBT immediate pre/post-indexed: !Rd, !Rn
- * LDRT,LDRBT,STRBT register pre/post-indexed: !Rd, !Rn, !Rm
- * LDRT,LDRBT,STRBT scaled register pre/post-indexed: !Rd, !Rn, !Rm
- * LDRH/SH/SB/D,STRH/SH/SB/D immediate offset: !Rd, Rn
- * LDRH/SH/SB/D,STRH/SH/SB/D register offset: !Rd, Rn, !Rm
- * LDRH/SH/SB/D,STRH/SH/SB/D immediate pre/post-indexed: !Rd, !Rn
- * LDRH/SH/SB/D,STRH/SH/SB/D register pre/post-indexed: !Rd, !Rn, !Rm
- * LDREX: !Rd, !Rn
- * MCR/2: !Rd
- * MCRR/2,MRRC/2: !Rd, !Rn
- * MLA: !Rd, !Rn, !Rm, !Rs
- * MOV: Rd
- * MRC/2: !Rd (if Rd==15, only changes cond codes, not the register)
- * MRS,MSR: !Rd
- * MUL: !Rd, !Rm, !Rs
- * PKH{BT,TB}: !Rd, !Rn, !Rm
- * QDADD,[U]QADD/16/8/SUBX: !Rd, !Rm, !Rn
- * QDSUB,[U]QSUB/16/8/ADDX: !Rd, !Rm, !Rn
- * REV/16/SH: !Rd, !Rm
- * RFE: !Rn
- * {S,U}[H]ADD{16,8,SUBX},{S,U}[H]SUB{16,8,ADDX}: !Rd, !Rn, !Rm
- * SEL: !Rd, !Rn, !Rm
- * SMLA<x><y>,SMLA{D,W<y>},SMLSD,SMML{A,S}: !Rd, !Rn, !Rm, !Rs
- * SMLAL<x><y>,SMLA{D,LD},SMLSLD,SMMULL,SMULW<y>: !RdHi, !RdLo, !Rm, !Rs
- * SMMUL,SMUAD,SMUL<x><y>,SMUSD: !Rd, !Rm, !Rs
- * SSAT/16: !Rd, !Rm
- * STM(1/2): !Rn, register_list* (R15 in reg list not recommended)
- * STRT immediate pre/post-indexed: Rd*, !Rn
- * STRT register pre/post-indexed: Rd*, !Rn, !Rm
- * STRT scaled register pre/post-indexed: Rd*, !Rn, !Rm
- * STREX: !Rd, !Rn, !Rm
- * SWP/B: !Rd, !Rn, !Rm
- * {S,U}XTA{B,B16,H}: !Rd, !Rn, !Rm
- * {S,U}XT{B,B16,H}: !Rd, !Rm
- * UM{AA,LA,UL}L: !RdHi, !RdLo, !Rm, !Rs
- * USA{D8,A8,T,T16}: !Rd, !Rm, !Rs
- *
- * May transfer control by writing R15 (possible mode changes or alternate
- * mode accesses marked by "*"):
- * ALU op (* with s-bit), B, BL, BKPT, BLX(1/2), BX, BXJ, CPS*, CPY,
- * LDM(1), LDM(2/3)*, LDR, MOV, RFE*, SWI*
- *
- * Instructions that do not take general registers, nor transfer control:
- * CDP/2, SETEND, SRS*
- */
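
The condition-check table added above lets the kprobes single-step path decide up front whether a conditional instruction would have executed, instead of baking an always-true condition into the copied instruction. A minimal user-space sketch of the same idea follows, assuming the usual ARM PSR bit positions (N=31, Z=30, C=29, V=28); the type and function names below are invented for the example, not kernel symbols.

/*
 * Illustrative user-space model of the condition-check dispatch.
 * PSR bit positions are assumed to match the ARM layout.
 */
#include <stdio.h>

#define PSR_N_BIT (1UL << 31)
#define PSR_Z_BIT (1UL << 30)
#define PSR_C_BIT (1UL << 29)
#define PSR_V_BIT (1UL << 28)

typedef unsigned long (check_cc)(unsigned long cpsr);

static unsigned long check_eq(unsigned long cpsr) { return cpsr & PSR_Z_BIT; }

static unsigned long check_ge(unsigned long cpsr)
{
	cpsr ^= (cpsr << 3);		/* fold V onto the N position */
	return (~cpsr) & PSR_N_BIT;	/* GE when N == V */
}

static unsigned long check_al(unsigned long cpsr) { (void)cpsr; return 1; }

/* Cut-down table, indexed by the condition field (insn >> 28). */
static check_cc *const checks[16] = {
	[0x0] = check_eq, [0xa] = check_ge, [0xe] = check_al,
	/* the remaining thirteen conditions are omitted in this sketch */
};

int main(void)
{
	unsigned long cpsr = PSR_N_BIT | PSR_V_BIT;	/* N == V, Z clear */
	unsigned long insn = 0xa0000000UL;		/* cond field = GE */
	check_cc *fn = checks[insn >> 28];

	printf("would execute: %s\n", fn && fn(cpsr) ? "yes" : "no");
	return 0;
}

The shift tricks work because V shifted left by three lands on the N bit (for GE/LT/GT/LE) and Z shifted right by one lands on the C bit (for HI/LS), so each check reduces to a single mask test.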
index 2ba7deb3072e5962c6cd3de55d4a2ed2594c1589..1656c87501c0f5bee77ebaab931e8249d70ce451 100644 (file)
@@ -134,7 +134,8 @@ static void __kprobes singlestep(struct kprobe *p, struct pt_regs *regs,
                                 struct kprobe_ctlblk *kcb)
 {
        regs->ARM_pc += 4;
-       p->ainsn.insn_handler(p, regs);
+       if (p->ainsn.insn_check_cc(regs->ARM_cpsr))
+               p->ainsn.insn_handler(p, regs);
 }
 
 /*
index 979da3947f4272c933739064d576f991c412eeac..139e3c8273696198fa0995a761a5368875961b51 100644 (file)
@@ -746,7 +746,8 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
        tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
-       while (tail && !((unsigned long)tail & 0x3))
+       while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+              tail && !((unsigned long)tail & 0x3))
                tail = user_backtrace(tail, entry);
 }
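
The perf hunk only adds an upper bound on the frame-pointer walk so a looping or corrupt user stack cannot overrun the callchain buffer. A rough stand-alone sketch of that bounded walk is below; real code must copy each frame from user space safely, and the struct layout and depth limit here are assumptions for illustration only.

#include <stddef.h>

#define MAX_STACK_DEPTH 127		/* assumed limit for the sketch */

struct frame_tail {			/* APCS-style frame, assumed layout */
	struct frame_tail *fp;
	unsigned long sp;
	unsigned long lr;
};

struct callchain {
	size_t nr;
	unsigned long ip[MAX_STACK_DEPTH];
};

/* Stop on the depth limit, a NULL frame pointer, or a misaligned one. */
static void walk_frames(struct frame_tail *tail, struct callchain *entry)
{
	while (entry->nr < MAX_STACK_DEPTH &&
	       tail && !((unsigned long)tail & 0x3)) {
		entry->ip[entry->nr++] = tail->lr;
		tail = tail->fp - 1;	/* next frame, as the kernel walker does */
	}
}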
 
index 2bf27f364d098c210ed7cca9ece5edca613a9f2b..8182f45ca493724f732f59cfe38e69a7c45afe44 100644 (file)
@@ -767,12 +767,20 @@ long arch_ptrace(struct task_struct *child, long request,
 
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
                case PTRACE_GETHBPREGS:
+                       if (ptrace_get_breakpoints(child) < 0)
+                               return -ESRCH;
+
                        ret = ptrace_gethbpregs(child, addr,
                                                (unsigned long __user *)data);
+                       ptrace_put_breakpoints(child);
                        break;
                case PTRACE_SETHBPREGS:
+                       if (ptrace_get_breakpoints(child) < 0)
+                               return -ESRCH;
+
                        ret = ptrace_sethbpregs(child, addr,
                                                (unsigned long __user *)data);
+                       ptrace_put_breakpoints(child);
                        break;
 #endif
 
index cb83983176447ee04d2492a957e691b459f3f7f4..0340224cf73c5c9db2d16185bd698a6ba6518fb8 100644 (file)
@@ -597,45 +597,19 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
        return err;
 }
 
-static inline void setup_syscall_restart(struct pt_regs *regs)
-{
-       regs->ARM_r0 = regs->ARM_ORIG_r0;
-       regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
-}
-
 /*
  * OK, we're invoking a handler
  */    
 static int
 handle_signal(unsigned long sig, struct k_sigaction *ka,
              siginfo_t *info, sigset_t *oldset,
-             struct pt_regs * regs, int syscall)
+             struct pt_regs * regs)
 {
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = current;
        int usig = sig;
        int ret;
 
-       /*
-        * If we were from a system call, check for system call restarting...
-        */
-       if (syscall) {
-               switch (regs->ARM_r0) {
-               case -ERESTART_RESTARTBLOCK:
-               case -ERESTARTNOHAND:
-                       regs->ARM_r0 = -EINTR;
-                       break;
-               case -ERESTARTSYS:
-                       if (!(ka->sa.sa_flags & SA_RESTART)) {
-                               regs->ARM_r0 = -EINTR;
-                               break;
-                       }
-                       /* fallthrough */
-               case -ERESTARTNOINTR:
-                       setup_syscall_restart(regs);
-               }
-       }
-
        /*
         * translate the signal
         */
@@ -685,6 +659,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
  */
 static void do_signal(struct pt_regs *regs, int syscall)
 {
+       unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
        struct k_sigaction ka;
        siginfo_t info;
        int signr;
@@ -698,18 +673,61 @@ static void do_signal(struct pt_regs *regs, int syscall)
        if (!user_mode(regs))
                return;
 
+       /*
+        * If we were from a system call, check for system call restarting...
+        */
+       if (syscall) {
+               continue_addr = regs->ARM_pc;
+               restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
+               retval = regs->ARM_r0;
+
+               /*
+                * Prepare for system call restart.  We do this here so that a
+                * debugger will see the already changed PSW.
+                */
+               switch (retval) {
+               case -ERESTARTNOHAND:
+               case -ERESTARTSYS:
+               case -ERESTARTNOINTR:
+                       regs->ARM_r0 = regs->ARM_ORIG_r0;
+                       regs->ARM_pc = restart_addr;
+                       break;
+               case -ERESTART_RESTARTBLOCK:
+                       regs->ARM_r0 = -EINTR;
+                       break;
+               }
+       }
+
        if (try_to_freeze())
                goto no_signal;
 
+       /*
+        * Get the signal to deliver.  When running under ptrace, at this
+        * point the debugger may change all our registers ...
+        */
        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                sigset_t *oldset;
 
+               /*
+                * Depending on the signal settings we may need to revert the
+                * decision to restart the system call.  But skip this if a
+                * debugger has chosen to restart at a different PC.
+                */
+               if (regs->ARM_pc == restart_addr) {
+                       if (retval == -ERESTARTNOHAND
+                           || (retval == -ERESTARTSYS
+                               && !(ka.sa.sa_flags & SA_RESTART))) {
+                               regs->ARM_r0 = -EINTR;
+                               regs->ARM_pc = continue_addr;
+                       }
+               }
+
                if (test_thread_flag(TIF_RESTORE_SIGMASK))
                        oldset = &current->saved_sigmask;
                else
                        oldset = &current->blocked;
-               if (handle_signal(signr, &ka, &info, oldset, regs, syscall) == 0) {
+               if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
                        /*
                         * A signal was successfully delivered; the saved
                         * sigmask will have been stored in the signal frame,
@@ -723,11 +741,14 @@ static void do_signal(struct pt_regs *regs, int syscall)
        }
 
  no_signal:
-       /*
-        * No signal to deliver to the process - restart the syscall.
-        */
        if (syscall) {
-               if (regs->ARM_r0 == -ERESTART_RESTARTBLOCK) {
+               /*
+                * Handle restarting a different system call.  As above,
+                * if a debugger has chosen to restart at a different PC,
+                * ignore the restart.
+                */
+               if (retval == -ERESTART_RESTARTBLOCK
+                   && regs->ARM_pc == continue_addr) {
                        if (thumb_mode(regs)) {
                                regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
                                regs->ARM_pc -= 2;
@@ -750,11 +771,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
 #endif
                        }
                }
-               if (regs->ARM_r0 == -ERESTARTNOHAND ||
-                   regs->ARM_r0 == -ERESTARTSYS ||
-                   regs->ARM_r0 == -ERESTARTNOINTR) {
-                       setup_syscall_restart(regs);
-               }
 
                /* If there's no signal to deliver, we just put the saved sigmask
                 * back.
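
The signal.c rework records the restart address before signal delivery, rewinds the PC early so an attached debugger sees it, and only afterwards reverts the restart when the handler's flags (or the error code) say the call must not be resumed. The decision itself boils down to a small table; here it is as a hedged user-space sketch, using the kernel's conventional -ERESTART* values.

#include <stdbool.h>
#include <stdio.h>

/* Kernel-internal restart codes (values from include/linux/errno.h). */
enum {
	ERESTARTSYS		= 512,
	ERESTARTNOINTR		= 513,
	ERESTARTNOHAND		= 514,
	ERESTART_RESTARTBLOCK	= 516,
};

/*
 * Sketch: should a syscall that returned -code be restarted when a
 * handler with the given SA_RESTART setting is about to be invoked?
 */
static bool restart_with_handler(int code, bool sa_restart)
{
	switch (code) {
	case ERESTARTNOINTR:
		return true;			/* always restarted */
	case ERESTARTSYS:
		return sa_restart;		/* only with SA_RESTART */
	case ERESTARTNOHAND:
	case ERESTART_RESTARTBLOCK:
		return false;			/* handler sees -EINTR */
	default:
		return false;			/* not a restart code */
	}
}

int main(void)
{
	printf("%d %d\n",
	       restart_with_handler(ERESTARTSYS, false),	/* 0 */
	       restart_with_handler(ERESTARTSYS, true));	/* 1 */
	return 0;
}

ERESTART_RESTARTBLOCK is the special case: it is only restarted, via restart_syscall, when no handler runs at all, which is why the sketch reports false for it.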
index 8fe05ad932e4bd0b113c11c19f7e816650ace696..f29b8a29b17436f80f3b960a8be0337aa91811c7 100644 (file)
@@ -479,7 +479,7 @@ static void broadcast_timer_set_mode(enum clock_event_mode mode,
 {
 }
 
-static void broadcast_timer_setup(struct clock_event_device *evt)
+static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
 {
        evt->name       = "dummy_timer";
        evt->features   = CLOCK_EVT_FEAT_ONESHOT |
index 4ad8da15ef2b3972f4412631ceb1a5cf1d63eba2..af0aaebf4de62e16e70fcfcad8ccafece2879217 100644 (file)
@@ -311,7 +311,7 @@ asmlinkage long sys_oabi_semtimedop(int semid,
        long err;
        int i;
 
-       if (nsops < 1)
+       if (nsops < 1 || nsops > SEMOPM)
                return -EINVAL;
        sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
        if (!sops)
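
The sys_oabi_semtimedop fix is the classic validate-before-allocate pattern: bound the user-supplied count on both ends before it is multiplied into an allocation size. A stand-alone sketch of the same pattern; SEMOPM is taken to be the kernel's default of 500, and the helper and struct names are invented.

#include <errno.h>
#include <stdlib.h>

#define SEMOPM 500	/* assumed: default per-call operation limit */

struct sembuf_example { unsigned short sem_num; short sem_op; short sem_flg; };

/* Reject out-of-range counts before they are used to size an allocation. */
static struct sembuf_example *alloc_sops(long nsops, int *err)
{
	struct sembuf_example *sops;

	if (nsops < 1 || nsops > SEMOPM) {
		*err = -EINVAL;
		return NULL;
	}
	sops = malloc(sizeof(*sops) * nsops);
	if (!sops)
		*err = -ENOMEM;
	return sops;
}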
index 19390231a0e98d5e792c64e487269e5f75a2ba23..2d299bf5d72fc05085dacf6ce5bcb20d9c02e1ea 100644 (file)
@@ -83,6 +83,7 @@ config ARCH_AT91CAP9
        select CPU_ARM926T
        select GENERIC_CLOCKEVENTS
        select HAVE_FB_ATMEL
+       select HAVE_NET_MACB
 
 config ARCH_AT572D940HF
        bool "AT572D940HF"
index 1f9d3cb64c50d66aa7f10f48cdd5fc532eb36a90..d8df59a3426d82a75de3255bf586292659072616 100644 (file)
 #include <mach/board.h>
 #include "generic.h"
 
+static void __init at91eb01_init_irq(void)
+{
+       at91x40_init_interrupts(NULL);
+}
+
 static void __init at91eb01_map_io(void)
 {
        at91x40_initialize(40000000);
@@ -38,7 +43,7 @@ static void __init at91eb01_map_io(void)
 MACHINE_START(AT91EB01, "Atmel AT91 EB01")
        /* Maintainer: Greg Ungerer <gerg@snapgear.com> */
        .timer          = &at91x40_timer,
-       .init_irq       = at91x40_init_interrupts,
+       .init_irq       = at91eb01_init_irq,
        .map_io         = at91eb01_map_io,
 MACHINE_END
 
index 3bef931d0b1c915b5de540ee860dcf37cf27cba6..0700f2125305d99fd769f46726fe1e09afaa3dcc 100644 (file)
@@ -27,6 +27,7 @@
 #define ARCH_ID_AT91SAM9G45    0x819b05a0
 #define ARCH_ID_AT91SAM9G45MRL 0x819b05a2      /* aka 9G45-ES2 & non ES lots */
 #define ARCH_ID_AT91SAM9G45ES  0x819b05a1      /* 9G45-ES (Engineering Sample) */
+#define ARCH_ID_AT91SAM9X5     0x819a05a0
 #define ARCH_ID_AT91CAP9       0x039A03A0
 
 #define ARCH_ID_AT91SAM9XE128  0x329973a0
@@ -55,6 +56,12 @@ static inline unsigned long at91_cpu_fully_identify(void)
 #define ARCH_EXID_AT91SAM9G46  0x00000003
 #define ARCH_EXID_AT91SAM9G45  0x00000004
 
+#define ARCH_EXID_AT91SAM9G15  0x00000000
+#define ARCH_EXID_AT91SAM9G35  0x00000001
+#define ARCH_EXID_AT91SAM9X35  0x00000002
+#define ARCH_EXID_AT91SAM9G25  0x00000003
+#define ARCH_EXID_AT91SAM9X25  0x00000004
+
 static inline unsigned long at91_exid_identify(void)
 {
        return at91_sys_read(AT91_DBGU_EXID);
@@ -143,6 +150,27 @@ static inline unsigned long at91cap9_rev_identify(void)
 #define cpu_is_at91sam9m11()   (0)
 #endif
 
+#ifdef CONFIG_ARCH_AT91SAM9X5
+#define cpu_is_at91sam9x5()    (at91_cpu_identify() == ARCH_ID_AT91SAM9X5)
+#define cpu_is_at91sam9g15()   (cpu_is_at91sam9x5() && \
+                               (at91_exid_identify() == ARCH_EXID_AT91SAM9G15))
+#define cpu_is_at91sam9g35()   (cpu_is_at91sam9x5() && \
+                               (at91_exid_identify() == ARCH_EXID_AT91SAM9G35))
+#define cpu_is_at91sam9x35()   (cpu_is_at91sam9x5() && \
+                               (at91_exid_identify() == ARCH_EXID_AT91SAM9X35))
+#define cpu_is_at91sam9g25()   (cpu_is_at91sam9x5() && \
+                               (at91_exid_identify() == ARCH_EXID_AT91SAM9G25))
+#define cpu_is_at91sam9x25()   (cpu_is_at91sam9x5() && \
+                               (at91_exid_identify() == ARCH_EXID_AT91SAM9X25))
+#else
+#define cpu_is_at91sam9x5()    (0)
+#define cpu_is_at91sam9g15()   (0)
+#define cpu_is_at91sam9g35()   (0)
+#define cpu_is_at91sam9x35()   (0)
+#define cpu_is_at91sam9g25()   (0)
+#define cpu_is_at91sam9x25()   (0)
+#endif
+
 #ifdef CONFIG_ARCH_AT91CAP9
 #define cpu_is_at91cap9()      (at91_cpu_identify() == ARCH_ID_AT91CAP9)
 #define cpu_is_at91cap9_revB() (at91cap9_rev_identify() == ARCH_REVISION_CAP9_B)
index 32f147998cd9536b7daf47236da1b75e9029087f..c0deacae778d28c3ef8ff50c004ac65f01ef3ce6 100644 (file)
@@ -63,6 +63,7 @@ config MACH_DAVINCI_EVM
        depends on ARCH_DAVINCI_DM644x
        select MISC_DEVICES
        select EEPROM_AT24
+       select I2C
        help
          Configure this option to specify whether the board used
          for development is a DM644x EVM
@@ -72,6 +73,7 @@ config MACH_SFFSDR
        depends on ARCH_DAVINCI_DM644x
        select MISC_DEVICES
        select EEPROM_AT24
+       select I2C
        help
          Say Y here to select the Lyrtech Small Form Factor
          Software Defined Radio (SFFSDR) board.
@@ -105,6 +107,7 @@ config MACH_DAVINCI_DM6467_EVM
        select MACH_DAVINCI_DM6467TEVM
        select MISC_DEVICES
        select EEPROM_AT24
+       select I2C
        help
          Configure this option to specify whether the board used
          for development is a DM6467 EVM
@@ -118,6 +121,7 @@ config MACH_DAVINCI_DM365_EVM
        depends on ARCH_DAVINCI_DM365
        select MISC_DEVICES
        select EEPROM_AT24
+       select I2C
        help
          Configure this option to specify whether the board used
          for development is a DM365 EVM
@@ -129,6 +133,7 @@ config MACH_DAVINCI_DA830_EVM
        select GPIO_PCF857X
        select MISC_DEVICES
        select EEPROM_AT24
+       select I2C
        help
          Say Y here to select the TI DA830/OMAP-L137/AM17x Evaluation Module.
 
@@ -205,6 +210,7 @@ config MACH_MITYOMAPL138
        depends on ARCH_DAVINCI_DA850
        select MISC_DEVICES
        select EEPROM_AT24
+       select I2C
        help
          Say Y here to select the Critical Link MityDSP-L138/MityARM-1808
          System on Module.  Information on this SoM may be found at
index 2aa79c54f98e664d5a48701fdf0cec553090c9aa..606a6f27ed6c20e4e38034c180e592a407e68eea 100644 (file)
@@ -29,7 +29,7 @@
 #include <mach/mux.h>
 #include <mach/spi.h>
 
-#define MITYOMAPL138_PHY_ID            "0:03"
+#define MITYOMAPL138_PHY_ID            ""
 
 #define FACTORY_CONFIG_MAGIC   0x012C0138
 #define FACTORY_CONFIG_VERSION 0x00010001
@@ -414,7 +414,7 @@ static struct resource mityomapl138_nandflash_resource[] = {
 
 static struct platform_device mityomapl138_nandflash_device = {
        .name           = "davinci_nand",
-       .id             = 0,
+       .id             = 1,
        .dev            = {
                .platform_data  = &mityomapl138_nandflash_data,
        },
index 625d4b66718ba89bc3062c8b9b7b9189b13cc0c5..58a02dc7b15a5b76f8deb2f540e55f030d484d55 100644 (file)
@@ -39,7 +39,8 @@
 #define DA8XX_GPIO_BASE                        0x01e26000
 #define DA8XX_I2C1_BASE                        0x01e28000
 #define DA8XX_SPI0_BASE                        0x01c41000
-#define DA8XX_SPI1_BASE                        0x01f0e000
+#define DA830_SPI1_BASE                        0x01e12000
+#define DA850_SPI1_BASE                        0x01f0e000
 
 #define DA8XX_EMAC_CTRL_REG_OFFSET     0x3000
 #define DA8XX_EMAC_MOD_REG_OFFSET      0x2000
@@ -762,8 +763,8 @@ static struct resource da8xx_spi0_resources[] = {
 
 static struct resource da8xx_spi1_resources[] = {
        [0] = {
-               .start  = DA8XX_SPI1_BASE,
-               .end    = DA8XX_SPI1_BASE + SZ_4K - 1,
+               .start  = DA830_SPI1_BASE,
+               .end    = DA830_SPI1_BASE + SZ_4K - 1,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
@@ -832,5 +833,10 @@ int __init da8xx_register_spi(int instance, struct spi_board_info *info,
 
        da8xx_spi_pdata[instance].num_chipselect = len;
 
+       if (instance == 1 && cpu_is_davinci_da850()) {
+               da8xx_spi1_resources[0].start = DA850_SPI1_BASE;
+               da8xx_spi1_resources[0].end = DA850_SPI1_BASE + SZ_4K - 1;
+       }
+
        return platform_device_register(&da8xx_spi_device[instance]);
 }
index 9f1befc5ac387d2d0124c080c88458a67dc0b30f..f8b7ea4f6235f6f5dc66aa15d9bfe203523f4486 100644 (file)
@@ -24,6 +24,9 @@
 
 #define UART_SHIFT     2
 
+#define davinci_uart_v2p(x)    ((x) - PAGE_OFFSET + PLAT_PHYS_OFFSET)
+#define davinci_uart_p2v(x)    ((x) - PLAT_PHYS_OFFSET + PAGE_OFFSET)
+
                .pushsection .data
 davinci_uart_phys:     .word   0
 davinci_uart_virt:     .word   0
@@ -34,7 +37,7 @@ davinci_uart_virt:    .word   0
                /* Use davinci_uart_phys/virt if already configured */
 10:            mrc     p15, 0, \rp, c1, c0
                tst     \rp, #1                 @ MMU enabled?
-               ldreq   \rp, =__virt_to_phys(davinci_uart_phys)
+               ldreq   \rp, =davinci_uart_v2p(davinci_uart_phys)
                ldrne   \rp, =davinci_uart_phys
                add     \rv, \rp, #4            @ davinci_uart_virt
                ldr     \rp, [\rp, #0]
@@ -48,18 +51,18 @@ davinci_uart_virt:  .word   0
                tst     \rp, #1                 @ MMU enabled?
 
                /* Copy uart phys address from decompressor uart info */
-               ldreq   \rv, =__virt_to_phys(davinci_uart_phys)
+               ldreq   \rv, =davinci_uart_v2p(davinci_uart_phys)
                ldrne   \rv, =davinci_uart_phys
                ldreq   \rp, =DAVINCI_UART_INFO
-               ldrne   \rp, =__phys_to_virt(DAVINCI_UART_INFO)
+               ldrne   \rp, =davinci_uart_p2v(DAVINCI_UART_INFO)
                ldr     \rp, [\rp, #0]
                str     \rp, [\rv]
 
                /* Copy uart virt address from decompressor uart info */
-               ldreq   \rv, =__virt_to_phys(davinci_uart_virt)
+               ldreq   \rv, =davinci_uart_v2p(davinci_uart_virt)
                ldrne   \rv, =davinci_uart_virt
                ldreq   \rp, =DAVINCI_UART_INFO
-               ldrne   \rp, =__phys_to_virt(DAVINCI_UART_INFO)
+               ldrne   \rp, =davinci_uart_p2v(DAVINCI_UART_INFO)
                ldr     \rp, [\rp, #4]
                str     \rp, [\rv]
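
The davinci_uart_v2p/p2v helpers simply open-code the linear-map offset arithmetic so this early debug assembler no longer depends on the generic __virt_to_phys macro. The arithmetic itself, in C with example offsets (the real values come from the platform headers):

#include <stdint.h>
#include <stdio.h>

/* Example values only; assumptions for the sketch. */
#define PAGE_OFFSET		0xc0000000UL
#define PLAT_PHYS_OFFSET	0x80000000UL

static uintptr_t v2p(uintptr_t v) { return v - PAGE_OFFSET + PLAT_PHYS_OFFSET; }
static uintptr_t p2v(uintptr_t p) { return p - PLAT_PHYS_OFFSET + PAGE_OFFSET; }

int main(void)
{
	uintptr_t virt = 0xc0003ff8UL;

	printf("virt %#lx -> phys %#lx -> virt %#lx\n",
	       (unsigned long)virt,
	       (unsigned long)v2p(virt),
	       (unsigned long)p2v(v2p(virt)));
	return 0;
}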
 
index 8051110b8ac3417cf2817878204f9f1f1e28da42..c9e6ce185a66fa62a6a028613413e582894d6896 100644 (file)
@@ -22,7 +22,7 @@
  *
  * This area sits just below the page tables (see arch/arm/kernel/head.S).
  */
-#define DAVINCI_UART_INFO      (PHYS_OFFSET + 0x3ff8)
+#define DAVINCI_UART_INFO      (PLAT_PHYS_OFFSET + 0x3ff8)
 
 #define DAVINCI_UART0_BASE     (IO_PHYS + 0x20000)
 #define DAVINCI_UART1_BASE     (IO_PHYS + 0x20400)
index 2cf390fbd9809876a55a2b9dd6687e40cea41d60..47a69cbc31a873dbc19fc9f69d4b231095f3d356 100644 (file)
@@ -257,11 +257,16 @@ static const struct fsl_usb2_platform_data otg_device_pdata __initconst = {
        .workaround     = FLS_USB2_WORKAROUND_ENGCM09152,
 };
 
+static int vpr200_usbh_init(struct platform_device *pdev)
+{
+       return mx35_initialize_usb_hw(pdev->id,
+                       MXC_EHCI_INTERFACE_SINGLE_UNI | MXC_EHCI_INTERNAL_PHY);
+}
+
 /* USB HOST config */
 static const struct mxc_usbh_platform_data usb_host_pdata __initconst = {
-       .portsc         = MXC_EHCI_MODE_SERIAL,
-       .flags          = MXC_EHCI_INTERFACE_SINGLE_UNI |
-                         MXC_EHCI_INTERNAL_PHY,
+       .init = vpr200_usbh_init,
+       .portsc = MXC_EHCI_MODE_SERIAL,
 };
 
 static struct platform_device *devices[] __initdata = {
index 10a1bea1054888ecfe0a24689722090a2bc778c0..6206b1191fe8e574efe7c480864fd8201060244c 100644 (file)
@@ -193,7 +193,7 @@ static iomux_v3_cfg_t mx53_loco_pads[] = {
        .wakeup         = wake,                                 \
 }
 
-static const struct gpio_keys_button loco_buttons[] __initconst = {
+static struct gpio_keys_button loco_buttons[] = {
        GPIO_BUTTON(MX53_LOCO_POWER, KEY_POWER, 1, "power", 0),
        GPIO_BUTTON(MX53_LOCO_UI1, KEY_VOLUMEUP, 1, "volume-up", 0),
        GPIO_BUTTON(MX53_LOCO_UI2, KEY_VOLUMEDOWN, 1, "volume-down", 0),
index 1ad97fed1e948ccecf16db4562ad54c80252fadb..5dcc59d5b9ec9feab991fc34302273702388b21e 100644 (file)
@@ -295,11 +295,11 @@ static int name##_set_rate(struct clk *clk, unsigned long rate)           \
        unsigned long diff, parent_rate, calc_rate;                     \
        int i;                                                          \
                                                                        \
-       parent_rate = clk_get_rate(clk->parent);                        \
        div_max = BM_CLKCTRL_##dr##_DIV >> BP_CLKCTRL_##dr##_DIV;       \
        bm_busy = BM_CLKCTRL_##dr##_BUSY;                               \
                                                                        \
        if (clk->parent == &ref_xtal_clk) {                             \
+               parent_rate = clk_get_rate(clk->parent);                \
                div = DIV_ROUND_UP(parent_rate, rate);                  \
                if (clk == &cpu_clk) {                                  \
                        div_max = BM_CLKCTRL_CPU_DIV_XTAL >>            \
@@ -309,6 +309,11 @@ static int name##_set_rate(struct clk *clk, unsigned long rate)            \
                if (div == 0 || div > div_max)                          \
                        return -EINVAL;                                 \
        } else {                                                        \
+               /*                                                      \
+                * hack alert: this block modifies clk->parent, too,    \
+                * so the base to use it the grand parent.              \
+                * so the base to use is the grandparent.               \
+               parent_rate = clk_get_rate(clk->parent->parent);        \
                rate >>= PARENT_RATE_SHIFT;                             \
                parent_rate >>= PARENT_RATE_SHIFT;                      \
                diff = parent_rate;                                     \
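
The mx28 clock hunk defers reading parent_rate until the parent is known, since the fractional branch reparents the clock and must measure the grandparent instead. The integer-divider branch that precedes it is just a round-up-and-clamp; sketched below with invented numbers.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Pick the smallest divider that does not overshoot the requested rate. */
static int pick_div(unsigned long parent_rate, unsigned long rate,
		    unsigned long div_max, unsigned long *div)
{
	*div = DIV_ROUND_UP(parent_rate, rate);
	if (*div == 0 || *div > div_max)
		return -1;		/* the kernel returns -EINVAL here */
	return 0;
}

int main(void)
{
	unsigned long div;

	if (pick_div(24000000UL, 7000000UL, 0x3f, &div) == 0)
		printf("div %lu -> %lu Hz\n", div, 24000000UL / div);
	return 0;
}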
index b2b1e37bb6bba3cefa5961f39d026c1a5f616d53..d6e34dd9e7e75c6dde4de60a004e319dd7f02547 100644 (file)
@@ -115,6 +115,7 @@ int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
                                  sdrc_cs0->rfr_ctrl, sdrc_cs0->actim_ctrla,
                                  sdrc_cs0->actim_ctrlb, sdrc_cs0->mr,
                                  0, 0, 0, 0);
+       clk->rate = rate;
 
        return 0;
 }
index 6de0ad0eea654f7c5ef4148005b69ead76871f7b..9cdcca59792455ede51f2046fcf66df898561779 100644 (file)
@@ -711,7 +711,7 @@ static struct regulator_consumer_supply bq24022_consumers[] = {
 static struct regulator_init_data bq24022_init_data = {
        .constraints = {
                .max_uA         = 500000,
-               .valid_ops_mask = REGULATOR_CHANGE_CURRENT,
+               .valid_ops_mask = REGULATOR_CHANGE_CURRENT|REGULATOR_CHANGE_STATUS,
        },
        .num_consumer_supplies  = ARRAY_SIZE(bq24022_consumers),
        .consumer_supplies      = bq24022_consumers,
index a72993dde2b301f301a9c4caab8d98dd2f3e1388..9984ef70bd794ff8e5a97ffff07ebc08eebb0988 100644 (file)
@@ -599,7 +599,7 @@ static struct regulator_consumer_supply bq24022_consumers[] = {
 static struct regulator_init_data bq24022_init_data = {
        .constraints = {
                .max_uA         = 500000,
-               .valid_ops_mask = REGULATOR_CHANGE_CURRENT,
+               .valid_ops_mask = REGULATOR_CHANGE_CURRENT | REGULATOR_CHANGE_STATUS,
        },
        .num_consumer_supplies  = ARRAY_SIZE(bq24022_consumers),
        .consumer_supplies      = bq24022_consumers,
index 0c5d749d7b5f17f8ad7241506a90dc8dc2d63a89..9a732195aa1cbd1ebb515d63502628e33277a979 100644 (file)
@@ -4,5 +4,5 @@
  * operation to deadlock the system.
  */
 #define mb()           dsb()
-#define rmb()          dmb()
+#define rmb()          dsb()
 #define wmb()          mb()
index cc115174899bdf4253471065134a90a658f3a5d6..425b42e91ef68a83e46e875c82a9ca8719aa492f 100644 (file)
@@ -23,7 +23,7 @@
 
 #include <asm/outercache.h>
 
-#define rmb()          dmb()
+#define rmb()          dsb()
 #define wmb()          do { dsb(); outer_sync(); } while (0)
 #define mb()           wmb()
 
index e5f6fc42834892e7622664b496d3aa0288eaa2ff..e591513bb53ecdfa06b9d8bf23d483bed7dbe733 100644 (file)
@@ -392,7 +392,7 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn - 1) + 1;
-       end_pg = pfn_to_page(end_pfn);
+       end_pg = pfn_to_page(end_pfn - 1) + 1;
 
        /*
         * Convert to physical addresses, and
@@ -426,6 +426,14 @@ static void __init free_unused_memmap(struct meminfo *mi)
 
                bank_start = bank_pfn_start(bank);
 
+#ifdef CONFIG_SPARSEMEM
+               /*
+                * Take care not to free memmap entries that don't exist
+                * due to SPARSEMEM sections which aren't present.
+                */
+               bank_start = min(bank_start,
+                                ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#endif
                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
@@ -440,6 +448,12 @@ static void __init free_unused_memmap(struct meminfo *mi)
                 */
                prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
        }
+
+#ifdef CONFIG_SPARSEMEM
+       if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
+               free_memmap(prev_bank_end,
+                           ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#endif
 }
 
 static void __init free_highpages(void)
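
With SPARSEMEM the memmap only exists for sections that are present, so the hunk clamps each freed gap to the section boundary after the previous bank and, after the loop, frees any leftover tail of the final section. The boundary arithmetic in an illustrative stand-alone form (section size and pfn values are made up):

#include <stdio.h>

#define PAGES_PER_SECTION	0x10000UL	/* example section size in pages */
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define MIN(a, b)		((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long prev_bank_end = 0x2c000;	/* pfn where the last bank ended */
	unsigned long bank_start = 0x48000;	/* pfn where the next bank starts */

	/*
	 * Do not free past the end of the section holding the previous bank:
	 * memmap for fully absent sections in the gap was never allocated.
	 */
	unsigned long free_to = MIN(bank_start,
				    ALIGN_UP(prev_bank_end, PAGES_PER_SECTION));

	if (free_to > prev_bank_end)
		printf("free memmap for pfns [%#lx, %#lx)\n",
		       prev_bank_end, free_to);
	return 0;
}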
index ce233bcbf5060c14bfe89c0093020a9c17005904..42af97664c9d844f156789f23a8c1fac7a643b68 100644 (file)
@@ -395,7 +395,7 @@ ENTRY(xscale_dma_a0_map_area)
        teq     r2, #DMA_TO_DEVICE
        beq     xscale_dma_clean_range
        b       xscale_dma_flush_range
-ENDPROC(xscsale_dma_a0_map_area)
+ENDPROC(xscale_dma_a0_map_area)
 
 /*
  *     dma_unmap_area(start, size, dir)
index 7a107246fd98849b6f8efe34316d977629f4bef8..6cd6d7f686f639623da2c10b0bcc6e9213d4b4a1 100644 (file)
@@ -295,6 +295,12 @@ static int mxc_gpio_direction_output(struct gpio_chip *chip,
        return 0;
 }
 
+/*
+ * This lock class tells lockdep that GPIO irqs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key gpio_lock_class;
+
 int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt)
 {
        int i, j;
@@ -311,6 +317,7 @@ int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt)
                __raw_writel(~0, port[i].base + GPIO_ISR);
                for (j = port[i].virtual_irq_start;
                        j < port[i].virtual_irq_start + 32; j++) {
+                       irq_set_lockdep_class(j, &gpio_lock_class);
                        irq_set_chip_and_handler(j, &gpio_irq_chip,
                                                 handle_level_irq);
                        set_irq_flags(j, IRQF_VALID);
index 4ddce565b353745d32f31b0023ff1df1ad93f02d..8397a2dd19f2d6caad2adb5824ca13537c8d19fd 100644 (file)
@@ -124,6 +124,8 @@ imx_ssi_fiq_start:
 1:
                @ return from FIQ
                subs    pc, lr, #4
+
+               .align
 imx_ssi_fiq_base:
                .word 0x0
 imx_ssi_fiq_rx_buffer:
index 8a51fd58f656e15f6f1fa7d51eb0501e18143149..34fc31ee9081ff8624d77dfb664ebe059d4c9343 100644 (file)
@@ -793,6 +793,8 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
        clk_enable(obj->clk);
        errs = iommu_report_fault(obj, &da);
        clk_disable(obj->clk);
+       if (errs == 0)
+               return IRQ_HANDLED;
 
        /* Fault callback or TLB/PTE Dynamic loading */
        if (obj->isr && !obj->isr(obj, da, errs, obj->isr_priv))
index 8e256cc5dcd9a8e18e843cc671b1f47979f0d8e8..351c80fbba7efb106beb121456c4959f2c185a41 100644 (file)
@@ -997,9 +997,6 @@ config IRQ_GT641XX
 config IRQ_GIC
        bool
 
-config IRQ_CPU_OCTEON
-       bool
-
 config MIPS_BOARDS_GEN
        bool
 
@@ -1359,8 +1356,6 @@ config CPU_SB1
 config CPU_CAVIUM_OCTEON
        bool "Cavium Octeon processor"
        depends on SYS_HAS_CPU_CAVIUM_OCTEON
-       select IRQ_CPU
-       select IRQ_CPU_OCTEON
        select CPU_HAS_PREFETCH
        select CPU_SUPPORTS_64BIT_KERNEL
        select SYS_SUPPORTS_SMP
index 05f120ff90f9c244312a57f9a2619a627e9e28ab..5c956fe8760ffc9e99c5b928eaedfb6af7ef2cbe 100644 (file)
@@ -127,13 +127,10 @@ const char *get_system_type(void)
 void __init board_setup(void)
 {
        unsigned long bcsr1, bcsr2;
-       u32 pin_func;
 
        bcsr1 = DB1000_BCSR_PHYS_ADDR;
        bcsr2 = DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS;
 
-       pin_func = 0;
-
 #ifdef CONFIG_MIPS_DB1000
        printk(KERN_INFO "AMD Alchemy Au1000/Db1000 Board\n");
 #endif
@@ -164,12 +161,16 @@ void __init board_setup(void)
        /* Not valid for Au1550 */
 #if defined(CONFIG_IRDA) && \
    (defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100))
-       /* Set IRFIRSEL instead of GPIO15 */
-       pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF;
-       au_writel(pin_func, SYS_PINFUNC);
-       /* Power off until the driver is in use */
-       bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK,
-                               BCSR_RESETS_IRDA_MODE_OFF);
+       {
+               u32 pin_func;
+
+               /* Set IRFIRSEL instead of GPIO15 */
+               pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF;
+               au_writel(pin_func, SYS_PINFUNC);
+               /* Power off until the driver is in use */
+               bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK,
+                        BCSR_RESETS_IRDA_MODE_OFF);
+       }
 #endif
        bcsr_write(BCSR_PCMCIA, 0);     /* turn off PCMCIA power */
 
@@ -177,31 +178,35 @@ void __init board_setup(void)
        alchemy_gpio1_input_enable();
 
 #ifdef CONFIG_MIPS_MIRAGE
-       /* GPIO[20] is output */
-       alchemy_gpio_direction_output(20, 0);
+       {
+               u32 pin_func;
 
-       /* Set GPIO[210:208] instead of SSI_0 */
-       pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0;
+               /* GPIO[20] is output */
+               alchemy_gpio_direction_output(20, 0);
 
-       /* Set GPIO[215:211] for LEDs */
-       pin_func |= 5 << 2;
+               /* Set GPIO[210:208] instead of SSI_0 */
+               pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0;
 
-       /* Set GPIO[214:213] for more LEDs */
-       pin_func |= 5 << 12;
+               /* Set GPIO[215:211] for LEDs */
+               pin_func |= 5 << 2;
 
-       /* Set GPIO[207:200] instead of PCMCIA/LCD */
-       pin_func |= SYS_PF_LCD | SYS_PF_PC;
-       au_writel(pin_func, SYS_PINFUNC);
+               /* Set GPIO[214:213] for more LEDs */
+               pin_func |= 5 << 12;
 
-       /*
-        * Enable speaker amplifier.  This should
-        * be part of the audio driver.
-        */
-       alchemy_gpio_direction_output(209, 1);
+               /* Set GPIO[207:200] instead of PCMCIA/LCD */
+               pin_func |= SYS_PF_LCD | SYS_PF_PC;
+               au_writel(pin_func, SYS_PINFUNC);
 
-       pm_power_off = mirage_power_off;
-       _machine_halt = mirage_power_off;
-       _machine_restart = (void(*)(char *))mips_softreset;
+               /*
+                * Enable speaker amplifier.  This should
+                * be part of the audio driver.
+                */
+               alchemy_gpio_direction_output(209, 1);
+
+               pm_power_off = mirage_power_off;
+               _machine_halt = mirage_power_off;
+               _machine_restart = (void(*)(char *))mips_softreset;
+       }
 #endif
 
 #ifdef CONFIG_MIPS_BOSPORUS
index 15125c2fda7d7397de3dcee2ec1f1555babc1f85..34a90a4bb6f4496a67906998ddb9ae8ffc3262f3 100644 (file)
@@ -51,10 +51,9 @@ void __init prom_init(void)
        prom_init_cmdline();
 
        memsize_str = prom_getenv("memsize");
-       if (!memsize_str)
+       if (!memsize_str || strict_strtoul(memsize_str, 0, &memsize))
                memsize = 0x04000000;
-       else
-               strict_strtoul(memsize_str, 0, &memsize);
+
        add_memory_region(0, memsize, BOOT_MEM_RAM);
 }
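
The hunk above folds a parse failure into the same default as a missing environment variable. A minimal standalone sketch of that pattern, assuming the same prom_getenv() and strict_strtoul() helpers from the hunk (strict_strtoul() was later superseded by kstrtoul()); the wrapper function itself is hypothetical:

    static unsigned long parse_memsize(void)
    {
            unsigned long memsize;
            char *str = prom_getenv("memsize");

            /* strict_strtoul() returns 0 on success, non-zero on a parse error */
            if (!str || strict_strtoul(str, 0, &memsize))
                    memsize = 0x04000000;   /* fall back to 64 MB */

            return memsize;
    }
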
 
index 425dfa5d6e12c69380b160edde7edf58477860af..bb571bcdb8f2f06d5d2cb8b2d787591f35b4e66d 100644 (file)
@@ -325,9 +325,7 @@ int __init ar7_gpio_init(void)
                size = 0x1f;
        }
 
-       gpch->regs = ioremap_nocache(AR7_REGS_GPIO,
-                                       AR7_REGS_GPIO + 0x10);
-
+       gpch->regs = ioremap_nocache(AR7_REGS_GPIO, size);
        if (!gpch->regs) {
                printk(KERN_ERR "%s: failed to ioremap regs\n",
                                        gpch->chip.label);
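
The fix above matters because ioremap_nocache() takes a physical base and a length in bytes, not an end address. A hedged sketch of the call shape (REG_BLOCK_PHYS and the 0x20 length are hypothetical, not taken from this driver):

    void __iomem *regs;

    regs = ioremap_nocache(REG_BLOCK_PHYS, 0x20);   /* base, size in bytes */
    if (!regs)
            return -ENOMEM;

    /* ... access the block with readl()/writel() on regs ... */

    iounmap(regs);
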
index 88c9d963be88872df85924046a614858c59593af..9a6243676e2203b4c3f9addcc3c236fb27ba6060 100644 (file)
@@ -16,8 +16,8 @@
 
 int main(int argc, char *argv[])
 {
+       unsigned long long vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr;
        struct stat sb;
-       uint64_t vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr;
 
        if (argc != 3) {
                fprintf(stderr, "Usage: %s <pathname> <vmlinux_load_addr>\n",
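
Switching these host-tool variables from uint64_t to unsigned long long presumably keeps them matched to the "%llx"/"%llu" conversions used elsewhere in the tool; uint64_t only stays portable with the PRI*64 macros. A small userspace illustration of the difference:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t fixed = 42;
            unsigned long long wide = 42;

            /* uint64_t needs the PRIu64/PRIx64 macros to avoid format warnings */
            printf("fixed = %" PRIu64 "\n", fixed);

            /* unsigned long long always matches %llu/%llx directly */
            printf("wide  = %llu\n", wide);
            return 0;
    }
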
index caae22858163a193b681e0adb920590d3d0bcd20..cad555ebeca3b855356d928b66e736e6a286cef6 100644 (file)
@@ -1,11 +1,7 @@
-config CAVIUM_OCTEON_SPECIFIC_OPTIONS
-       bool "Enable Octeon specific options"
-       depends on CPU_CAVIUM_OCTEON
-       default "y"
+if CPU_CAVIUM_OCTEON
 
 config CAVIUM_CN63XXP1
        bool "Enable CN63XXP1 errata worarounds"
-       depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
        default "n"
        help
          The CN63XXP1 chip requires build time workarounds to
@@ -16,7 +12,6 @@ config CAVIUM_CN63XXP1
 
 config CAVIUM_OCTEON_2ND_KERNEL
        bool "Build the kernel to be used as a 2nd kernel on the same chip"
-       depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
        default "n"
        help
          This option configures this kernel to be linked at a different
@@ -26,7 +21,6 @@ config CAVIUM_OCTEON_2ND_KERNEL
 
 config CAVIUM_OCTEON_HW_FIX_UNALIGNED
        bool "Enable hardware fixups of unaligned loads and stores"
-       depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
        default "y"
        help
          Configure the Octeon hardware to automatically fix unaligned loads
@@ -38,7 +32,6 @@ config CAVIUM_OCTEON_HW_FIX_UNALIGNED
 
 config CAVIUM_OCTEON_CVMSEG_SIZE
        int "Number of L1 cache lines reserved for CVMSEG memory"
-       depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
        range 0 54
        default 1
        help
@@ -50,7 +43,6 @@ config CAVIUM_OCTEON_CVMSEG_SIZE
 
 config CAVIUM_OCTEON_LOCK_L2
        bool "Lock often used kernel code in the L2"
-       depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
        default "y"
        help
          Enable locking parts of the kernel into the L2 cache.
@@ -93,7 +85,6 @@ config CAVIUM_OCTEON_LOCK_L2_MEMCPY
 config ARCH_SPARSEMEM_ENABLE
        def_bool y
        select SPARSEMEM_STATIC
-       depends on CPU_CAVIUM_OCTEON
 
 config CAVIUM_OCTEON_HELPER
        def_bool y
@@ -107,6 +98,8 @@ config NEED_SG_DMA_LENGTH
 
 config SWIOTLB
        def_bool y
-       depends on CPU_CAVIUM_OCTEON
        select IOMMU_HELPER
        select NEED_SG_DMA_LENGTH
+
+
+endif # CPU_CAVIUM_OCTEON
index 650ac9ba734c9bb254baec52f6d076b9ce53583f..b4db69fbc40ce212a557b235679ec0ebf2208060 100644 (file)
@@ -17,6 +17,6 @@
 #define SMP_CACHE_SHIFT                L1_CACHE_SHIFT
 #define SMP_CACHE_BYTES                L1_CACHE_BYTES
 
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 #endif /* _ASM_CACHE_H */
index fa4328f9124f7d539f4ecf3ec78caaec43def36a..65f9bdd02f1f787d35f03cc8a51f11c87704e1ee 100644 (file)
@@ -14,6 +14,9 @@
 #ifndef __ASM_CEVT_R4K_H
 #define __ASM_CEVT_R4K_H
 
+#include <linux/clockchips.h>
+#include <asm/time.h>
+
 DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
 
 void mips_event_handler(struct clock_event_device *dev);
index 655f849bd08d8767e81f6846ce205ce40e487980..7aa37ddfca4ba59b25603183a1d5bca23099fa9c 100644 (file)
@@ -5,7 +5,9 @@
 #include <asm/cache.h>
 #include <asm-generic/dma-coherent.h>
 
+#ifndef CONFIG_SGI_IP27        /* Kludge to fix 2.6.39 build for IP27 */
 #include <dma-coherence.h>
+#endif
 
 extern struct dma_map_ops *mips_dma_map_ops;
 
index f5e856015329cbeda2d8e33303f2baf6817ab804..c565b7c3f0b5b071833c751e4edddfb61aba84f4 100644 (file)
@@ -70,6 +70,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                         unsigned long addr, pte_t *ptep)
 {
+       flush_tlb_mm(vma->vm_mm);
 }
 
 static inline int huge_pte_none(pte_t pte)
index 32978d32561a035810cf4e8e90fb6516510c99c0..ed72e6a26b736d2686f0fc68f7e297be5c805250 100644 (file)
@@ -88,7 +88,7 @@ struct bcm_tag {
        char kernel_crc[CRC_LEN];
        /* 228-235: Unused at present */
        char reserved1[8];
-       /* 236-239: CRC32 of header excluding tagVersion */
+       /* 236-239: CRC32 of header excluding last 20 bytes */
        char header_crc[CRC_LEN];
        /* 240-255: Unused at present */
        char reserved2[16];
index 9ce9f64cb76fa8fce272100f00373694e8f6d4f7..2d8e447cb8281577ea2d7b01064c62f649f1d58c 100644 (file)
@@ -211,7 +211,7 @@ EXPORT_SYMBOL(vdma_free);
  */
 int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size)
 {
-       int first, pages, npages;
+       int first, pages;
 
        if (laddr > 0xffffff) {
                if (vdma_debug)
@@ -228,8 +228,7 @@ int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size)
                return -EINVAL; /* invalid physical address */
        }
 
-       npages = pages =
-           (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1;
+       pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1;
        first = laddr >> 12;
        if (vdma_debug)
                printk("vdma_remap: first=%x, pages=%x\n", first, pages);
index 5ebe75a68350a186e5d5b1a9965ac5a757092b3d..d7feb898692c5cf6c1997bcaab742d29e9a91bb2 100644 (file)
@@ -242,9 +242,7 @@ EXPORT_SYMBOL_GPL(jz4740_dma_get_residue);
 
 static void jz4740_dma_chan_irq(struct jz4740_dma_chan *dma)
 {
-       uint32_t status;
-
-       status = jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id));
+       (void) jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id));
 
        jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), 0,
                JZ_DMA_STATUS_CTRL_ENABLE | JZ_DMA_STATUS_CTRL_TRANSFER_DONE);
index fe01678d94fd4c439cd2353e3b74f57da6efebaf..eaa853a54af68e0ce573d5f27615adbff7223c00 100644 (file)
@@ -89,7 +89,7 @@ static int jz4740_clockevent_set_next(unsigned long evt,
 
 static struct clock_event_device jz4740_clockevent = {
        .name = "jz4740-timer",
-       .features = CLOCK_EVT_FEAT_PERIODIC,
+       .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
        .set_next_event = jz4740_clockevent_set_next,
        .set_mode = jz4740_clockevent_set_mode,
        .rating = 200,
index b2c01512905587fb775869396aeb5719b14a94a9..654d5c3900b66282a06c0a0a6a02383a957c3a72 100644 (file)
@@ -27,11 +27,13 @@ void jz4740_timer_enable_watchdog(void)
 {
        writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_CLEAR);
 }
+EXPORT_SYMBOL_GPL(jz4740_timer_enable_watchdog);
 
 void jz4740_timer_disable_watchdog(void)
 {
        writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_SET);
 }
+EXPORT_SYMBOL_GPL(jz4740_timer_disable_watchdog);
 
 void __init jz4740_timer_init(void)
 {
index 94ca2b018af70ff3191d4e44055311efc09e800d..feb8021a305f49a4e6f366183e27626316398ed3 100644 (file)
@@ -23,6 +23,7 @@
 
 #define JAL 0x0c000000         /* jump & link: ip --> ra, jump to target */
 #define ADDR_MASK 0x03ffffff   /*  op_code|addr : 31...26|25 ....0 */
+#define JUMP_RANGE_MASK ((1UL << 28) - 1)
 
 #define INSN_NOP 0x00000000    /* nop */
 #define INSN_JAL(addr) \
@@ -44,12 +45,12 @@ static inline void ftrace_dyn_arch_init_insns(void)
 
        /* jal (ftrace_caller + 8), jump over the first two instruction */
        buf = (u32 *)&insn_jal_ftrace_caller;
-       uasm_i_jal(&buf, (FTRACE_ADDR + 8));
+       uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* j ftrace_graph_caller */
        buf = (u32 *)&insn_j_ftrace_graph_caller;
-       uasm_i_j(&buf, (unsigned long)ftrace_graph_caller);
+       uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
 #endif
 }
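
The new JUMP_RANGE_MASK reflects how a MIPS j/jal encodes its target: only bits 27..2 fit in the instruction, so the destination must lie in the same 256 MB segment as the delay-slot PC. A sketch of the encoding, reusing the JAL and ADDR_MASK values defined in the hunk (the helper itself is only illustrative):

    #define JAL             0x0c000000
    #define ADDR_MASK       0x03ffffff
    #define JUMP_RANGE_MASK ((1UL << 28) - 1)

    static unsigned int mips_insn_jal(unsigned long addr)
    {
            /* keep only the in-segment 28 bits before building the instruction */
            addr &= JUMP_RANGE_MASK;
            return JAL | ((addr >> 2) & ADDR_MASK);
    }
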
 
index d21c388c0116f06abfee3ebef3d9764245d818ed..584e6b55c86583201cbd0b54ae0c2ba6ed12e381 100644 (file)
@@ -540,8 +540,8 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
                secure_computing(regs->regs[2]);
 
        if (unlikely(current->audit_context) && entryexit)
-               audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
-                                  regs->regs[2]);
+               audit_syscall_exit(AUDITSC_RESULT(regs->regs[7]),
+                                  -regs->regs[2]);
 
        if (!(current->ptrace & PT_PTRACED))
                goto out;
index 7f5468b38d4cb08e3b1b6abf7bce6b8733003b41..7f1377eb22d3cc63de31a4cc66ca419f8549ff27 100644 (file)
@@ -565,7 +565,7 @@ einval:     li      v0, -ENOSYS
        sys     sys_ioprio_get          2       /* 4315 */
        sys     sys_utimensat           4
        sys     sys_signalfd            3
-       sys     sys_ni_syscall          0
+       sys     sys_ni_syscall          0       /* was timerfd */
        sys     sys_eventfd             1
        sys     sys_fallocate           6       /* 4320 */
        sys     sys_timerfd_create      2
index a2e1fcbc41dce87add7a456301548fb1f38aaba6..7c0ef7f128bfb3a817d74e91b7ba70558821c657 100644 (file)
@@ -404,7 +404,7 @@ sys_call_table:
        PTR     sys_ioprio_get
        PTR     sys_utimensat                   /* 5275 */
        PTR     sys_signalfd
-       PTR     sys_ni_syscall
+       PTR     sys_ni_syscall                  /* was timerfd */
        PTR     sys_eventfd
        PTR     sys_fallocate
        PTR     sys_timerfd_create              /* 5280 */
index b2c7624995b8068e3d8ae21b7faa56c016225ec9..de6c5563beabb21bcfcfe76dcd2ca04b433fe8dd 100644 (file)
@@ -403,7 +403,7 @@ EXPORT(sysn32_call_table)
        PTR     sys_ioprio_get
        PTR     compat_sys_utimensat
        PTR     compat_sys_signalfd             /* 6280 */
-       PTR     sys_ni_syscall
+       PTR     sys_ni_syscall                  /* was timerfd */
        PTR     sys_eventfd
        PTR     sys_fallocate
        PTR     sys_timerfd_create
index 049a9c8c49a0fbf2f12055d4d98ad4e87d5ca8d0..b0541dda8830a349d8e296556bc47cb538f57b39 100644 (file)
@@ -522,7 +522,7 @@ sys_call_table:
        PTR     sys_ioprio_get                  /* 4315 */
        PTR     compat_sys_utimensat
        PTR     compat_sys_signalfd
-       PTR     sys_ni_syscall
+       PTR     sys_ni_syscall                  /* was timerfd */
        PTR     sys_eventfd
        PTR     sys32_fallocate                 /* 4320 */
        PTR     sys_timerfd_create
index 71350f7f2d8857507cc8da104c3dcee9a6a0a072..e9b3af27d844b5db837dc641bdd9c87ba326f9ba 100644 (file)
@@ -374,7 +374,8 @@ void __noreturn die(const char *str, struct pt_regs *regs)
        unsigned long dvpret = dvpe();
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-       notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV);
+       if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
+               sig = 0;
 
        console_verbose();
        spin_lock_irq(&die_lock);
@@ -383,9 +384,6 @@ void __noreturn die(const char *str, struct pt_regs *regs)
        mips_mt_regdump(dvpret);
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-       if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
-               sig = 0;
-
        printk("%s[#%d]:\n", str, ++die_counter);
        show_registers(regs);
        add_taint(TAINT_DIE);
index 832afbb87588bedcf2b8265495fd44b6874ccabb..e4b0b0bec039299746c41637b2d83a0f9ae693a2 100644 (file)
@@ -74,6 +74,7 @@ SECTIONS
                INIT_TASK_DATA(PAGE_SIZE)
                NOSAVE_DATA
                CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+               READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
                DATA_DATA
                CONSTRUCTORS
        }
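
With the section renamed to ".data..read_mostly" and now collected by READ_MOSTLY_DATA() in the linker script, the usual annotation works on MIPS as elsewhere in the kernel. A trivial, hypothetical use:

    #include <linux/cache.h>

    /* rarely-written tunable kept away from frequently-dirtied cache lines */
    static unsigned int poll_interval_ms __read_mostly = 100;
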
index 11b193f848f88b7aa1c5ab5a842c2cf4e80ed7d7..d93830ad6113619e9f50f4c368bf4cbefd083d2d 100644 (file)
@@ -29,9 +29,10 @@ unsigned long memsize, highmemsize;
 
 #define parse_even_earlier(res, option, p)                             \
 do {                                                                   \
-       int ret;                                                        \
+       unsigned int tmp __maybe_unused;                                \
+                                                                       \
        if (strncmp(option, (char *)p, strlen(option)) == 0)            \
-               ret = strict_strtol((char *)p + strlen(option"="), 10, &res); \
+               tmp = strict_strtol((char *)p + strlen(option"="), 10, &res); \
 } while (0)
 
 void __init prom_init_env(void)
index b4923a75cb4b8c12354a3e0821797349db4cd0ef..71bddf8f7d259affedba95b71ee18f2c448d12c9 100644 (file)
@@ -1075,7 +1075,6 @@ static int __cpuinit probe_scache(void)
        unsigned long flags, addr, begin, end, pow2;
        unsigned int config = read_c0_config();
        struct cpuinfo_mips *c = &current_cpu_data;
-       int tmp;
 
        if (config & CONF_SC)
                return 0;
@@ -1108,7 +1107,6 @@ static int __cpuinit probe_scache(void)
 
        /* Now search for the wrap around point. */
        pow2 = (128 * 1024);
-       tmp = 0;
        for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
                cache_op(Index_Load_Tag_SD, addr);
                __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
index 5ef294fbb6e7e118684c3913183cbcfb089dd0aa..f5734c2c809779364600d8364870f4b156ada1e6 100644 (file)
@@ -1151,8 +1151,8 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
        struct uasm_reloc *r = relocs;
        u32 *f;
        unsigned int final_len;
-       struct mips_huge_tlb_info htlb_info;
-       enum vmalloc64_mode vmalloc_mode;
+       struct mips_huge_tlb_info htlb_info __maybe_unused;
+       enum vmalloc64_mode vmalloc_mode __maybe_unused;
 
        memset(tlb_handler, 0, sizeof(tlb_handler));
        memset(labels, 0, sizeof(labels));
index 414f0c99b1965f01baf2f81a9858dec1a5e17f0a..31180c321a1a29b1f7c5d7a2c1b1a5f6188ee0ed 100644 (file)
@@ -193,8 +193,6 @@ extern struct plat_smp_ops msmtc_smp_ops;
 
 void __init prom_init(void)
 {
-       int result;
-
        prom_argc = fw_arg0;
        _prom_argv = (int *) fw_arg1;
        _prom_envp = (int *) fw_arg2;
@@ -360,20 +358,14 @@ void __init prom_init(void)
 #ifdef CONFIG_SERIAL_8250_CONSOLE
        console_config();
 #endif
-       /* Early detection of CMP support */
-       result = gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ);
-
 #ifdef CONFIG_MIPS_CMP
-       if (result)
+       /* Early detection of CMP support */
+       if (gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ))
                register_smp_ops(&cmp_smp_ops);
+       else
 #endif
 #ifdef CONFIG_MIPS_MT_SMP
-#ifdef CONFIG_MIPS_CMP
-       if (!result)
                register_smp_ops(&vsmp_smp_ops);
-#else
-       register_smp_ops(&vsmp_smp_ops);
-#endif
 #endif
 #ifdef CONFIG_MIPS_MT_SMTC
        register_smp_ops(&msmtc_smp_ops);
index 9027061f0ead0d8ba259681dacdee9da9330e1ce..e85c977328da5183b0206f1a17e8597ba8304979 100644 (file)
@@ -56,7 +56,6 @@ static DEFINE_RAW_SPINLOCK(mips_irq_lock);
 static inline int mips_pcibios_iack(void)
 {
        int irq;
-       u32 dummy;
 
        /*
         * Determine highest priority pending interrupt by performing
@@ -83,7 +82,7 @@ static inline int mips_pcibios_iack(void)
                BONITO_PCIMAP_CFG = 0x20000;
 
                /* Flush Bonito register block */
-               dummy = BONITO_PCIMAP_CFG;
+               (void) BONITO_PCIMAP_CFG;
                iob();    /* sync */
 
                irq = __raw_readl((u32 *)_pcictrl_bonito_pcicfg);
index f9b9dcdfa9ddc1c5db23066f9259339e8e2ccc47..98fd0099d964becd05c7e57fd5404be50c5773f3 100644 (file)
@@ -97,7 +97,7 @@ static int msp_per_irq_set_affinity(struct irq_data *d,
 
 static struct irq_chip msp_per_irq_controller = {
        .name = "MSP_PER",
-       .irq_enable = unmask_per_irq.
+       .irq_enable = unmask_per_irq,
        .irq_disable = mask_per_irq,
        .irq_ack = msp_per_irq_ack,
 #ifdef CONFIG_SMP
index dbb5c7b4b70fcebdd25229bf084470dd9d60d6ef..f8a751c032823f25f088530d53acb23fdd11d5e2 100644 (file)
@@ -35,7 +35,7 @@ LEAF(swsusp_arch_resume)
 0:
        PTR_L t1, PBE_ADDRESS(t0)   /* source */
        PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */
-       PTR_ADDIU t3, t1, PAGE_SIZE
+       PTR_ADDU t3, t1, PAGE_SIZE
 1:
        REG_L t8, (t1)
        REG_S t8, (t2)
index 37de05d595e725ebdc59513181d3f43709ce1cb4..6c47dfeb7be326bf2dc4c5a7a1cbb1984b87efec 100644 (file)
@@ -185,7 +185,7 @@ int __init rb532_gpio_init(void)
        struct resource *r;
 
        r = rb532_gpio_reg0_res;
-       rb532_gpio_chip->regbase = ioremap_nocache(r->start, r->end - r->start);
+       rb532_gpio_chip->regbase = ioremap_nocache(r->start, resource_size(r));
 
        if (!rb532_gpio_chip->regbase) {
                printk(KERN_ERR "rb532: cannot remap GPIO register 0\n");
index deddbf0ebe5c839175353b3f7f8cce68a594106c..698904daf901d17a921ae5c3c24effa85706fb6a 100644 (file)
@@ -132,7 +132,7 @@ static struct platform_device eth1_device = {
  */
 static int __init sgiseeq_devinit(void)
 {
-       unsigned int tmp;
+       unsigned int pbdma __maybe_unused;
        int res, i;
 
        eth0_pd.hpc = hpc3c0;
@@ -151,7 +151,7 @@ static int __init sgiseeq_devinit(void)
 
        /* Second HPC is missing? */
        if (ip22_is_fullhouse() ||
-           get_dbe(tmp, (unsigned int *)&hpc3c1->pbdma[1]))
+           get_dbe(pbdma, (unsigned int *)&hpc3c1->pbdma[1]))
                return 0;
 
        sgimc->giopar |= SGIMC_GIOPAR_MASTEREXP1 | SGIMC_GIOPAR_EXP164 |
index 603fc91c1030539b8bc33ad88902b240ea84a203..1a94c989418843e22267110bab6e43e6d1d2c937 100644 (file)
@@ -32,7 +32,7 @@
 static unsigned long dosample(void)
 {
        u32 ct0, ct1;
-       u8 msb, lsb;
+       u8 msb;
 
        /* Start the counter. */
        sgint->tcword = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL |
@@ -46,7 +46,7 @@ static unsigned long dosample(void)
        /* Latch and spin until top byte of counter2 is zero */
        do {
                writeb(SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT, &sgint->tcword);
-               lsb = readb(&sgint->tcnt2);
+               (void) readb(&sgint->tcnt2);
                msb = readb(&sgint->tcnt2);
                ct1 = read_c0_count();
        } while (msb);
index a1fa4abb3f6a719ad0122a0861f3b2d659d293a4..cd0d5b06cd8385dc9c73d89bcd0fa6691a78622e 100644 (file)
@@ -29,7 +29,6 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget,
                          unsigned long xtalk_addr, size_t size)
 {
        nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode);
-       volatile hubreg_t junk;
        unsigned i;
 
        /* use small-window mapping if possible */
@@ -64,7 +63,7 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget,
                 * after we write it.
                 */
                IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr);
-               junk = HUB_L(IIO_ITTE_GET(nasid, i));
+               (void) HUB_L(IIO_ITTE_GET(nasid, i));
 
                return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE);
        }
index c3d30a88daf3a1555f9a1495f058210479a3b0ce..1d1919a44e88c350685866d8420b24fa9e168dc9 100644 (file)
@@ -54,11 +54,8 @@ void __init setup_replication_mask(void)
 
 static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid)
 {
-       cnodeid_t client_cnode;
        kern_vars_t *kvp;
 
-       client_cnode = NASID_TO_COMPACT_NODEID(client_nasid);
-
        kvp = &hub_data(client_nasid)->kern_vars;
 
        KERN_VARS_ADDR(client_nasid) = (unsigned long)kvp;
index c76151b56568ee241e345fe779d3454df0179add..0904d4d30cb338d1bdd4beaaf136472b488f4657 100644 (file)
@@ -95,7 +95,7 @@ static void __init sni_a20r_timer_setup(void)
 static __init unsigned long dosample(void)
 {
        u32 ct0, ct1;
-       volatile u8 msb, lsb;
+       volatile u8 msb;
 
        /* Start the counter. */
        outb_p(0x34, 0x43);
@@ -108,7 +108,7 @@ static __init unsigned long dosample(void)
        /* Latch and spin until top byte of counter0 is zero */
        do {
                outb(0x00, 0x43);
-               lsb = inb(0x40);
+               (void) inb(0x40);
                msb = inb(0x40);
                ct1 = read_c0_count();
        } while (msb);
index 6b6dc20b0bebab9b1154e7cd5bab13ebd669c85a..bdf0563ba423886a5cde9ff2a63a47e8a3365604 100644 (file)
@@ -393,8 +393,8 @@ typedef struct fec {
        uint    fec_addr_low;           /* lower 32 bits of station address     */
        ushort  fec_addr_high;          /* upper 16 bits of station address     */
        ushort  res1;                   /* reserved                             */
-       uint    fec_hash_table_high;    /* upper 32-bits of hash table          */
-       uint    fec_hash_table_low;     /* lower 32-bits of hash table          */
+       uint    fec_grp_hash_table_high;        /* upper 32-bits of hash table          */
+       uint    fec_grp_hash_table_low; /* lower 32-bits of hash table          */
        uint    fec_r_des_start;        /* beginning of Rx descriptor ring      */
        uint    fec_x_des_start;        /* beginning of Tx descriptor ring      */
        uint    fec_r_buff_size;        /* Rx buffer size                       */
index 55613e33e263ac7abb3b1d478f4a27bb9912b86f..a6ae1cfad86ce86040553fa94346ad2043c3a0bb 100644 (file)
@@ -933,12 +933,16 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
        if (data && !(data & DABR_TRANSLATION))
                return -EIO;
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
+       if (ptrace_get_breakpoints(task) < 0)
+               return -ESRCH;
+
        bp = thread->ptrace_bps[0];
        if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) {
                if (bp) {
                        unregister_hw_breakpoint(bp);
                        thread->ptrace_bps[0] = NULL;
                }
+               ptrace_put_breakpoints(task);
                return 0;
        }
        if (bp) {
@@ -948,9 +952,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
                                        (DABR_DATA_WRITE | DABR_DATA_READ),
                                                        &attr.bp_type);
                ret =  modify_user_hw_breakpoint(bp, &attr);
-               if (ret)
+               if (ret) {
+                       ptrace_put_breakpoints(task);
                        return ret;
+               }
                thread->ptrace_bps[0] = bp;
+               ptrace_put_breakpoints(task);
                thread->dabr = data;
                return 0;
        }
@@ -965,9 +972,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
                                                        ptrace_triggered, task);
        if (IS_ERR(bp)) {
                thread->ptrace_bps[0] = NULL;
+               ptrace_put_breakpoints(task);
                return PTR_ERR(bp);
        }
 
+       ptrace_put_breakpoints(task);
+
 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
 
        /* Move contents to the DABR register */
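
The same acquire/release discipline appears again in the sh and x86 ptrace hunks further down: take a reference on the task's breakpoint state before touching thread->ptrace_bps[], and drop it on every exit path. A condensed sketch (do_something_with() is a placeholder, not a kernel function):

    static int touch_breakpoint(struct task_struct *task)
    {
            int ret;

            if (ptrace_get_breakpoints(task) < 0)
                    return -ESRCH;  /* task is exiting; the slots may be freed */

            ret = do_something_with(task->thread.ptrace_bps[0]);

            ptrace_put_breakpoints(task);
            return ret;
    }
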
index 188272934cfb3c6818754dae0260216b5c4ccb65..104faa8aa23c783da70b2dd97dac842ff8198c3c 100644 (file)
@@ -318,17 +318,20 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = {
        .end = mpc83xx_suspend_end,
 };
 
+static struct of_device_id pmc_match[];
 static int pmc_probe(struct platform_device *ofdev)
 {
+       const struct of_device_id *match;
        struct device_node *np = ofdev->dev.of_node;
        struct resource res;
        struct pmc_type *type;
        int ret = 0;
 
-       if (!ofdev->dev.of_match)
+       match = of_match_device(pmc_match, &ofdev->dev);
+       if (!match)
                return -EINVAL;
 
-       type = ofdev->dev.of_match->data;
+       type = match->data;
 
        if (!of_device_is_available(np))
                return -ENODEV;
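
The same conversion recurs in the fsl_msi, sabre and schizo hunks below: instead of relying on the of_match pointer cached in struct device, the probe routine looks the match table up itself with of_match_device(). A minimal sketch of the resulting shape (the table contents, foo_type and foo_init() are hypothetical):

    static const struct of_device_id foo_match[] = {
            { .compatible = "vendor,foo", .data = &foo_type },
            {}
    };

    static int foo_probe(struct platform_device *ofdev)
    {
            const struct of_device_id *match;

            match = of_match_device(foo_match, &ofdev->dev);
            if (!match)
                    return -EINVAL;

            /* match->data carries the per-variant data registered in the table */
            return foo_init(ofdev, match->data);
    }
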
index d5679dc1e20faddf7d5a20df6bc1374aee74e264..01cd2f089512149b7817199b7915677c6fa86b24 100644 (file)
@@ -304,8 +304,10 @@ static int __devinit fsl_msi_setup_hwirq(struct fsl_msi *msi,
        return 0;
 }
 
+static const struct of_device_id fsl_of_msi_ids[];
 static int __devinit fsl_of_msi_probe(struct platform_device *dev)
 {
+       const struct of_device_id *match;
        struct fsl_msi *msi;
        struct resource res;
        int err, i, j, irq_index, count;
@@ -316,9 +318,10 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
        u32 offset;
        static const u32 all_avail[] = { 0, NR_MSI_IRQS };
 
-       if (!dev->dev.of_match)
+       match = of_match_device(fsl_of_msi_ids, &dev->dev);
+       if (!match)
                return -EINVAL;
-       features = dev->dev.of_match->data;
+       features = match->data;
 
        printk(KERN_DEBUG "Setting up Freescale MSI support\n");
 
index 44bca3f994b0892bed53541e238e1d8bf9aeff58..8b16c479585b271db83330e68529634e5e5e4735 100644 (file)
@@ -76,7 +76,7 @@ static void prng_seed(int nbytes)
 
        /* Add the entropy */
        while (nbytes >= 8) {
-               *((__u64 *)parm_block) ^= *((__u64 *)buf+i);
+               *((__u64 *)parm_block) ^= *((__u64 *)(buf+i));
                prng_add_entropy();
                i += 8;
                nbytes -= 8;
index 72b2e2f2d32d823d36f4135702d67bde05a05d30..7e91c58072e259345610c3b8a17a93b48dc5d297 100644 (file)
@@ -9,9 +9,22 @@
 #define _ASM_S390_DIAG_H
 
 /*
- * Diagnose 10: Release pages
+ * Diagnose 10: Release page range
  */
-extern void diag10(unsigned long addr);
+static inline void diag10_range(unsigned long start_pfn, unsigned long num_pfn)
+{
+       unsigned long start_addr, end_addr;
+
+       start_addr = start_pfn << PAGE_SHIFT;
+       end_addr = (start_pfn + num_pfn - 1) << PAGE_SHIFT;
+
+       asm volatile(
+               "0:     diag    %0,%1,0x10\n"
+               "1:\n"
+               EX_TABLE(0b, 1b)
+               EX_TABLE(1b, 1b)
+               : : "a" (start_addr), "a" (end_addr));
+}
 
 /*
  * Diagnose 14: Input spool file manipulation
index a6f0e7cc9cde2f341fba3fef347ea93240bb9372..8c277caa8d3aa81d0d8f14c28b2e71f12ec3e791 100644 (file)
@@ -23,7 +23,7 @@ static inline int init_new_context(struct task_struct *tsk,
 #ifdef CONFIG_64BIT
        mm->context.asce_bits |= _ASCE_TYPE_REGION3;
 #endif
-       if (current->mm->context.alloc_pgste) {
+       if (current->mm && current->mm->context.alloc_pgste) {
                /*
                 * alloc_pgste indicates, that any NEW context will be created
                 * with extended page tables. The old context is unchanged. The
index c032d11da8a1e8df9a4556ee71fd333c7ae08b3b..8237fc07ac79ef27c2794c5367b9c0e05e37af6b 100644 (file)
@@ -8,27 +8,6 @@
 #include <linux/module.h>
 #include <asm/diag.h>
 
-/*
- * Diagnose 10: Release pages
- */
-void diag10(unsigned long addr)
-{
-       if (addr >= 0x7ff00000)
-               return;
-       asm volatile(
-#ifdef CONFIG_64BIT
-               "       sam31\n"
-               "       diag    %0,%0,0x10\n"
-               "0:     sam64\n"
-#else
-               "       diag    %0,%0,0x10\n"
-               "0:\n"
-#endif
-               EX_TABLE(0b, 0b)
-               : : "a" (addr));
-}
-EXPORT_SYMBOL(diag10);
-
 /*
  * Diagnose 14: Input spool file manipulation
  */
index c83726c9fe033d9f74ed9524c5a12e0722386142..3d4a78fc1adc3f6403eaf4ebc93f03211c5e1f86 100644 (file)
@@ -672,6 +672,7 @@ static struct insn opcode_b2[] = {
        { "rp", 0x77, INSTR_S_RD },
        { "stcke", 0x78, INSTR_S_RD },
        { "sacf", 0x79, INSTR_S_RD },
+       { "spp", 0x80, INSTR_S_RD },
        { "stsi", 0x7d, INSTR_S_RD },
        { "srnm", 0x99, INSTR_S_RD },
        { "stfpc", 0x9c, INSTR_S_RD },
index 648f64239a9d169d0f9f983d59a667f14fafea9d..1b67fc6ebdc25f1407e569f3114bc3cb4827fd72 100644 (file)
@@ -836,7 +836,7 @@ restart_base:
        stosm   __SF_EMPTY(%r15),0x04   # now we can turn dat on
        basr    %r14,0
        l       %r14,restart_addr-.(%r14)
-       br      %r14                    # branch to start_secondary
+       basr    %r14,%r14               # branch to start_secondary
 restart_addr:
        .long   start_secondary
        .align  8
index 9d3603d6c5118dac66f0c27f9bba3a9e1b305c7f..9fd864563499bb7e46f0cd85d4a03550b32a15ac 100644 (file)
@@ -841,7 +841,7 @@ restart_base:
        mvc     __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1)
        xc      __LC_STEAL_TIMER(8),__LC_STEAL_TIMER
        stosm   __SF_EMPTY(%r15),0x04   # now we can turn dat on
-       jg      start_secondary
+       brasl   %r14,start_secondary
        .align  8
 restart_vtime:
        .long   0x7fffffff,0xffffffff
index c66ffd8dbbb732416ad3d803d00ec02da6125555..1f1dba9dcf58f713211bbb6b3fc063171681ed70 100644 (file)
@@ -91,7 +91,7 @@ static long cmm_alloc_pages(long nr, long *counter,
                        } else
                                free_page((unsigned long) npa);
                }
-               diag10(addr);
+               diag10_range(addr >> PAGE_SHIFT, 1);
                pa->pages[pa->index++] = addr;
                (*counter)++;
                spin_unlock(&cmm_lock);
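
Callers that previously handed diag10() a byte address now express the range in page frames, as the call above does for a single page. For a page-aligned start_addr/size pair (both hypothetical locals) the conversion is simply:

    unsigned long start_pfn = start_addr >> PAGE_SHIFT;
    unsigned long num_pfn   = size >> PAGE_SHIFT;

    diag10_range(start_pfn, num_pfn);       /* release 'size' bytes at start_addr */
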
index 4cf85fef407c098554416757eecf58c869bdd448..ab988135e5c682a90370f998d5d72956fd40961c 100644 (file)
@@ -543,7 +543,6 @@ static void pfault_interrupt(unsigned int ext_int_code,
        struct task_struct *tsk;
        __u16 subcode;
 
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
        /*
         * Get the external interruption subcode & pfault
         * initial/completion signal bit. VM stores this 
@@ -553,6 +552,7 @@ static void pfault_interrupt(unsigned int ext_int_code,
        subcode = ext_int_code >> 16;
        if ((subcode & 0xff00) != __SUBCODE_MASK)
                return;
+       kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++;
 
        /*
         * Get the token (= address of the task structure of the affected task).
index 4952872d6f0ae1c05c58bd7d046fe24029aef5ac..33cbd373cce4f39e1181299b9098c2334fc231ec 100644 (file)
@@ -1021,20 +1021,14 @@ deallocate_exit:
        return rc;
 }
 
-long hwsampler_query_min_interval(void)
+unsigned long hwsampler_query_min_interval(void)
 {
-       if (min_sampler_rate)
-               return min_sampler_rate;
-       else
-               return -EINVAL;
+       return min_sampler_rate;
 }
 
-long hwsampler_query_max_interval(void)
+unsigned long hwsampler_query_max_interval(void)
 {
-       if (max_sampler_rate)
-               return max_sampler_rate;
-       else
-               return -EINVAL;
+       return max_sampler_rate;
 }
 
 unsigned long hwsampler_get_sample_overflow_count(unsigned int cpu)
index 8c72b59316b5de7b0f02b440404a057aaf768860..1912f3bb190cfbf9c7cf7218e84b2cd1555baf57 100644 (file)
@@ -102,8 +102,8 @@ int hwsampler_setup(void);
 int hwsampler_shutdown(void);
 int hwsampler_allocate(unsigned long sdbt, unsigned long sdb);
 int hwsampler_deallocate(void);
-long hwsampler_query_min_interval(void);
-long hwsampler_query_max_interval(void);
+unsigned long hwsampler_query_min_interval(void);
+unsigned long hwsampler_query_max_interval(void);
 int hwsampler_start_all(unsigned long interval);
 int hwsampler_stop_all(void);
 int hwsampler_deactivate(unsigned int cpu);
index c63d7e58352bb343db50a47bb0ea857fc5952f9b..5995e9bc72d9c291d57df38ab66c0a1929f43dd6 100644 (file)
@@ -145,15 +145,11 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
         * create hwsampler files only if hwsampler_setup() succeeds.
         */
        oprofile_min_interval = hwsampler_query_min_interval();
-       if (oprofile_min_interval < 0) {
-               oprofile_min_interval = 0;
+       if (oprofile_min_interval == 0)
                return -ENODEV;
-       }
        oprofile_max_interval = hwsampler_query_max_interval();
-       if (oprofile_max_interval < 0) {
-               oprofile_max_interval = 0;
+       if (oprofile_max_interval == 0)
                return -ENODEV;
-       }
 
        if (oprofile_timer_init(ops))
                return -ENODEV;
index 2130ca674e9bdd1674a22d339011a7f364bf0ec5..3d7b209b2178cc63c520975a35d4146c99042ba4 100644 (file)
@@ -117,7 +117,11 @@ void user_enable_single_step(struct task_struct *child)
 
        set_tsk_thread_flag(child, TIF_SINGLESTEP);
 
+       if (ptrace_get_breakpoints(child) < 0)
+               return;
+
        set_single_step(child, pc);
+       ptrace_put_breakpoints(child);
 }
 
 void user_disable_single_step(struct task_struct *child)
index f679c57644d5f44b6bc710e060d86ace2c73b4ee..1e34f29e58bb723537429dd0aacb9e0fe7df11b9 100644 (file)
@@ -165,7 +165,7 @@ static int __devinit apc_probe(struct platform_device *op)
        return 0;
 }
 
-static struct of_device_id __initdata apc_match[] = {
+static struct of_device_id apc_match[] = {
        {
                .name = APC_OBPNAME,
        },
index 948068a083fca3eab756401d6fb30e00e295e352..d1840dbdaa2fef0179e55c06b24830246e397fc0 100644 (file)
@@ -452,8 +452,10 @@ static void __devinit sabre_pbm_init(struct pci_pbm_info *pbm,
        sabre_scan_bus(pbm, &op->dev);
 }
 
+static const struct of_device_id sabre_match[];
 static int __devinit sabre_probe(struct platform_device *op)
 {
+       const struct of_device_id *match;
        const struct linux_prom64_registers *pr_regs;
        struct device_node *dp = op->dev.of_node;
        struct pci_pbm_info *pbm;
@@ -463,7 +465,8 @@ static int __devinit sabre_probe(struct platform_device *op)
        const u32 *vdma;
        u64 clear_irq;
 
-       hummingbird_p = op->dev.of_match && (op->dev.of_match->data != NULL);
+       match = of_match_device(sabre_match, &op->dev);
+       hummingbird_p = match && (match->data != NULL);
        if (!hummingbird_p) {
                struct device_node *cpu_dp;
 
index fecfcb2063c805a11760e515d549b40d4c322b3a..283fbc329a4397c9d8ff9b159611fe1bf62d4e8b 100644 (file)
@@ -1458,11 +1458,15 @@ out_err:
        return err;
 }
 
+static const struct of_device_id schizo_match[];
 static int __devinit schizo_probe(struct platform_device *op)
 {
-       if (!op->dev.of_match)
+       const struct of_device_id *match;
+
+       match = of_match_device(schizo_match, &op->dev);
+       if (!match)
                return -EINVAL;
-       return __schizo_init(op, (unsigned long) op->dev.of_match->data);
+       return __schizo_init(op, (unsigned long)match->data);
 }
 
 /* The ordering of this table is very important.  Some Tomatillo
index 93d7b4465f8d2493dc542d0e836ac255182e25f2..6a585d393580d38d651f5053e08bded4081c6bba 100644 (file)
@@ -69,7 +69,7 @@ static int __devinit pmc_probe(struct platform_device *op)
        return 0;
 }
 
-static struct of_device_id __initdata pmc_match[] = {
+static struct of_device_id pmc_match[] = {
        {
                .name = PMC_OBPNAME,
        },
index 91c10fb7085849c1de8a5d1f2b58df90336af61e..850a1360c0d6f1128d21b6225a55b9f2740fa45c 100644 (file)
@@ -53,6 +53,7 @@ cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 void __cpuinit smp_store_cpu_info(int id)
 {
        int cpu_node;
+       int mid;
 
        cpu_data(id).udelay_val = loops_per_jiffy;
 
@@ -60,10 +61,13 @@ void __cpuinit smp_store_cpu_info(int id)
        cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
                                                     "clock-frequency", 0);
        cpu_data(id).prom_node = cpu_node;
-       cpu_data(id).mid = cpu_get_hwmid(cpu_node);
+       mid = cpu_get_hwmid(cpu_node);
 
-       if (cpu_data(id).mid < 0)
-               panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
+       if (mid < 0) {
+               printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08d", id, cpu_node);
+               mid = 0;
+       }
+       cpu_data(id).mid = mid;
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
index 4e236391b6359ab59223034c61892c6b3fce7a28..96046a4024c23276e41769403a5155ec62ea2cd3 100644 (file)
@@ -168,7 +168,7 @@ static int __devinit clock_probe(struct platform_device *op)
        return 0;
 }
 
-static struct of_device_id __initdata clock_match[] = {
+static struct of_device_id clock_match[] = {
        {
                .name = "eeprom",
        },
index 3632cb34e914b241ed394b99092b6a5aa63601c0..0084c3361e15afa77530071ada7a6e30c3ece3b5 100644 (file)
@@ -289,10 +289,16 @@ cc_end_cruft:
 
        /* Also, handle the alignment code out of band. */
 cc_dword_align:
-       cmp     %g1, 6
-       bl,a    ccte
+       cmp     %g1, 16
+       bge     1f
+        srl    %g1, 1, %o3
+2:     cmp     %o3, 0
+       be,a    ccte
         andcc  %g1, 0xf, %o3
-       andcc   %o0, 0x1, %g0
+       andcc   %o3, %o0, %g0   ! Check %o0 only (%o1 has the same last 2 bits)
+       be,a    2b
+        srl    %o3, 1, %o3
+1:     andcc   %o0, 0x1, %g0
        bne     ccslow
         andcc  %o0, 0x2, %g0
        be      1f
index 6ea77979531c95e1bf005eb0cc3e4a87fa6c913d..42827cafa6af3ac72f4c57d62e08968b81360faa 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <stdio.h>
 #include <stdlib.h>
+#include <unistd.h>
 #include <errno.h>
 #include <signal.h>
 #include <string.h>
@@ -75,6 +76,26 @@ void setup_hostinfo(char *buf, int len)
                 host.release, host.version, host.machine);
 }
 
+/*
+ * We cannot use glibc's abort(). It makes use of tgkill() which
+ * has no effect within UML's kernel threads.
+ * After that glibc would execute an invalid instruction to kill
+ * the calling process and UML crashes with SIGSEGV.
+ */
+static inline void __attribute__ ((noreturn)) uml_abort(void)
+{
+       sigset_t sig;
+
+       fflush(NULL);
+
+       if (!sigemptyset(&sig) && !sigaddset(&sig, SIGABRT))
+               sigprocmask(SIG_UNBLOCK, &sig, 0);
+
+       for (;;)
+               if (kill(getpid(), SIGABRT) < 0)
+                       exit(127);
+}
+
 void os_dump_core(void)
 {
        int pid;
@@ -116,5 +137,5 @@ void os_dump_core(void)
        while ((pid = waitpid(-1, NULL, WNOHANG | __WALL)) > 0)
                os_kill_ptraced_process(pid, 0);
 
-       abort();
+       uml_abort();
 }
index d87988bacf3ec497a6a5db9ee1e2b54214b4cdf2..34595d5e1038615eeac0ac943a15e9bcf9f4e0d9 100644 (file)
@@ -78,6 +78,7 @@
 #define                APIC_DEST_LOGICAL       0x00800
 #define                APIC_DEST_PHYSICAL      0x00000
 #define                APIC_DM_FIXED           0x00000
+#define                APIC_DM_FIXED_MASK      0x00700
 #define                APIC_DM_LOWEST          0x00100
 #define                APIC_DM_SMI             0x00200
 #define                APIC_DM_REMRD           0x00300
index 7db7723d1f325f8016666806f790a015a4005c6b..d56187c6b8385425bb893f8d70003cb70290243a 100644 (file)
@@ -299,6 +299,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 /* Install a pte for a particular vaddr in kernel space. */
 void set_pte_vaddr(unsigned long vaddr, pte_t pte);
 
+extern void native_pagetable_reserve(u64 start, u64 end);
 #ifdef CONFIG_X86_32
 extern void native_pagetable_setup_start(pgd_t *base);
 extern void native_pagetable_setup_done(pgd_t *base);
index 3e094af443c396a6ccf9f08628ecafc2a5d974ab..130f1eeee5fed416bb41ed4fa54a97bdcc31a601 100644 (file)
@@ -94,6 +94,8 @@
 /* after this # consecutive successes, bump up the throttle if it was lowered */
 #define COMPLETE_THRESHOLD 5
 
+#define UV_LB_SUBNODEID 0x10
+
 /*
  * number of entries in the destination side payload queue
  */
  * The distribution specification (32 bytes) is interpreted as a 256-bit
  * distribution vector. Adjacent bits correspond to consecutive even numbered
  * nodeIDs. The result of adding the index of a given bit to the 15-bit
- * 'base_dest_nodeid' field of the header corresponds to the
+ * 'base_dest_nasid' field of the header corresponds to the
  * destination nodeID associated with that specified bit.
  */
 struct bau_target_uvhubmask {
@@ -176,7 +178,7 @@ struct bau_msg_payload {
 struct bau_msg_header {
        unsigned int dest_subnodeid:6;  /* must be 0x10, for the LB */
        /* bits 5:0 */
-       unsigned int base_dest_nodeid:15; /* nasid of the */
+       unsigned int base_dest_nasid:15; /* nasid of the */
        /* bits 20:6 */                   /* first bit in uvhub map */
        unsigned int command:8; /* message type */
        /* bits 28:21 */
@@ -378,6 +380,10 @@ struct ptc_stats {
        unsigned long d_rcanceled; /* number of messages canceled by resets */
 };
 
+struct hub_and_pnode {
+       short uvhub;
+       short pnode;
+};
 /*
  * one per-cpu; to locate the software tables
  */
@@ -399,10 +405,12 @@ struct bau_control {
        int baudisabled;
        int set_bau_off;
        short cpu;
+       short osnode;
        short uvhub_cpu;
        short uvhub;
        short cpus_in_socket;
        short cpus_in_uvhub;
+       short partition_base_pnode;
        unsigned short message_number;
        unsigned short uvhub_quiesce;
        short socket_acknowledge_count[DEST_Q_SIZE];
@@ -422,15 +430,16 @@ struct bau_control {
        int congested_period;
        cycles_t period_time;
        long period_requests;
+       struct hub_and_pnode *target_hub_and_pnode;
 };
 
 static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp)
 {
        return constant_test_bit(uvhub, &dstp->bits[0]);
 }
-static inline void bau_uvhub_set(int uvhub, struct bau_target_uvhubmask *dstp)
+static inline void bau_uvhub_set(int pnode, struct bau_target_uvhubmask *dstp)
 {
-       __set_bit(uvhub, &dstp->bits[0]);
+       __set_bit(pnode, &dstp->bits[0]);
 }
 static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp,
                                    int nbits)
index a501741c2335a854fa43d14f73761d8bf8f9c82e..4298002d0c83a9c09c19c725c22a33e43921d128 100644 (file)
@@ -398,6 +398,8 @@ struct uv_blade_info {
        unsigned short  nr_online_cpus;
        unsigned short  pnode;
        short           memory_nid;
+       spinlock_t      nmi_lock;
+       unsigned long   nmi_count;
 };
 extern struct uv_blade_info *uv_blade_info;
 extern short *uv_node_to_blade;
index 20cafeac7455594d73a3bc6837da0a34fcdee3eb..f5bb64a823d77a1d3ebf97d40e80e913c83e0d31 100644 (file)
@@ -5,7 +5,7 @@
  *
  * SGI UV MMR definitions
  *
- * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2011 Silicon Graphics, Inc. All rights reserved.
  */
 
 #ifndef _ASM_X86_UV_UV_MMRS_H
@@ -1099,5 +1099,19 @@ union uvh_rtc1_int_config_u {
     } s;
 };
 
+/* ========================================================================= */
+/*                               UVH_SCRATCH5                                */
+/* ========================================================================= */
+#define UVH_SCRATCH5 0x2d0200UL
+#define UVH_SCRATCH5_32 0x00778
+
+#define UVH_SCRATCH5_SCRATCH5_SHFT 0
+#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
+union uvh_scratch5_u {
+    unsigned long      v;
+    struct uvh_scratch5_s {
+       unsigned long   scratch5 : 64;  /* RW, W1CS */
+    } s;
+};
 
 #endif /* __ASM_UV_MMRS_X86_H__ */
index 643ebf2e2ad8733ddbec5ca884ee21bf405c0226..d3d859035af9e1968d0ad9d4a39000331c27c8f1 100644 (file)
@@ -67,6 +67,17 @@ struct x86_init_oem {
        void (*banner)(void);
 };
 
+/**
+ * struct x86_init_mapping - platform specific initial kernel pagetable setup
+ * @pagetable_reserve: reserve a range of addresses for kernel pagetable usage
+ *
+ * For more details on the purpose of this hook, look in
+ * init_memory_mapping and the commit that added it.
+ */
+struct x86_init_mapping {
+       void (*pagetable_reserve)(u64 start, u64 end);
+};
+
 /**
  * struct x86_init_paging - platform specific paging functions
  * @pagetable_setup_start:     platform specific pre paging_init() call
@@ -123,6 +134,7 @@ struct x86_init_ops {
        struct x86_init_mpparse         mpparse;
        struct x86_init_irqs            irqs;
        struct x86_init_oem             oem;
+       struct x86_init_mapping         mapping;
        struct x86_init_paging          paging;
        struct x86_init_timers          timers;
        struct x86_init_iommu           iommu;
index 33b10a0fc095b9d458f851b20e4c5dda37cfc9ed..7acd2d2ac965f9932f39986ab1fe23361c6de90e 100644 (file)
 #include <asm/smp.h>
 #include <asm/x86_init.h>
 #include <asm/emergency-restart.h>
+#include <asm/nmi.h>
+
+/* The BMC sets a bit in this MMR to a non-zero value before sending an NMI */
+#define UVH_NMI_MMR                            UVH_SCRATCH5
+#define UVH_NMI_MMR_CLEAR                      (UVH_NMI_MMR + 8)
+#define UV_NMI_PENDING_MASK                    (1UL << 63)
+DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count);
 
 DEFINE_PER_CPU(int, x2apic_extra_bits);
 
@@ -642,18 +649,46 @@ void __cpuinit uv_cpu_init(void)
  */
 int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
 {
+       unsigned long real_uv_nmi;
+       int bid;
+
        if (reason != DIE_NMIUNKNOWN)
                return NOTIFY_OK;
 
        if (in_crash_kexec)
                /* do nothing if entering the crash kernel */
                return NOTIFY_OK;
+
        /*
-        * Use a lock so only one cpu prints at a time
-        * to prevent intermixed output.
+        * Each blade has an MMR that indicates when an NMI has been sent
+        * to cpus on the blade. If an NMI is detected, atomically
+        * clear the MMR and update a per-blade NMI count used to
+        * cause each cpu on the blade to notice a new NMI.
+        */
+       bid = uv_numa_blade_id();
+       real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
+
+       if (unlikely(real_uv_nmi)) {
+               spin_lock(&uv_blade_info[bid].nmi_lock);
+               real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
+               if (real_uv_nmi) {
+                       uv_blade_info[bid].nmi_count++;
+                       uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK);
+               }
+               spin_unlock(&uv_blade_info[bid].nmi_lock);
+       }
+
+       if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
+               return NOTIFY_DONE;
+
+       __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;
+
+       /*
+        * Use a lock so only one cpu prints at a time.
+        * This prevents intermixed output.
         */
        spin_lock(&uv_nmi_lock);
-       pr_info("NMI stack dump cpu %u:\n", smp_processor_id());
+       pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
        dump_stack();
        spin_unlock(&uv_nmi_lock);
 
@@ -661,7 +696,8 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
 }
 
 static struct notifier_block uv_dump_stack_nmi_nb = {
-       .notifier_call  = uv_handle_nmi
+       .notifier_call  = uv_handle_nmi,
+       .priority = NMI_LOCAL_LOW_PRIOR - 1,
 };
 
 void uv_register_nmi_notifier(void)
@@ -720,8 +756,9 @@ void __init uv_system_init(void)
        printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
 
        bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
-       uv_blade_info = kmalloc(bytes, GFP_KERNEL);
+       uv_blade_info = kzalloc(bytes, GFP_KERNEL);
        BUG_ON(!uv_blade_info);
+
        for (blade = 0; blade < uv_num_possible_blades(); blade++)
                uv_blade_info[blade].memory_nid = -1;
 
@@ -747,6 +784,7 @@ void __init uv_system_init(void)
                        uv_blade_info[blade].pnode = pnode;
                        uv_blade_info[blade].nr_possible_cpus = 0;
                        uv_blade_info[blade].nr_online_cpus = 0;
+                       spin_lock_init(&uv_blade_info[blade].nmi_lock);
                        max_pnode = max(pnode, max_pnode);
                        blade++;
                }
index 3532d3bf81050ef145a861b09c294980bf1674c3..6f9d1f6063e9c62841d1a3148d015ed009ef02f3 100644 (file)
@@ -613,7 +613,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 #endif
 
        /* As a rule processors have APIC timer running in deep C states */
-       if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400))
+       if (c->x86 > 0xf && !cpu_has_amd_erratum(amd_erratum_400))
                set_cpu_cap(c, X86_FEATURE_ARAT);
 
        /*
index 167f97b5596e2f6e6e01df1f0b1096cff3392b2f..bb0adad3514339d5a2f25d471af5f568f9b4d03e 100644 (file)
@@ -509,6 +509,7 @@ recurse:
 out_free:
        if (b) {
                kobject_put(&b->kobj);
+               list_del(&b->miscj);
                kfree(b);
        }
        return err;
index 6f8c5e9da97f0f3e4c0ac89557a3b2f1c3a5f01e..0f034460260d5d7eb4ceb1d1b923c6de8393f2bf 100644 (file)
@@ -446,18 +446,20 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
         */
        rdmsr(MSR_IA32_MISC_ENABLE, l, h);
 
+       h = lvtthmr_init;
        /*
         * The initial value of thermal LVT entries on all APs always reads
         * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI
         * sequence to them and LVT registers are reset to 0s except for
         * the mask bits which are set to 1s when APs receive INIT IPI.
-        * Always restore the value that BIOS has programmed on AP based on
-        * BSP's info we saved since BIOS is always setting the same value
-        * for all threads/cores
+        * If BIOS takes over the thermal interrupt and sets its interrupt
+        * delivery mode to SMI (not fixed), it restores the value that the
+        * BIOS has programmed on AP based on BSP's info we saved since BIOS
+        * is always setting the same value for all threads/cores.
         */
-       apic_write(APIC_LVTTHMR, lvtthmr_init);
+       if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED)
+               apic_write(APIC_LVTTHMR, lvtthmr_init);
 
-       h = lvtthmr_init;
 
        if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
                printk(KERN_DEBUG
index e61539b07d2c25f5f7e81df9947f543e8138e244..447a28de6f097784aba7591f1856225671748084 100644 (file)
@@ -184,26 +184,23 @@ static __initconst const u64 snb_hw_cache_event_ids
        },
  },
  [ C(LL  ) ] = {
-       /*
-        * TBD: Need Off-core Response Performance Monitoring support
-        */
        [ C(OP_READ) ] = {
-               /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */
+               /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
-               /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */
-               [ C(RESULT_MISS)   ] = 0x01bb,
+               /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
+               [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_WRITE) ] = {
-               /* OFFCORE_RESPONSE_0.ANY_RFO.LOCAL_CACHE */
+               /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
-               /* OFFCORE_RESPONSE_1.ANY_RFO.ANY_LLC_MISS */
-               [ C(RESULT_MISS)   ] = 0x01bb,
+               /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
+               [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_PREFETCH) ] = {
-               /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */
+               /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
-               /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */
-               [ C(RESULT_MISS)   ] = 0x01bb,
+               /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
+               [ C(RESULT_MISS)   ] = 0x01b7,
        },
  },
  [ C(DTLB) ] = {
@@ -285,26 +282,26 @@ static __initconst const u64 westmere_hw_cache_event_ids
  },
  [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
-               /* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */
+               /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
-               /* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */
-               [ C(RESULT_MISS)   ] = 0x01bb,
+               /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
+               [ C(RESULT_MISS)   ] = 0x01b7,
        },
        /*
         * Use RFO, not WRITEBACK, because a write miss would typically occur
         * on RFO.
         */
        [ C(OP_WRITE) ] = {
-               /* OFFCORE_RESPONSE_1.ANY_RFO.LOCAL_CACHE */
-               [ C(RESULT_ACCESS) ] = 0x01bb,
-               /* OFFCORE_RESPONSE_0.ANY_RFO.ANY_LLC_MISS */
+               /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
+               [ C(RESULT_ACCESS) ] = 0x01b7,
+               /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
                [ C(RESULT_MISS)   ] = 0x01b7,
        },
        [ C(OP_PREFETCH) ] = {
-               /* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */
+               /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
-               /* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */
-               [ C(RESULT_MISS)   ] = 0x01bb,
+               /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
+               [ C(RESULT_MISS)   ] = 0x01b7,
        },
  },
  [ C(DTLB) ] = {
@@ -352,16 +349,36 @@ static __initconst const u64 westmere_hw_cache_event_ids
 };
 
 /*
- * OFFCORE_RESPONSE MSR bits (subset), See IA32 SDM Vol 3 30.6.1.3
+ * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
+ * See IA32 SDM Vol 3B 30.6.1.3
  */
 
-#define DMND_DATA_RD     (1 << 0)
-#define DMND_RFO         (1 << 1)
-#define DMND_WB          (1 << 3)
-#define PF_DATA_RD       (1 << 4)
-#define PF_DATA_RFO      (1 << 5)
-#define RESP_UNCORE_HIT  (1 << 8)
-#define RESP_MISS        (0xf600) /* non uncore hit */
+#define NHM_DMND_DATA_RD       (1 << 0)
+#define NHM_DMND_RFO           (1 << 1)
+#define NHM_DMND_IFETCH                (1 << 2)
+#define NHM_DMND_WB            (1 << 3)
+#define NHM_PF_DATA_RD         (1 << 4)
+#define NHM_PF_DATA_RFO                (1 << 5)
+#define NHM_PF_IFETCH          (1 << 6)
+#define NHM_OFFCORE_OTHER      (1 << 7)
+#define NHM_UNCORE_HIT         (1 << 8)
+#define NHM_OTHER_CORE_HIT_SNP (1 << 9)
+#define NHM_OTHER_CORE_HITM    (1 << 10)
+                               /* reserved */
+#define NHM_REMOTE_CACHE_FWD   (1 << 12)
+#define NHM_REMOTE_DRAM                (1 << 13)
+#define NHM_LOCAL_DRAM         (1 << 14)
+#define NHM_NON_DRAM           (1 << 15)
+
+#define NHM_ALL_DRAM           (NHM_REMOTE_DRAM|NHM_LOCAL_DRAM)
+
+#define NHM_DMND_READ          (NHM_DMND_DATA_RD)
+#define NHM_DMND_WRITE         (NHM_DMND_RFO|NHM_DMND_WB)
+#define NHM_DMND_PREFETCH      (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
+
+#define NHM_L3_HIT     (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
+#define NHM_L3_MISS    (NHM_NON_DRAM|NHM_ALL_DRAM|NHM_REMOTE_CACHE_FWD)
+#define NHM_L3_ACCESS  (NHM_L3_HIT|NHM_L3_MISS)
 
 static __initconst const u64 nehalem_hw_cache_extra_regs
                                [PERF_COUNT_HW_CACHE_MAX]
@@ -370,16 +387,16 @@ static __initconst const u64 nehalem_hw_cache_extra_regs
 {
  [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = DMND_DATA_RD|RESP_UNCORE_HIT,
-               [ C(RESULT_MISS)   ] = DMND_DATA_RD|RESP_MISS,
+               [ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
+               [ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
        },
        [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = DMND_RFO|DMND_WB|RESP_UNCORE_HIT,
-               [ C(RESULT_MISS)   ] = DMND_RFO|DMND_WB|RESP_MISS,
+               [ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
+               [ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
        },
        [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_UNCORE_HIT,
-               [ C(RESULT_MISS)   ] = PF_DATA_RD|PF_DATA_RFO|RESP_MISS,
+               [ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
+               [ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
        },
  }
 };
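The tables above switch every LLC access/miss entry to the same event select, 0x01b7, and let the per-event extra register pick what is counted. As a minimal user-space sketch (not kernel code) of how the NHM_* bits compose into those MSR_OFFCORE_RESPONSE masks, assuming only the values defined in this hunk:

/* Standalone illustration: compose the offcore-response masks used above. */
#include <stdio.h>

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_ALL_DRAM		(NHM_REMOTE_DRAM | NHM_LOCAL_DRAM)
#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO | NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD | NHM_PF_DATA_RFO)
#define NHM_L3_HIT	(NHM_UNCORE_HIT | NHM_OTHER_CORE_HIT_SNP | NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM | NHM_ALL_DRAM | NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT | NHM_L3_MISS)

int main(void)
{
	/* The event select stays 0x01b7; only the extra-reg mask differs. */
	printf("LL read  access: event 0x01b7, extra 0x%04x\n", NHM_DMND_READ | NHM_L3_ACCESS);
	printf("LL read  miss  : event 0x01b7, extra 0x%04x\n", NHM_DMND_READ | NHM_L3_MISS);
	printf("LL write access: event 0x01b7, extra 0x%04x\n", NHM_DMND_WRITE | NHM_L3_ACCESS);
	printf("LL write miss  : event 0x01b7, extra 0x%04x\n", NHM_DMND_WRITE | NHM_L3_MISS);
	printf("LL pref  access: event 0x01b7, extra 0x%04x\n", NHM_DMND_PREFETCH | NHM_L3_ACCESS);
	printf("LL pref  miss  : event 0x01b7, extra 0x%04x\n", NHM_DMND_PREFETCH | NHM_L3_MISS);
	return 0;
}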
index c969fd9d156651c9d0cc6dae1a606bb091cbcde6..f1a6244d7d93769c35d219b79de6358d8a4096e3 100644 (file)
@@ -1183,12 +1183,13 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
                                         struct pt_regs *regs)
 {
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+       unsigned long flags;
 
        /* This is possible if op is under delayed unoptimizing */
        if (kprobe_disabled(&op->kp))
                return;
 
-       preempt_disable();
+       local_irq_save(flags);
        if (kprobe_running()) {
                kprobes_inc_nmissed_count(&op->kp);
        } else {
@@ -1207,7 +1208,7 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
                opt_pre_handler(&op->kp, regs);
                __this_cpu_write(current_kprobe, NULL);
        }
-       preempt_enable_no_resched();
+       local_irq_restore(flags);
 }
 
 static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
index 45892dc4b72a37f01627725db2654591fc4dad6b..f65e5b521dbd4b3d28c8dc30faecec02073e56fb 100644 (file)
@@ -608,6 +608,9 @@ static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
        unsigned len, type;
        struct perf_event *bp;
 
+       if (ptrace_get_breakpoints(tsk) < 0)
+               return -ESRCH;
+
        data &= ~DR_CONTROL_RESERVED;
        old_dr7 = ptrace_get_dr7(thread->ptrace_bps);
 restore:
@@ -655,6 +658,9 @@ restore:
                }
                goto restore;
        }
+
+       ptrace_put_breakpoints(tsk);
+
        return ((orig_ret < 0) ? orig_ret : rc);
 }
 
@@ -668,10 +674,17 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
 
        if (n < HBP_NUM) {
                struct perf_event *bp;
+
+               if (ptrace_get_breakpoints(tsk) < 0)
+                       return -ESRCH;
+
                bp = thread->ptrace_bps[n];
                if (!bp)
-                       return 0;
-               val = bp->hw.info.address;
+                       val = 0;
+               else
+                       val = bp->hw.info.address;
+
+               ptrace_put_breakpoints(tsk);
        } else if (n == 6) {
                val = thread->debugreg6;
         } else if (n == 7) {
@@ -686,6 +699,10 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
        struct perf_event *bp;
        struct thread_struct *t = &tsk->thread;
        struct perf_event_attr attr;
+       int err = 0;
+
+       if (ptrace_get_breakpoints(tsk) < 0)
+               return -ESRCH;
 
        if (!t->ptrace_bps[nr]) {
                ptrace_breakpoint_init(&attr);
@@ -709,24 +726,23 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
                 * writing for the user. And anyway this is the previous
                 * behaviour.
                 */
-               if (IS_ERR(bp))
-                       return PTR_ERR(bp);
+               if (IS_ERR(bp)) {
+                       err = PTR_ERR(bp);
+                       goto put;
+               }
 
                t->ptrace_bps[nr] = bp;
        } else {
-               int err;
-
                bp = t->ptrace_bps[nr];
 
                attr = bp->attr;
                attr.bp_addr = addr;
                err = modify_user_hw_breakpoint(bp, &attr);
-               if (err)
-                       return err;
        }
 
-
-       return 0;
+put:
+       ptrace_put_breakpoints(tsk);
+       return err;
 }
 
 /*
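The ptrace hunks above wrap each debug-register accessor in a ptrace_get_breakpoints()/ptrace_put_breakpoints() pair so the breakpoint objects cannot vanish mid-access, and they convert early returns into a single "goto put" exit. A minimal user-space sketch of that get/put-with-one-exit shape, using invented helper names rather than the kernel API:

/* Sketch: guard a shared resource with get/put and a single exit path. */
#include <stdio.h>

static int refcount;

static int resource_get(void)
{
	/* In the kernel this can fail, e.g. -ESRCH if the task is exiting. */
	refcount++;
	return 0;
}

static void resource_put(void)
{
	refcount--;
}

static int update_resource(int value)
{
	int err = 0;

	if (resource_get() < 0)
		return -1;		/* nothing held yet, plain return is fine */

	if (value < 0) {
		err = -1;		/* invalid input */
		goto put;
	}

	printf("applying value %d (refcount %d)\n", value, refcount);
put:
	resource_put();			/* always balances the get above */
	return err;
}

int main(void)
{
	update_resource(3);
	update_resource(-1);
	return 0;
}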
index 29092b38d816ee2577e5e89fb74cc023dbafc411..1d5c46df0d787abc497a1d025a9a8fd2cd347026 100644 (file)
@@ -21,26 +21,26 @@ r_base = .
        /* Get our own relocated address */
        call    1f
 1:     popl    %ebx
-       subl    $1b, %ebx
+       subl    $(1b - r_base), %ebx
 
        /* Compute the equivalent real-mode segment */
        movl    %ebx, %ecx
        shrl    $4, %ecx
        
        /* Patch post-real-mode segment jump */
-       movw    dispatch_table(%ebx,%eax,2),%ax
-       movw    %ax, 101f(%ebx)
-       movw    %cx, 102f(%ebx)
+       movw    (dispatch_table - r_base)(%ebx,%eax,2),%ax
+       movw    %ax, (101f - r_base)(%ebx)
+       movw    %cx, (102f - r_base)(%ebx)
 
        /* Set up the IDT for real mode. */
-       lidtl   machine_real_restart_idt(%ebx)
+       lidtl   (machine_real_restart_idt - r_base)(%ebx)
 
        /*
         * Set up a GDT from which we can load segment descriptors for real
         * mode.  The GDT is not used in real mode; it is just needed here to
         * prepare the descriptors.
         */
-       lgdtl   machine_real_restart_gdt(%ebx)
+       lgdtl   (machine_real_restart_gdt - r_base)(%ebx)
 
        /*
         * Load the data segment registers with 16-bit compatible values
index c11514e9128b53cf0b6ad66d844c7703a6a8523a..75ef4b18e9b7963280c808bd59d1abe5579673ca 100644 (file)
@@ -61,6 +61,10 @@ struct x86_init_ops x86_init __initdata = {
                .banner                 = default_banner,
        },
 
+       .mapping = {
+               .pagetable_reserve              = native_pagetable_reserve,
+       },
+
        .paging = {
                .pagetable_setup_start  = native_pagetable_setup_start,
                .pagetable_setup_done   = native_pagetable_setup_done,
index 286d289b039b876ef046565d29552827d76f243c..37b8b0fe8320952c89158d29a508568affbd8c8e 100644 (file)
@@ -81,6 +81,11 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
                end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
 }
 
+void __init native_pagetable_reserve(u64 start, u64 end)
+{
+       memblock_x86_reserve_range(start, end, "PGTABLE");
+}
+
 struct map_range {
        unsigned long start;
        unsigned long end;
@@ -272,9 +277,24 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 
        __flush_tlb_all();
 
+       /*
+        * Reserve the kernel pagetable pages we used (pgt_buf_start -
+        * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
+        * so that they can be reused for other purposes.
+        *
+        * On native it just means calling memblock_x86_reserve_range, on Xen it
+        * also means marking RW the pagetable pages that we allocated before
+        * but that haven't been used.
+        *
+        * In fact on xen we mark RO the whole range pgt_buf_start -
+        * pgt_buf_top, because we have to make sure that when
+        * init_memory_mapping reaches the pagetable pages area, it maps
+        * RO all the pagetable pages, including the ones that are beyond
+        * pgt_buf_end at that time.
+        */
        if (!after_bootmem && pgt_buf_end > pgt_buf_start)
-               memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT,
-                                pgt_buf_end << PAGE_SHIFT, "PGTABLE");
+               x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start),
+                               PFN_PHYS(pgt_buf_end));
 
        if (!after_bootmem)
                early_memtest(start, end);
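The new x86_init.mapping.pagetable_reserve member follows the usual x86_init pattern: a function pointer defaulting to the native implementation, which a paravirtualized platform (Xen, further down in this commit) overrides before init_memory_mapping() calls it. A self-contained sketch of that ops-table pattern, with made-up names and addresses:

/* Sketch: an ops table with a native default and a platform override. */
#include <stdio.h>
#include <stdint.h>

struct mapping_ops {
	void (*pagetable_reserve)(uint64_t start, uint64_t end);
};

static void native_reserve(uint64_t start, uint64_t end)
{
	printf("native: reserve [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
}

static void xen_like_reserve(uint64_t start, uint64_t end)
{
	native_reserve(start, end);	/* still reserve the range actually used */
	printf("xen-like: mark [%#llx, top) read-write again\n",
	       (unsigned long long)end);
}

static struct mapping_ops ops = { .pagetable_reserve = native_reserve };

int main(void)
{
	ops.pagetable_reserve(0x1000, 0x5000);		/* bare metal default */

	ops.pagetable_reserve = xen_like_reserve;	/* platform init hook */
	ops.pagetable_reserve(0x1000, 0x5000);		/* paravirtualized guest */
	return 0;
}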
index e8c00cc72033cbf4ee5e4f44a3a59a10386596fe..85b52fc03084c57601fe619d7f6fb126d7a177fd 100644 (file)
@@ -306,7 +306,7 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
                bi->end = min(bi->end, high);
 
                /* and there's no empty block */
-               if (bi->start == bi->end) {
+               if (bi->start >= bi->end) {
                        numa_remove_memblk_from(i--, mi);
                        continue;
                }
index 7cb6424317f642bcbb44caabdd6c92cf0fbfa605..c58e0ea39ef5facdaa630fa8138cb77fb530aa06 100644 (file)
@@ -699,16 +699,17 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
                                          struct mm_struct *mm,
                                          unsigned long va, unsigned int cpu)
 {
-       int tcpu;
-       int uvhub;
        int locals = 0;
        int remotes = 0;
        int hubs = 0;
+       int tcpu;
+       int tpnode;
        struct bau_desc *bau_desc;
        struct cpumask *flush_mask;
        struct ptc_stats *stat;
        struct bau_control *bcp;
        struct bau_control *tbcp;
+       struct hub_and_pnode *hpp;
 
        /* kernel was booted 'nobau' */
        if (nobau)
@@ -750,11 +751,18 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
        bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
        bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
 
-       /* cpu statistics */
        for_each_cpu(tcpu, flush_mask) {
-               uvhub = uv_cpu_to_blade_id(tcpu);
-               bau_uvhub_set(uvhub, &bau_desc->distribution);
-               if (uvhub == bcp->uvhub)
+               /*
+                * The distribution vector is a bit map of pnodes, relative
+                * to the partition base pnode (and the partition base nasid
+                * in the header).
+                * Translate cpu to pnode and hub using an array stored
+                * in local memory.
+                */
+               hpp = &bcp->socket_master->target_hub_and_pnode[tcpu];
+               tpnode = hpp->pnode - bcp->partition_base_pnode;
+               bau_uvhub_set(tpnode, &bau_desc->distribution);
+               if (hpp->uvhub == bcp->uvhub)
                        locals++;
                else
                        remotes++;
@@ -855,7 +863,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
  * an interrupt, but causes an error message to be returned to
  * the sender.
  */
-static void uv_enable_timeouts(void)
+static void __init uv_enable_timeouts(void)
 {
        int uvhub;
        int nuvhubs;
@@ -1326,10 +1334,10 @@ static int __init uv_ptc_init(void)
 }
 
 /*
- * initialize the sending side's sending buffers
+ * Initialize the sending side's sending buffers.
  */
 static void
-uv_activation_descriptor_init(int node, int pnode)
+uv_activation_descriptor_init(int node, int pnode, int base_pnode)
 {
        int i;
        int cpu;
@@ -1352,11 +1360,11 @@ uv_activation_descriptor_init(int node, int pnode)
        n = pa >> uv_nshift;
        m = pa & uv_mmask;
 
+       /* the 14-bit pnode */
        uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
                              (n << UV_DESC_BASE_PNODE_SHIFT | m));
-
        /*
-        * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
+        * Initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
         * cpu even though we only use the first one; one descriptor can
         * describe a broadcast to 256 uv hubs.
         */
@@ -1365,12 +1373,13 @@ uv_activation_descriptor_init(int node, int pnode)
                memset(bd2, 0, sizeof(struct bau_desc));
                bd2->header.sw_ack_flag = 1;
                /*
-                * base_dest_nodeid is the nasid of the first uvhub
-                * in the partition. The bit map will indicate uvhub numbers,
-                * which are 0-N in a partition. Pnodes are unique system-wide.
+                * The base_dest_nasid set in the message header is the nasid
+                * of the first uvhub in the partition. The bit map will
+                * indicate destination pnode numbers relative to that base.
+                * They may not be consecutive if nasid striding is being used.
                 */
-               bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode);
-               bd2->header.dest_subnodeid = 0x10; /* the LB */
+               bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
+               bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
                bd2->header.command = UV_NET_ENDPOINT_INTD;
                bd2->header.int_both = 1;
                /*
@@ -1442,7 +1451,7 @@ uv_payload_queue_init(int node, int pnode)
 /*
  * Initialization of each UV hub's structures
  */
-static void __init uv_init_uvhub(int uvhub, int vector)
+static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode)
 {
        int node;
        int pnode;
@@ -1450,11 +1459,11 @@ static void __init uv_init_uvhub(int uvhub, int vector)
 
        node = uvhub_to_first_node(uvhub);
        pnode = uv_blade_to_pnode(uvhub);
-       uv_activation_descriptor_init(node, pnode);
+       uv_activation_descriptor_init(node, pnode, base_pnode);
        uv_payload_queue_init(node, pnode);
        /*
-        * the below initialization can't be in firmware because the
-        * messaging IRQ will be determined by the OS
+        * The below initialization can't be in firmware because the
+        * messaging IRQ will be determined by the OS.
         */
        apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
        uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
@@ -1491,10 +1500,11 @@ calculate_destination_timeout(void)
 /*
  * initialize the bau_control structure for each cpu
  */
-static int __init uv_init_per_cpu(int nuvhubs)
+static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode)
 {
        int i;
        int cpu;
+       int tcpu;
        int pnode;
        int uvhub;
        int have_hmaster;
@@ -1528,6 +1538,15 @@ static int __init uv_init_per_cpu(int nuvhubs)
                bcp = &per_cpu(bau_control, cpu);
                memset(bcp, 0, sizeof(struct bau_control));
                pnode = uv_cpu_hub_info(cpu)->pnode;
+               if ((pnode - base_part_pnode) >= UV_DISTRIBUTION_SIZE) {
+                       printk(KERN_EMERG
+                               "cpu %d pnode %d-%d beyond %d; BAU disabled\n",
+                               cpu, pnode, base_part_pnode,
+                               UV_DISTRIBUTION_SIZE);
+                       return 1;
+               }
+               bcp->osnode = cpu_to_node(cpu);
+               bcp->partition_base_pnode = uv_partition_base_pnode;
                uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
                *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
                bdp = &uvhub_descs[uvhub];
@@ -1536,7 +1555,7 @@ static int __init uv_init_per_cpu(int nuvhubs)
                bdp->pnode = pnode;
                /* kludge: 'assuming' one node per socket, and assuming that
                   disabling a socket just leaves a gap in node numbers */
-               socket = (cpu_to_node(cpu) & 1);
+               socket = bcp->osnode & 1;
                bdp->socket_mask |= (1 << socket);
                sdp = &bdp->socket[socket];
                sdp->cpu_number[sdp->num_cpus] = cpu;
@@ -1585,6 +1604,20 @@ static int __init uv_init_per_cpu(int nuvhubs)
 nextsocket:
                        socket++;
                        socket_mask = (socket_mask >> 1);
+                       /* each socket gets a local array of pnodes/hubs */
+                       bcp = smaster;
+                       bcp->target_hub_and_pnode = kmalloc_node(
+                               sizeof(struct hub_and_pnode) *
+                               num_possible_cpus(), GFP_KERNEL, bcp->osnode);
+                       memset(bcp->target_hub_and_pnode, 0,
+                               sizeof(struct hub_and_pnode) *
+                               num_possible_cpus());
+                       for_each_present_cpu(tcpu) {
+                               bcp->target_hub_and_pnode[tcpu].pnode =
+                                       uv_cpu_hub_info(tcpu)->pnode;
+                               bcp->target_hub_and_pnode[tcpu].uvhub =
+                                       uv_cpu_hub_info(tcpu)->numa_blade_id;
+                       }
                }
        }
        kfree(uvhub_descs);
@@ -1637,21 +1670,22 @@ static int __init uv_bau_init(void)
        spin_lock_init(&disable_lock);
        congested_cycles = microsec_2_cycles(congested_response_us);
 
-       if (uv_init_per_cpu(nuvhubs)) {
-               nobau = 1;
-               return 0;
-       }
-
        uv_partition_base_pnode = 0x7fffffff;
-       for (uvhub = 0; uvhub < nuvhubs; uvhub++)
+       for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
                if (uv_blade_nr_possible_cpus(uvhub) &&
                        (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
                        uv_partition_base_pnode = uv_blade_to_pnode(uvhub);
+       }
+
+       if (uv_init_per_cpu(nuvhubs, uv_partition_base_pnode)) {
+               nobau = 1;
+               return 0;
+       }
 
        vector = UV_BAU_MESSAGE;
        for_each_possible_blade(uvhub)
                if (uv_blade_nr_possible_cpus(uvhub))
-                       uv_init_uvhub(uvhub, vector);
+                       uv_init_uvhub(uvhub, vector, uv_partition_base_pnode);
 
        uv_enable_timeouts();
        alloc_intr_gate(vector, uv_bau_message_intr1);
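The BAU rework stops putting blade (uvhub) numbers in the distribution vector and instead sets bits for pnodes relative to the partition base pnode, since pnodes need not be consecutive when nasid striding is in use, and it disables the BAU if any relative pnode would exceed the vector size. A toy user-space sketch of building such a relative bitmap, with invented per-cpu data:

/* Sketch: set distribution bits relative to a partition base pnode. */
#include <stdio.h>

#define DISTRIBUTION_SIZE 256		/* bits available in the vector */

struct target { int pnode; int uvhub; };

int main(void)
{
	/* Hypothetical per-cpu table; note the pnodes are not consecutive. */
	struct target cpu_target[] = { {4, 0}, {4, 0}, {6, 1}, {10, 2} };
	unsigned char distribution[DISTRIBUTION_SIZE / 8] = { 0 };
	int base_pnode = 4;		/* smallest pnode in the partition */
	int i;

	for (i = 0; i < 4; i++) {
		int rel = cpu_target[i].pnode - base_pnode;

		if (rel >= DISTRIBUTION_SIZE) {
			printf("pnode %d beyond %d: BAU would be disabled\n",
			       cpu_target[i].pnode, DISTRIBUTION_SIZE);
			return 1;
		}
		distribution[rel / 8] |= 1u << (rel % 8);
	}

	for (i = 0; i < 16; i++)
		if (distribution[i / 8] & (1u << (i % 8)))
			printf("relative pnode %d selected\n", i);
	return 0;
}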
index aef7af92b28b620003a7fa102983324bf9041e09..0684f3c74d5361a317148c99c468f11e25257ce8 100644 (file)
@@ -1275,6 +1275,20 @@ static __init void xen_pagetable_setup_start(pgd_t *base)
 {
 }
 
+static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
+{
+       /* reserve the range used */
+       native_pagetable_reserve(start, end);
+
+       /* set as RW the rest */
+       printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
+                       PFN_PHYS(pgt_buf_top));
+       while (end < PFN_PHYS(pgt_buf_top)) {
+               make_lowmem_page_readwrite(__va(end));
+               end += PAGE_SIZE;
+       }
+}
+
 static void xen_post_allocator_init(void);
 
 static __init void xen_pagetable_setup_done(pgd_t *base)
@@ -1495,7 +1509,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
         * it is RO.
         */
        if (((!is_early_ioremap_ptep(ptep) &&
-                       pfn >= pgt_buf_start && pfn < pgt_buf_end)) ||
+                       pfn >= pgt_buf_start && pfn < pgt_buf_top)) ||
                        (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
                pte = pte_wrprotect(pte);
 
@@ -2105,6 +2119,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 
 void __init xen_init_mmu_ops(void)
 {
+       x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
        x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
        x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
        pv_mmu_ops = xen_mmu_ops;
index f0605ab2a76119dd94bccd5809ad881443c25fd7..471fdcc5df85e3135dfefac29aa81a889d0ed567 100644 (file)
@@ -114,6 +114,13 @@ struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
 }
 EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
 
+struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
+{
+       return container_of(task_subsys_state(tsk, blkio_subsys_id),
+                           struct blkio_cgroup, css);
+}
+EXPORT_SYMBOL_GPL(task_blkio_cgroup);
+
 static inline void
 blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
 {
index 10919fae2d3aa2e6ebc1e3cdc2d1636939c997ad..c774930cc20659de992ccebc21d35ffd1e80e234 100644 (file)
@@ -291,6 +291,7 @@ static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
 #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
 extern struct blkio_cgroup blkio_root_cgroup;
 extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
+extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
 extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
        struct blkio_group *blkg, void *key, dev_t dev,
        enum blkio_policy_id plid);
@@ -314,6 +315,8 @@ void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
 struct cgroup;
 static inline struct blkio_cgroup *
 cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
+static inline struct blkio_cgroup *
+task_blkio_cgroup(struct task_struct *tsk) { return NULL; }
 
 static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
                struct blkio_group *blkg, void *key, dev_t dev,
index a2e58eeb35499d5f44502a650c822ec000007d4e..3fe00a14822a203cacc0529ad0610cf5ce7dae06 100644 (file)
@@ -316,8 +316,10 @@ EXPORT_SYMBOL(__blk_run_queue);
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-       if (likely(!blk_queue_stopped(q)))
+       if (likely(!blk_queue_stopped(q))) {
+               __cancel_delayed_work(&q->delay_work);
                queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
+       }
 }
 EXPORT_SYMBOL(blk_run_queue_async);
 
index 0475a22a420dfb09d34ba51698e8b2a07f391b32..252a81a306f7bef0179f953ab1535a4a06a40798 100644 (file)
@@ -160,9 +160,8 @@ static void throtl_put_tg(struct throtl_grp *tg)
 }
 
 static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
-                       struct cgroup *cgroup)
+                       struct blkio_cgroup *blkcg)
 {
-       struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
        struct throtl_grp *tg = NULL;
        void *key = td;
        struct backing_dev_info *bdi = &td->queue->backing_dev_info;
@@ -229,12 +228,12 @@ done:
 
 static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 {
-       struct cgroup *cgroup;
        struct throtl_grp *tg = NULL;
+       struct blkio_cgroup *blkcg;
 
        rcu_read_lock();
-       cgroup = task_cgroup(current, blkio_subsys_id);
-       tg = throtl_find_alloc_tg(td, cgroup);
+       blkcg = task_blkio_cgroup(current);
+       tg = throtl_find_alloc_tg(td, blkcg);
        if (!tg)
                tg = &td->root_tg;
        rcu_read_unlock();
index 5b52011e3a40a38c1235e1729554abfbb9f99c7d..ab7a9e6a9b1cb12c1952f5fd725ca2ea0dd20fd1 100644 (file)
@@ -1014,10 +1014,9 @@ void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
        cfqg->needs_update = true;
 }
 
-static struct cfq_group *
-cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
+static struct cfq_group * cfq_find_alloc_cfqg(struct cfq_data *cfqd,
+               struct blkio_cgroup *blkcg, int create)
 {
-       struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
        struct cfq_group *cfqg = NULL;
        void *key = cfqd;
        int i, j;
@@ -1079,12 +1078,12 @@ done:
  */
 static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
 {
-       struct cgroup *cgroup;
+       struct blkio_cgroup *blkcg;
        struct cfq_group *cfqg = NULL;
 
        rcu_read_lock();
-       cgroup = task_cgroup(current, blkio_subsys_id);
-       cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create);
+       blkcg = task_blkio_cgroup(current);
+       cfqg = cfq_find_alloc_cfqg(cfqd, blkcg, create);
        if (!cfqg && create)
                cfqg = &cfqd->root_group;
        rcu_read_unlock();
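task_blkio_cgroup() is a thin helper: it fetches the task's css for the blkio subsystem and uses container_of() to get back to the enclosing struct blkio_cgroup, so callers like the throttling and CFQ paths above no longer go through an intermediate struct cgroup. A standalone illustration of the container_of step only, with stand-in types rather than the kernel structures:

/* Sketch: recover the enclosing structure from a pointer to a member. */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct css { int refcount; };		/* stand-in for cgroup_subsys_state */

struct blkio_cgroup_like {
	unsigned int weight;
	struct css css;			/* embedded member */
};

int main(void)
{
	struct blkio_cgroup_like blkcg = { .weight = 500 };
	struct css *css = &blkcg.css;	/* what the subsystem hands back */

	struct blkio_cgroup_like *back =
		container_of(css, struct blkio_cgroup_like, css);

	printf("weight via container_of: %u\n", back->weight);
	return 0;
}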
index ff9d832a163de8442c2adc1d8fa18be75ec4f417..d38c40fe4ddbfeace9bb1460505aac4add9d23f1 100644 (file)
@@ -561,27 +561,6 @@ void ahci_start_engine(struct ata_port *ap)
 {
        void __iomem *port_mmio = ahci_port_base(ap);
        u32 tmp;
-       u8 status;
-
-       status = readl(port_mmio + PORT_TFDATA) & 0xFF;
-
-       /*
-        * At end of section 10.1 of AHCI spec (rev 1.3), it states
-        * Software shall not set PxCMD.ST to 1 until it is determined
-        * that a functoinal device is present on the port as determined by
-        * that a functional device is present on the port as determined by
-        * that a functional device is present on the port as determined by
-        *
-        * Even though most AHCI host controllers work without this check,
-        * specific controller will fail under this condition
-        */
-       if (status & (ATA_BUSY | ATA_DRQ))
-               return;
-       else {
-               ahci_scr_read(&ap->link, SCR_STATUS, &tmp);
-
-               if ((tmp & 0xf) != 0x3)
-                       return;
-       }
 
        /* start DMA */
        tmp = readl(port_mmio + PORT_CMD);
index f26f2fe3480ad0847b6ce059a0b4206e182a9f46..dad9fd660f379907b6bb80856d8e047959d1917a 100644 (file)
@@ -3316,7 +3316,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
        struct ata_eh_context *ehc = &link->eh_context;
        struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
        enum ata_lpm_policy old_policy = link->lpm_policy;
-       bool no_dipm = ap->flags & ATA_FLAG_NO_DIPM;
+       bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
        unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
        unsigned int err_mask;
        int rc;
index bdd2719f3f68382084fb04170b1c14022e442425..bc9e702186dd64daf5445ea09710e5e8c7ee59eb 100644 (file)
@@ -2643,16 +2643,19 @@ fore200e_init(struct fore200e* fore200e, struct device *parent)
 }
 
 #ifdef CONFIG_SBUS
+static const struct of_device_id fore200e_sba_match[];
 static int __devinit fore200e_sba_probe(struct platform_device *op)
 {
+       const struct of_device_id *match;
        const struct fore200e_bus *bus;
        struct fore200e *fore200e;
        static int index = 0;
        int err;
 
-       if (!op->dev.of_match)
+       match = of_match_device(fore200e_sba_match, &op->dev);
+       if (!match)
                return -EINVAL;
-       bus = op->dev.of_match->data;
+       bus = match->data;
 
        fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
        if (!fore200e)
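This fore200e change, like the n2rng, ipmi_si, xilinx_hwicap and ppc4xx_edac changes further down, replaces the removed op->dev.of_match pointer with an explicit of_match_device() lookup against the driver's own match table and then reads per-device data from match->data. A user-space sketch of that lookup-then-data idiom, with invented table entries:

/* Sketch: look up a device in a match table and use its ->data. */
#include <stdio.h>
#include <string.h>

struct match_entry {
	const char *compatible;
	const void *data;
};

static const char sba_bus[] = "sba";
static const char pca_bus[] = "pca";

static const struct match_entry match_table[] = {
	{ "vendor,sba-card", sba_bus },		/* hypothetical entries */
	{ "vendor,pca-card", pca_bus },
	{ NULL, NULL },
};

static const struct match_entry *match_device(const char *compatible)
{
	const struct match_entry *m;

	for (m = match_table; m->compatible; m++)
		if (!strcmp(m->compatible, compatible))
			return m;
	return NULL;			/* no match: probe should fail */
}

int main(void)
{
	const struct match_entry *match = match_device("vendor,sba-card");

	if (!match)
		return 1;		/* the driver returns -EINVAL here */
	printf("probing with bus data: %s\n", (const char *)match->data);
	return 0;
}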
index 8066d086578a7124a4873bb8376389dc9a010be9..e086fbbbe853ed1d083e368092a9b71b59ec2b8a 100644 (file)
@@ -2547,7 +2547,6 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
        disk->major = MajorNumber;
        disk->first_minor = n << DAC960_MaxPartitionsBits;
        disk->fops = &DAC960_BlockDeviceOperations;
-       disk->events = DISK_EVENT_MEDIA_CHANGE;
    }
   /*
     Indicate the Block Device Registration completed successfully,
index 456c0cc90dcfd58d87de3d34ace5dda354795112..8eba86bba5992ec2b546559c96f7a848e0c2c4e4 100644 (file)
@@ -1736,7 +1736,6 @@ static int __init fd_probe_drives(void)
                disk->major = FLOPPY_MAJOR;
                disk->first_minor = drive;
                disk->fops = &floppy_fops;
-               disk->events = DISK_EVENT_MEDIA_CHANGE;
                sprintf(disk->disk_name, "fd%d", drive);
                disk->private_data = &unit[drive];
                set_capacity(disk, 880*2);
index c871eae14120423f2cd618742730a6f6b51edeb3..ede16c64ff07dbb524cdf84ba8e3622d9efe306b 100644 (file)
@@ -1964,7 +1964,6 @@ static int __init atari_floppy_init (void)
                unit[i].disk->first_minor = i;
                sprintf(unit[i].disk->disk_name, "fd%d", i);
                unit[i].disk->fops = &floppy_fops;
-               unit[i].disk->events = DISK_EVENT_MEDIA_CHANGE;
                unit[i].disk->private_data = &unit[i];
                unit[i].disk->queue = blk_init_queue(do_fd_request,
                                        &ataflop_lock);
index 301d7a9a41a6b51be0ae02f6ca92b2fb063316a7..db8f88586c8d3b2d97dbc910835a0de846ac1fd1 100644 (file)
@@ -4205,7 +4205,6 @@ static int __init floppy_init(void)
                disks[dr]->major = FLOPPY_MAJOR;
                disks[dr]->first_minor = TOMINOR(dr);
                disks[dr]->fops = &floppy_fops;
-               disks[dr]->events = DISK_EVENT_MEDIA_CHANGE;
                sprintf(disks[dr]->disk_name, "fd%d", dr);
 
                init_timer(&motor_off_timer[dr]);
index 2f2ccf6862519c4bb44493ccab140cbbdab37a93..8690e31d993287e16a68caca0e2125c0c7e6bdb0 100644 (file)
@@ -320,7 +320,6 @@ static void pcd_init_units(void)
                disk->first_minor = unit;
                strcpy(disk->disk_name, cd->name);      /* umm... */
                disk->fops = &pcd_bdops;
-               disk->events = DISK_EVENT_MEDIA_CHANGE;
        }
 }
 
index 21dfdb7768695cfe8d9517dc59f4cf7a5b1df279..869e7676d46f47abcb1ffe1812efd3316d234f74 100644 (file)
@@ -837,7 +837,6 @@ static void pd_probe_drive(struct pd_unit *disk)
        p->fops = &pd_fops;
        p->major = major;
        p->first_minor = (disk - pd) << PD_BITS;
-       p->events = DISK_EVENT_MEDIA_CHANGE;
        disk->gd = p;
        p->private_data = disk;
        p->queue = pd_queue;
index 7adeb1edbf43faf02b5f68f79d93f6d839baa9e4..f21b520ef4195228713d34c2f2157ce1b928e37d 100644 (file)
@@ -294,7 +294,6 @@ static void __init pf_init_units(void)
                disk->first_minor = unit;
                strcpy(disk->disk_name, pf->name);
                disk->fops = &pf_fops;
-               disk->events = DISK_EVENT_MEDIA_CHANGE;
                if (!(*drives[unit])[D_PRT])
                        pf_drive_count++;
        }
index 16dc3645291cd7bbb3eab203b83dde68939b61d7..9712fad82bc6cf2be78daaa503432e8725ab2a4f 100644 (file)
@@ -92,6 +92,8 @@ struct rbd_client {
        struct list_head        node;
 };
 
+struct rbd_req_coll;
+
 /*
  * a single io request
  */
@@ -100,6 +102,24 @@ struct rbd_request {
        struct bio              *bio;           /* cloned bio */
        struct page             **pages;        /* list of used pages */
        u64                     len;
+       int                     coll_index;
+       struct rbd_req_coll     *coll;
+};
+
+struct rbd_req_status {
+       int done;
+       int rc;
+       u64 bytes;
+};
+
+/*
+ * a collection of requests
+ */
+struct rbd_req_coll {
+       int                     total;
+       int                     num_done;
+       struct kref             kref;
+       struct rbd_req_status   status[0];
 };
 
 struct rbd_snap {
@@ -416,6 +436,17 @@ static void rbd_put_client(struct rbd_device *rbd_dev)
        rbd_dev->client = NULL;
 }
 
+/*
+ * Destroy requests collection
+ */
+static void rbd_coll_release(struct kref *kref)
+{
+       struct rbd_req_coll *coll =
+               container_of(kref, struct rbd_req_coll, kref);
+
+       dout("rbd_coll_release %p\n", coll);
+       kfree(coll);
+}
 
 /*
  * Create a new header structure, translate header format from the on-disk
@@ -590,6 +621,14 @@ static u64 rbd_get_segment(struct rbd_image_header *header,
        return len;
 }
 
+static int rbd_get_num_segments(struct rbd_image_header *header,
+                               u64 ofs, u64 len)
+{
+       u64 start_seg = ofs >> header->obj_order;
+       u64 end_seg = (ofs + len - 1) >> header->obj_order;
+       return end_seg - start_seg + 1;
+}
+
 /*
  * bio helpers
  */
@@ -735,6 +774,50 @@ static void rbd_destroy_ops(struct ceph_osd_req_op *ops)
        kfree(ops);
 }
 
+static void rbd_coll_end_req_index(struct request *rq,
+                                  struct rbd_req_coll *coll,
+                                  int index,
+                                  int ret, u64 len)
+{
+       struct request_queue *q;
+       int min, max, i;
+
+       dout("rbd_coll_end_req_index %p index %d ret %d len %lld\n",
+            coll, index, ret, len);
+
+       if (!rq)
+               return;
+
+       if (!coll) {
+               blk_end_request(rq, ret, len);
+               return;
+       }
+
+       q = rq->q;
+
+       spin_lock_irq(q->queue_lock);
+       coll->status[index].done = 1;
+       coll->status[index].rc = ret;
+       coll->status[index].bytes = len;
+       max = min = coll->num_done;
+       while (max < coll->total && coll->status[max].done)
+               max++;
+
+       for (i = min; i<max; i++) {
+               __blk_end_request(rq, coll->status[i].rc,
+                                 coll->status[i].bytes);
+               coll->num_done++;
+               kref_put(&coll->kref, rbd_coll_release);
+       }
+       spin_unlock_irq(q->queue_lock);
+}
+
+static void rbd_coll_end_req(struct rbd_request *req,
+                            int ret, u64 len)
+{
+       rbd_coll_end_req_index(req->rq, req->coll, req->coll_index, ret, len);
+}
+
 /*
  * Send ceph osd request
  */
@@ -749,6 +832,8 @@ static int rbd_do_request(struct request *rq,
                          int flags,
                          struct ceph_osd_req_op *ops,
                          int num_reply,
+                         struct rbd_req_coll *coll,
+                         int coll_index,
                          void (*rbd_cb)(struct ceph_osd_request *req,
                                         struct ceph_msg *msg),
                          struct ceph_osd_request **linger_req,
@@ -763,12 +848,20 @@ static int rbd_do_request(struct request *rq,
        struct ceph_osd_request_head *reqhead;
        struct rbd_image_header *header = &dev->header;
 
-       ret = -ENOMEM;
        req_data = kzalloc(sizeof(*req_data), GFP_NOIO);
-       if (!req_data)
-               goto done;
+       if (!req_data) {
+               if (coll)
+                       rbd_coll_end_req_index(rq, coll, coll_index,
+                                              -ENOMEM, len);
+               return -ENOMEM;
+       }
 
-       dout("rbd_do_request len=%lld ofs=%lld\n", len, ofs);
+       if (coll) {
+               req_data->coll = coll;
+               req_data->coll_index = coll_index;
+       }
+
+       dout("rbd_do_request obj=%s ofs=%lld len=%lld\n", obj, len, ofs);
 
        down_read(&header->snap_rwsem);
 
@@ -777,9 +870,9 @@ static int rbd_do_request(struct request *rq,
                                      ops,
                                      false,
                                      GFP_NOIO, pages, bio);
-       if (IS_ERR(req)) {
+       if (!req) {
                up_read(&header->snap_rwsem);
-               ret = PTR_ERR(req);
+               ret = -ENOMEM;
                goto done_pages;
        }
 
@@ -828,7 +921,8 @@ static int rbd_do_request(struct request *rq,
                ret = ceph_osdc_wait_request(&dev->client->osdc, req);
                if (ver)
                        *ver = le64_to_cpu(req->r_reassert_version.version);
-               dout("reassert_ver=%lld\n", le64_to_cpu(req->r_reassert_version.version));
+               dout("reassert_ver=%lld\n",
+                    le64_to_cpu(req->r_reassert_version.version));
                ceph_osdc_put_request(req);
        }
        return ret;
@@ -837,10 +931,8 @@ done_err:
        bio_chain_put(req_data->bio);
        ceph_osdc_put_request(req);
 done_pages:
+       rbd_coll_end_req(req_data, ret, len);
        kfree(req_data);
-done:
-       if (rq)
-               blk_end_request(rq, ret, len);
        return ret;
 }
 
@@ -874,7 +966,7 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
                bytes = req_data->len;
        }
 
-       blk_end_request(req_data->rq, rc, bytes);
+       rbd_coll_end_req(req_data, rc, bytes);
 
        if (req_data->bio)
                bio_chain_put(req_data->bio);
@@ -934,6 +1026,7 @@ static int rbd_req_sync_op(struct rbd_device *dev,
                          flags,
                          ops,
                          2,
+                         NULL, 0,
                          NULL,
                          linger_req, ver);
        if (ret < 0)
@@ -959,7 +1052,9 @@ static int rbd_do_op(struct request *rq,
                     u64 snapid,
                     int opcode, int flags, int num_reply,
                     u64 ofs, u64 len,
-                    struct bio *bio)
+                    struct bio *bio,
+                    struct rbd_req_coll *coll,
+                    int coll_index)
 {
        char *seg_name;
        u64 seg_ofs;
@@ -995,7 +1090,10 @@ static int rbd_do_op(struct request *rq,
                             flags,
                             ops,
                             num_reply,
+                            coll, coll_index,
                             rbd_req_cb, 0, NULL);
+
+       rbd_destroy_ops(ops);
 done:
        kfree(seg_name);
        return ret;
@@ -1008,13 +1106,15 @@ static int rbd_req_write(struct request *rq,
                         struct rbd_device *rbd_dev,
                         struct ceph_snap_context *snapc,
                         u64 ofs, u64 len,
-                        struct bio *bio)
+                        struct bio *bio,
+                        struct rbd_req_coll *coll,
+                        int coll_index)
 {
        return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP,
                         CEPH_OSD_OP_WRITE,
                         CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
                         2,
-                        ofs, len, bio);
+                        ofs, len, bio, coll, coll_index);
 }
 
 /*
@@ -1024,14 +1124,16 @@ static int rbd_req_read(struct request *rq,
                         struct rbd_device *rbd_dev,
                         u64 snapid,
                         u64 ofs, u64 len,
-                        struct bio *bio)
+                        struct bio *bio,
+                        struct rbd_req_coll *coll,
+                        int coll_index)
 {
        return rbd_do_op(rq, rbd_dev, NULL,
                         (snapid ? snapid : CEPH_NOSNAP),
                         CEPH_OSD_OP_READ,
                         CEPH_OSD_FLAG_READ,
                         2,
-                        ofs, len, bio);
+                        ofs, len, bio, coll, coll_index);
 }
 
 /*
@@ -1063,7 +1165,9 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev,
 {
        struct ceph_osd_req_op *ops;
        struct page **pages = NULL;
-       int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0);
+       int ret;
+
+       ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0);
        if (ret < 0)
                return ret;
 
@@ -1077,6 +1181,7 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev,
                          CEPH_OSD_FLAG_READ,
                          ops,
                          1,
+                         NULL, 0,
                          rbd_simple_req_cb, 0, NULL);
 
        rbd_destroy_ops(ops);
@@ -1274,6 +1379,20 @@ static int rbd_req_sync_exec(struct rbd_device *dev,
        return ret;
 }
 
+static struct rbd_req_coll *rbd_alloc_coll(int num_reqs)
+{
+       struct rbd_req_coll *coll =
+                       kzalloc(sizeof(struct rbd_req_coll) +
+                               sizeof(struct rbd_req_status) * num_reqs,
+                               GFP_ATOMIC);
+
+       if (!coll)
+               return NULL;
+       coll->total = num_reqs;
+       kref_init(&coll->kref);
+       return coll;
+}
+
 /*
  * block device queue callback
  */
@@ -1291,6 +1410,8 @@ static void rbd_rq_fn(struct request_queue *q)
                bool do_write;
                int size, op_size = 0;
                u64 ofs;
+               int num_segs, cur_seg = 0;
+               struct rbd_req_coll *coll;
 
                /* peek at request from block layer */
                if (!rq)
@@ -1321,6 +1442,14 @@ static void rbd_rq_fn(struct request_queue *q)
                     do_write ? "write" : "read",
                     size, blk_rq_pos(rq) * 512ULL);
 
+               num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size);
+               coll = rbd_alloc_coll(num_segs);
+               if (!coll) {
+                       spin_lock_irq(q->queue_lock);
+                       __blk_end_request_all(rq, -ENOMEM);
+                       goto next;
+               }
+
                do {
                        /* a bio clone to be passed down to OSD req */
                        dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt);
@@ -1328,35 +1457,41 @@ static void rbd_rq_fn(struct request_queue *q)
                                                  rbd_dev->header.block_name,
                                                  ofs, size,
                                                  NULL, NULL);
+                       kref_get(&coll->kref);
                        bio = bio_chain_clone(&rq_bio, &next_bio, &bp,
                                              op_size, GFP_ATOMIC);
                        if (!bio) {
-                               spin_lock_irq(q->queue_lock);
-                               __blk_end_request_all(rq, -ENOMEM);
-                               goto next;
+                               rbd_coll_end_req_index(rq, coll, cur_seg,
+                                                      -ENOMEM, op_size);
+                               goto next_seg;
                        }
 
+
                        /* init OSD command: write or read */
                        if (do_write)
                                rbd_req_write(rq, rbd_dev,
                                              rbd_dev->header.snapc,
                                              ofs,
-                                             op_size, bio);
+                                             op_size, bio,
+                                             coll, cur_seg);
                        else
                                rbd_req_read(rq, rbd_dev,
                                             cur_snap_id(rbd_dev),
                                             ofs,
-                                            op_size, bio);
+                                            op_size, bio,
+                                            coll, cur_seg);
 
+next_seg:
                        size -= op_size;
                        ofs += op_size;
 
+                       cur_seg++;
                        rq_bio = next_bio;
                } while (size > 0);
+               kref_put(&coll->kref, rbd_coll_release);
 
                if (bp)
                        bio_pair_release(bp);
-
                spin_lock_irq(q->queue_lock);
 next:
                rq = blk_fetch_request(q);
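Two ideas in this rbd patch are worth spelling out: a block request that spans several object segments gets a collection (struct rbd_req_coll) whose slots complete independently and whose lifetime is managed by a kref, and the segment count is simply last object index minus first object index plus one. A small user-space sketch of both, with a plain counter standing in for the kref and all names invented:

/* Sketch: segment counting and a refcounted completion collection. */
#include <stdio.h>
#include <stdlib.h>

struct req_status { int done; int rc; unsigned long long bytes; };

struct req_coll {
	int total;
	int refs;			/* kref stand-in (single-threaded here) */
	struct req_status status[];
};

static int num_segments(unsigned long long ofs, unsigned long long len,
			int obj_order)
{
	unsigned long long start_seg = ofs >> obj_order;
	unsigned long long end_seg = (ofs + len - 1) >> obj_order;

	return (int)(end_seg - start_seg + 1);
}

static struct req_coll *alloc_coll(int num)
{
	struct req_coll *coll = calloc(1, sizeof(*coll) +
				       num * sizeof(struct req_status));

	if (coll) {
		coll->total = num;
		coll->refs = 1;		/* the submitter's reference */
	}
	return coll;
}

static void coll_put(struct req_coll *coll)
{
	if (--coll->refs == 0) {
		printf("all %d segments done, freeing collection\n", coll->total);
		free(coll);
	}
}

int main(void)
{
	/* 4 MiB objects (order 22): a 6 MiB I/O at offset 3 MiB spans 3 objects. */
	int segs = num_segments(3ULL << 20, 6ULL << 20, 22);
	struct req_coll *coll = alloc_coll(segs);
	int i;

	printf("request covers %d segments\n", segs);
	for (i = 0; i < segs; i++) {
		coll->refs++;			/* one reference per segment */
		coll->status[i].done = 1;	/* pretend the OSD replied */
		coll_put(coll);			/* segment completion drops it */
	}
	coll_put(coll);				/* drop the submitter's reference */
	return 0;
}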
index 24a482f2fbd60fcfd0917374bd6197a76d6ddf85..fd5adcd55944ac3a9384f86bbe3d7507df0f9964 100644 (file)
@@ -858,7 +858,6 @@ static int __devinit swim_floppy_init(struct swim_priv *swd)
                swd->unit[drive].disk->first_minor = drive;
                sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive);
                swd->unit[drive].disk->fops = &floppy_fops;
-               swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE;
                swd->unit[drive].disk->private_data = &swd->unit[drive];
                swd->unit[drive].disk->queue = swd->queue;
                set_capacity(swd->unit[drive].disk, 2880);
index 4c10f56facbff8b5c0e56615c0ebc79a1179903f..773bfa7927775396f80c004b4d6551c485efe4c6 100644 (file)
@@ -1163,7 +1163,6 @@ static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device
        disk->major = FLOPPY_MAJOR;
        disk->first_minor = i;
        disk->fops = &floppy_fops;
-       disk->events = DISK_EVENT_MEDIA_CHANGE;
        disk->private_data = &floppy_states[i];
        disk->queue = swim3_queue;
        disk->flags |= GENHD_FL_REMOVABLE;
index 68b9430c7cfe56f7f23330cb152b103aef3098b9..0e376d46bdd1d6f73a61583882eec6faa654a832 100644 (file)
@@ -2334,7 +2334,6 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
        disk->major = UB_MAJOR;
        disk->first_minor = lun->id * UB_PARTS_PER_LUN;
        disk->fops = &ub_bd_fops;
-       disk->events = DISK_EVENT_MEDIA_CHANGE;
        disk->private_data = lun;
        disk->driverfs_dev = &sc->intf->dev;
 
index 645ff765cd129cbb3046e57ea477e4b387205d45..6c7fd7db6dffc9e8cb287d30be803c8c59d0d770 100644 (file)
@@ -1005,7 +1005,6 @@ static int __devinit ace_setup(struct ace_device *ace)
        ace->gd->major = ace_major;
        ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
        ace->gd->fops = &ace_fops;
-       ace->gd->events = DISK_EVENT_MEDIA_CHANGE;
        ace->gd->queue = ace->queue;
        ace->gd->private_data = ace;
        snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');
index 514dd8efaf7385f03ab6d7566f4eaac38d4fd5aa..75fb965b8f72b1e9fa2a8001f7fb16fb12388d5c 100644 (file)
@@ -986,6 +986,9 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t
 
        cdinfo(CD_OPEN, "entering cdrom_open\n"); 
 
+       /* open is event synchronization point, check events first */
+       check_disk_change(bdev);
+
        /* if this was a O_NONBLOCK open and we should honor the flags,
         * do a quick open without drive/disc integrity checks. */
        cdi->use_count++;
@@ -1012,9 +1015,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t
 
        cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n",
                        cdi->name, cdi->use_count);
-       /* Do this on open.  Don't wait for mount, because they might
-           not be mounting, but opening with O_NONBLOCK */
-       check_disk_change(bdev);
        return 0;
 err_release:
        if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {
index b2b034fea34e6b8eabf7054928e34bc4d6fc0e17..3ceaf006e7f080fabc94848e9fc3b53f3dd59f0a 100644 (file)
@@ -803,7 +803,6 @@ static int __devinit probe_gdrom(struct platform_device *devptr)
                goto probe_fail_cdrom_register;
        }
        gd.disk->fops = &gdrom_bdops;
-       gd.disk->events = DISK_EVENT_MEDIA_CHANGE;
        /* latch on to the interrupt */
        err = gdrom_set_interrupt_handlers();
        if (err)
index 4e874c5fa60595036a7e10b26257384715b06b56..e427fbe459994a8491945a65fd085201f40ed01f 100644 (file)
@@ -626,7 +626,6 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        gendisk->queue = q;
        gendisk->fops = &viocd_fops;
        gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE;
-       gendisk->events = DISK_EVENT_MEDIA_CHANGE;
        set_capacity(gendisk, 0);
        gendisk->private_data = d;
        d->viocd_disk = gendisk;
index 43ac61978d8b2661672418901bde4242462a6165..ac6739e085e3cede7ede5588854aeca8503f1c7c 100644 (file)
@@ -619,15 +619,18 @@ static void __devinit n2rng_driver_version(void)
                pr_info("%s", version);
 }
 
+static const struct of_device_id n2rng_match[];
 static int __devinit n2rng_probe(struct platform_device *op)
 {
+       const struct of_device_id *match;
        int victoria_falls;
        int err = -ENOMEM;
        struct n2rng *np;
 
-       if (!op->dev.of_match)
+       match = of_match_device(n2rng_match, &op->dev);
+       if (!match)
                return -EINVAL;
-       victoria_falls = (op->dev.of_match->data != NULL);
+       victoria_falls = (match->data != NULL);
 
        n2rng_driver_version();
        np = kzalloc(sizeof(*np), GFP_KERNEL);
index cc6c9b2546a30eed5dad30ac8a928e19514bc1c9..64c6b85306150723ad8206f11fab7f54d8dd597b 100644 (file)
@@ -2554,9 +2554,11 @@ static struct pci_driver ipmi_pci_driver = {
 };
 #endif /* CONFIG_PCI */
 
+static struct of_device_id ipmi_match[];
 static int __devinit ipmi_probe(struct platform_device *dev)
 {
 #ifdef CONFIG_OF
+       const struct of_device_id *match;
        struct smi_info *info;
        struct resource resource;
        const __be32 *regsize, *regspacing, *regshift;
@@ -2566,7 +2568,8 @@ static int __devinit ipmi_probe(struct platform_device *dev)
 
        dev_info(&dev->dev, "probing via device tree\n");
 
-       if (!dev->dev.of_match)
+       match = of_match_device(ipmi_match, &dev->dev);
+       if (!match)
                return -EINVAL;
 
        ret = of_address_to_resource(np, 0, &resource);
@@ -2601,7 +2604,7 @@ static int __devinit ipmi_probe(struct platform_device *dev)
                return -ENOMEM;
        }
 
-       info->si_type           = (enum si_type) dev->dev.of_match->data;
+       info->si_type           = (enum si_type) match->data;
        info->addr_source       = SI_DEVICETREE;
        info->irq_setup         = std_irq_setup;
 
index d6412c16385f72940da3d501052742a250bdb8ed..39ccdeada79101fe2e910328110843fd569d3188 100644 (file)
@@ -715,13 +715,13 @@ static int __devexit hwicap_remove(struct device *dev)
 }
 
 #ifdef CONFIG_OF
-static int __devinit hwicap_of_probe(struct platform_device *op)
+static int __devinit hwicap_of_probe(struct platform_device *op,
+                                    const struct hwicap_driver_config *config)
 {
        struct resource res;
        const unsigned int *id;
        const char *family;
        int rc;
-       const struct hwicap_driver_config *config = op->dev.of_match->data;
        const struct config_registers *regs;
 
 
@@ -751,20 +751,24 @@ static int __devinit hwicap_of_probe(struct platform_device *op)
                        regs);
 }
 #else
-static inline int hwicap_of_probe(struct platform_device *op)
+static inline int hwicap_of_probe(struct platform_device *op,
+                                 const struct hwicap_driver_config *config)
 {
        return -EINVAL;
 }
 #endif /* CONFIG_OF */
 
+static const struct of_device_id __devinitconst hwicap_of_match[];
 static int __devinit hwicap_drv_probe(struct platform_device *pdev)
 {
+       const struct of_device_id *match;
        struct resource *res;
        const struct config_registers *regs;
        const char *family;
 
-       if (pdev->dev.of_match)
-               return hwicap_of_probe(pdev);
+       match = of_match_device(hwicap_of_match, &pdev->dev);
+       if (match)
+               return hwicap_of_probe(pdev, match->data);
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
index 0fc0a79852de0f919bd2ac9ac117533ed8a0f991..6db161f64ae0e9ab42a17f04608ab3ac18638141 100644 (file)
@@ -32,10 +32,9 @@ static DEFINE_MUTEX(clocks_mutex);
  * Then we take the most specific entry - with the following
  * order of precedence: dev+con > dev only > con only.
  */
-static struct clk *clk_find(const char *dev_id, const char *con_id)
+static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
 {
-       struct clk_lookup *p;
-       struct clk *clk = NULL;
+       struct clk_lookup *p, *cl = NULL;
        int match, best = 0;
 
        list_for_each_entry(p, &clocks, node) {
@@ -52,27 +51,27 @@ static struct clk *clk_find(const char *dev_id, const char *con_id)
                }
 
                if (match > best) {
-                       clk = p->clk;
+                       cl = p;
                        if (match != 3)
                                best = match;
                        else
                                break;
                }
        }
-       return clk;
+       return cl;
 }
 
 struct clk *clk_get_sys(const char *dev_id, const char *con_id)
 {
-       struct clk *clk;
+       struct clk_lookup *cl;
 
        mutex_lock(&clocks_mutex);
-       clk = clk_find(dev_id, con_id);
-       if (clk && !__clk_get(clk))
-               clk = NULL;
+       cl = clk_find(dev_id, con_id);
+       if (cl && !__clk_get(cl->clk))
+               cl = NULL;
        mutex_unlock(&clocks_mutex);
 
-       return clk ? clk : ERR_PTR(-ENOENT);
+       return cl ? cl->clk : ERR_PTR(-ENOENT);
 }
 EXPORT_SYMBOL(clk_get_sys);
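clk_find() keeps its precedence rule, dev+con beats dev-only beats con-only, it just returns the clk_lookup entry now so clk_get_sys() can reach both the entry and its clk. A standalone sketch of the scoring loop with invented lookup data:

/* Sketch: best-match lookup where dev+con > dev only > con only. */
#include <stdio.h>
#include <string.h>

struct lookup { const char *dev_id; const char *con_id; const char *clk; };

static const struct lookup clocks[] = {
	{ NULL,     "uart", "uart_generic_clk" },
	{ "uart.0", NULL,   "uart0_dev_clk"    },
	{ "uart.0", "uart", "uart0_exact_clk"  },
};

static const struct lookup *clk_find(const char *dev_id, const char *con_id)
{
	const struct lookup *p, *cl = NULL;
	int match, best = 0;
	size_t i;

	for (i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++) {
		p = &clocks[i];
		match = 0;
		if (p->dev_id) {
			if (!dev_id || strcmp(p->dev_id, dev_id))
				continue;	/* entry names a different device */
			match += 2;
		}
		if (p->con_id) {
			if (!con_id || strcmp(p->con_id, con_id))
				continue;	/* entry names a different connection */
			match += 1;
		}
		if (match > best) {
			cl = p;
			if (match != 3)
				best = match;
			else
				break;		/* dev+con is as good as it gets */
		}
	}
	return cl;
}

int main(void)
{
	const struct lookup *cl = clk_find("uart.0", "uart");

	printf("found %s\n", cl ? cl->clk : "nothing");
	return 0;
}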
 
index c1f0045ceb8e708ced1a6163ae921b03efe4e9d5..af8e7b1aa290a2e0d682de5d1aefb9e8808c23e1 100644 (file)
@@ -1019,7 +1019,7 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
        struct ppc4xx_edac_pdata *pdata = NULL;
        const struct device_node *np = op->dev.of_node;
 
-       if (op->dev.of_match == NULL)
+       if (of_match_device(ppc4xx_edac_match, &op->dev) == NULL)
                return -EINVAL;
 
        /* Initial driver pointers and private data */
index f903d7b6f34a92bca580b5ed01cf422c9a8caf7c..23d1468ad253d54c9c0745a858b6b155123e76e7 100644 (file)
@@ -2199,7 +2199,6 @@ static int ohci_set_config_rom(struct fw_card *card,
 {
        struct fw_ohci *ohci;
        unsigned long flags;
-       int ret = -EBUSY;
        __be32 *next_config_rom;
        dma_addr_t uninitialized_var(next_config_rom_bus);
 
@@ -2240,22 +2239,37 @@ static int ohci_set_config_rom(struct fw_card *card,
 
        spin_lock_irqsave(&ohci->lock, flags);
 
+       /*
+        * If there is not an already pending config_rom update,
+        * push our new allocation into the ohci->next_config_rom
+        * and then mark the local variable as null so that we
+        * won't deallocate the new buffer.
+        *
+        * OTOH, if there is a pending config_rom update, just
+        * use that buffer with the new config_rom data, and
+        * let this routine free the unused DMA allocation.
+        */
+
        if (ohci->next_config_rom == NULL) {
                ohci->next_config_rom = next_config_rom;
                ohci->next_config_rom_bus = next_config_rom_bus;
+               next_config_rom = NULL;
+       }
 
-               copy_config_rom(ohci->next_config_rom, config_rom, length);
+       copy_config_rom(ohci->next_config_rom, config_rom, length);
 
-               ohci->next_header = config_rom[0];
-               ohci->next_config_rom[0] = 0;
+       ohci->next_header = config_rom[0];
+       ohci->next_config_rom[0] = 0;
 
-               reg_write(ohci, OHCI1394_ConfigROMmap,
-                         ohci->next_config_rom_bus);
-               ret = 0;
-       }
+       reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
 
        spin_unlock_irqrestore(&ohci->lock, flags);
 
+       /* If we didn't use the DMA allocation, delete it. */
+       if (next_config_rom != NULL)
+               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
+                                 next_config_rom, next_config_rom_bus);
+
        /*
         * Now initiate a bus reset to have the changes take
         * effect. We clean up the old config rom memory and DMA
@@ -2263,13 +2277,10 @@ static int ohci_set_config_rom(struct fw_card *card,
         * controller could need to access it before the bus reset
         * takes effect.
         */
-       if (ret == 0)
-               fw_schedule_bus_reset(&ohci->card, true, true);
-       else
-               dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
-                                 next_config_rom, next_config_rom_bus);
 
-       return ret;
+       fw_schedule_bus_reset(&ohci->card, true, true);
+
+       return 0;
 }
 
 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
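The firewire-ohci rework makes the pending-update handling explicit: if no update is queued, the freshly allocated buffer becomes ohci->next_config_rom and the local pointer is cleared so it is not freed; if one is already queued, the new data is copied into the pending buffer and the unused allocation is released once the lock is dropped. A simplified user-space sketch of that ownership transfer, with malloc standing in for DMA-coherent memory:

/* Sketch: install a new buffer unless an update is already pending. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ROM_SIZE 1024

static void *next_config_rom;		/* pending update, if any */

static int set_config_rom(const void *data, size_t len)
{
	void *new_rom = malloc(ROM_SIZE);

	if (!new_rom)
		return -1;

	/* --- normally done under the controller lock --- */
	if (!next_config_rom) {
		next_config_rom = new_rom;	/* ownership moves to the device */
		new_rom = NULL;			/* so it is not freed below */
	}
	memcpy(next_config_rom, data, len);	/* (re)fill the pending buffer */
	/* --- lock dropped here --- */

	free(new_rom);		/* only frees the unused allocation, if any */
	printf("update queued at %p\n", next_config_rom);
	return 0;
}

int main(void)
{
	set_config_rom("first", 6);	/* installs the buffer */
	set_config_rom("second", 7);	/* reuses the pending buffer */
	return 0;
}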
index 11d7a72c22d9141697697db81b375e15a8a15f0b..140b9525b48a47a83cf159c010f9818969ee3877 100644 (file)
@@ -1516,17 +1516,33 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
 }
 EXPORT_SYMBOL(drm_fb_helper_initial_config);
 
-bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
+/**
+ * drm_fb_helper_hotplug_event - respond to a hotplug notification by
+ *                               probing all the outputs attached to the fb.
+ * @fb_helper: the drm_fb_helper
+ *
+ * LOCKING:
+ * Called at runtime, must take mode config lock.
+ *
+ * Scan the connectors attached to the fb_helper and try to put together a
+ * setup after *notification of a change in output configuration.
+ *
+ * RETURNS:
+ * 0 on success and a non-zero error code otherwise.
+ */
+int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
 {
+       struct drm_device *dev = fb_helper->dev;
        int count = 0;
        u32 max_width, max_height, bpp_sel;
        bool bound = false, crtcs_bound = false;
        struct drm_crtc *crtc;
 
        if (!fb_helper->fb)
-               return false;
+               return 0;
 
-       list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) {
+       mutex_lock(&dev->mode_config.mutex);
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                if (crtc->fb)
                        crtcs_bound = true;
                if (crtc->fb == fb_helper->fb)
@@ -1535,7 +1551,8 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
 
        if (!bound && crtcs_bound) {
                fb_helper->delayed_hotplug = true;
-               return false;
+               mutex_unlock(&dev->mode_config.mutex);
+               return 0;
        }
        DRM_DEBUG_KMS("\n");
 
@@ -1546,6 +1563,7 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
        count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
                                                    max_height);
        drm_setup_crtcs(fb_helper);
+       mutex_unlock(&dev->mode_config.mutex);
 
        return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
 }
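Besides changing the return type, drm_fb_helper_hotplug_event() now takes dev->mode_config.mutex itself, so each early return has to drop the lock before leaving. A tiny pthread sketch of that lock-on-entry, unlock-on-every-exit shape, illustrative only:

/* Sketch: take a lock on entry and release it on every exit path. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mode_config_lock = PTHREAD_MUTEX_INITIALIZER;
static int fb_bound;

static int hotplug_event(void)
{
	pthread_mutex_lock(&mode_config_lock);

	if (!fb_bound) {
		/* nothing to do, but the lock still has to be dropped */
		pthread_mutex_unlock(&mode_config_lock);
		return 0;
	}

	printf("re-probing outputs\n");
	pthread_mutex_unlock(&mode_config_lock);
	return 0;
}

int main(void)
{
	hotplug_event();	/* early-return path */
	fb_bound = 1;
	hotplug_event();	/* full path */
	return 0;
}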
index 741457bd1c46ef1f54af12296e348a053430329e..a1f12cb043deb233484f9a4957300fa475c3a024 100644 (file)
@@ -932,11 +932,34 @@ EXPORT_SYMBOL(drm_vblank_put);
 
 void drm_vblank_off(struct drm_device *dev, int crtc)
 {
+       struct drm_pending_vblank_event *e, *t;
+       struct timeval now;
        unsigned long irqflags;
+       unsigned int seq;
 
        spin_lock_irqsave(&dev->vbl_lock, irqflags);
        vblank_disable_and_save(dev, crtc);
        DRM_WAKEUP(&dev->vbl_queue[crtc]);
+
+       /* Send any queued vblank events, lest the natives grow disquiet */
+       seq = drm_vblank_count_and_time(dev, crtc, &now);
+       list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+               if (e->pipe != crtc)
+                       continue;
+               DRM_DEBUG("Sending premature vblank event on disable: \
+                         wanted %d, current %d\n",
+                         e->event.sequence, seq);
+
+               e->event.sequence = seq;
+               e->event.tv_sec = now.tv_sec;
+               e->event.tv_usec = now.tv_usec;
+               drm_vblank_put(dev, e->pipe);
+               list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+               wake_up_interruptible(&e->base.file_priv->event_wait);
+               trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
+                                                e->event.sequence);
+       }
+
        spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 }
 EXPORT_SYMBOL(drm_vblank_off);
index 5d00b0fc0d91d837f6aa47b49901cb04bd6db1c0..959186cbf3280b90b911ff54373531ee577eef97 100644 (file)
@@ -431,7 +431,7 @@ EXPORT_SYMBOL(drm_mm_search_free_in_range);
 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
 {
        list_replace(&old->node_list, &new->node_list);
-       list_replace(&old->node_list, &new->hole_stack);
+       list_replace(&old->hole_stack, &new->hole_stack);
        new->hole_follows = old->hole_follows;
        new->mm = old->mm;
        new->start = old->start;
@@ -699,8 +699,8 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
                                entry->size);
                total_used += entry->size;
                if (entry->hole_follows) {
-                       hole_start = drm_mm_hole_node_start(&mm->head_node);
-                       hole_end = drm_mm_hole_node_end(&mm->head_node);
+                       hole_start = drm_mm_hole_node_start(entry);
+                       hole_end = drm_mm_hole_node_end(entry);
                        hole_size = hole_end - hole_start;
                        seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
                                        hole_start, hole_end, hole_size);
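
The first hunk above cures a copy-and-paste slip: drm_mm_replace_node() called list_replace() twice on old->node_list, so new->hole_stack was never spliced in. list_replace(old, new) puts new into the position old occupied on one list, which means a node embedded in two lists needs one call per embedded list_head. A minimal sketch with hypothetical names:

#include <linux/list.h>

struct two_list_node {
	struct list_head node_list;
	struct list_head hole_stack;
};

static void replace_node(struct two_list_node *old, struct two_list_node *new)
{
	/* each embedded list_head must be replaced by its own counterpart */
	list_replace(&old->node_list,  &new->node_list);
	list_replace(&old->hole_stack, &new->hole_stack);
}
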
index c34a8dd31d02fdc2111070e0589f9a02e16eb4a8..32d1b3e829c8d35433f0b170052e9be96f358a1c 100644 (file)
@@ -49,7 +49,7 @@ module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
 unsigned int i915_powersave = 1;
 module_param_named(powersave, i915_powersave, int, 0600);
 
-unsigned int i915_semaphores = 1;
+unsigned int i915_semaphores = 0;
 module_param_named(semaphores, i915_semaphores, int, 0600);
 
 unsigned int i915_enable_rc6 = 0;
index e522c702b04e6387a07ae07f5917fb81835705ac..2166ee071ddbe99655c3c977beecec68bb6a7a19 100644 (file)
@@ -5605,9 +5605,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
        intel_clock_t clock;
 
        if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
-               fp = FP0(pipe);
+               fp = I915_READ(FP0(pipe));
        else
-               fp = FP1(pipe);
+               fp = I915_READ(FP1(pipe));
 
        clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
        if (IS_PINEVIEW(dev)) {
@@ -6579,8 +6579,10 @@ intel_user_framebuffer_create(struct drm_device *dev,
                return ERR_PTR(-ENOENT);
 
        intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
-       if (!intel_fb)
+       if (!intel_fb) {
+               drm_gem_object_unreference_unlocked(&obj->base);
                return ERR_PTR(-ENOMEM);
+       }
 
        ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
        if (ret) {
index cb8578b7e4432c1aad04f79324be3d588c1e74e0..a4d80314e7f8cf5edb13bce97328a60f1f26f6ce 100644 (file)
@@ -1470,7 +1470,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 
        if (!HAS_PCH_CPT(dev) &&
            I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
-               struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
+               struct drm_crtc *crtc = intel_dp->base.base.crtc;
+
                /* Hardware workaround: leaving our transcoder select
                 * set to transcoder B while it's off will prevent the
                 * corresponding HDMI output on transcoder A.
@@ -1485,7 +1486,19 @@ intel_dp_link_down(struct intel_dp *intel_dp)
                /* Changes to enable or select take place the vblank
                 * after being written.
                 */
-               intel_wait_for_vblank(dev, intel_crtc->pipe);
+               if (crtc == NULL) {
+                       /* We can arrive here never having been attached
+                        * to a CRTC, for instance, due to inheriting
+                        * random state from the BIOS.
+                        *
+                        * If the pipe is not running, play safe and
+                        * wait for the clocks to stabilise before
+                        * continuing.
+                        */
+                       POSTING_READ(intel_dp->output_reg);
+                       msleep(50);
+               } else
+                       intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
        }
 
        I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
index a562bd2648c7bb76f2db4df8a167f755db57ae1b..67cb076d271b5eb00a4c87a9022f0957f3ff569e 100644 (file)
@@ -539,6 +539,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
        struct drm_device *dev = dev_priv->dev;
        struct drm_connector *connector = dev_priv->int_lvds_connector;
 
+       if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+               return NOTIFY_OK;
+
        /*
         * check and update the status of LVDS connector after receiving
         * the LID nofication event.
index 5045f8b921d644088ef4d8345dda673fadfba424..c3e953b089923d552de922e8007c13e1e8eb94e5 100644 (file)
@@ -152,8 +152,6 @@ nouveau_mem_vram_fini(struct drm_device *dev)
 {
        struct drm_nouveau_private *dev_priv = dev->dev_private;
 
-       nouveau_bo_ref(NULL, &dev_priv->vga_ram);
-
        ttm_bo_device_release(&dev_priv->ttm.bdev);
 
        nouveau_ttm_global_release(dev_priv);
index 4bce801bc588ffd04e1cdd3800e2e82ebf33008f..c77111eca6acefc8ab31acb7bb060547b64b5f7a 100644 (file)
@@ -42,7 +42,8 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 
        nvbe->nr_pages = 0;
        while (num_pages--) {
-               if (dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE) {
+               /* this code path isn't called and is incorrect anyways */
+               if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/
                        nvbe->pages[nvbe->nr_pages] =
                                        dma_addrs[nvbe->nr_pages];
                        nvbe->ttm_alloced[nvbe->nr_pages] = true;
index a30adec5beaa7c1149a0089e688fc71832debb7b..915fbce8959517f4c51a9114a8224617ecbe265b 100644 (file)
@@ -768,6 +768,11 @@ static void nouveau_card_takedown(struct drm_device *dev)
        engine->mc.takedown(dev);
        engine->display.late_takedown(dev);
 
+       if (dev_priv->vga_ram) {
+               nouveau_bo_unpin(dev_priv->vga_ram);
+               nouveau_bo_ref(NULL, &dev_priv->vga_ram);
+       }
+
        mutex_lock(&dev->struct_mutex);
        ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
index e9bc135d918927744931f56fef55cab229215440..9073e3bfb08c7cde39ee411700e0ba838611fc2c 100644 (file)
@@ -862,9 +862,15 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
                SYSTEM_ACCESS_MODE_NOT_IN_SYS |
                SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
                EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
-       WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
-       WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
-       WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+       if (rdev->flags & RADEON_IS_IGP) {
+               WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
+               WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
+               WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
+       } else {
+               WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+               WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+               WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+       }
        WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
@@ -1774,7 +1780,10 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
 
 
        mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
-       mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
+       if (rdev->flags & RADEON_IS_IGP)
+               mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
+       else
+               mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
        switch (rdev->config.evergreen.max_tile_pipes) {
        case 1:
@@ -2923,11 +2932,6 @@ static int evergreen_startup(struct radeon_device *rdev)
                rdev->asic->copy = NULL;
                dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
        }
-       /* XXX: ontario has problems blitting to gart at the moment */
-       if (rdev->family == CHIP_PALM) {
-               rdev->asic->copy = NULL;
-               radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
-       }
 
        /* allocate wb buffer */
        r = radeon_wb_init(rdev);
index 9aaa3f0c9372135e8d55c0cb79e925f1656264e1..fc40e0cc34516bb96af19f1a7615b6889a7425e3 100644 (file)
 #define                BURSTLENGTH_SHIFT                               9
 #define                BURSTLENGTH_MASK                                0x00000200
 #define                CHANSIZE_OVERRIDE                               (1 << 11)
+#define        FUS_MC_ARB_RAMCFG                               0x2768
 #define        MC_VM_AGP_TOP                                   0x2028
 #define        MC_VM_AGP_BOT                                   0x202C
 #define        MC_VM_AGP_BASE                                  0x2030
 #define        MC_VM_MD_L1_TLB0_CNTL                           0x2654
 #define        MC_VM_MD_L1_TLB1_CNTL                           0x2658
 #define        MC_VM_MD_L1_TLB2_CNTL                           0x265C
+
+#define        FUS_MC_VM_MD_L1_TLB0_CNTL                       0x265C
+#define        FUS_MC_VM_MD_L1_TLB1_CNTL                       0x2660
+#define        FUS_MC_VM_MD_L1_TLB2_CNTL                       0x2664
+
 #define        MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR              0x203C
 #define        MC_VM_SYSTEM_APERTURE_HIGH_ADDR                 0x2038
 #define        MC_VM_SYSTEM_APERTURE_LOW_ADDR                  0x2034
index 7aade20f63a8c5fb932776a084aceffe5366faff..3d8a7634bbe99ade0344aeb2061bc5f3ac183757 100644 (file)
@@ -674,7 +674,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 
        cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
        cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
-       cgts_tcc_disable = RREG32(CGTS_TCC_DISABLE);
+       cgts_tcc_disable = 0xff000000;
        gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
        gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
        cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
@@ -871,7 +871,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 
        smx_dc_ctl0 = RREG32(SMX_DC_CTL0);
        smx_dc_ctl0 &= ~NUMBER_OF_SETS(0x1ff);
-       smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
+       smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.cayman.sx_num_of_sets);
        WREG32(SMX_DC_CTL0, smx_dc_ctl0);
 
        WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4) | CRC_SIMD_ID_WADDR_DISABLE);
@@ -887,20 +887,20 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 
        WREG32(TA_CNTL_AUX, DISABLE_CUBE_ANISO);
 
-       WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
-                                       POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
-                                       SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
+       WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.cayman.sx_max_export_size / 4) - 1) |
+                                       POSITION_BUFFER_SIZE((rdev->config.cayman.sx_max_export_pos_size / 4) - 1) |
+                                       SMX_BUFFER_SIZE((rdev->config.cayman.sx_max_export_smx_size / 4) - 1)));
 
-       WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.evergreen.sc_prim_fifo_size) |
-                                SC_HIZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_hiz_tile_fifo_size) |
-                                SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.evergreen.sc_earlyz_tile_fifo_size)));
+       WREG32(PA_SC_FIFO_SIZE, (SC_PRIM_FIFO_SIZE(rdev->config.cayman.sc_prim_fifo_size) |
+                                SC_HIZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_hiz_tile_fifo_size) |
+                                SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.cayman.sc_earlyz_tile_fifo_size)));
 
 
        WREG32(VGT_NUM_INSTANCES, 1);
 
        WREG32(CP_PERFMON_CNTL, 0);
 
-       WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.evergreen.sq_num_cf_insts) |
+       WREG32(SQ_MS_FIFO_SIZES, (CACHE_FIFO_SIZE(16 * rdev->config.cayman.sq_num_cf_insts) |
                                  FETCH_FIFO_HIWATER(0x4) |
                                  DONE_FIFO_HIWATER(0xe0) |
                                  ALU_UPDATE_FIFO_HIWATER(0x8)));
index f5d12fb103fadd982d6b3ae4d2ad5a227ee1f50b..90dfb2b8cf0318529b28b84b0bfd831764e5d59e 100644 (file)
@@ -431,7 +431,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
                }
        }
 
-       /* Acer laptop (Acer TravelMate 5730G) has an HDMI port
+       /* Acer laptop (Acer TravelMate 5730/5730G) has an HDMI port
         * on the laptop and a DVI port on the docking station and
         * both share the same encoder, hpd pin, and ddc line.
         * So while the bios table is technically correct,
@@ -440,7 +440,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
         * with different crtcs which isn't possible on the hardware
         * side and leaves no crtcs for LVDS or VGA.
         */
-       if ((dev->pdev->device == 0x95c4) &&
+       if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) &&
            (dev->pdev->subsystem_vendor == 0x1025) &&
            (dev->pdev->subsystem_device == 0x013c)) {
                if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
@@ -1574,9 +1574,17 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
                        ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
                        ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
                        bool bad_record = false;
-                       u8 *record = (u8 *)(mode_info->atom_context->bios +
-                                           data_offset +
-                                           le16_to_cpu(lvds_info->info.usModePatchTableOffset));
+                       u8 *record;
+
+                       if ((frev == 1) && (crev < 2))
+                               /* absolute */
+                               record = (u8 *)(mode_info->atom_context->bios +
+                                               le16_to_cpu(lvds_info->info.usModePatchTableOffset));
+                       else
+                               /* relative */
+                               record = (u8 *)(mode_info->atom_context->bios +
+                                               data_offset +
+                                               le16_to_cpu(lvds_info->info.usModePatchTableOffset));
                        while (*record != ATOM_RECORD_END_TYPE) {
                                switch (*record) {
                                case LCD_MODE_PATCH_RECORD_MODE_TYPE:
@@ -1599,9 +1607,10 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
                                                        memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
                                                               fake_edid_record->ucFakeEDIDLength);
 
-                                                       if (drm_edid_is_valid(edid))
+                                                       if (drm_edid_is_valid(edid)) {
                                                                rdev->mode_info.bios_hardcoded_edid = edid;
-                                                       else
+                                                               rdev->mode_info.bios_hardcoded_edid_size = edid_size;
+                                                       } else
                                                                kfree(edid);
                                                }
                                        }
index ed5dfe58f29c0845e8eb8a478560b9a01fe17677..9d95792bea3eab3c961b1221918beb1e4378d2f9 100644 (file)
@@ -15,6 +15,9 @@
 #define ATPX_VERSION 0
 #define ATPX_GPU_PWR 2
 #define ATPX_MUX_SELECT 3
+#define ATPX_I2C_MUX_SELECT 4
+#define ATPX_SWITCH_START 5
+#define ATPX_SWITCH_END 6
 
 #define ATPX_INTEGRATED 0
 #define ATPX_DISCRETE 1
@@ -149,13 +152,35 @@ static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id)
        return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id);
 }
 
+static int radeon_atpx_switch_i2c_mux(acpi_handle handle, int mux_id)
+{
+       return radeon_atpx_execute(handle, ATPX_I2C_MUX_SELECT, mux_id);
+}
+
+static int radeon_atpx_switch_start(acpi_handle handle, int gpu_id)
+{
+       return radeon_atpx_execute(handle, ATPX_SWITCH_START, gpu_id);
+}
+
+static int radeon_atpx_switch_end(acpi_handle handle, int gpu_id)
+{
+       return radeon_atpx_execute(handle, ATPX_SWITCH_END, gpu_id);
+}
 
 static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
 {
+       int gpu_id;
+
        if (id == VGA_SWITCHEROO_IGD)
-               radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 0);
+               gpu_id = ATPX_INTEGRATED;
        else
-               radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 1);
+               gpu_id = ATPX_DISCRETE;
+
+       radeon_atpx_switch_start(radeon_atpx_priv.atpx_handle, gpu_id);
+       radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, gpu_id);
+       radeon_atpx_switch_i2c_mux(radeon_atpx_priv.atpx_handle, gpu_id);
+       radeon_atpx_switch_end(radeon_atpx_priv.atpx_handle, gpu_id);
+
        return 0;
 }
 
index bdf2fa1189ae7a3c1aa5e1ae41a1ded25c96b9f7..3189a7efb2e97e0ee25f111db13e42c5b41c7c76 100644 (file)
@@ -167,9 +167,6 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
                return -EINVAL;
        }
 
-       radeon_crtc->cursor_width = width;
-       radeon_crtc->cursor_height = height;
-
        obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
        if (!obj) {
                DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
@@ -180,6 +177,9 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
        if (ret)
                goto fail;
 
+       radeon_crtc->cursor_width = width;
+       radeon_crtc->cursor_height = height;
+
        radeon_lock_cursor(crtc, true);
        /* XXX only 27 bit offset for legacy cursor */
        radeon_set_cursor(crtc, obj, gpu_addr);
index 8a955bbdb6082521c725b81bbf5ee0e34e7934b6..a533f52fd163b51c3c89c6f501792e167b9a727a 100644 (file)
@@ -181,9 +181,9 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
        p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
        for (i = 0; i < pages; i++, p++) {
-               /* On TTM path, we only use the DMA API if TTM_PAGE_FLAG_DMA32
-                * is requested. */
-               if (dma_addr[i] != DMA_ERROR_CODE) {
+               /* we reverted the patch using dma_addr in TTM for now but this
+                * code stops building on alpha so just comment it out for now */
+               if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */
                        rdev->gart.ttm_alloced[p] = true;
                        rdev->gart.pages_addr[p] = dma_addr[i];
                } else {
index 871df0376b1c9cbc604ef0620289756c8ee525d4..bd58af658581b3d3672aa363f7e5c7418abaaefa 100644 (file)
@@ -234,6 +234,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        return -EINVAL;
                }
                break;
+       case RADEON_INFO_FUSION_GART_WORKING:
+               value = 1;
+               break;
        default:
                DRM_DEBUG_KMS("Invalid request %d\n", info->request);
                return -EINVAL;
index 6334f8ac12093af149581ee95d080ff6e84fe249..0aa8e85a94575f4c092affcb2dc3e08bd8717bdf 100644 (file)
@@ -33,6 +33,7 @@ cayman 0x9400
 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
 0x00009100 SPI_CONFIG_CNTL
 0x0000913C SPI_CONFIG_CNTL_1
+0x00009508 TA_CNTL_AUX
 0x00009830 DB_DEBUG
 0x00009834 DB_DEBUG2
 0x00009838 DB_DEBUG3
index 7e1637176e082ba15ecbc832ba3d951d878a06d4..0e28cae7ea43a713c93c5b4277ee4c63c1ec4b41 100644 (file)
@@ -46,6 +46,7 @@ evergreen 0x9400
 0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
 0x00009100 SPI_CONFIG_CNTL
 0x0000913C SPI_CONFIG_CNTL_1
+0x00009508 TA_CNTL_AUX
 0x00009700 VC_CNTL
 0x00009714 VC_ENHANCE
 0x00009830 DB_DEBUG
index e01cacba685f771f0f82a660b1e1bc21cd0b40e8..498b284e5ef955d11b3ccc3b4a9aaad7bd275df6 100644 (file)
@@ -219,9 +219,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
        int i;
        struct vga_switcheroo_client *active = NULL;
 
-       if (new_client->active == true)
-               return 0;
-
        for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
                if (vgasr_priv.clients[i].active == true) {
                        active = &vgasr_priv.clients[i];
@@ -372,6 +369,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
                goto out;
        }
 
+       if (client->active == true)
+               goto out;
+
        /* okay we want a switch - test if devices are willing to switch */
        can_switch = true;
        for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
index de5819199e2e35cd65d8b2816431328a3ddd58cf..57240740b161d190329b5ca3b7dfaf0d049ef02e 100644 (file)
@@ -98,7 +98,6 @@ static const struct attribute_group twl4030_madc_group = {
 static int __devinit twl4030_madc_hwmon_probe(struct platform_device *pdev)
 {
        int ret;
-       int status;
        struct device *hwmon;
 
        ret = sysfs_create_group(&pdev->dev.kobj, &twl4030_madc_group);
@@ -107,7 +106,7 @@ static int __devinit twl4030_madc_hwmon_probe(struct platform_device *pdev)
        hwmon = hwmon_device_register(&pdev->dev);
        if (IS_ERR(hwmon)) {
                dev_err(&pdev->dev, "hwmon_device_register failed.\n");
-               status = PTR_ERR(hwmon);
+               ret = PTR_ERR(hwmon);
                goto err_reg;
        }
 
index 72c0415f6f944922470ad962361c086ac4290809..455e909bc768637a1a7f6d793f626baef082f46d 100644 (file)
                                 SMBHSTSTS_BUS_ERR | SMBHSTSTS_DEV_ERR | \
                                 SMBHSTSTS_INTR)
 
+/* Older devices have their ID defined in <linux/pci_ids.h> */
+#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS  0x1c22
+#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS     0x1d22
 /* Patsburg also has three 'Integrated Device Function' SMBus controllers */
 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0        0x1d70
 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1        0x1d71
 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2        0x1d72
+#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS     0x2330
+#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS        0x3b30
 
 struct i801_priv {
        struct i2c_adapter adapter;
index 75b984c519acd18750f5ce2b6002e60c152c5b21..107397a606b48957638a9569c2672ba363a5c57d 100644 (file)
@@ -560,15 +560,18 @@ static struct i2c_adapter mpc_ops = {
        .timeout = HZ,
 };
 
+static const struct of_device_id mpc_i2c_of_match[];
 static int __devinit fsl_i2c_probe(struct platform_device *op)
 {
+       const struct of_device_id *match;
        struct mpc_i2c *i2c;
        const u32 *prop;
        u32 clock = MPC_I2C_CLOCK_LEGACY;
        int result = 0;
        int plen;
 
-       if (!op->dev.of_match)
+       match = of_match_device(mpc_i2c_of_match, &op->dev);
+       if (!match)
                return -EINVAL;
 
        i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
@@ -605,8 +608,8 @@ static int __devinit fsl_i2c_probe(struct platform_device *op)
                        clock = *prop;
        }
 
-       if (op->dev.of_match->data) {
-               struct mpc_i2c_data *data = op->dev.of_match->data;
+       if (match->data) {
+               struct mpc_i2c_data *data = match->data;
                data->setup(op->dev.of_node, i2c, clock, data->prescaler);
        } else {
                /* Backwards compatibility */
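
The probe no longer reads the removed op->dev.of_match pointer; it looks the device up in a forward-declared match table with of_match_device() and takes per-compatible data from the returned entry (the sdhci-of hunk further down makes the same conversion). A minimal sketch of the pattern with hypothetical driver names:

#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct of_device_id foo_of_match[];	/* table defined below */

static int foo_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;

	match = of_match_device(foo_of_match, &pdev->dev);
	if (!match)
		return -EINVAL;

	/* match->data would carry per-compatible driver data, if the table set any */
	dev_info(&pdev->dev, "matched %s\n", match->compatible);
	return 0;
}

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo" },		/* hypothetical compatible string */
	{},
};
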
index 0eb1515541e72446a4e81e3bf54702bcf0635347..2dbba163b10202cf51aba175f7ffbf9cfd57aea2 100644 (file)
@@ -1,7 +1,7 @@
 /* ------------------------------------------------------------------------ *
  * i2c-parport.c I2C bus over parallel port                                 *
  * ------------------------------------------------------------------------ *
-   Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
+   Copyright (C) 2003-2011 Jean Delvare <khali@linux-fr.org>
    
    Based on older i2c-philips-par.c driver
    Copyright (C) 1995-2000 Simon G. Vogl
@@ -33,6 +33,8 @@
 #include <linux/i2c-algo-bit.h>
 #include <linux/i2c-smbus.h>
 #include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
 #include "i2c-parport.h"
 
 /* ----- Device list ------------------------------------------------------ */
@@ -43,10 +45,11 @@ struct i2c_par {
        struct i2c_algo_bit_data algo_data;
        struct i2c_smbus_alert_setup alert_data;
        struct i2c_client *ara;
-       struct i2c_par *next;
+       struct list_head node;
 };
 
-static struct i2c_par *adapter_list;
+static LIST_HEAD(adapter_list);
+static DEFINE_MUTEX(adapter_list_lock);
 
 /* ----- Low-level parallel port access ----------------------------------- */
 
@@ -228,8 +231,9 @@ static void i2c_parport_attach (struct parport *port)
        }
 
        /* Add the new adapter to the list */
-       adapter->next = adapter_list;
-       adapter_list = adapter;
+       mutex_lock(&adapter_list_lock);
+       list_add_tail(&adapter->node, &adapter_list);
+       mutex_unlock(&adapter_list_lock);
         return;
 
 ERROR1:
@@ -241,11 +245,11 @@ ERROR0:
 
 static void i2c_parport_detach (struct parport *port)
 {
-       struct i2c_par *adapter, *prev;
+       struct i2c_par *adapter, *_n;
 
        /* Walk the list */
-       for (prev = NULL, adapter = adapter_list; adapter;
-            prev = adapter, adapter = adapter->next) {
+       mutex_lock(&adapter_list_lock);
+       list_for_each_entry_safe(adapter, _n, &adapter_list, node) {
                if (adapter->pdev->port == port) {
                        if (adapter->ara) {
                                parport_disable_irq(port);
@@ -259,14 +263,11 @@ static void i2c_parport_detach (struct parport *port)
                                
                        parport_release(adapter->pdev);
                        parport_unregister_device(adapter->pdev);
-                       if (prev)
-                               prev->next = adapter->next;
-                       else
-                               adapter_list = adapter->next;
+                       list_del(&adapter->node);
                        kfree(adapter);
-                       return;
                }
        }
+       mutex_unlock(&adapter_list_lock);
 }
 
 static struct parport_driver i2c_parport_driver = {
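
The driver's hand-rolled singly linked adapter list (struct i2c_par *next) becomes a list_head guarded by a mutex, and detach switches to the _safe iterator because it frees entries while walking. The idiom in isolation, with hypothetical names:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct item {
	int id;
	struct list_head node;
};

static LIST_HEAD(item_list);
static DEFINE_MUTEX(item_list_lock);

static void add_item(struct item *it)
{
	mutex_lock(&item_list_lock);
	list_add_tail(&it->node, &item_list);
	mutex_unlock(&item_list_lock);
}

static void remove_items(int id)
{
	struct item *it, *tmp;

	mutex_lock(&item_list_lock);
	/* _safe variant: 'it' may be freed while the list is being walked */
	list_for_each_entry_safe(it, tmp, &item_list, node) {
		if (it->id == id) {
			list_del(&it->node);
			kfree(it);
		}
	}
	mutex_unlock(&item_list_lock);
}
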
index a97e3fec814826c8676f60743025f0ba6988ca0c..04be9f82e14bda8518c4d9562c680731e42c23b3 100644 (file)
@@ -65,7 +65,7 @@ static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data)
                jiffies, expires);
 
        timer->expires = jiffies + expires;
-       timer->data = (unsigned long)&alg_data;
+       timer->data = (unsigned long)alg_data;
 
        add_timer(timer);
 }
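
The one-character fix stores the pointer value in timer->data rather than &alg_data, the address of a local variable that vanishes when i2c_pnx_arm_timer() returns. A sketch of the intended usage with the data-cookie timer API of this era (the timer is assumed to have been initialised elsewhere; names are hypothetical):

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct foo {
	struct timer_list timer;
};

static void foo_timeout(unsigned long data)
{
	struct foo *foo = (struct foo *)data;	/* the cookie holds the object itself */

	pr_debug("timeout for %p\n", foo);
}

static void foo_arm_timer(struct foo *foo)
{
	foo->timer.function = foo_timeout;
	/*
	 * (unsigned long)foo survives; (unsigned long)&foo would be the
	 * address of this function's stack slot, dead after return.
	 */
	foo->timer.data = (unsigned long)foo;
	mod_timer(&foo->timer, jiffies + HZ);
}
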
index c24946f512564e35925d0e6c090b7fd4a6c4eccd..1de1c19dad30f2e0ef089f0901e8d6d0b2584b43 100644 (file)
@@ -281,17 +281,24 @@ struct ser_req {
        u8                      command;
        u8                      ref_off;
        u16                     scratch;
-       __be16                  sample;
        struct spi_message      msg;
        struct spi_transfer     xfer[6];
+       /*
+        * DMA (thus cache coherency maintenance) requires the
+        * transfer buffers to live in their own cache lines.
+        */
+       __be16 sample ____cacheline_aligned;
 };
 
 struct ads7845_ser_req {
        u8                      command[3];
-       u8                      pwrdown[3];
-       u8                      sample[3];
        struct spi_message      msg;
        struct spi_transfer     xfer[2];
+       /*
+        * DMA (thus cache coherency maintenance) requires the
+        * transfer buffers to live in their own cache lines.
+        */
+       u8 sample[3] ____cacheline_aligned;
 };
 
 static int ads7846_read12_ser(struct device *dev, unsigned command)
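
Both request structures move their SPI receive buffers to the end of the struct and tag them ____cacheline_aligned: a buffer written by a DMA-capable bus must not share a cache line with fields the CPU touches, or the cache maintenance done for the transfer can corrupt the neighbours. A minimal sketch of the layout rule with a hypothetical struct:

#include <linux/types.h>
#include <linux/cache.h>
#include <linux/completion.h>

struct sample_req {
	u8			command;	/* CPU-side bookkeeping */
	struct completion	done;
	/*
	 * DMA-visible receive buffer: placed last and cacheline aligned so
	 * it never shares a cache line with the members above.
	 */
	__be16			sample ____cacheline_aligned;
};
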
index 6ae054f8e0aa50b5e07f7d43f21f582bb19cf98f..9175d49d25469d0eaa7794960580c14d7740b9a9 100644 (file)
@@ -68,8 +68,23 @@ struct wm831x_ts {
        unsigned int pd_irq;
        bool pressure;
        bool pen_down;
+       struct work_struct pd_data_work;
 };
 
+static void wm831x_pd_data_work(struct work_struct *work)
+{
+       struct wm831x_ts *wm831x_ts =
+               container_of(work, struct wm831x_ts, pd_data_work);
+
+       if (wm831x_ts->pen_down) {
+               enable_irq(wm831x_ts->data_irq);
+               dev_dbg(wm831x_ts->wm831x->dev, "IRQ PD->DATA done\n");
+       } else {
+               enable_irq(wm831x_ts->pd_irq);
+               dev_dbg(wm831x_ts->wm831x->dev, "IRQ DATA->PD done\n");
+       }
+}
+
 static irqreturn_t wm831x_ts_data_irq(int irq, void *irq_data)
 {
        struct wm831x_ts *wm831x_ts = irq_data;
@@ -110,6 +125,9 @@ static irqreturn_t wm831x_ts_data_irq(int irq, void *irq_data)
        }
 
        if (!wm831x_ts->pen_down) {
+               /* Switch from data to pen down */
+               dev_dbg(wm831x->dev, "IRQ DATA->PD\n");
+
                disable_irq_nosync(wm831x_ts->data_irq);
 
                /* Don't need data any more */
@@ -128,6 +146,10 @@ static irqreturn_t wm831x_ts_data_irq(int irq, void *irq_data)
                                         ABS_PRESSURE, 0);
 
                input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 0);
+
+               schedule_work(&wm831x_ts->pd_data_work);
+       } else {
+               input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 1);
        }
 
        input_sync(wm831x_ts->input_dev);
@@ -141,6 +163,11 @@ static irqreturn_t wm831x_ts_pen_down_irq(int irq, void *irq_data)
        struct wm831x *wm831x = wm831x_ts->wm831x;
        int ena = 0;
 
+       if (wm831x_ts->pen_down)
+               return IRQ_HANDLED;
+
+       disable_irq_nosync(wm831x_ts->pd_irq);
+
        /* Start collecting data */
        if (wm831x_ts->pressure)
                ena |= WM831X_TCH_Z_ENA;
@@ -149,14 +176,14 @@ static irqreturn_t wm831x_ts_pen_down_irq(int irq, void *irq_data)
                        WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | WM831X_TCH_Z_ENA,
                        WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | ena);
 
-       input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 1);
-       input_sync(wm831x_ts->input_dev);
-
        wm831x_set_bits(wm831x, WM831X_INTERRUPT_STATUS_1,
                        WM831X_TCHPD_EINT, WM831X_TCHPD_EINT);
 
        wm831x_ts->pen_down = true;
-       enable_irq(wm831x_ts->data_irq);
+
+       /* Switch from pen down to data */
+       dev_dbg(wm831x->dev, "IRQ PD->DATA\n");
+       schedule_work(&wm831x_ts->pd_data_work);
 
        return IRQ_HANDLED;
 }
@@ -182,13 +209,28 @@ static void wm831x_ts_input_close(struct input_dev *idev)
        struct wm831x_ts *wm831x_ts = input_get_drvdata(idev);
        struct wm831x *wm831x = wm831x_ts->wm831x;
 
+       /* Shut the controller down, disabling all other functionality too */
        wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
-                       WM831X_TCH_ENA | WM831X_TCH_CVT_ENA |
-                       WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA |
-                       WM831X_TCH_Z_ENA, 0);
+                       WM831X_TCH_ENA | WM831X_TCH_X_ENA |
+                       WM831X_TCH_Y_ENA | WM831X_TCH_Z_ENA, 0);
 
-       if (wm831x_ts->pen_down)
+       /* Make sure any pending IRQs are done, the above will prevent
+        * new ones firing.
+        */
+       synchronize_irq(wm831x_ts->data_irq);
+       synchronize_irq(wm831x_ts->pd_irq);
+
+       /* Make sure the IRQ completion work is quiesced */
+       flush_work_sync(&wm831x_ts->pd_data_work);
+
+       /* If we ended up with the pen down then make sure we revert back
+        * to pen detection state for the next time we start up.
+        */
+       if (wm831x_ts->pen_down) {
                disable_irq(wm831x_ts->data_irq);
+               enable_irq(wm831x_ts->pd_irq);
+               wm831x_ts->pen_down = false;
+       }
 }
 
 static __devinit int wm831x_ts_probe(struct platform_device *pdev)
@@ -198,7 +240,7 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev)
        struct wm831x_pdata *core_pdata = dev_get_platdata(pdev->dev.parent);
        struct wm831x_touch_pdata *pdata = NULL;
        struct input_dev *input_dev;
-       int error;
+       int error, irqf;
 
        if (core_pdata)
                pdata = core_pdata->touch;
@@ -212,6 +254,7 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev)
 
        wm831x_ts->wm831x = wm831x;
        wm831x_ts->input_dev = input_dev;
+       INIT_WORK(&wm831x_ts->pd_data_work, wm831x_pd_data_work);
 
        /*
         * If we have a direct IRQ use it, otherwise use the interrupt
@@ -270,9 +313,14 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev)
        wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
                        WM831X_TCH_RATE_MASK, 6);
 
+       if (pdata && pdata->data_irqf)
+               irqf = pdata->data_irqf;
+       else
+               irqf = IRQF_TRIGGER_HIGH;
+
        error = request_threaded_irq(wm831x_ts->data_irq,
                                     NULL, wm831x_ts_data_irq,
-                                    IRQF_ONESHOT,
+                                    irqf | IRQF_ONESHOT,
                                     "Touchscreen data", wm831x_ts);
        if (error) {
                dev_err(&pdev->dev, "Failed to request data IRQ %d: %d\n",
@@ -281,9 +329,14 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev)
        }
        disable_irq(wm831x_ts->data_irq);
 
+       if (pdata && pdata->pd_irqf)
+               irqf = pdata->pd_irqf;
+       else
+               irqf = IRQF_TRIGGER_HIGH;
+
        error = request_threaded_irq(wm831x_ts->pd_irq,
                                     NULL, wm831x_ts_pen_down_irq,
-                                    IRQF_ONESHOT,
+                                    irqf | IRQF_ONESHOT,
                                     "Touchscreen pen down", wm831x_ts);
        if (error) {
                dev_err(&pdev->dev, "Failed to request pen down IRQ %d: %d\n",
index e7089a1f6cb6c8bf2e54acc95ef8fac58c03ba59..b37e6186d0fa007e2edc23f8186643b803429b2b 100644 (file)
@@ -349,6 +349,7 @@ static const struct i2c_device_id lm3530_id[] = {
        {LM3530_NAME, 0},
        {}
 };
+MODULE_DEVICE_TABLE(i2c, lm3530_id);
 
 static struct i2c_driver lm3530_i2c_driver = {
        .probe = lm3530_probe,
index ccbd39a38c4607817d12553d2860a7a5817b851e..c545039287ad7b9b374696ddc01a2203dba22a85 100644 (file)
@@ -356,6 +356,8 @@ config DVB_USB_LME2510
        select DVB_TDA826X if !DVB_FE_CUSTOMISE
        select DVB_STV0288 if !DVB_FE_CUSTOMISE
        select DVB_IX2505V if !DVB_FE_CUSTOMISE
+       select DVB_STV0299 if !DVB_FE_CUSTOMISE
+       select DVB_PLL if !DVB_FE_CUSTOMISE
        help
          Say Y here to support the LME DM04/QQBOX DVB-S USB2.0 .
 
index ccc2d1af49d4d65952d543d701d4096fa39666f9..6927c726ce35b4ec1c8139eb838e7822dded28da 100644 (file)
@@ -1520,6 +1520,7 @@ static int init_channel(struct ngene_channel *chan)
        if (dev->ci.en && (io & NGENE_IO_TSOUT)) {
                dvb_ca_en50221_init(adapter, dev->ci.en, 0, 1);
                set_transfer(chan, 1);
+               chan->dev->channel[2].DataFormatFlags = DF_SWAP32;
                set_transfer(&chan->dev->channel[2], 1);
                dvb_register_device(adapter, &chan->ci_dev,
                                    &ngene_dvbdev_ci, (void *) chan,
index 585680ffbfb64b121a4eb01eff27da0346660f6b..b1193dfc5087bb0ed2f5b24e09e9665107330c6f 100644 (file)
@@ -376,7 +376,7 @@ static int __devinit saa7706h_probe(struct i2c_client *client,
        v4l_info(client, "chip found @ 0x%02x (%s)\n",
                        client->addr << 1, client->adapter->name);
 
-       state = kmalloc(sizeof(struct saa7706h_state), GFP_KERNEL);
+       state = kzalloc(sizeof(struct saa7706h_state), GFP_KERNEL);
        if (state == NULL)
                return -ENOMEM;
        sd = &state->sd;
index 7c0d77751f6e66ba5d77a683fb1ba3890c0c4626..0991e1973678ce0751c455cb85249264935415fb 100644 (file)
@@ -176,7 +176,7 @@ static int __devinit tef6862_probe(struct i2c_client *client,
        v4l_info(client, "chip found @ 0x%02x (%s)\n",
                        client->addr << 1, client->adapter->name);
 
-       state = kmalloc(sizeof(struct tef6862_state), GFP_KERNEL);
+       state = kzalloc(sizeof(struct tef6862_state), GFP_KERNEL);
        if (state == NULL)
                return -ENOMEM;
        state->freq = TEF6862_LO_FREQ;
index ebd68edf5b245566338aebe8bc0d86ae0c836bab..8fc0f081b4701ddaf3c7ab0e79b7944f5fede297 100644 (file)
@@ -46,7 +46,7 @@
 #define MOD_AUTHOR     "Jarod Wilson <jarod@wilsonet.com>"
 #define MOD_DESC       "Driver for SoundGraph iMON MultiMedia IR/Display"
 #define MOD_NAME       "imon"
-#define MOD_VERSION    "0.9.2"
+#define MOD_VERSION    "0.9.3"
 
 #define DISPLAY_MINOR_BASE     144
 #define DEVICE_NAME    "lcd%d"
@@ -460,8 +460,9 @@ static int display_close(struct inode *inode, struct file *file)
 }
 
 /**
- * Sends a packet to the device -- this function must be called
- * with ictx->lock held.
+ * Sends a packet to the device -- this function must be called with
+ * ictx->lock held, or its unlock/lock sequence while waiting for tx
+ * to complete can/will lead to a deadlock.
  */
 static int send_packet(struct imon_context *ictx)
 {
@@ -991,12 +992,21 @@ static void imon_touch_display_timeout(unsigned long data)
  * the iMON remotes, and those used by the Windows MCE remotes (which is
  * really just RC-6), but only one or the other at a time, as the signals
  * are decoded onboard the receiver.
+ *
+ * This function gets called two different ways, one way is from
+ * rc_register_device, for initial protocol selection/setup, and the other is
+ * via a userspace-initiated protocol change request, either by direct sysfs
+ * prodding or by something like ir-keytable. In the rc_register_device case,
+ * the imon context lock is already held, but when initiated from userspace,
+ * it is not, so we must acquire it prior to calling send_packet, which
+ * requires that the lock is held.
  */
 static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
 {
        int retval;
        struct imon_context *ictx = rc->priv;
        struct device *dev = ictx->dev;
+       bool unlock = false;
        unsigned char ir_proto_packet[] = {
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };
 
@@ -1029,6 +1039,11 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
 
        memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet));
 
+       if (!mutex_is_locked(&ictx->lock)) {
+               unlock = true;
+               mutex_lock(&ictx->lock);
+       }
+
        retval = send_packet(ictx);
        if (retval)
                goto out;
@@ -1037,6 +1052,9 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
        ictx->pad_mouse = false;
 
 out:
+       if (unlock)
+               mutex_unlock(&ictx->lock);
+
        return retval;
 }
 
@@ -2134,6 +2152,7 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf)
                goto rdev_setup_failed;
        }
 
+       mutex_unlock(&ictx->lock);
        return ictx;
 
 rdev_setup_failed:
@@ -2205,6 +2224,7 @@ static struct imon_context *imon_init_intf1(struct usb_interface *intf,
                goto urb_submit_failed;
        }
 
+       mutex_unlock(&ictx->lock);
        return ictx;
 
 urb_submit_failed:
@@ -2299,6 +2319,8 @@ static int __devinit imon_probe(struct usb_interface *interface,
        usb_set_intfdata(interface, ictx);
 
        if (ifnum == 0) {
+               mutex_lock(&ictx->lock);
+
                if (product == 0xffdc && ictx->rf_device) {
                        sysfs_err = sysfs_create_group(&interface->dev.kobj,
                                                       &imon_rf_attr_group);
@@ -2309,13 +2331,14 @@ static int __devinit imon_probe(struct usb_interface *interface,
 
                if (ictx->display_supported)
                        imon_init_display(ictx, interface);
+
+               mutex_unlock(&ictx->lock);
        }
 
        dev_info(dev, "iMON device (%04x:%04x, intf%d) on "
                 "usb<%d:%d> initialized\n", vendor, product, ifnum,
                 usbdev->bus->busnum, usbdev->devnum);
 
-       mutex_unlock(&ictx->lock);
        mutex_unlock(&driver_lock);
 
        return 0;
index accaf6c9789a6bcae94157be03cd6240e15ad120..43908a70bd8b8df31e6965844883d68004328726 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/io.h>
 #include <linux/interrupt.h>
 #include <linux/sched.h>
+#include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/input.h>
 #include <linux/bitops.h>
index 044fb7a382d6bd888898cc889b78cdba15eeec90..0c273ec465c96142437ae4fe1272aee3a4e5f662 100644 (file)
@@ -220,6 +220,8 @@ static struct usb_device_id mceusb_dev_table[] = {
        { USB_DEVICE(VENDOR_PHILIPS, 0x206c) },
        /* Philips/Spinel plus IR transceiver for ASUS */
        { USB_DEVICE(VENDOR_PHILIPS, 0x2088) },
+       /* Philips IR transceiver (Dell branded) */
+       { USB_DEVICE(VENDOR_PHILIPS, 0x2093) },
        /* Realtek MCE IR Receiver and card reader */
        { USB_DEVICE(VENDOR_REALTEK, 0x0161),
          .driver_info = MULTIFUNCTION },
index f53f9c68d38dcddca8842a05e2e904b7174ae10a..a2706648e3651f30468310fcb9213bd7af7c4658 100644 (file)
@@ -707,7 +707,8 @@ static void ir_close(struct input_dev *idev)
 {
        struct rc_dev *rdev = input_get_drvdata(idev);
 
-       rdev->close(rdev);
+        if (rdev)
+               rdev->close(rdev);
 }
 
 /* class for /sys/class/rc */
@@ -733,6 +734,7 @@ static struct {
        { RC_TYPE_SONY,         "sony"          },
        { RC_TYPE_RC5_SZ,       "rc-5-sz"       },
        { RC_TYPE_LIRC,         "lirc"          },
+       { RC_TYPE_OTHER,        "other"         },
 };
 
 #define PROTO_NONE     "none"
index c820e2f53527333b6fe7a42961776deadb15c45a..3f442003623d30494cf63632355bd35e174cc97c 100644 (file)
@@ -524,7 +524,7 @@ void cx88_ir_irq(struct cx88_core *core)
        for (todo = 32; todo > 0; todo -= bits) {
                ev.pulse = samples & 0x80000000 ? false : true;
                bits = min(todo, 32U - fls(ev.pulse ? samples : ~samples));
-               ev.duration = (bits * NSEC_PER_SEC) / (1000 * ir_samplerate);
+               ev.duration = (bits * (NSEC_PER_SEC / 1000)) / ir_samplerate;
                ir_raw_event_store_with_filter(ir->dev, &ev);
                samples <<= bits;
        }
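
The duration formula is rearranged so the division by 1000 happens before the multiplication. On builds where the intermediate is 32 bits wide, bits * NSEC_PER_SEC can overflow for the bit counts this loop produces, while bits * (NSEC_PER_SEC / 1000) cannot. A worked worst case, assuming ir_samplerate is in kHz as the factor of 1000 suggests:

/*
 * bits == 32, ir_samplerate == 1:
 *
 *   old: (32 * NSEC_PER_SEC) / (1000 * 1)
 *          32 * 1,000,000,000 = 32,000,000,000   -- wraps a 32-bit intermediate
 *   new: (32 * (NSEC_PER_SEC / 1000)) / 1
 *          32 * 1,000,000     = 32,000,000       -- fits easily
 */
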
index 5e1c9a81984ca837955d2f67a895e5265bb37e6a..303ffa7df4aca521c617ed1629678deb91ea3606 100644 (file)
@@ -174,7 +174,7 @@ static int m52790_probe(struct i2c_client *client,
        v4l_info(client, "chip found @ 0x%x (%s)\n",
                        client->addr << 1, client->adapter->name);
 
-       state = kmalloc(sizeof(struct m52790_state), GFP_KERNEL);
+       state = kzalloc(sizeof(struct m52790_state), GFP_KERNEL);
        if (state == NULL)
                return -ENOMEM;
 
index 3973f9a94753258da45666085da2b35f9d4ae986..ddb4c091dedc6b6740c013251524829c40fc6d5e 100644 (file)
@@ -136,11 +136,50 @@ unsigned long soc_camera_apply_sensor_flags(struct soc_camera_link *icl,
 }
 EXPORT_SYMBOL(soc_camera_apply_sensor_flags);
 
+#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \
+       ((x) >> 24) & 0xff
+
+static int soc_camera_try_fmt(struct soc_camera_device *icd,
+                             struct v4l2_format *f)
+{
+       struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
+       struct v4l2_pix_format *pix = &f->fmt.pix;
+       int ret;
+
+       dev_dbg(&icd->dev, "TRY_FMT(%c%c%c%c, %ux%u)\n",
+               pixfmtstr(pix->pixelformat), pix->width, pix->height);
+
+       pix->bytesperline = 0;
+       pix->sizeimage = 0;
+
+       ret = ici->ops->try_fmt(icd, f);
+       if (ret < 0)
+               return ret;
+
+       if (!pix->sizeimage) {
+               if (!pix->bytesperline) {
+                       const struct soc_camera_format_xlate *xlate;
+
+                       xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
+                       if (!xlate)
+                               return -EINVAL;
+
+                       ret = soc_mbus_bytes_per_line(pix->width,
+                                                     xlate->host_fmt);
+                       if (ret > 0)
+                               pix->bytesperline = ret;
+               }
+               if (pix->bytesperline)
+                       pix->sizeimage = pix->bytesperline * pix->height;
+       }
+
+       return 0;
+}
+
 static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv,
                                      struct v4l2_format *f)
 {
        struct soc_camera_device *icd = file->private_data;
-       struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
 
        WARN_ON(priv != file->private_data);
 
@@ -149,7 +188,7 @@ static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv,
                return -EINVAL;
 
        /* limit format to hardware capabilities */
-       return ici->ops->try_fmt(icd, f);
+       return soc_camera_try_fmt(icd, f);
 }
 
 static int soc_camera_enum_input(struct file *file, void *priv,
@@ -362,9 +401,6 @@ static void soc_camera_free_user_formats(struct soc_camera_device *icd)
        icd->user_formats = NULL;
 }
 
-#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \
-       ((x) >> 24) & 0xff
-
 /* Called with .vb_lock held, or from the first open(2), see comment there */
 static int soc_camera_set_fmt(struct soc_camera_device *icd,
                              struct v4l2_format *f)
@@ -377,7 +413,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd,
                pixfmtstr(pix->pixelformat), pix->width, pix->height);
 
        /* We always call try_fmt() before set_fmt() or set_crop() */
-       ret = ici->ops->try_fmt(icd, f);
+       ret = soc_camera_try_fmt(icd, f);
        if (ret < 0)
                return ret;
 
index 5d4cf3b3d435076a2f8db0ff102aa1815894bbea..22fa8202d5ca314466c4c11d8f13c877777db454 100644 (file)
@@ -171,7 +171,7 @@ static int tda9840_probe(struct i2c_client *client,
        v4l_info(client, "chip found @ 0x%x (%s)\n",
                        client->addr << 1, client->adapter->name);
 
-       sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
+       sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
        if (sd == NULL)
                return -ENOMEM;
        v4l2_i2c_subdev_init(sd, client, &tda9840_ops);
index 19621ed523ec605c6f55945eb43c1cf19dd5febb..827425c5b866e9e895e8393a7d8e5c5c8ca98723 100644 (file)
@@ -152,7 +152,7 @@ static int tea6415c_probe(struct i2c_client *client,
 
        v4l_info(client, "chip found @ 0x%x (%s)\n",
                        client->addr << 1, client->adapter->name);
-       sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
+       sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
        if (sd == NULL)
                return -ENOMEM;
        v4l2_i2c_subdev_init(sd, client, &tea6415c_ops);
index 5ea840401f21865120623691ecc0d20cc084decc..f350b6c24500110dbce1f064e24365dc5974fe7b 100644 (file)
@@ -125,7 +125,7 @@ static int tea6420_probe(struct i2c_client *client,
        v4l_info(client, "chip found @ 0x%x (%s)\n",
                        client->addr << 1, client->adapter->name);
 
-       sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
+       sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
        if (sd == NULL)
                return -ENOMEM;
        v4l2_i2c_subdev_init(sd, client, &tea6420_ops);
index f8138c75be8be3a25834ca4471da721b5163e5d1..1aab96a882034a41867641e3d1022a3643303dc2 100644 (file)
@@ -230,7 +230,7 @@ static int upd64031a_probe(struct i2c_client *client,
        v4l_info(client, "chip found @ 0x%x (%s)\n",
                        client->addr << 1, client->adapter->name);
 
-       state = kmalloc(sizeof(struct upd64031a_state), GFP_KERNEL);
+       state = kzalloc(sizeof(struct upd64031a_state), GFP_KERNEL);
        if (state == NULL)
                return -ENOMEM;
        sd = &state->sd;
index 28e0e6b6ca8491d815de9f822e19fae412b23ca6..9bbe61700fd5c238d253d309ea37a21a4f3ca6a1 100644 (file)
@@ -202,7 +202,7 @@ static int upd64083_probe(struct i2c_client *client,
        v4l_info(client, "chip found @ 0x%x (%s)\n",
                        client->addr << 1, client->adapter->name);
 
-       state = kmalloc(sizeof(struct upd64083_state), GFP_KERNEL);
+       state = kzalloc(sizeof(struct upd64083_state), GFP_KERNEL);
        if (state == NULL)
                return -ENOMEM;
        sd = &state->sd;
index 5aeaf876ba9b2ca4a04221e41fd4cf2bfbd7c0e1..4aae501f02d08f22148f5baf88d3620f3125f205 100644 (file)
@@ -155,8 +155,10 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
        sd->v4l2_dev = v4l2_dev;
        if (sd->internal_ops && sd->internal_ops->registered) {
                err = sd->internal_ops->registered(sd);
-               if (err)
+               if (err) {
+                       module_put(sd->owner);
                        return err;
+               }
        }
 
        /* This just returns 0 if either of the two args is NULL */
@@ -164,6 +166,7 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
        if (err) {
                if (sd->internal_ops && sd->internal_ops->unregistered)
                        sd->internal_ops->unregistered(sd);
+               module_put(sd->owner);
                return err;
        }
 
index 0b80644906769e698c3c3f7195be6fc3b2941c42..812729ebf09e53a5461a9df2667ad1f1ff9a286a 100644 (file)
@@ -155,25 +155,25 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)
 
        switch (cmd) {
        case VIDIOC_QUERYCTRL:
-               return v4l2_subdev_queryctrl(sd, arg);
+               return v4l2_queryctrl(sd->ctrl_handler, arg);
 
        case VIDIOC_QUERYMENU:
-               return v4l2_subdev_querymenu(sd, arg);
+               return v4l2_querymenu(sd->ctrl_handler, arg);
 
        case VIDIOC_G_CTRL:
-               return v4l2_subdev_g_ctrl(sd, arg);
+               return v4l2_g_ctrl(sd->ctrl_handler, arg);
 
        case VIDIOC_S_CTRL:
-               return v4l2_subdev_s_ctrl(sd, arg);
+               return v4l2_s_ctrl(sd->ctrl_handler, arg);
 
        case VIDIOC_G_EXT_CTRLS:
-               return v4l2_subdev_g_ext_ctrls(sd, arg);
+               return v4l2_g_ext_ctrls(sd->ctrl_handler, arg);
 
        case VIDIOC_S_EXT_CTRLS:
-               return v4l2_subdev_s_ext_ctrls(sd, arg);
+               return v4l2_s_ext_ctrls(sd->ctrl_handler, arg);
 
        case VIDIOC_TRY_EXT_CTRLS:
-               return v4l2_subdev_try_ext_ctrls(sd, arg);
+               return v4l2_try_ext_ctrls(sd->ctrl_handler, arg);
 
        case VIDIOC_DQEVENT:
                if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))
index 643ad52e3ca2fe49399a6de0682a495789c32dde..4796bbf0ae4e5b4fdd1476ac8e6cd4e9e43ec6c5 100644 (file)
@@ -1000,7 +1000,6 @@ static struct i2o_block_device *i2o_block_device_alloc(void)
        gd->major = I2O_MAJOR;
        gd->queue = queue;
        gd->fops = &i2o_block_fops;
-       gd->events = DISK_EVENT_MEDIA_CHANGE;
        gd->private_data = dev;
 
        dev->gd = gd;
index d4a851c6b5bf17f4da0b8326275a82c458c97983..0b4d5b23bec9b36d212fa068c6663b8f23365638 100644 (file)
@@ -144,7 +144,7 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
        int iter, i;
        unsigned long flags;
 
-       data->chip->irq_ack(irq_data);
+       data->chip->irq_ack(data);
 
        for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
                u32 status;
index 53450f433f1018dbf042b2dbf1ccaa490f802b60..3ab9ffa00aadf8c805477e2c323c2f1f889da2a8 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
 #include <linux/gpio.h>
-#include <linux/regulator/consumer.h>
 #include <plat/usb.h>
 
 #define USBHS_DRIVER_NAME      "usbhs-omap"
@@ -700,8 +699,7 @@ static int usbhs_enable(struct device *dev)
        dev_dbg(dev, "starting TI HSUSB Controller\n");
        if (!pdata) {
                dev_dbg(dev, "missing platform_data\n");
-               ret =  -ENODEV;
-               goto end_enable;
+               return  -ENODEV;
        }
 
        spin_lock_irqsave(&omap->lock, flags);
@@ -719,14 +717,14 @@ static int usbhs_enable(struct device *dev)
                        gpio_request(pdata->ehci_data->reset_gpio_port[0],
                                                "USB1 PHY reset");
                        gpio_direction_output
-                               (pdata->ehci_data->reset_gpio_port[0], 1);
+                               (pdata->ehci_data->reset_gpio_port[0], 0);
                }
 
                if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) {
                        gpio_request(pdata->ehci_data->reset_gpio_port[1],
                                                "USB2 PHY reset");
                        gpio_direction_output
-                               (pdata->ehci_data->reset_gpio_port[1], 1);
+                               (pdata->ehci_data->reset_gpio_port[1], 0);
                }
 
                /* Hold the PHY in RESET for enough time till DIR is high */
@@ -906,16 +904,17 @@ static int usbhs_enable(struct device *dev)
 
                if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
                        gpio_set_value
-                               (pdata->ehci_data->reset_gpio_port[0], 0);
+                               (pdata->ehci_data->reset_gpio_port[0], 1);
 
                if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
                        gpio_set_value
-                               (pdata->ehci_data->reset_gpio_port[1], 0);
+                               (pdata->ehci_data->reset_gpio_port[1], 1);
        }
 
 end_count:
        omap->count++;
-       goto end_enable;
+       spin_unlock_irqrestore(&omap->lock, flags);
+       return 0;
 
 err_tll:
        if (pdata->ehci_data->phy_reset) {
@@ -931,8 +930,6 @@ err_tll:
        clk_disable(omap->usbhost_fs_fck);
        clk_disable(omap->usbhost_hs_fck);
        clk_disable(omap->usbhost_ick);
-
-end_enable:
        spin_unlock_irqrestore(&omap->lock, flags);
        return ret;
 }
index 16422de0823a35447624353ba94d154ec1ed11be..2c0d4d16491ab4c3ef35d881c3fb11334b25ed7d 100644 (file)
@@ -447,12 +447,13 @@ static int __init load_twl4030_script(struct twl4030_script *tscript,
                if (err)
                        goto out;
        }
-       if (tscript->flags & TWL4030_SLEEP_SCRIPT)
+       if (tscript->flags & TWL4030_SLEEP_SCRIPT) {
                if (order)
                        pr_warning("TWL4030: Bad order of scripts (sleep "\
                                        "script before wakeup) Leads to boot"\
                                        "failure on some boards\n");
                err = twl4030_config_sleep_sequence(address);
+       }
 out:
        return err;
 }
index 63667a8f140c458722c54b33b3e91d54e4cca029..d6d62fd07ee9fd0b492d6c7e0a80d116840009b2 100644 (file)
@@ -284,6 +284,7 @@ int mmc_add_card(struct mmc_card *card)
                type = "SD-combo";
                if (mmc_card_blockaddr(card))
                        type = "SDHC-combo";
+               break;
        default:
                type = "?";
                break;
index 2e032f0e8cf47e45730d99e8f9554c688eb756b1..a6c329040140420ab73ded04fa08c7ca9cdaa07e 100644 (file)
@@ -832,7 +832,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
                return IRQ_HANDLED;
        }
 
-       if (end_command)
+       if (end_command && host->cmd)
                mmc_omap_cmd_done(host, host->cmd);
        if (host->data != NULL) {
                if (transfer_error)
index f9b611fc773e19d9133e79681280164c9e4d8538..60e4186a434592bc2a42b0a365578eed4ed525f3 100644 (file)
@@ -124,8 +124,10 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
 #endif
 }
 
+static const struct of_device_id sdhci_of_match[];
 static int __devinit sdhci_of_probe(struct platform_device *ofdev)
 {
+       const struct of_device_id *match;
        struct device_node *np = ofdev->dev.of_node;
        struct sdhci_of_data *sdhci_of_data;
        struct sdhci_host *host;
@@ -134,9 +136,10 @@ static int __devinit sdhci_of_probe(struct platform_device *ofdev)
        int size;
        int ret;
 
-       if (!ofdev->dev.of_match)
+       match = of_match_device(sdhci_of_match, &ofdev->dev);
+       if (!match)
                return -EINVAL;
-       sdhci_of_data = ofdev->dev.of_match->data;
+       sdhci_of_data = match->data;
 
        if (!of_device_is_available(np))
                return -ENODEV;
index a136be7063478bc9f9f6b130cc92010c47737fc5..f8b5f37007b2a0b8115e62032664e6d96a04e214 100644 (file)
@@ -957,6 +957,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
        host->ioaddr = pci_ioremap_bar(pdev, bar);
        if (!host->ioaddr) {
                dev_err(&pdev->dev, "failed to remap registers\n");
+               ret = -ENOMEM;
                goto release;
        }
 
index 9e15f41f87be8f4a79bd61b94ace660942512eb6..5d20661bc357ba387601b6b2089185bceb144aac 100644 (file)
@@ -1334,6 +1334,13 @@ static void sdhci_tasklet_finish(unsigned long param)
 
        host = (struct sdhci_host*)param;
 
+        /*
+         * If this tasklet gets rescheduled while running, it will
+         * be run again afterwards but without any active request.
+         */
+       if (!host->mrq)
+               return;
+
        spin_lock_irqsave(&host->lock, flags);
 
        del_timer(&host->timer);
@@ -1345,7 +1352,7 @@ static void sdhci_tasklet_finish(unsigned long param)
         * upon error conditions.
         */
        if (!(host->flags & SDHCI_DEVICE_DEAD) &&
-               (mrq->cmd->error ||
+           ((mrq->cmd && mrq->cmd->error) ||
                 (mrq->data && (mrq->data->error ||
                  (mrq->data->stop && mrq->data->stop->error))) ||
                   (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
index 62d37de6de76d7551d575f22df39909479030aec..710339a85c84676d9c72e2105f24108344248da2 100644 (file)
@@ -728,15 +728,15 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                tmio_mmc_set_clock(host, ios->clock);
 
        /* Power sequence - OFF -> UP -> ON */
-       if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
+       if (ios->power_mode == MMC_POWER_UP) {
+               /* power up SD bus */
+               if (host->set_pwr)
+                       host->set_pwr(host->pdev, 1);
+       } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
                /* power down SD bus */
                if (ios->power_mode == MMC_POWER_OFF && host->set_pwr)
                        host->set_pwr(host->pdev, 0);
                tmio_mmc_clk_stop(host);
-       } else if (ios->power_mode == MMC_POWER_UP) {
-               /* power up SD bus */
-               if (host->set_pwr)
-                       host->set_pwr(host->pdev, 1);
        } else {
                /* start bus clock */
                tmio_mmc_clk_start(host);
index bd483f0c57e1ae31ca98c1336cd3f4bed248310b..c1d33464aee8926e66afcccc23dbbbacdb718ba8 100644 (file)
@@ -214,11 +214,13 @@ static void __devinit of_free_probes(const char **probes)
 }
 #endif
 
+static struct of_device_id of_flash_match[];
 static int __devinit of_flash_probe(struct platform_device *dev)
 {
 #ifdef CONFIG_MTD_PARTITIONS
        const char **part_probe_types;
 #endif
+       const struct of_device_id *match;
        struct device_node *dp = dev->dev.of_node;
        struct resource res;
        struct of_flash *info;
@@ -232,9 +234,10 @@ static int __devinit of_flash_probe(struct platform_device *dev)
        struct mtd_info **mtd_list = NULL;
        resource_size_t res_size;
 
-       if (!dev->dev.of_match)
+       match = of_match_device(of_flash_match, &dev->dev);
+       if (!match)
                return -EINVAL;
-       probe_type = dev->dev.of_match->data;
+       probe_type = match->data;
 
        reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);
 
index dc280bc8eba2e0e0a8bc033b164d11b2707410bd..6c884ef1b0690ea89fe5ed315c51f761897442ba 100644 (file)
@@ -2536,7 +2536,7 @@ config S6GMAC
 source "drivers/net/stmmac/Kconfig"
 
 config PCH_GBE
-       tristate "PCH Gigabit Ethernet"
+       tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE"
        depends on PCI
        select MII
        ---help---
@@ -2548,6 +2548,12 @@ config PCH_GBE
          to Gigabit Ethernet.
          This driver enables Gigabit Ethernet function.
 
+         This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
+         Output Hub), ML7223.
+         ML7223 IOH is for MP(Media Phone) use.
+         ML7223 is companion chip for Intel Atom E6xx series.
+         ML7223 is completely compatible for Intel EG20T PCH.
+
 endif # NETDEV_1000
 
 #
index 01b604ad155eec1d9f2f345c4b7537cafa7128dd..e5a7375685ad1b0edded33c6e91ca0ec2710b78d 100644 (file)
@@ -144,7 +144,7 @@ obj-$(CONFIG_NE3210) += ne3210.o 8390.o
 obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
 obj-$(CONFIG_B44) += b44.o
 obj-$(CONFIG_FORCEDETH) += forcedeth.o
-obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
+obj-$(CONFIG_NE_H8300) += ne-h8300.o
 obj-$(CONFIG_AX88796) += ax88796.o
 obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
 obj-$(CONFIG_FTMAC100) += ftmac100.o
@@ -219,7 +219,7 @@ obj-$(CONFIG_SC92031) += sc92031.o
 obj-$(CONFIG_LP486E) += lp486e.o
 
 obj-$(CONFIG_ETH16I) += eth16i.o
-obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o
+obj-$(CONFIG_ZORRO8390) += zorro8390.o
 obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
 obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
 obj-$(CONFIG_EQUALIZER) += eql.o
@@ -231,7 +231,7 @@ obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o
 obj-$(CONFIG_DECLANCE) += declance.o
 obj-$(CONFIG_ATARILANCE) += atarilance.o
 obj-$(CONFIG_A2065) += a2065.o
-obj-$(CONFIG_HYDRA) += hydra.o 8390.o
+obj-$(CONFIG_HYDRA) += hydra.o
 obj-$(CONFIG_ARIADNE) += ariadne.o
 obj-$(CONFIG_CS89x0) += cs89x0.o
 obj-$(CONFIG_MACSONIC) += macsonic.o
index 88495c48a81d3fef21340997537e5ed4f460ec2e..241b185e6569da605eea3fcdd089098b42262a6e 100644 (file)
@@ -106,7 +106,7 @@ MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version "M
 MODULE_LICENSE("GPL");
 MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
 module_param_array(speed_duplex, int, NULL, 0);
-MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotitate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
+MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
 module_param_array(coalesce, bool, NULL, 0);
 MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
 module_param_array(dynamic_ipg, bool, NULL, 0);
index 4af235d41fda656990a7a280976ecd6e477d2a49..fbfb5b47c506c87433e30f6c47ea9e4a584f2c04 100644 (file)
@@ -527,7 +527,7 @@ static void __init etherh_banner(void)
  * Read the ethernet address string from the on board rom.
  * This is an ascii string...
  */
-static int __init etherh_addr(char *addr, struct expansion_card *ec)
+static int __devinit etherh_addr(char *addr, struct expansion_card *ec)
 {
        struct in_chunk_dir cd;
        char *s;
@@ -655,7 +655,7 @@ static const struct net_device_ops etherh_netdev_ops = {
 static u32 etherh_regoffsets[16];
 static u32 etherm_regoffsets[16];
 
-static int __init
+static int __devinit
 etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
 {
        const struct etherh_data *data = id->data;
index 7cb375e0e29cd5e3219a86106f2288201cf20635..925929d764ca50eff2812842c611e0cfbdb3005a 100644 (file)
@@ -566,9 +566,9 @@ struct atl1c_adapter {
 #define __AT_TESTING        0x0001
 #define __AT_RESETTING      0x0002
 #define __AT_DOWN           0x0003
-       u8 work_event;
-#define ATL1C_WORK_EVENT_RESET                 0x01
-#define ATL1C_WORK_EVENT_LINK_CHANGE   0x02
+       unsigned long work_event;
+#define        ATL1C_WORK_EVENT_RESET          0
+#define        ATL1C_WORK_EVENT_LINK_CHANGE    1
        u32 msg_enable;
 
        bool have_msi;
index 7d9d5067a65ce1bb368bd2128d75c5ef87446828..a6e1c36e48e6812b7590a0e2e595abb956b417ab 100644 (file)
@@ -325,7 +325,7 @@ static void atl1c_link_chg_event(struct atl1c_adapter *adapter)
                }
        }
 
-       adapter->work_event |= ATL1C_WORK_EVENT_LINK_CHANGE;
+       set_bit(ATL1C_WORK_EVENT_LINK_CHANGE, &adapter->work_event);
        schedule_work(&adapter->common_task);
 }
 
@@ -337,20 +337,16 @@ static void atl1c_common_task(struct work_struct *work)
        adapter = container_of(work, struct atl1c_adapter, common_task);
        netdev = adapter->netdev;
 
-       if (adapter->work_event & ATL1C_WORK_EVENT_RESET) {
-               adapter->work_event &= ~ATL1C_WORK_EVENT_RESET;
+       if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) {
                netif_device_detach(netdev);
                atl1c_down(adapter);
                atl1c_up(adapter);
                netif_device_attach(netdev);
-               return;
        }
 
-       if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) {
-               adapter->work_event &= ~ATL1C_WORK_EVENT_LINK_CHANGE;
+       if (test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE,
+               &adapter->work_event))
                atl1c_check_link_status(adapter);
-       }
-       return;
 }
 
 
@@ -369,7 +365,7 @@ static void atl1c_tx_timeout(struct net_device *netdev)
        struct atl1c_adapter *adapter = netdev_priv(netdev);
 
        /* Do the reset outside of interrupt context */
-       adapter->work_event |= ATL1C_WORK_EVENT_RESET;
+       set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event);
        schedule_work(&adapter->common_task);
 }
 
index 66823eded7a3b7d4ffa7525728d8c587f1628275..2353eca32593f7623490656e555cedb24bcc483a 100644 (file)
@@ -213,7 +213,7 @@ struct be_rx_stats {
 
 struct be_rx_compl_info {
        u32 rss_hash;
-       u16 vid;
+       u16 vlan_tag;
        u16 pkt_size;
        u16 rxq_idx;
        u16 mac_id;
index 1e2d825bb94a2b608bc4d8eacac2fa8e24dafaf3..9dc9394fd4cae51f5ec0b566daa6102d7b80e571 100644 (file)
@@ -132,7 +132,7 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
                struct be_async_event_grp5_pvid_state *evt)
 {
        if (evt->enabled)
-               adapter->pvid = evt->tag;
+               adapter->pvid = le16_to_cpu(evt->tag);
        else
                adapter->pvid = 0;
 }
index 7cb5a114c7338af0c584607c7bcd1e33dba4c0e2..9187fb4e08f1169baaec2c6974944d924d7d07d5 100644 (file)
@@ -1018,7 +1018,8 @@ static void be_rx_compl_process(struct be_adapter *adapter,
                        kfree_skb(skb);
                        return;
                }
-               vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
+               vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
+                                       rxcp->vlan_tag);
        } else {
                netif_receive_skb(skb);
        }
@@ -1076,7 +1077,8 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
        if (likely(!rxcp->vlanf))
                napi_gro_frags(&eq_obj->napi);
        else
-               vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
+               vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
+                               rxcp->vlan_tag);
 }
 
 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
@@ -1102,7 +1104,8 @@ static void be_parse_rx_compl_v1(struct be_adapter *adapter,
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl);
-       rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl);
+       rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
+                                       compl);
 }
 
 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
@@ -1128,7 +1131,8 @@ static void be_parse_rx_compl_v0(struct be_adapter *adapter,
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl);
-       rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl);
+       rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
+                                       compl);
 }
 
 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
@@ -1155,9 +1159,11 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
                rxcp->vlanf = 0;
 
        if (!lancer_chip(adapter))
-               rxcp->vid = swab16(rxcp->vid);
+               rxcp->vlan_tag = swab16(rxcp->vlan_tag);
 
-       if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid])
+       if (((adapter->pvid & VLAN_VID_MASK) ==
+               (rxcp->vlan_tag & VLAN_VID_MASK)) &&
+               !adapter->vlan_tag[rxcp->vlan_tag])
                rxcp->vlanf = 0;
 
        /* As the compl has been parsed, reset it; we wont touch it again */
@@ -1873,6 +1879,7 @@ static void be_worker(struct work_struct *work)
                be_detect_dump_ue(adapter);
 
 reschedule:
+       adapter->work_counter++;
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
 }
 
index 8e6d618b53052139981cf5cc6df6720d11382a0c..d8383a9af9ad21a756a090bc1fd7342f3af005ee 100644 (file)
@@ -8413,6 +8413,8 @@ bnx2_remove_one(struct pci_dev *pdev)
 
        unregister_netdev(dev);
 
+       del_timer_sync(&bp->timer);
+
        if (bp->mips_firmware)
                release_firmware(bp->mips_firmware);
        if (bp->rv2p_firmware)
index e83ac6dd6fc076b85b752957b9d7a88985cb0412..16581df5ee4e62936afd0ac74ad50dbd108f5782 100644 (file)
@@ -2019,15 +2019,23 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
 static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
        u32 *parsing_data, u32 xmit_type)
 {
-       *parsing_data |= ((tcp_hdrlen(skb)/4) <<
-               ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
-               ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
+       *parsing_data |=
+                       ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
+                       ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
+                       ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
 
-       *parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) <<
-               ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
-               ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
+       if (xmit_type & XMIT_CSUM_TCP) {
+               *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
+                       ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+                       ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
 
-       return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
+               return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
+       } else
+               /* We support checksum offload for TCP and UDP only.
+                * No need to pass the UDP header length - it's a constant.
+                */
+               return skb_transport_header(skb) +
+                               sizeof(struct udphdr) - skb->data;
 }
 
 /**
@@ -2043,7 +2051,7 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
        struct eth_tx_parse_bd_e1x *pbd,
        u32 xmit_type)
 {
-       u8 hlen = (skb_network_header(skb) - skb->data) / 2;
+       u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
 
        /* for now NS flag is not used in Linux */
        pbd->global_data =
@@ -2051,9 +2059,15 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
                         ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
 
        pbd->ip_hlen_w = (skb_transport_header(skb) -
-                       skb_network_header(skb)) / 2;
+                       skb_network_header(skb)) >> 1;
 
-       hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
+       hlen += pbd->ip_hlen_w;
+
+       /* We support checksum offload for TCP and UDP only */
+       if (xmit_type & XMIT_CSUM_TCP)
+               hlen += tcp_hdrlen(skb) / 2;
+       else
+               hlen += sizeof(struct udphdr) / 2;
 
        pbd->total_hlen_w = cpu_to_le16(hlen);
        hlen = hlen*2;
index 494bf960442d3ff533507661ce394d0d128a3d6f..31912f17653f74173368e36a03c924a4b1443975 100644 (file)
@@ -1482,8 +1482,11 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
 
 static int agg_device_up(const struct aggregator *agg)
 {
-       return (netif_running(agg->slave->dev) &&
-               netif_carrier_ok(agg->slave->dev));
+       struct port *port = agg->lag_ports;
+       if (!port)
+               return 0;
+       return (netif_running(port->slave->dev) &&
+               netif_carrier_ok(port->slave->dev));
 }
 
 /**
index b28baff708641b7b9375891222cc47f133eafdfb..01b8a6af275babb1be74839a489d2133728ddf05 100644 (file)
@@ -39,7 +39,7 @@
 
 typedef struct mac_addr {
        u8 mac_addr_value[ETH_ALEN];
-} mac_addr_t;
+} __packed mac_addr_t;
 
 enum {
        BOND_AD_STABLE = 0,
@@ -134,12 +134,12 @@ typedef struct lacpdu {
        u8 tlv_type_terminator;      // = terminator
        u8 terminator_length;        // = 0
        u8 reserved_50[50];          // = 0
-} lacpdu_t;
+} __packed lacpdu_t;
 
 typedef struct lacpdu_header {
        struct ethhdr hdr;
        struct lacpdu lacpdu;
-} lacpdu_header_t;
+} __packed lacpdu_header_t;
 
 // Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard)
 typedef struct bond_marker {
@@ -155,12 +155,12 @@ typedef struct bond_marker {
        u8 tlv_type_terminator;      //  = 0x00
        u8 terminator_length;        //  = 0x00
        u8 reserved_90[90];          //  = 0
-} bond_marker_t;
+} __packed bond_marker_t;
 
 typedef struct bond_marker_header {
        struct ethhdr hdr;
        struct bond_marker marker;
-} bond_marker_header_t;
+} __packed bond_marker_header_t;
 
 #pragma pack()
 
index bd1d811c204f276cbf2aa74f07035cd8ae0875ef..5fedc3375562316c3e13f0a9a7ff4b94854c9c34 100644 (file)
@@ -247,8 +247,10 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
 }
 #endif /* CONFIG_PPC_MPC512x */
 
+static struct of_device_id mpc5xxx_can_table[];
 static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
 {
+       const struct of_device_id *match;
        struct mpc5xxx_can_data *data;
        struct device_node *np = ofdev->dev.of_node;
        struct net_device *dev;
@@ -258,9 +260,10 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
        int irq, mscan_clksrc = 0;
        int err = -ENOMEM;
 
-       if (!ofdev->dev.of_match)
+       match = of_match_device(mpc5xxx_can_table, &ofdev->dev);
+       if (!match)
                return -EINVAL;
-       data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data;
+       data = match->data;
 
        base = of_iomap(np, 0);
        if (!base) {
index a358ea9445a20b5cc37341502e9590dd9ab91253..f501bba1fc6fc9d48cb923f9c963bea83cd3b406 100644 (file)
@@ -346,10 +346,10 @@ static void sja1000_rx(struct net_device *dev)
                    | (priv->read_reg(priv, REG_ID2) >> 5);
        }
 
+       cf->can_dlc = get_can_dlc(fi & 0x0F);
        if (fi & FI_RTR) {
                id |= CAN_RTR_FLAG;
        } else {
-               cf->can_dlc = get_can_dlc(fi & 0x0F);
                for (i = 0; i < cf->can_dlc; i++)
                        cf->data[i] = priv->read_reg(priv, dreg++);
        }
index b423965a78d16fe702ddbba08d7dd8d529cdf767..1b49df6b2470874b0cb76cf45214f247c6d3bf29 100644 (file)
@@ -583,7 +583,9 @@ static int slcan_open(struct tty_struct *tty)
        /* Done.  We have linked the TTY line to a channel. */
        rtnl_unlock();
        tty->receive_room = 65536;      /* We don't flow control */
-       return sl->dev->base_addr;
+
+       /* TTY layer expects 0 on success */
+       return 0;
 
 err_free_chan:
        sl->tty = NULL;
index 3e2e734fecb73b7ee586be244bd8775521a8c225..f3bbdcef338cfbd881cc4b8bc4e361791bf49bb6 100644 (file)
@@ -55,15 +55,20 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                cmd->duplex = -1;
        }
 
-       cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full
-                      | SUPPORTED_100baseT_Full |  SUPPORTED_100baseT_Half
-                      | SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half
-                      | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
-
-       cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Autoneg
-                        | ADVERTISED_FIBRE);
+       if (cmd->speed == SPEED_10000) {
+               cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+               cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
+               cmd->port = PORT_FIBRE;
+       } else {
+               cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full
+                              | SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full
+                              | SUPPORTED_10baseT_Half | SUPPORTED_Autoneg
+                              | SUPPORTED_TP);
+               cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg
+                                | ADVERTISED_TP);
+               cmd->port = PORT_TP;
+       }
 
-       cmd->port = PORT_FIBRE;
        cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE;
 
        return 0;
index f75d3144b8a508e9668c48be6592eff49fbb7e3f..cf79cf759e13608146f897e957128b927bb7479e 100644 (file)
@@ -2688,9 +2688,6 @@ static int ehea_open(struct net_device *dev)
                netif_start_queue(dev);
        }
 
-       init_waitqueue_head(&port->swqe_avail_wq);
-       init_waitqueue_head(&port->restart_wq);
-
        mutex_unlock(&port->port_lock);
 
        return ret;
@@ -3040,11 +3037,14 @@ static void ehea_rereg_mrs(void)
 
                                        if (dev->flags & IFF_UP) {
                                                mutex_lock(&port->port_lock);
-                                               port_napi_enable(port);
                                                ret = ehea_restart_qps(dev);
-                                               check_sqs(port);
-                                               if (!ret)
+                                               if (!ret) {
+                                                       check_sqs(port);
+                                                       port_napi_enable(port);
                                                        netif_wake_queue(dev);
+                                               } else {
+                                                       netdev_err(dev, "Unable to restart QPS\n");
+                                               }
                                                mutex_unlock(&port->port_lock);
                                        }
                                }
@@ -3273,6 +3273,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
 
        INIT_WORK(&port->reset_task, ehea_reset_port);
 
+       init_waitqueue_head(&port->swqe_avail_wq);
+       init_waitqueue_head(&port->restart_wq);
+
        ret = register_netdev(dev);
        if (ret) {
                pr_err("register_netdev failed. ret=%d\n", ret);
index 24cb953900dde794e60fe3d5921c7e0698f800e8..5131e61c358cd8b9cde0ce4aa7dcfaf36df11249 100644 (file)
@@ -998,8 +998,10 @@ static const struct net_device_ops fs_enet_netdev_ops = {
 #endif
 };
 
+static struct of_device_id fs_enet_match[];
 static int __devinit fs_enet_probe(struct platform_device *ofdev)
 {
+       const struct of_device_id *match;
        struct net_device *ndev;
        struct fs_enet_private *fep;
        struct fs_platform_info *fpi;
@@ -1007,14 +1009,15 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev)
        const u8 *mac_addr;
        int privsize, len, ret = -ENODEV;
 
-       if (!ofdev->dev.of_match)
+       match = of_match_device(fs_enet_match, &ofdev->dev);
+       if (!match)
                return -EINVAL;
 
        fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
        if (!fpi)
                return -ENOMEM;
 
-       if (!IS_FEC(ofdev->dev.of_match)) {
+       if (!IS_FEC(match)) {
                data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
                if (!data || len != 4)
                        goto out_free_fpi;
@@ -1049,7 +1052,7 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev)
        fep->dev = &ofdev->dev;
        fep->ndev = ndev;
        fep->fpi = fpi;
-       fep->ops = ofdev->dev.of_match->data;
+       fep->ops = match->data;
 
        ret = fep->ops->setup_data(ndev);
        if (ret)
index 61035fc5599b1bd552bf05e1a77289bd24bee53f..b9fbc83d64a7faa09acfdc2537427bda5f4c6f14 100644 (file)
@@ -226,8 +226,8 @@ static void set_multicast_finish(struct net_device *dev)
        }
 
        FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
-       FW(fecp, hash_table_high, fep->fec.hthi);
-       FW(fecp, hash_table_low, fep->fec.htlo);
+       FW(fecp, grp_hash_table_high, fep->fec.hthi);
+       FW(fecp, grp_hash_table_low, fep->fec.htlo);
 }
 
 static void set_multicast_list(struct net_device *dev)
@@ -273,8 +273,8 @@ static void restart(struct net_device *dev)
        /*
         * Reset all multicast.
         */
-       FW(fecp, hash_table_high, fep->fec.hthi);
-       FW(fecp, hash_table_low, fep->fec.htlo);
+       FW(fecp, grp_hash_table_high, fep->fec.hthi);
+       FW(fecp, grp_hash_table_low, fep->fec.htlo);
 
        /*
         * Set maximum receive buffer size.
index 7e840d373ab36b73fce7676684a1110056ccb4b5..6a2e150e75bb2d774d2afc50f3913f558c92ae77 100644 (file)
@@ -101,17 +101,20 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus)
        return 0;
 }
 
+static struct of_device_id fs_enet_mdio_fec_match[];
 static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
 {
+       const struct of_device_id *match;
        struct resource res;
        struct mii_bus *new_bus;
        struct fec_info *fec;
        int (*get_bus_freq)(struct device_node *);
        int ret = -ENOMEM, clock, speed;
 
-       if (!ofdev->dev.of_match)
+       match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev);
+       if (!match)
                return -EINVAL;
-       get_bus_freq = ofdev->dev.of_match->data;
+       get_bus_freq = match->data;
 
        new_bus = mdiobus_alloc();
        if (!new_bus)
index a31661948c420e0d467c7e8b7bb4ecae4c078de0..9bd7746cbfcfd1fc6b294cbdb2f499e7f21d5e96 100644 (file)
@@ -139,11 +139,11 @@ static int ftmac100_reset(struct ftmac100 *priv)
                         * that hardware reset completed (what the f*ck).
                         * We still need to wait for a while.
                         */
-                       usleep_range(500, 1000);
+                       udelay(500);
                        return 0;
                }
 
-               usleep_range(1000, 10000);
+               udelay(1000);
        }
 
        netdev_err(netdev, "software reset failed\n");
@@ -772,7 +772,7 @@ static int ftmac100_mdio_read(struct net_device *netdev, int phy_id, int reg)
                if ((phycr & FTMAC100_PHYCR_MIIRD) == 0)
                        return phycr & FTMAC100_PHYCR_MIIRDATA;
 
-               usleep_range(100, 1000);
+               udelay(100);
        }
 
        netdev_err(netdev, "mdio read timed out\n");
@@ -801,7 +801,7 @@ static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg,
                if ((phycr & FTMAC100_PHYCR_MIIWR) == 0)
                        return;
 
-               usleep_range(100, 1000);
+               udelay(100);
        }
 
        netdev_err(netdev, "mdio write timed out\n");
index c5ef62ceb8403ac777a36886156bec25a94006dd..1cd481c04202fac3c0df0f93047772a3b789ec6c 100644 (file)
@@ -98,15 +98,15 @@ static const struct net_device_ops hydra_netdev_ops = {
        .ndo_open               = hydra_open,
        .ndo_stop               = hydra_close,
 
-       .ndo_start_xmit         = ei_start_xmit,
-       .ndo_tx_timeout         = ei_tx_timeout,
-       .ndo_get_stats          = ei_get_stats,
-       .ndo_set_multicast_list = ei_set_multicast_list,
+       .ndo_start_xmit         = __ei_start_xmit,
+       .ndo_tx_timeout         = __ei_tx_timeout,
+       .ndo_get_stats          = __ei_get_stats,
+       .ndo_set_multicast_list = __ei_set_multicast_list,
        .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_set_mac_address    = eth_mac_addr,
        .ndo_change_mtu         = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ei_poll,
+       .ndo_poll_controller    = __ei_poll,
 #endif
 };
 
@@ -125,7 +125,7 @@ static int __devinit hydra_init(struct zorro_dev *z)
        0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
     };
 
-    dev = alloc_ei_netdev();
+    dev = ____alloc_ei_netdev(0);
     if (!dev)
        return -ENOMEM;
 
index 0a6c6a2e7550f17235827811be88140ba891a0f6..d4fc00b1ff93442f699844517736a0ac1b6a0bde 100644 (file)
@@ -49,6 +49,10 @@ static u32 mii_get_an(struct mii_if_info *mii, u16 addr)
                result |= ADVERTISED_100baseT_Half;
        if (advert & ADVERTISE_100FULL)
                result |= ADVERTISED_100baseT_Full;
+       if (advert & ADVERTISE_PAUSE_CAP)
+               result |= ADVERTISED_Pause;
+       if (advert & ADVERTISE_PAUSE_ASYM)
+               result |= ADVERTISED_Asym_Pause;
 
        return result;
 }
index 30be8c634ebdd3722bd3c3e965348ccd638d12e2..7298a34bc7951d733f4de4071acc14aa5eab42e4 100644 (file)
@@ -167,7 +167,7 @@ static void cleanup_card(struct net_device *dev)
 #ifndef MODULE
 struct net_device * __init ne_probe(int unit)
 {
-       struct net_device *dev = alloc_ei_netdev();
+       struct net_device *dev = ____alloc_ei_netdev(0);
        int err;
 
        if (!dev)
@@ -197,15 +197,15 @@ static const struct net_device_ops ne_netdev_ops = {
        .ndo_open               = ne_open,
        .ndo_stop               = ne_close,
 
-       .ndo_start_xmit         = ei_start_xmit,
-       .ndo_tx_timeout         = ei_tx_timeout,
-       .ndo_get_stats          = ei_get_stats,
-       .ndo_set_multicast_list = ei_set_multicast_list,
+       .ndo_start_xmit         = __ei_start_xmit,
+       .ndo_tx_timeout         = __ei_tx_timeout,
+       .ndo_get_stats          = __ei_get_stats,
+       .ndo_set_multicast_list = __ei_set_multicast_list,
        .ndo_validate_addr      = eth_validate_addr,
-       .ndo_set_mac_address    = eth_mac_addr,
+       .ndo_set_mac_address    = eth_mac_addr,
        .ndo_change_mtu         = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ei_poll,
+       .ndo_poll_controller    = __ei_poll,
 #endif
 };
 
@@ -637,7 +637,7 @@ int init_module(void)
        int err;
 
        for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
-               struct net_device *dev = alloc_ei_netdev();
+               struct net_device *dev = ____alloc_ei_netdev(0);
                if (!dev)
                        break;
                if (io[this_dev]) {
index dfb67eb2a94b726cb590f8b3a015d98a719866d1..eb41e44921e665c3863dd82860b7c44413c7ef5e 100644 (file)
@@ -671,6 +671,7 @@ static int netconsole_netdev_event(struct notifier_block *this,
                goto done;
 
        spin_lock_irqsave(&target_list_lock, flags);
+restart:
        list_for_each_entry(nt, &target_list, list) {
                netconsole_target_get(nt);
                if (nt->np.dev == dev) {
@@ -683,9 +684,16 @@ static int netconsole_netdev_event(struct notifier_block *this,
                                 * rtnl_lock already held
                                 */
                                if (nt->np.dev) {
+                                       spin_unlock_irqrestore(
+                                                             &target_list_lock,
+                                                             flags);
                                        __netpoll_cleanup(&nt->np);
+                                       spin_lock_irqsave(&target_list_lock,
+                                                         flags);
                                        dev_put(nt->np.dev);
                                        nt->np.dev = NULL;
+                                       netconsole_target_put(nt);
+                                       goto restart;
                                }
                                /* Fall through */
                        case NETDEV_GOING_DOWN:
index 2ef2f9cdefa6ff1f0d991d734c340960486f420a..56d049a472da4ed41e426968d00e0034dda2bf1e 100644 (file)
@@ -34,6 +34,10 @@ const char pch_driver_version[] = DRV_VERSION;
 #define PCH_GBE_COPYBREAK_DEFAULT      256
 #define PCH_GBE_PCI_BAR                        1
 
+/* Macros for ML7223 */
+#define PCI_VENDOR_ID_ROHM                     0x10db
+#define PCI_DEVICE_ID_ROHM_ML7223_GBE          0x8013
+
 #define PCH_GBE_TX_WEIGHT         64
 #define PCH_GBE_RX_WEIGHT         64
 #define PCH_GBE_RX_BUFFER_WRITE   16
@@ -43,8 +47,7 @@ const char pch_driver_version[] = DRV_VERSION;
 
 #define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
        PCH_GBE_CHIP_TYPE_INTERNAL | \
-       PCH_GBE_RGMII_MODE_RGMII   | \
-       PCH_GBE_CRS_SEL              \
+       PCH_GBE_RGMII_MODE_RGMII     \
        )
 
 /* Ethertype field values */
@@ -1494,12 +1497,11 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
                        /* Write meta date of skb */
                        skb_put(skb, length);
                        skb->protocol = eth_type_trans(skb, netdev);
-                       if ((tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) ==
-                           PCH_GBE_RXD_ACC_STAT_TCPIPOK) {
-                               skb->ip_summed = CHECKSUM_UNNECESSARY;
-                       } else {
+                       if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
                                skb->ip_summed = CHECKSUM_NONE;
-                       }
+                       else
+                               skb->ip_summed = CHECKSUM_UNNECESSARY;
+
                        napi_gro_receive(&adapter->napi, skb);
                        (*work_done)++;
                        pr_debug("Receive skb->ip_summed: %d length: %d\n",
@@ -2420,6 +2422,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
         .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
         .class_mask = (0xFFFF00)
         },
+       {.vendor = PCI_VENDOR_ID_ROHM,
+        .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
+        .subvendor = PCI_ANY_ID,
+        .subdevice = PCI_ANY_ID,
+        .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+        .class_mask = (0xFFFF00)
+        },
        /* required last entry */
        {0}
 };
index 493b0de3848bf17fadd6741ad57d5b4701b2f074..397c36810a159ad7dfc143a819303e059008fe56 100644 (file)
@@ -170,6 +170,16 @@ static const struct {
 };
 #undef _R
 
+static const struct rtl_firmware_info {
+       int mac_version;
+       const char *fw_name;
+} rtl_firmware_infos[] = {
+       { .mac_version = RTL_GIGA_MAC_VER_25, .fw_name = FIRMWARE_8168D_1 },
+       { .mac_version = RTL_GIGA_MAC_VER_26, .fw_name = FIRMWARE_8168D_2 },
+       { .mac_version = RTL_GIGA_MAC_VER_29, .fw_name = FIRMWARE_8105E_1 },
+       { .mac_version = RTL_GIGA_MAC_VER_30, .fw_name = FIRMWARE_8105E_1 }
+};
+
 enum cfg_version {
        RTL_CFG_0 = 0x00,
        RTL_CFG_1,
@@ -565,6 +575,7 @@ struct rtl8169_private {
        u32 saved_wolopts;
 
        const struct firmware *fw;
+#define RTL_FIRMWARE_UNKNOWN   ERR_PTR(-EAGAIN);
 };
 
 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -1789,25 +1800,26 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
 
 static void rtl_release_firmware(struct rtl8169_private *tp)
 {
-       release_firmware(tp->fw);
-       tp->fw = NULL;
+       if (!IS_ERR_OR_NULL(tp->fw))
+               release_firmware(tp->fw);
+       tp->fw = RTL_FIRMWARE_UNKNOWN;
 }
 
-static int rtl_apply_firmware(struct rtl8169_private *tp, const char *fw_name)
+static void rtl_apply_firmware(struct rtl8169_private *tp)
 {
-       const struct firmware **fw = &tp->fw;
-       int rc = !*fw;
-
-       if (rc) {
-               rc = request_firmware(fw, fw_name, &tp->pci_dev->dev);
-               if (rc < 0)
-                       goto out;
-       }
+       const struct firmware *fw = tp->fw;
 
        /* TODO: release firmware once rtl_phy_write_fw signals failures. */
-       rtl_phy_write_fw(tp, *fw);
-out:
-       return rc;
+       if (!IS_ERR_OR_NULL(fw))
+               rtl_phy_write_fw(tp, fw);
+}
+
+static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
+{
+       if (rtl_readphy(tp, reg) != val)
+               netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
+       else
+               rtl_apply_firmware(tp);
 }
 
 static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
@@ -2246,10 +2258,8 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
 
        rtl_writephy(tp, 0x1f, 0x0005);
        rtl_writephy(tp, 0x05, 0x001b);
-       if ((rtl_readphy(tp, 0x06) != 0xbf00) ||
-           (rtl_apply_firmware(tp, FIRMWARE_8168D_1) < 0)) {
-               netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
-       }
+
+       rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
 
        rtl_writephy(tp, 0x1f, 0x0000);
 }
@@ -2351,10 +2361,8 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
 
        rtl_writephy(tp, 0x1f, 0x0005);
        rtl_writephy(tp, 0x05, 0x001b);
-       if ((rtl_readphy(tp, 0x06) != 0xb300) ||
-           (rtl_apply_firmware(tp, FIRMWARE_8168D_2) < 0)) {
-               netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
-       }
+
+       rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
 
        rtl_writephy(tp, 0x1f, 0x0000);
 }
@@ -2474,8 +2482,7 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
        rtl_writephy(tp, 0x18, 0x0310);
        msleep(100);
 
-       if (rtl_apply_firmware(tp, FIRMWARE_8105E_1) < 0)
-               netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
+       rtl_apply_firmware(tp);
 
        rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
 }
@@ -3237,6 +3244,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        tp->timer.data = (unsigned long) dev;
        tp->timer.function = rtl8169_phy_timer;
 
+       tp->fw = RTL_FIRMWARE_UNKNOWN;
+
        rc = register_netdev(dev);
        if (rc < 0)
                goto err_out_msi_4;
@@ -3288,10 +3297,10 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
 
        cancel_delayed_work_sync(&tp->task);
 
-       rtl_release_firmware(tp);
-
        unregister_netdev(dev);
 
+       rtl_release_firmware(tp);
+
        if (pci_dev_run_wake(pdev))
                pm_runtime_get_noresume(&pdev->dev);
 
@@ -3303,6 +3312,37 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
        pci_set_drvdata(pdev, NULL);
 }
 
+static void rtl_request_firmware(struct rtl8169_private *tp)
+{
+       int i;
+
+       /* Return early if the firmware is already loaded / cached. */
+       if (!IS_ERR(tp->fw))
+               goto out;
+
+       for (i = 0; i < ARRAY_SIZE(rtl_firmware_infos); i++) {
+               const struct rtl_firmware_info *info = rtl_firmware_infos + i;
+
+               if (info->mac_version == tp->mac_version) {
+                       const char *name = info->fw_name;
+                       int rc;
+
+                       rc = request_firmware(&tp->fw, name, &tp->pci_dev->dev);
+                       if (rc < 0) {
+                               netif_warn(tp, ifup, tp->dev, "unable to load "
+                                       "firmware patch %s (%d)\n", name, rc);
+                               goto out_disable_request_firmware;
+                       }
+                       goto out;
+               }
+       }
+
+out_disable_request_firmware:
+       tp->fw = NULL;
+out:
+       return;
+}
+
 static int rtl8169_open(struct net_device *dev)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
@@ -3334,11 +3374,13 @@ static int rtl8169_open(struct net_device *dev)
 
        smp_mb();
 
+       rtl_request_firmware(tp);
+
        retval = request_irq(dev->irq, rtl8169_interrupt,
                             (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
                             dev->name, dev);
        if (retval < 0)
-               goto err_release_ring_2;
+               goto err_release_fw_2;
 
        napi_enable(&tp->napi);
 
@@ -3359,7 +3401,8 @@ static int rtl8169_open(struct net_device *dev)
 out:
        return retval;
 
-err_release_ring_2:
+err_release_fw_2:
+       rtl_release_firmware(tp);
        rtl8169_rx_clear(tp);
 err_free_rx_1:
        dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
index d98479030ef2c3e3dbac1d9a0c0f4a3f1262e91f..3dd45ed61f0a33f5368c56df8347e1f3f5a7f1af 100644 (file)
@@ -50,6 +50,20 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
        return &nic_data->mcdi;
 }
 
+static inline void
+efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg)
+{
+       struct siena_nic_data *nic_data = efx->nic_data;
+       value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg);
+}
+
+static inline void
+efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg)
+{
+       struct siena_nic_data *nic_data = efx->nic_data;
+       __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg);
+}
+
 void efx_mcdi_init(struct efx_nic *efx)
 {
        struct efx_mcdi_iface *mcdi;
@@ -70,8 +84,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
                            const u8 *inbuf, size_t inlen)
 {
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-       unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
-       unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
+       unsigned pdu = MCDI_PDU(efx);
+       unsigned doorbell = MCDI_DOORBELL(efx);
        unsigned int i;
        efx_dword_t hdr;
        u32 xflags, seqno;
@@ -92,30 +106,28 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
                             MCDI_HEADER_SEQ, seqno,
                             MCDI_HEADER_XFLAGS, xflags);
 
-       efx_writed(efx, &hdr, pdu);
+       efx_mcdi_writed(efx, &hdr, pdu);
 
-       for (i = 0; i < inlen; i += 4) {
-               _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
-               /* use wmb() within loop to inhibit write combining */
-               wmb();
-       }
+       for (i = 0; i < inlen; i += 4)
+               efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i),
+                               pdu + 4 + i);
 
        /* ring the doorbell with a distinctive value */
-       _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
-       wmb();
+       EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc);
+       efx_mcdi_writed(efx, &hdr, doorbell);
 }
 
 static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
 {
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
-       unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+       unsigned int pdu = MCDI_PDU(efx);
        int i;
 
        BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
        BUG_ON(outlen & 3 || outlen >= 0x100);
 
        for (i = 0; i < outlen; i += 4)
-               *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
+               efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i);
 }
 
 static int efx_mcdi_poll(struct efx_nic *efx)
@@ -123,7 +135,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
        struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
        unsigned int time, finish;
        unsigned int respseq, respcmd, error;
-       unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+       unsigned int pdu = MCDI_PDU(efx);
        unsigned int rc, spins;
        efx_dword_t reg;
 
@@ -149,8 +161,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
 
                time = get_seconds();
 
-               rmb();
-               efx_readd(efx, &reg, pdu);
+               efx_mcdi_readd(efx, &reg, pdu);
 
                /* All 1's indicates that shared memory is in reset (and is
                 * not a valid header). Wait for it to come out reset before
@@ -177,7 +188,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
                          respseq, mcdi->seqno);
                rc = EIO;
        } else if (error) {
-               efx_readd(efx, &reg, pdu + 4);
+               efx_mcdi_readd(efx, &reg, pdu + 4);
                switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
 #define TRANSLATE_ERROR(name)                                  \
                case MC_CMD_ERR_ ## name:                       \
@@ -211,21 +222,21 @@ out:
 /* Test and clear MC-rebooted flag for this port/function */
 int efx_mcdi_poll_reboot(struct efx_nic *efx)
 {
-       unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
+       unsigned int addr = MCDI_REBOOT_FLAG(efx);
        efx_dword_t reg;
        uint32_t value;
 
        if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
                return false;
 
-       efx_readd(efx, &reg, addr);
+       efx_mcdi_readd(efx, &reg, addr);
        value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
 
        if (value == 0)
                return 0;
 
        EFX_ZERO_DWORD(reg);
-       efx_writed(efx, &reg, addr);
+       efx_mcdi_writed(efx, &reg, addr);
 
        if (value == MC_STATUS_DWORD_ASSERT)
                return -EINTR;
index 10f1cb79c1472cfb91f9a50f16f827248bd00c96..9b29a8d7c449d56d7fd838dd1204435165d2fbda 100644 (file)
@@ -1937,6 +1937,13 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
 
                size = min_t(size_t, table->step, 16);
 
+               if (table->offset >= efx->type->mem_map_size) {
+                       /* No longer mapped; return dummy data */
+                       memcpy(buf, "\xde\xc0\xad\xde", 4);
+                       buf += table->rows * size;
+                       continue;
+               }
+
                for (i = 0; i < table->rows; i++) {
                        switch (table->step) {
                        case 4: /* 32-bit register or SRAM */
index a42db6e35be39166d86f72887def5619c581dc58..d91701abd331c0680756b3b92a2f3ed95cf649fb 100644 (file)
@@ -143,10 +143,12 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
 /**
  * struct siena_nic_data - Siena NIC state
  * @mcdi: Management-Controller-to-Driver Interface
+ * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
  * @wol_filter_id: Wake-on-LAN packet filter id
  */
 struct siena_nic_data {
        struct efx_mcdi_iface mcdi;
+       void __iomem *mcdi_smem;
        int wol_filter_id;
 };
 
index e4dd8986b1feaa6c5b848dc8f8c948c4305592d8..837869b71db97f590b1b133eea717f78b2284ab1 100644 (file)
@@ -220,12 +220,26 @@ static int siena_probe_nic(struct efx_nic *efx)
        efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
        efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
 
+       /* Initialise MCDI */
+       nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
+                                             FR_CZ_MC_TREG_SMEM,
+                                             FR_CZ_MC_TREG_SMEM_STEP *
+                                             FR_CZ_MC_TREG_SMEM_ROWS);
+       if (!nic_data->mcdi_smem) {
+               netif_err(efx, probe, efx->net_dev,
+                         "could not map MCDI at %llx+%x\n",
+                         (unsigned long long)efx->membase_phys +
+                         FR_CZ_MC_TREG_SMEM,
+                         FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
+               rc = -ENOMEM;
+               goto fail1;
+       }
        efx_mcdi_init(efx);
 
        /* Recover from a failed assertion before probing */
        rc = efx_mcdi_handle_assertion(efx);
        if (rc)
-               goto fail1;
+               goto fail2;
 
        /* Let the BMC know that the driver is now in charge of link and
         * filter settings. We must do this before we reset the NIC */
@@ -280,6 +294,7 @@ fail4:
 fail3:
        efx_mcdi_drv_attach(efx, false, NULL);
 fail2:
+       iounmap(nic_data->mcdi_smem);
 fail1:
        kfree(efx->nic_data);
        return rc;
@@ -359,6 +374,8 @@ static int siena_init_nic(struct efx_nic *efx)
 
 static void siena_remove_nic(struct efx_nic *efx)
 {
+       struct siena_nic_data *nic_data = efx->nic_data;
+
        efx_nic_free_buffer(efx, &efx->irq_status);
 
        siena_reset_hw(efx, RESET_TYPE_ALL);
@@ -368,7 +385,8 @@ static void siena_remove_nic(struct efx_nic *efx)
                efx_mcdi_drv_attach(efx, false, NULL);
 
        /* Tear down the private nic state */
-       kfree(efx->nic_data);
+       iounmap(nic_data->mcdi_smem);
+       kfree(nic_data);
        efx->nic_data = NULL;
 }
 
@@ -606,8 +624,7 @@ struct efx_nic_type siena_a0_nic_type = {
        .default_mac_ops = &efx_mcdi_mac_operations,
 
        .revision = EFX_REV_SIENA_A0,
-       .mem_map_size = (FR_CZ_MC_TREG_SMEM +
-                        FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
+       .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */
        .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
        .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
        .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
index 86cbb9ea2f269ee2532078dea529eebdb8cc0a9f..8ec1a9a0bb9ae007c69865b2599f07d3f23c99c8 100644 (file)
@@ -853,7 +853,9 @@ static int slip_open(struct tty_struct *tty)
        /* Done.  We have linked the TTY line to a channel. */
        rtnl_unlock();
        tty->receive_room = 65536;      /* We don't flow control */
-       return sl->dev->base_addr;
+
+       /* TTY layer expects 0 on success */
+       return 0;
 
 err_free_bufs:
        sl_free_bufs(sl);
index eb4f59fb01e90cacc722a732e23ef46181747c61..bff2f7999ff0d36740632c8d35bdcbc067d9a756 100644 (file)
@@ -3237,15 +3237,18 @@ static void happy_meal_pci_exit(void)
 #endif
 
 #ifdef CONFIG_SBUS
+static const struct of_device_id hme_sbus_match[];
 static int __devinit hme_sbus_probe(struct platform_device *op)
 {
+       const struct of_device_id *match;
        struct device_node *dp = op->dev.of_node;
        const char *model = of_get_property(dp, "model", NULL);
        int is_qfe;
 
-       if (!op->dev.of_match)
+       match = of_match_device(hme_sbus_match, &op->dev);
+       if (!match)
                return -EINVAL;
-       is_qfe = (op->dev.of_match->data != NULL);
+       is_qfe = (match->data != NULL);
 
        if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
                is_qfe = 1;
index b8c5f35577e45eb81e5f6a847b2831f9ce6f3619..7a5daefb6f3311e6e6d169113d7977e908045cbb 100644 (file)
@@ -12327,8 +12327,10 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
                if (val & VCPU_CFGSHDW_ASPM_DBNC)
                        tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
                if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
-                   (val & VCPU_CFGSHDW_WOL_MAGPKT))
+                   (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
                        tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
+                       device_set_wakeup_enable(&tp->pdev->dev, true);
+               }
                goto done;
        }
 
@@ -12461,8 +12463,10 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
                        tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
 
                if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
-                   (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
+                   (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
                        tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
+                       device_set_wakeup_enable(&tp->pdev->dev, true);
+               }
 
                if (cfg2 & (1 << 17))
                        tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
index 341f7056a800ec2146c6187e3e9aca5ff6f7be5d..c924ea2bce07a4c06aa8d6ab5d9c23620b446915 100644 (file)
@@ -460,7 +460,7 @@ static const struct driver_info     cdc_info = {
        .manage_power = cdc_manage_power,
 };
 
-static const struct driver_info mbm_info = {
+static const struct driver_info wwan_info = {
        .description =  "Mobile Broadband Network Device",
        .flags =        FLAG_WWAN,
        .bind =         usbnet_cdc_bind,
@@ -471,6 +471,7 @@ static const struct driver_info mbm_info = {
 
 /*-------------------------------------------------------------------------*/
 
+#define HUAWEI_VENDOR_ID       0x12D1
 
 static const struct usb_device_id      products [] = {
 /*
@@ -566,7 +567,7 @@ static const struct usb_device_id   products [] = {
 {
        USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM,
                        USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
-       .driver_info            = 0,
+       .driver_info = (unsigned long)&wwan_info,
 },
 
 /*
@@ -587,8 +588,17 @@ static const struct usb_device_id  products [] = {
 }, {
        USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM,
                        USB_CDC_PROTO_NONE),
-       .driver_info = (unsigned long)&mbm_info,
+       .driver_info = (unsigned long)&wwan_info,
 
+}, {
+       /* Various Huawei modems with a network port like the UMG1831 */
+       .match_flags    =   USB_DEVICE_ID_MATCH_VENDOR
+                | USB_DEVICE_ID_MATCH_INT_INFO,
+       .idVendor               = HUAWEI_VENDOR_ID,
+       .bInterfaceClass        = USB_CLASS_COMM,
+       .bInterfaceSubClass     = USB_CDC_SUBCLASS_ETHERNET,
+       .bInterfaceProtocol     = 255,
+       .driver_info = (unsigned long)&wwan_info,
 },
        { },            // END
 };
index 967371f04454205a1b147bddf27443a1e14f003a..1033ef6476a41a67ab0f8201dc0ad9218a8fe6de 100644 (file)
 #include <linux/usb/usbnet.h>
 #include <linux/usb/cdc.h>
 
-#define        DRIVER_VERSION                          "7-Feb-2011"
+#define        DRIVER_VERSION                          "23-Apr-2011"
 
 /* CDC NCM subclass 3.2.1 */
 #define USB_CDC_NCM_NDP16_LENGTH_MIN           0x10
 
 /* Maximum NTB length */
-#define        CDC_NCM_NTB_MAX_SIZE_TX                 16384   /* bytes */
+#define        CDC_NCM_NTB_MAX_SIZE_TX                 (16384 + 4) /* bytes, must be short terminated */
 #define        CDC_NCM_NTB_MAX_SIZE_RX                 16384   /* bytes */
 
 /* Minimum value for MaxDatagramSize, ch. 6.2.9 */
index 7d42f9a2c06868027ac77c6265c76f9ce2288c27..81126ff85e0576e40b4d01aaf404044ffacfe1df 100644 (file)
@@ -65,6 +65,7 @@
 #define IPHETH_USBINTF_PROTO    1
 
 #define IPHETH_BUF_SIZE         1516
+#define IPHETH_IP_ALIGN                2       /* padding at front of URB */
 #define IPHETH_TX_TIMEOUT       (5 * HZ)
 
 #define IPHETH_INTFNUM          2
@@ -202,18 +203,21 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
                return;
        }
 
-       len = urb->actual_length;
-       buf = urb->transfer_buffer;
+       if (urb->actual_length <= IPHETH_IP_ALIGN) {
+               dev->net->stats.rx_length_errors++;
+               return;
+       }
+       len = urb->actual_length - IPHETH_IP_ALIGN;
+       buf = urb->transfer_buffer + IPHETH_IP_ALIGN;
 
-       skb = dev_alloc_skb(NET_IP_ALIGN + len);
+       skb = dev_alloc_skb(len);
        if (!skb) {
                err("%s: dev_alloc_skb: -ENOMEM", __func__);
                dev->net->stats.rx_dropped++;
                return;
        }
 
-       skb_reserve(skb, NET_IP_ALIGN);
-       memcpy(skb_put(skb, len), buf + NET_IP_ALIGN, len - NET_IP_ALIGN);
+       memcpy(skb_put(skb, len), buf, len);
        skb->dev = dev->net;
        skb->protocol = eth_type_trans(skb, dev->net);
 
index 47a6c870b51fae361b39de950ee4a523ced52475..48d4efdb4959a61b6704d58178879593ca4edad2 100644 (file)
@@ -730,7 +730,7 @@ static int smsc95xx_phy_initialize(struct usbnet *dev)
                msleep(10);
                bmcr = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR);
                timeout++;
-       } while ((bmcr & MII_BMCR) && (timeout < 100));
+       } while ((bmcr & BMCR_RESET) && (timeout < 100));
 
        if (timeout >= 100) {
                netdev_warn(dev->net, "timeout on PHY Reset");
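
The smsc95xx change fixes the reset-poll condition: MII_BMCR is the register address (0x00), so "bmcr & MII_BMCR" is always zero and the loop fell through after a single read without waiting for the PHY, whereas the bit to test is BMCR_RESET, which the PHY clears once its reset completes. A minimal sketch of the corrected poll, with mdio_read() and phy_id standing in for the driver's MII accessor and address:

	int timeout = 0;
	u16 bmcr;

	do {
		msleep(10);
		bmcr = mdio_read(dev, phy_id, MII_BMCR);	/* Basic Mode Control register */
	} while ((bmcr & BMCR_RESET) && ++timeout < 100);	/* bit self-clears when reset is done */

	if (timeout >= 100)
		return -ETIMEDOUT;
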
index 069c1cf0fdf73675dbb865b27ee4d673bd773840..9ab439d144ed0207758c8c5535d3fa1dcb3359b8 100644 (file)
@@ -645,6 +645,7 @@ int usbnet_stop (struct net_device *net)
        struct driver_info      *info = dev->driver_info;
        int                     retval;
 
+       clear_bit(EVENT_DEV_OPEN, &dev->flags);
        netif_stop_queue (net);
 
        netif_info(dev, ifdown, dev->net,
@@ -736,6 +737,7 @@ int usbnet_open (struct net_device *net)
                }
        }
 
+       set_bit(EVENT_DEV_OPEN, &dev->flags);
        netif_start_queue (net);
        netif_info(dev, ifup, dev->net,
                   "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
@@ -1259,6 +1261,9 @@ void usbnet_disconnect (struct usb_interface *intf)
        if (dev->driver_info->unbind)
                dev->driver_info->unbind (dev, intf);
 
+       usb_kill_urb(dev->interrupt);
+       usb_free_urb(dev->interrupt);
+
        free_netdev(net);
        usb_put_dev (xdev);
 }
@@ -1498,6 +1503,10 @@ int usbnet_resume (struct usb_interface *intf)
        int                     retval;
 
        if (!--dev->suspend_count) {
+               /* resume interrupt URBs */
+               if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags))
+                       usb_submit_urb(dev->interrupt, GFP_NOIO);
+
                spin_lock_irq(&dev->txq.lock);
                while ((res = usb_get_from_anchor(&dev->deferred))) {
 
@@ -1516,9 +1525,12 @@ int usbnet_resume (struct usb_interface *intf)
                smp_mb();
                clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
                spin_unlock_irq(&dev->txq.lock);
-               if (!(dev->txq.qlen >= TX_QLEN(dev)))
-                       netif_start_queue(dev->net);
-               tasklet_schedule (&dev->bh);
+
+               if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+                       if (!(dev->txq.qlen >= TX_QLEN(dev)))
+                               netif_start_queue(dev->net);
+                       tasklet_schedule (&dev->bh);
+               }
        }
        return 0;
 }
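
The usbnet hunks add an EVENT_DEV_OPEN flag so that resume only resubmits the interrupt URB and restarts the transmit machinery when the interface is actually up, and usbnet_disconnect() now kills and frees the interrupt URB instead of leaking it. A condensed sketch of how the flag ties open, stop and resume together (names as in the hunks above):

	/* usbnet_open: mark the device up before enabling the queue */
	set_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_start_queue(net);

	/* usbnet_stop: clear it first so a later resume stays quiet */
	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	/* usbnet_resume: only touch URBs and the queue when open */
	if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags))
		usb_submit_urb(dev->interrupt, GFP_NOIO);
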
index 2de9b90c5f8f079859b695979acb4871c5a0ea08..3b99f64104fd528f7e1582931cacb58ff3bf7a92 100644 (file)
@@ -403,6 +403,17 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
        if (tb[IFLA_ADDRESS] == NULL)
                random_ether_addr(dev->dev_addr);
 
+       if (tb[IFLA_IFNAME])
+               nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
+       else
+               snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
+
+       if (strchr(dev->name, '%')) {
+               err = dev_alloc_name(dev, dev->name);
+               if (err < 0)
+                       goto err_alloc_name;
+       }
+
        err = register_netdevice(dev);
        if (err < 0)
                goto err_register_dev;
@@ -422,6 +433,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
 
 err_register_dev:
        /* nothing to do */
+err_alloc_name:
 err_configure_peer:
        unregister_netdevice(peer);
        return err;
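
veth_newlink() now honours a user-supplied IFLA_IFNAME for the first device and otherwise falls back to a "veth%d" template, letting dev_alloc_name() pick the next free index before register_netdevice() runs. Note the "%%d" in the snprintf format: it stores a literal "%d" in dev->name, which dev_alloc_name() then expands. The naming fallback in isolation (names as in the hunk):

	snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");	/* stores the literal "veth%d" */
	if (strchr(dev->name, '%'))
		err = dev_alloc_name(dev, dev->name);	/* expands to veth0, veth1, ... */
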
index 0d47c3a0530788ab0e596836898eb399fd27e44c..c16ed961153a7705ff742a6212c7e322d28f76fd 100644 (file)
@@ -178,6 +178,7 @@ static void
 vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 {
        int i;
+       unsigned long flags;
        u32 events = le32_to_cpu(adapter->shared->ecr);
        if (!events)
                return;
@@ -190,10 +191,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 
        /* Check if there is an error on xmit/recv queues */
        if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
-               spin_lock(&adapter->cmd_lock);
+               spin_lock_irqsave(&adapter->cmd_lock, flags);
                VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                       VMXNET3_CMD_GET_QUEUE_STATUS);
-               spin_unlock(&adapter->cmd_lock);
+               spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
                for (i = 0; i < adapter->num_tx_queues; i++)
                        if (adapter->tqd_start[i].status.stopped)
@@ -2733,13 +2734,14 @@ static void
 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
 {
        u32 cfg;
+       unsigned long flags;
 
        /* intr settings */
-       spin_lock(&adapter->cmd_lock);
+       spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                               VMXNET3_CMD_GET_CONF_INTR);
        cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
-       spin_unlock(&adapter->cmd_lock);
+       spin_unlock_irqrestore(&adapter->cmd_lock, flags);
        adapter->intr.type = cfg & 0x3;
        adapter->intr.mask_mode = (cfg >> 2) & 0x3;
 
index 51f2ef142a5b635cca3f15239b830d2b554d6d6d..976467253d2006324af9355952e753d24c0ea301 100644 (file)
@@ -311,6 +311,9 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
                /* toggle the LRO feature*/
                netdev->features ^= NETIF_F_LRO;
 
+               /* Update private LRO flag */
+               adapter->lro = lro_requested;
+
                /* update hardware LRO capability accordingly */
                if (lro_requested)
                        adapter->shared->devRead.misc.uptFeatures |=
index 17d04ff8d678021aaf0f52f2da55794e017272b5..1482fa6508334cdd32b979b5a0a22bf8e826082b 100644 (file)
@@ -2141,6 +2141,8 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
 static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
 {
        struct ath_softc *sc = hw->priv;
+       struct ath_hw *ah = sc->sc_ah;
+       struct ath_common *common = ath9k_hw_common(ah);
        int timeout = 200; /* ms */
        int i, j;
 
@@ -2149,6 +2151,12 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
 
        cancel_delayed_work_sync(&sc->tx_complete_work);
 
+       if (sc->sc_flags & SC_OP_INVALID) {
+               ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
+               mutex_unlock(&sc->mutex);
+               return;
+       }
+
        if (drop)
                timeout = 1;
 
index dcd19bc337d1ddf96890432f085ee6ee83c9658d..b29c80def35e89f10284ae794d267ca3a19d3e55 100644 (file)
@@ -506,7 +506,7 @@ bool ath_stoprecv(struct ath_softc *sc)
                        "confusing the DMA engine when we start RX up\n");
                ATH_DBG_WARN_ON_ONCE(!stopped);
        }
-       return stopped || reset;
+       return stopped && !reset;
 }
 
 void ath_flushrecv(struct ath_softc *sc)
index d59b0168c14ad3b6c9b8b6c5ab40cbb524c0fb87..5af40d9170a096c4c598c9ae7c170cbb149c2f1a 100644 (file)
@@ -72,6 +72,7 @@ MODULE_FIRMWARE("b43/ucode11.fw");
 MODULE_FIRMWARE("b43/ucode13.fw");
 MODULE_FIRMWARE("b43/ucode14.fw");
 MODULE_FIRMWARE("b43/ucode15.fw");
+MODULE_FIRMWARE("b43/ucode16_mimo.fw");
 MODULE_FIRMWARE("b43/ucode5.fw");
 MODULE_FIRMWARE("b43/ucode9.fw");
 
index 5c40502f869a89458545456870826236a27904d5..79ac081832fb771ff6ce607f37ce50e9bd5bc783 100644 (file)
@@ -316,12 +316,18 @@ int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
        hdr_len = ieee80211_hdrlen(fc);
 
-       /* Find index into station table for destination station */
-       sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
-       if (sta_id == IWL_INVALID_STATION) {
-               IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
-                              hdr->addr1);
-               goto drop_unlock;
+       /* For management frames use the broadcast id so as not to break aggregation */

+       if (!ieee80211_is_data(fc))
+               sta_id = ctx->bcast_sta_id;
+       else {
+               /* Find index into station table for destination station */
+               sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
+
+               if (sta_id == IWL_INVALID_STATION) {
+                       IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
+                                      hdr->addr1);
+                       goto drop_unlock;
+               }
        }
 
        IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
@@ -1127,12 +1133,16 @@ int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
             q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
                tx_info = &txq->txb[txq->q.read_ptr];
-               iwl4965_tx_status(priv, tx_info,
-                                txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
+
+               if (WARN_ON_ONCE(tx_info->skb == NULL))
+                       continue;
 
                hdr = (struct ieee80211_hdr *)tx_info->skb->data;
-               if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+               if (ieee80211_is_data_qos(hdr->frame_control))
                        nfreed++;
+
+               iwl4965_tx_status(priv, tx_info,
+                                txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
                tx_info->skb = NULL;
 
                priv->cfg->ops->lib->txq_free_tfd(priv, txq);
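
The reclaim loop above (and the matching iwlagn hunk further down) now reads the 802.11 header before handing the entry to the status routine, since that call ultimately passes the skb back to mac80211, and it skips any slot whose skb pointer is already NULL with a one-time warning. The guard pattern in isolation (report_tx_status() is a placeholder for the driver's status call):

	if (WARN_ON_ONCE(tx_info->skb == NULL))
		continue;			/* warn once, skip the stale slot */

	hdr = (struct ieee80211_hdr *)tx_info->skb->data;
	if (ieee80211_is_data_qos(hdr->frame_control))
		nfreed++;

	report_tx_status(priv, tx_info);	/* may hand the skb to mac80211 */
	tx_info->skb = NULL;
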
index c1511b14b239c69884292e5e8e33296debbdebc4..42db0fc8b921983d83bc244a8e4351ad207a0a0b 100644 (file)
@@ -2155,6 +2155,13 @@ int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
                        goto set_ch_out;
                }
 
+               if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
+                   !iwl_legacy_is_channel_ibss(ch_info)) {
+                       IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n");
+                       ret = -EINVAL;
+                       goto set_ch_out;
+               }
+
                spin_lock_irqsave(&priv->lock, flags);
 
                for_each_context(priv, ctx) {
index 9ee849d669f30532c8ddba597ef09d0827eb49b2..f43ac1eb901428371952d0e906dcfee849204e1f 100644 (file)
@@ -1411,6 +1411,12 @@ iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
        return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
 }
 
+static inline int
+iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch)
+{
+       return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
+}
+
 static inline void
 __iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
 {
index 15eb8b70715741c0a92e347b0d087964df2b32f9..bda0d61b2c0d1e5aefaea5b13fd4e25a84832ec1 100644 (file)
@@ -48,8 +48,21 @@ module_param(led_mode, int, S_IRUGO);
 MODULE_PARM_DESC(led_mode, "0=system default, "
                "1=On(RF On)/Off(RF Off), 2=blinking");
 
+/* Throughput          OFF time(ms)    ON time (ms)
+ *     >300                    25              25
+ *     >200 to 300             40              40
+ *     >100 to 200             55              55
+ *     >70 to 100              65              65
+ *     >50 to 70               75              75
+ *     >20 to 50               85              85
+ *     >10 to 20               95              95
+ *     >5 to 10                110             110
+ *     >1 to 5                 130             130
+ *     >0 to 1                 167             167
+ *     <=0                                     SOLID ON
+ */
 static const struct ieee80211_tpt_blink iwl_blink[] = {
-       { .throughput = 0 * 1024 - 1, .blink_time = 334 },
+       { .throughput = 0, .blink_time = 334 },
        { .throughput = 1 * 1024 - 1, .blink_time = 260 },
        { .throughput = 5 * 1024 - 1, .blink_time = 220 },
        { .throughput = 10 * 1024 - 1, .blink_time = 190 },
@@ -101,6 +114,11 @@ static int iwl_legacy_led_cmd(struct iwl_priv *priv,
        if (priv->blink_on == on && priv->blink_off == off)
                return 0;
 
+       if (off == 0) {
+               /* led is SOLID_ON */
+               on = IWL_LED_SOLID;
+       }
+
        IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
                        priv->cfg->base_params->led_compensation);
        led_cmd.on = iwl_legacy_blink_compensation(priv, on,
index d484c3678163f9191570f59dade85c9d642e018e..a62fe24ee594cbbb5b462701ca3ec369b422a437 100644 (file)
@@ -2984,15 +2984,15 @@ static void iwl4965_bg_txpower_work(struct work_struct *work)
        struct iwl_priv *priv = container_of(work, struct iwl_priv,
                        txpower_work);
 
+       mutex_lock(&priv->mutex);
+
        /* If a scan happened to start before we got here
         * then just return; the statistics notification will
         * kick off another scheduled work to compensate for
         * any temperature delta we missed here. */
        if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
            test_bit(STATUS_SCANNING, &priv->status))
-               return;
-
-       mutex_lock(&priv->mutex);
+               goto out;
 
        /* Regardless of if we are associated, we must reconfigure the
         * TX power since frames can be sent on non-radar channels while
@@ -3002,7 +3002,7 @@ static void iwl4965_bg_txpower_work(struct work_struct *work)
        /* Update last_temperature to keep is_calib_needed from running
         * when it isn't needed... */
        priv->last_temperature = priv->temperature;
-
+out:
        mutex_unlock(&priv->mutex);
 }
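
iwl4965_bg_txpower_work() now takes priv->mutex before it tests STATUS_EXIT_PENDING and STATUS_SCANNING, and every exit path leaves through the common "out:" label, so the status check happens under the lock and the mutex is always released exactly once. The shape of the pattern (names as in the hunk):

	mutex_lock(&priv->mutex);

	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
	    test_bit(STATUS_SCANNING, &priv->status))
		goto out;			/* checked under the lock */

	/* ... reconfigure TX power ... */
out:
	mutex_unlock(&priv->mutex);
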
 
index dfdbea6e8f99530f05ce3c7fec062f2a6ff1a381..fbbde0712fa58ffb495f66e5e31d1265c95c162e 100644 (file)
@@ -335,7 +335,6 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
        struct ieee80211_channel *channel = conf->channel;
        const struct iwl_channel_info *ch_info;
        int ret = 0;
-       bool ht_changed[NUM_IWL_RXON_CTX] = {};
 
        IWL_DEBUG_MAC80211(priv, "changed %#x", changed);
 
@@ -383,10 +382,8 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
 
                for_each_context(priv, ctx) {
                        /* Configure HT40 channels */
-                       if (ctx->ht.enabled != conf_is_ht(conf)) {
+                       if (ctx->ht.enabled != conf_is_ht(conf))
                                ctx->ht.enabled = conf_is_ht(conf);
-                               ht_changed[ctx->ctxid] = true;
-                       }
 
                        if (ctx->ht.enabled) {
                                if (conf_is_ht40_minus(conf)) {
@@ -455,8 +452,6 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
                if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
                        continue;
                iwlagn_commit_rxon(priv, ctx);
-               if (ht_changed[ctx->ctxid])
-                       iwlagn_update_qos(priv, ctx);
        }
  out:
        mutex_unlock(&priv->mutex);
index a709d05c5868f3e6a19d586bcdffd560cb69bf6f..0712b67283a4f7e1b4a4ae702623ba32fd61a933 100644 (file)
@@ -568,12 +568,17 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
        hdr_len = ieee80211_hdrlen(fc);
 
-       /* Find index into station table for destination station */
-       sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
-       if (sta_id == IWL_INVALID_STATION) {
-               IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
-                              hdr->addr1);
-               goto drop_unlock;
+       /* For management frames use the broadcast id so as not to break aggregation */
+       if (!ieee80211_is_data(fc))
+               sta_id = ctx->bcast_sta_id;
+       else {
+               /* Find index into station table for destination station */
+               sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
+               if (sta_id == IWL_INVALID_STATION) {
+                       IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
+                                      hdr->addr1);
+                       goto drop_unlock;
+               }
        }
 
        IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
@@ -1224,12 +1229,16 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
             q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
                tx_info = &txq->txb[txq->q.read_ptr];
-               iwlagn_tx_status(priv, tx_info,
-                                txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
+
+               if (WARN_ON_ONCE(tx_info->skb == NULL))
+                       continue;
 
                hdr = (struct ieee80211_hdr *)tx_info->skb->data;
-               if (hdr && ieee80211_is_data_qos(hdr->frame_control))
+               if (ieee80211_is_data_qos(hdr->frame_control))
                        nfreed++;
+
+               iwlagn_tx_status(priv, tx_info,
+                                txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
                tx_info->skb = NULL;
 
                if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
index 7e8a658b7670ee580b5a8a84094beb9c6de86dd7..f3ac62431a3085c7eee695e98987ba140cb53d02 100644 (file)
@@ -1339,8 +1339,8 @@ int lbs_execute_next_command(struct lbs_private *priv)
                                    cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) {
                                        lbs_deb_host(
                                               "EXEC_NEXT_CMD: ignore ENTER_PS cmd\n");
-                                       list_del(&cmdnode->list);
                                        spin_lock_irqsave(&priv->driver_lock, flags);
+                                       list_del(&cmdnode->list);
                                        lbs_complete_command(priv, cmdnode, 0);
                                        spin_unlock_irqrestore(&priv->driver_lock, flags);
 
@@ -1352,8 +1352,8 @@ int lbs_execute_next_command(struct lbs_private *priv)
                                    (priv->psstate == PS_STATE_PRE_SLEEP)) {
                                        lbs_deb_host(
                                               "EXEC_NEXT_CMD: ignore EXIT_PS cmd in sleep\n");
-                                       list_del(&cmdnode->list);
                                        spin_lock_irqsave(&priv->driver_lock, flags);
+                                       list_del(&cmdnode->list);
                                        lbs_complete_command(priv, cmdnode, 0);
                                        spin_unlock_irqrestore(&priv->driver_lock, flags);
                                        priv->needtowakeup = 1;
@@ -1366,7 +1366,9 @@ int lbs_execute_next_command(struct lbs_private *priv)
                                       "EXEC_NEXT_CMD: sending EXIT_PS\n");
                        }
                }
+               spin_lock_irqsave(&priv->driver_lock, flags);
                list_del(&cmdnode->list);
+               spin_unlock_irqrestore(&priv->driver_lock, flags);
                lbs_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n",
                            le16_to_cpu(cmd->command));
                lbs_submit_command(priv, cmdnode);
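
All three paths in lbs_execute_next_command() now call list_del(&cmdnode->list) while holding priv->driver_lock. Removing an entry from a list without the lock that protects it can corrupt the list if another context walks or modifies it at the same time, so the rule is simply:

	spin_lock_irqsave(&priv->driver_lock, flags);
	list_del(&cmdnode->list);	/* the command list is only touched under driver_lock */
	spin_unlock_irqrestore(&priv->driver_lock, flags);
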
index b78a38d9172a52d8517faba9df5cb914d5215e1c..8c7c522a056acc76faa9e411b6231d10784432a7 100644 (file)
@@ -126,7 +126,7 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z,
 
     board = z->resource.start;
     ioaddr = board+cards[i].offset;
-    dev = alloc_ei_netdev();
+    dev = ____alloc_ei_netdev(0);
     if (!dev)
        return -ENOMEM;
     if (!request_mem_region(ioaddr, NE_IO_EXTENT*2, DRV_NAME)) {
@@ -146,15 +146,15 @@ static int __devinit zorro8390_init_one(struct zorro_dev *z,
 static const struct net_device_ops zorro8390_netdev_ops = {
        .ndo_open               = zorro8390_open,
        .ndo_stop               = zorro8390_close,
-       .ndo_start_xmit         = ei_start_xmit,
-       .ndo_tx_timeout         = ei_tx_timeout,
-       .ndo_get_stats          = ei_get_stats,
-       .ndo_set_multicast_list = ei_set_multicast_list,
+       .ndo_start_xmit         = __ei_start_xmit,
+       .ndo_tx_timeout         = __ei_tx_timeout,
+       .ndo_get_stats          = __ei_get_stats,
+       .ndo_set_multicast_list = __ei_set_multicast_list,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_change_mtu         = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = ei_poll,
+       .ndo_poll_controller    = __ei_poll,
 #endif
 };
 
index ebf51ad1b71478f08532edcdda8b3acf9d711bb7..a806cb321d2e25c2f896cda9aa4c329eb38a3051 100644 (file)
@@ -579,7 +579,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
        }
        size0 = calculate_iosize(size, min_size, size1,
                        resource_size(b_res), 4096);
-       size1 = !add_size? size0:
+       size1 = (!add_head || (add_head && !add_size)) ? size0 :
                calculate_iosize(size, min_size+add_size, size1,
                        resource_size(b_res), 4096);
        if (!size0 && !size1) {
@@ -677,7 +677,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
                align += aligns[order];
        }
        size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
-       size1 = !add_size ? size :
+       size1 = (!add_head || (add_head && !add_size)) ? size0 :
                calculate_memsize(size, min_size+add_size, 0,
                                resource_size(b_res), min_align);
        if (!size0 && !size1) {
index 5f2dd386152b52236991b73c60b61fa361555af9..2c1abf63957f23d0353c209af843f8e8bcf3c50c 100644 (file)
@@ -585,8 +585,9 @@ static bool eeepc_wlan_rfkill_blocked(struct eeepc_laptop *eeepc)
        return true;
 }
 
-static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc)
+static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
 {
+       struct pci_dev *port;
        struct pci_dev *dev;
        struct pci_bus *bus;
        bool blocked = eeepc_wlan_rfkill_blocked(eeepc);
@@ -599,9 +600,16 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc)
        mutex_lock(&eeepc->hotplug_lock);
 
        if (eeepc->hotplug_slot) {
-               bus = pci_find_bus(0, 1);
+               port = acpi_get_pci_dev(handle);
+               if (!port) {
+                       pr_warning("Unable to find port\n");
+                       goto out_unlock;
+               }
+
+               bus = port->subordinate;
+
                if (!bus) {
-                       pr_warning("Unable to find PCI bus 1?\n");
+                       pr_warning("Unable to find PCI bus?\n");
                        goto out_unlock;
                }
 
@@ -609,6 +617,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc)
                        pr_err("Unable to read PCI config space?\n");
                        goto out_unlock;
                }
+
                absent = (l == 0xffffffff);
 
                if (blocked != absent) {
@@ -647,6 +656,17 @@ out_unlock:
        mutex_unlock(&eeepc->hotplug_lock);
 }
 
+static void eeepc_rfkill_hotplug_update(struct eeepc_laptop *eeepc, char *node)
+{
+       acpi_status status = AE_OK;
+       acpi_handle handle;
+
+       status = acpi_get_handle(NULL, node, &handle);
+
+       if (ACPI_SUCCESS(status))
+               eeepc_rfkill_hotplug(eeepc, handle);
+}
+
 static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
 {
        struct eeepc_laptop *eeepc = data;
@@ -654,7 +674,7 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
        if (event != ACPI_NOTIFY_BUS_CHECK)
                return;
 
-       eeepc_rfkill_hotplug(eeepc);
+       eeepc_rfkill_hotplug(eeepc, handle);
 }
 
 static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc,
@@ -672,6 +692,11 @@ static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc,
                                                     eeepc);
                if (ACPI_FAILURE(status))
                        pr_warning("Failed to register notify on %s\n", node);
+               /*
+                * Refresh pci hotplug in case the rfkill state was
+                * changed during setup.
+                */
+               eeepc_rfkill_hotplug(eeepc, handle);
        } else
                return -ENODEV;
 
@@ -693,6 +718,12 @@ static void eeepc_unregister_rfkill_notifier(struct eeepc_laptop *eeepc,
                if (ACPI_FAILURE(status))
                        pr_err("Error removing rfkill notify handler %s\n",
                                node);
+                       /*
+                        * Refresh pci hotplug in case the rfkill
+                        * state was changed after
+                        * eeepc_unregister_rfkill_notifier()
+                        */
+               eeepc_rfkill_hotplug(eeepc, handle);
        }
 }
 
@@ -816,11 +847,7 @@ static void eeepc_rfkill_exit(struct eeepc_laptop *eeepc)
                rfkill_destroy(eeepc->wlan_rfkill);
                eeepc->wlan_rfkill = NULL;
        }
-       /*
-        * Refresh pci hotplug in case the rfkill state was changed after
-        * eeepc_unregister_rfkill_notifier()
-        */
-       eeepc_rfkill_hotplug(eeepc);
+
        if (eeepc->hotplug_slot)
                pci_hp_deregister(eeepc->hotplug_slot);
 
@@ -889,11 +916,6 @@ static int eeepc_rfkill_init(struct eeepc_laptop *eeepc)
        eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P5");
        eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P6");
        eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P7");
-       /*
-        * Refresh pci hotplug in case the rfkill state was changed during
-        * setup.
-        */
-       eeepc_rfkill_hotplug(eeepc);
 
 exit:
        if (result && result != -ENODEV)
@@ -928,8 +950,11 @@ static int eeepc_hotk_restore(struct device *device)
        struct eeepc_laptop *eeepc = dev_get_drvdata(device);
 
        /* Refresh both wlan rfkill state and pci hotplug */
-       if (eeepc->wlan_rfkill)
-               eeepc_rfkill_hotplug(eeepc);
+       if (eeepc->wlan_rfkill) {
+               eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P5");
+               eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P6");
+               eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P7");
+       }
 
        if (eeepc->bluetooth_rfkill)
                rfkill_set_sw_state(eeepc->bluetooth_rfkill,
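
Rather than assuming the wlan slot always sits behind PCI bus 1, the eeepc-laptop hotplug path now derives the bus from the ACPI handle that raised the notification: acpi_get_pci_dev() maps the handle to the corresponding root-port pci_dev, and port->subordinate is the bus behind that port. A minimal sketch of the lookup (error handling trimmed; the pci_dev_put() is part of the sketch, not shown in the hunk above):

	struct pci_dev *port = acpi_get_pci_dev(handle);	/* ACPI node -> PCIe port */

	if (port) {
		struct pci_bus *bus = port->subordinate;	/* bus behind that port */

		if (bus) {
			/* ... rescan or remove the hotplug slot on this bus ... */
		}
		pci_dev_put(port);	/* drop the reference acpi_get_pci_dev() took */
	}
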
index 8f709aec4da0ac2ebb81d09e19baea9932506f26..6fe8cd6e23b5f89fd1efc1145796a9e6c530aa8e 100644 (file)
@@ -934,6 +934,14 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
 /*
  * Backlight device
  */
+struct sony_backlight_props {
+       struct backlight_device *dev;
+       int                     handle;
+       u8                      offset;
+       u8                      maxlvl;
+};
+struct sony_backlight_props sony_bl_props;
+
 static int sony_backlight_update_status(struct backlight_device *bd)
 {
        return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT",
@@ -954,21 +962,26 @@ static int sony_nc_get_brightness_ng(struct backlight_device *bd)
 {
        int result;
        int *handle = (int *)bl_get_data(bd);
+       struct sony_backlight_props *sdev =
+               (struct sony_backlight_props *)bl_get_data(bd);
 
-       sony_call_snc_handle(*handle, 0x0200, &result);
+       sony_call_snc_handle(sdev->handle, 0x0200, &result);
 
-       return result & 0xff;
+       return (result & 0xff) - sdev->offset;
 }
 
 static int sony_nc_update_status_ng(struct backlight_device *bd)
 {
        int value, result;
        int *handle = (int *)bl_get_data(bd);
+       struct sony_backlight_props *sdev =
+               (struct sony_backlight_props *)bl_get_data(bd);
 
-       value = bd->props.brightness;
-       sony_call_snc_handle(*handle, 0x0100 | (value << 16), &result);
+       value = bd->props.brightness + sdev->offset;
+       if (sony_call_snc_handle(sdev->handle, 0x0100 | (value << 16), &result))
+               return -EIO;
 
-       return sony_nc_get_brightness_ng(bd);
+       return value;
 }
 
 static const struct backlight_ops sony_backlight_ops = {
@@ -981,8 +994,6 @@ static const struct backlight_ops sony_backlight_ng_ops = {
        .update_status = sony_nc_update_status_ng,
        .get_brightness = sony_nc_get_brightness_ng,
 };
-static int backlight_ng_handle;
-static struct backlight_device *sony_backlight_device;
 
 /*
  * New SNC-only Vaios event mapping to driver known keys
@@ -1549,6 +1560,75 @@ static void sony_nc_kbd_backlight_resume(void)
                                &ignore);
 }
 
+static void sony_nc_backlight_ng_read_limits(int handle,
+               struct sony_backlight_props *props)
+{
+       int offset;
+       acpi_status status;
+       u8 brlvl, i;
+       u8 min = 0xff, max = 0x00;
+       struct acpi_object_list params;
+       union acpi_object in_obj;
+       union acpi_object *lvl_enum;
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+       props->handle = handle;
+       props->offset = 0;
+       props->maxlvl = 0xff;
+
+       offset = sony_find_snc_handle(handle);
+       if (offset < 0)
+               return;
+
+       /* try to read the boundaries from the ACPI tables; if that fails, the
+        * defaults above should be reasonable
+        */
+       params.count = 1;
+       params.pointer = &in_obj;
+       in_obj.type = ACPI_TYPE_INTEGER;
+       in_obj.integer.value = offset;
+       status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
+                       &buffer);
+       if (ACPI_FAILURE(status))
+               return;
+
+       lvl_enum = (union acpi_object *) buffer.pointer;
+       if (!lvl_enum) {
+               pr_err("No SN06 return object.");
+               return;
+       }
+       if (lvl_enum->type != ACPI_TYPE_BUFFER) {
+               pr_err("Invalid SN06 return object 0x%.2x\n",
+                      lvl_enum->type);
+               goto out_invalid;
+       }
+
+       /* the buffer lists the available brightness levels; entries 0 to 8 of the
+        * array hold brightness values, the remaining ones are used by ALS control.
+        */
+       for (i = 0; i < 9 && i < lvl_enum->buffer.length; i++) {
+
+               brlvl = *(lvl_enum->buffer.pointer + i);
+               dprintk("Brightness level: %d\n", brlvl);
+
+               if (!brlvl)
+                       break;
+
+               if (brlvl > max)
+                       max = brlvl;
+               if (brlvl < min)
+                       min = brlvl;
+       }
+       props->offset = min;
+       props->maxlvl = max;
+       dprintk("Brightness levels: min=%d max=%d\n", props->offset,
+                       props->maxlvl);
+
+out_invalid:
+       kfree(buffer.pointer);
+       return;
+}
+
 static void sony_nc_backlight_setup(void)
 {
        acpi_handle unused;
@@ -1557,14 +1637,14 @@ static void sony_nc_backlight_setup(void)
        struct backlight_properties props;
 
        if (sony_find_snc_handle(0x12f) != -1) {
-               backlight_ng_handle = 0x12f;
                ops = &sony_backlight_ng_ops;
-               max_brightness = 0xff;
+               sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props);
+               max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
 
        } else if (sony_find_snc_handle(0x137) != -1) {
-               backlight_ng_handle = 0x137;
                ops = &sony_backlight_ng_ops;
-               max_brightness = 0xff;
+               sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props);
+               max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
 
        } else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
                                                &unused))) {
@@ -1577,22 +1657,22 @@ static void sony_nc_backlight_setup(void)
        memset(&props, 0, sizeof(struct backlight_properties));
        props.type = BACKLIGHT_PLATFORM;
        props.max_brightness = max_brightness;
-       sony_backlight_device = backlight_device_register("sony", NULL,
-                                                         &backlight_ng_handle,
-                                                         ops, &props);
+       sony_bl_props.dev = backlight_device_register("sony", NULL,
+                                                     &sony_bl_props,
+                                                     ops, &props);
 
-       if (IS_ERR(sony_backlight_device)) {
-               pr_warning(DRV_PFX "unable to register backlight device\n");
-               sony_backlight_device = NULL;
+       if (IS_ERR(sony_bl_props.dev)) {
+               pr_warn(DRV_PFX "unable to register backlight device\n");
+               sony_bl_props.dev = NULL;
        } else
-               sony_backlight_device->props.brightness =
-                   ops->get_brightness(sony_backlight_device);
+               sony_bl_props.dev->props.brightness =
+                       ops->get_brightness(sony_bl_props.dev);
 }
 
 static void sony_nc_backlight_cleanup(void)
 {
-       if (sony_backlight_device)
-               backlight_device_unregister(sony_backlight_device);
+       if (sony_bl_props.dev)
+               backlight_device_unregister(sony_bl_props.dev);
 }
 
 static int sony_nc_add(struct acpi_device *device)
@@ -2590,7 +2670,7 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
        mutex_lock(&spic_dev.lock);
        switch (cmd) {
        case SONYPI_IOCGBRT:
-               if (sony_backlight_device == NULL) {
+               if (sony_bl_props.dev == NULL) {
                        ret = -EIO;
                        break;
                }
@@ -2603,7 +2683,7 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
                                ret = -EFAULT;
                break;
        case SONYPI_IOCSBRT:
-               if (sony_backlight_device == NULL) {
+               if (sony_bl_props.dev == NULL) {
                        ret = -EIO;
                        break;
                }
@@ -2617,8 +2697,8 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
                        break;
                }
                /* sync the backlight device status */
-               sony_backlight_device->props.brightness =
-                   sony_backlight_get_brightness(sony_backlight_device);
+               sony_bl_props.dev->props.brightness =
+                   sony_backlight_get_brightness(sony_bl_props.dev);
                break;
        case SONYPI_IOCGBAT1CAP:
                if (ec_read16(SONYPI_BAT1_FULL, &val16)) {
index efb3b6b9bcdbe0746011c5762ec875218ac669ce..562fcf0dd2b5b083b7d45aeeb7e78a7c6809425f 100644 (file)
@@ -128,7 +128,8 @@ enum {
 };
 
 /* ACPI HIDs */
-#define TPACPI_ACPI_HKEY_HID           "IBM0068"
+#define TPACPI_ACPI_IBM_HKEY_HID       "IBM0068"
+#define TPACPI_ACPI_LENOVO_HKEY_HID    "LEN0068"
 #define TPACPI_ACPI_EC_HID             "PNP0C09"
 
 /* Input IDs */
@@ -3879,7 +3880,8 @@ errexit:
 }
 
 static const struct acpi_device_id ibm_htk_device_ids[] = {
-       {TPACPI_ACPI_HKEY_HID, 0},
+       {TPACPI_ACPI_IBM_HKEY_HID, 0},
+       {TPACPI_ACPI_LENOVO_HKEY_HID, 0},
        {"", 0},
 };
 
index ac2701b22e7133f8239a3136b18a5f85799f8578..043ee3136e408412c6d9a92db354bafce1ac78fe 100644 (file)
@@ -95,6 +95,9 @@ idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
        else
                table++;
 
+       if (route_port == RIO_INVALID_ROUTE)
+               route_port = IDT_DEFAULT_ROUTE;
+
        rio_mport_write_config_32(mport, destid, hopcount,
                                  LOCAL_RTE_CONF_DESTID_SEL, table);
 
@@ -411,6 +414,12 @@ static int idtg2_switch_init(struct rio_dev *rdev, int do_enum)
        rdev->rswitch->em_handle = idtg2_em_handler;
        rdev->rswitch->sw_sysfs = idtg2_sysfs;
 
+       if (do_enum) {
+               /* Ensure that default routing is disabled on startup */
+               rio_write_config_32(rdev,
+                                   RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE);
+       }
+
        return 0;
 }
 
index 3a971077e7bfc924da664209502fbc70ece30473..d06ee2d44b44794f08b23511b19339024bde2d4d 100644 (file)
@@ -26,6 +26,9 @@ idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
 {
        u32 result;
 
+       if (route_port == RIO_INVALID_ROUTE)
+               route_port = CPS_DEFAULT_ROUTE;
+
        if (table == RIO_GLOBAL_TABLE) {
                rio_mport_write_config_32(mport, destid, hopcount,
                                RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
@@ -130,6 +133,9 @@ static int idtcps_switch_init(struct rio_dev *rdev, int do_enum)
                /* set TVAL = ~50us */
                rio_write_config_32(rdev,
                        rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
+               /* Ensure that default routing is disabled on startup */
+               rio_write_config_32(rdev,
+                                   RIO_STD_RTE_DEFAULT_PORT, CPS_NO_ROUTE);
        }
 
        return 0;
index 1a62934bfebc5f7982b0583b286268793a94d49f..db8b8028988d38216290a3e6021573f527d9ee55 100644 (file)
@@ -303,6 +303,12 @@ static int tsi57x_switch_init(struct rio_dev *rdev, int do_enum)
        rdev->rswitch->em_init = tsi57x_em_init;
        rdev->rswitch->em_handle = tsi57x_em_handler;
 
+       if (do_enum) {
+               /* Ensure that default routing is disabled on startup */
+               rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT,
+                                   RIO_INVALID_ROUTE);
+       }
+
        return 0;
 }
 
index 8d46838dff8a0aaac95e032211bd9945f2ced1c4..755e1fe914af46911bedf6897cb553240a0e0e3d 100644 (file)
@@ -524,6 +524,8 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
                goto fail2;
        }
 
+       platform_set_drvdata(pdev, davinci_rtc);
+
        davinci_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
                                    &davinci_rtc_ops, THIS_MODULE);
        if (IS_ERR(davinci_rtc->rtc)) {
@@ -553,8 +555,6 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
 
        rtcss_write(davinci_rtc, PRTCSS_RTC_CCTRL_CAEN, PRTCSS_RTC_CCTRL);
 
-       platform_set_drvdata(pdev, davinci_rtc);
-
        device_init_wakeup(&pdev->dev, 0);
 
        return 0;
@@ -562,6 +562,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
 fail4:
        rtc_device_unregister(davinci_rtc->rtc);
 fail3:
+       platform_set_drvdata(pdev, NULL);
        iounmap(davinci_rtc->base);
 fail2:
        release_mem_region(davinci_rtc->pbase, davinci_rtc->base_size);
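
This davinci hunk is the first of a series of rtc fixes below (ds1286, ep93xx, m41t80, max8925, max8998, msm6242, mxc, pcap, rp5c01, ...) that all move platform_set_drvdata() or i2c_set_clientdata() to before rtc_device_register(): registration may immediately trigger an interrupt or call back into the driver, and those paths expect the driver data to already be valid; the error paths then clear it again. The pattern in the abstract (priv, foo_rtc_ops and err_unmap are placeholders):

	platform_set_drvdata(pdev, priv);		/* must be set before registering */

	priv->rtc = rtc_device_register(pdev->name, &pdev->dev,
					&foo_rtc_ops, THIS_MODULE);
	if (IS_ERR(priv->rtc)) {
		ret = PTR_ERR(priv->rtc);
		platform_set_drvdata(pdev, NULL);	/* undo on failure */
		goto err_unmap;
	}
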
index 60ce69600828237e63175a7aed8962a6d463a86e..47e681df31e280d27f4ce1a926b0f1c5bedbc13e 100644 (file)
@@ -355,6 +355,7 @@ static int __devinit ds1286_probe(struct platform_device *pdev)
                goto out;
        }
        spin_lock_init(&priv->lock);
+       platform_set_drvdata(pdev, priv);
        rtc = rtc_device_register("ds1286", &pdev->dev,
                                  &ds1286_ops, THIS_MODULE);
        if (IS_ERR(rtc)) {
@@ -362,7 +363,6 @@ static int __devinit ds1286_probe(struct platform_device *pdev)
                goto out;
        }
        priv->rtc = rtc;
-       platform_set_drvdata(pdev, priv);
        return 0;
 
 out:
index 11ae64dcbf3c7b1a365853ae16cb095cbb38ea9f..335551d333b24f10a428c4f332792190181ec82f 100644 (file)
@@ -151,6 +151,7 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
                return -ENXIO;
 
        pdev->dev.platform_data = ep93xx_rtc;
+       platform_set_drvdata(pdev, rtc);
 
        rtc = rtc_device_register(pdev->name,
                                &pdev->dev, &ep93xx_rtc_ops, THIS_MODULE);
@@ -159,8 +160,6 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
                goto exit;
        }
 
-       platform_set_drvdata(pdev, rtc);
-
        err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files);
        if (err)
                goto fail;
@@ -168,9 +167,9 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
        return 0;
 
 fail:
-       platform_set_drvdata(pdev, NULL);
        rtc_device_unregister(rtc);
 exit:
+       platform_set_drvdata(pdev, NULL);
        pdev->dev.platform_data = NULL;
        return err;
 }
index 69fe664a2228a801ede6c1cae5973b551a89f258..eda128fc1d38729ebb1a385c10fd30e6345e852b 100644 (file)
@@ -783,6 +783,9 @@ static int m41t80_probe(struct i2c_client *client,
                goto exit;
        }
 
+       clientdata->features = id->driver_data;
+       i2c_set_clientdata(client, clientdata);
+
        rtc = rtc_device_register(client->name, &client->dev,
                                  &m41t80_rtc_ops, THIS_MODULE);
        if (IS_ERR(rtc)) {
@@ -792,8 +795,6 @@ static int m41t80_probe(struct i2c_client *client,
        }
 
        clientdata->rtc = rtc;
-       clientdata->features = id->driver_data;
-       i2c_set_clientdata(client, clientdata);
 
        /* Make sure HT (Halt Update) bit is cleared */
        rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_HOUR);
index 20494b5edc3c74cc787df17a0d25f554bcf37370..3bc046f427e04c373465a0ac25f9125d0e89043f 100644 (file)
@@ -258,6 +258,8 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev)
        }
 
        dev_set_drvdata(&pdev->dev, info);
+       /* XXX - isn't this redundant? */
+       platform_set_drvdata(pdev, info);
 
        info->rtc_dev = rtc_device_register("max8925-rtc", &pdev->dev,
                                        &max8925_rtc_ops, THIS_MODULE);
@@ -267,10 +269,9 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev)
                goto out_rtc;
        }
 
-       platform_set_drvdata(pdev, info);
-
        return 0;
 out_rtc:
+       platform_set_drvdata(pdev, NULL);
        free_irq(chip->irq_base + MAX8925_IRQ_RTC_ALARM0, info);
 out_irq:
        kfree(info);
index 3f7bc6b9fefa94186c020503e8614304606d336b..2e48aa604273ad8e93f737888de136ec549467b5 100644 (file)
@@ -265,6 +265,8 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev)
        info->rtc = max8998->rtc;
        info->irq = max8998->irq_base + MAX8998_IRQ_ALARM0;
 
+       platform_set_drvdata(pdev, info);
+
        info->rtc_dev = rtc_device_register("max8998-rtc", &pdev->dev,
                        &max8998_rtc_ops, THIS_MODULE);
 
@@ -274,8 +276,6 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev)
                goto out_rtc;
        }
 
-       platform_set_drvdata(pdev, info);
-
        ret = request_threaded_irq(info->irq, NULL, max8998_rtc_alarm_irq, 0,
                        "rtc-alarm0", info);
 
@@ -293,6 +293,7 @@ static int __devinit max8998_rtc_probe(struct platform_device *pdev)
        return 0;
 
 out_rtc:
+       platform_set_drvdata(pdev, NULL);
        kfree(info);
        return ret;
 }
index c5ac03793e7910fec863f70130496cd3a3d60e9c..a1a278bc340dbfd0ff6814c8bcfec577f6137c85 100644 (file)
@@ -349,11 +349,15 @@ static int __devinit mc13xxx_rtc_probe(struct platform_device *pdev)
        if (ret)
                goto err_alarm_irq_request;
 
+       mc13xxx_unlock(mc13xxx);
+
        priv->rtc = rtc_device_register(pdev->name,
                        &pdev->dev, &mc13xxx_rtc_ops, THIS_MODULE);
        if (IS_ERR(priv->rtc)) {
                ret = PTR_ERR(priv->rtc);
 
+               mc13xxx_lock(mc13xxx);
+
                mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_TODA, priv);
 err_alarm_irq_request:
 
@@ -365,12 +369,12 @@ err_reset_irq_status:
                mc13xxx_irq_free(mc13xxx, MC13XXX_IRQ_RTCRST, priv);
 err_reset_irq_request:
 
+               mc13xxx_unlock(mc13xxx);
+
                platform_set_drvdata(pdev, NULL);
                kfree(priv);
        }
 
-       mc13xxx_unlock(mc13xxx);
-
        return ret;
 }
 
index 67820626e18fa7fe4f7d889c614336db2533f5c7..fcb113c11122b7a762abd88f700bd53dc1713a15 100644 (file)
@@ -214,6 +214,7 @@ static int __init msm6242_rtc_probe(struct platform_device *dev)
                error = -ENOMEM;
                goto out_free_priv;
        }
+       platform_set_drvdata(dev, priv);
 
        rtc = rtc_device_register("rtc-msm6242", &dev->dev, &msm6242_rtc_ops,
                                  THIS_MODULE);
@@ -223,10 +224,10 @@ static int __init msm6242_rtc_probe(struct platform_device *dev)
        }
 
        priv->rtc = rtc;
-       platform_set_drvdata(dev, priv);
        return 0;
 
 out_unmap:
+       platform_set_drvdata(dev, NULL);
        iounmap(priv->regs);
 out_free_priv:
        kfree(priv);
index 826ab64a8fa91d1a4524e7e3e7a2ae2ce565fec8..d814417bee8c08b68224fafcbeb39e7ca405fd5d 100644 (file)
@@ -418,14 +418,6 @@ static int __init mxc_rtc_probe(struct platform_device *pdev)
                goto exit_put_clk;
        }
 
-       rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops,
-                                 THIS_MODULE);
-       if (IS_ERR(rtc)) {
-               ret = PTR_ERR(rtc);
-               goto exit_put_clk;
-       }
-
-       pdata->rtc = rtc;
        platform_set_drvdata(pdev, pdata);
 
        /* Configure and enable the RTC */
@@ -438,8 +430,19 @@ static int __init mxc_rtc_probe(struct platform_device *pdev)
                pdata->irq = -1;
        }
 
+       rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops,
+                                 THIS_MODULE);
+       if (IS_ERR(rtc)) {
+               ret = PTR_ERR(rtc);
+               goto exit_clr_drvdata;
+       }
+
+       pdata->rtc = rtc;
+
        return 0;
 
+exit_clr_drvdata:
+       platform_set_drvdata(pdev, NULL);
 exit_put_clk:
        clk_disable(pdata->clk);
        clk_put(pdata->clk);
index a633abc428966a91384d92f2613113a3a8861eab..cd4f198cc2eff921b364eaa1d66ae8b97e7cf5a1 100644 (file)
@@ -151,6 +151,8 @@ static int __devinit pcap_rtc_probe(struct platform_device *pdev)
 
        pcap_rtc->pcap = dev_get_drvdata(pdev->dev.parent);
 
+       platform_set_drvdata(pdev, pcap_rtc);
+
        pcap_rtc->rtc = rtc_device_register("pcap", &pdev->dev,
                                  &pcap_rtc_ops, THIS_MODULE);
        if (IS_ERR(pcap_rtc->rtc)) {
@@ -158,7 +160,6 @@ static int __devinit pcap_rtc_probe(struct platform_device *pdev)
                goto fail_rtc;
        }
 
-       platform_set_drvdata(pdev, pcap_rtc);
 
        timer_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_1HZ);
        alarm_irq = pcap_to_irq(pcap_rtc->pcap, PCAP_IRQ_TODA);
@@ -177,6 +178,7 @@ fail_alarm:
 fail_timer:
        rtc_device_unregister(pcap_rtc->rtc);
 fail_rtc:
+       platform_set_drvdata(pdev, NULL);
        kfree(pcap_rtc);
        return err;
 }
index 694da39b6dd25ff7c8bfb9830c2a5405d7aa47e0..359da6d020b98c99318dddcc1a007e1be7d452ba 100644 (file)
@@ -249,15 +249,15 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev)
 
        spin_lock_init(&priv->lock);
 
+       platform_set_drvdata(dev, priv);
+
        rtc = rtc_device_register("rtc-rp5c01", &dev->dev, &rp5c01_rtc_ops,
                                  THIS_MODULE);
        if (IS_ERR(rtc)) {
                error = PTR_ERR(rtc);
                goto out_unmap;
        }
-
        priv->rtc = rtc;
-       platform_set_drvdata(dev, priv);
 
        error = sysfs_create_bin_file(&dev->dev.kobj, &priv->nvram_attr);
        if (error)
@@ -268,6 +268,7 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev)
 out_unregister:
        rtc_device_unregister(rtc);
 out_unmap:
+       platform_set_drvdata(dev, NULL);
        iounmap(priv->regs);
 out_free_priv:
        kfree(priv);
index b3466c491cd35cbbd93a64df0f1dbcf070c84e6c..16512ecae31a384fef42e743884b5b0c8d905d7d 100644 (file)
@@ -46,6 +46,7 @@ static struct clk *rtc_clk;
 static void __iomem *s3c_rtc_base;
 static int s3c_rtc_alarmno = NO_IRQ;
 static int s3c_rtc_tickno  = NO_IRQ;
+static bool wake_en;
 static enum s3c_cpu_type s3c_rtc_cpu_type;
 
 static DEFINE_SPINLOCK(s3c_rtc_pie_lock);
@@ -562,8 +563,12 @@ static int s3c_rtc_suspend(struct platform_device *pdev, pm_message_t state)
        }
        s3c_rtc_enable(pdev, 0);
 
-       if (device_may_wakeup(&pdev->dev))
-               enable_irq_wake(s3c_rtc_alarmno);
+       if (device_may_wakeup(&pdev->dev) && !wake_en) {
+               if (enable_irq_wake(s3c_rtc_alarmno) == 0)
+                       wake_en = true;
+               else
+                       dev_err(&pdev->dev, "enable_irq_wake failed\n");
+       }
 
        return 0;
 }
@@ -579,8 +584,10 @@ static int s3c_rtc_resume(struct platform_device *pdev)
                writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
        }
 
-       if (device_may_wakeup(&pdev->dev))
+       if (device_may_wakeup(&pdev->dev) && wake_en) {
                disable_irq_wake(s3c_rtc_alarmno);
+               wake_en = false;
+       }
 
        return 0;
 }
index 475e603fc584f8547e6171eaf8cdb325ec085d1f..86b6f1cc1b10c56277f49ec439b893daf2c25f33 100644 (file)
@@ -1742,11 +1742,20 @@ int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
 static inline int _dasd_term_running_cqr(struct dasd_device *device)
 {
        struct dasd_ccw_req *cqr;
+       int rc;
 
        if (list_empty(&device->ccw_queue))
                return 0;
        cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
-       return device->discipline->term_IO(cqr);
+       rc = device->discipline->term_IO(cqr);
+       if (!rc)
+               /*
+                * CQR terminated because a more important request is pending.
+                * Undo decreasing of retry counter because this is
+                * Undo the retry counter decrement, since this is
+                * not an error case.
+               cqr->retries++;
+       return rc;
 }
 
 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
index 29143eda9dd903b4a8f897ade5eee20dcaecaf91..85dddb1e4126be512d843ceef08123fd86cc1b0d 100644 (file)
@@ -239,7 +239,6 @@ static void dasd_ext_handler(unsigned int ext_int_code,
        addr_t ip;
        int rc;
 
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++;
        switch (ext_int_code >> 24) {
        case DASD_DIAG_CODE_31BIT:
                ip = (addr_t) param32;
@@ -250,6 +249,7 @@ static void dasd_ext_handler(unsigned int ext_int_code,
        default:
                return;
        }
+       kstat_cpu(smp_processor_id()).irqs[EXTINT_DSD]++;
        if (!ip) {              /* no intparm: unsolicited interrupt */
                DBF_EVENT(DBF_NOTICE, "%s", "caught unsolicited "
                              "interrupt");
index 4b60ede07f0e0f9738d22c70d53c3393eff1b37e..be55fb2b1b1c58f78a3df48ad8ac7be544f3ec51 100644 (file)
@@ -518,6 +518,8 @@ static void __init insert_increment(u16 rn, int standby, int assigned)
                return;
        new_incr->rn = rn;
        new_incr->standby = standby;
+       if (!standby)
+               new_incr->usecount = 1;
        last_rn = 0;
        prev = &sclp_mem_list;
        list_for_each_entry(incr, &sclp_mem_list, list) {
index 83cea9a55e2f8c10df077710a66f23760a7431e6..1b3924c2fffd9b87cdaede8204ffab3cbbe7d129 100644 (file)
@@ -236,7 +236,6 @@ tapeblock_setup_device(struct tape_device * device)
        disk->major = tapeblock_major;
        disk->first_minor = device->first_minor;
        disk->fops = &tapeblock_fops;
-       disk->events = DISK_EVENT_MEDIA_CHANGE;
        disk->private_data = tape_get_device(device);
        disk->queue = blkdat->request_queue;
        set_capacity(disk, 0);
index 414427d64a8ff6f46b3b7e375c7db97a95a66dac..607998f0b7d8580c05c4fec47cf672bfad9d3c14 100644 (file)
@@ -381,10 +381,10 @@ static void kvm_extint_handler(unsigned int ext_int_code,
        u16 subcode;
        u32 param;
 
-       kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++;
        subcode = ext_int_code >> 16;
        if ((subcode & 0xff00) != VIRTIO_SUBCODE_64)
                return;
+       kstat_cpu(smp_processor_id()).irqs[EXTINT_VRT]++;
 
        /* The LSB might be overloaded, we have to mask it */
        vq = (struct virtqueue *)(param64 & ~1UL);
index e2d45c91b8e8fa9d99d2d6abc17739aff12e6012..9689d41c788887f4f3fb12e43c87b0c67ae1aa39 100644 (file)
@@ -1292,8 +1292,10 @@ static struct scsi_host_template qpti_template = {
        .use_clustering         = ENABLE_CLUSTERING,
 };
 
+static const struct of_device_id qpti_match[];
 static int __devinit qpti_sbus_probe(struct platform_device *op)
 {
+       const struct of_device_id *match;
        struct scsi_host_template *tpnt;
        struct device_node *dp = op->dev.of_node;
        struct Scsi_Host *host;
@@ -1301,9 +1303,10 @@ static int __devinit qpti_sbus_probe(struct platform_device *op)
        static int nqptis;
        const char *fcode;
 
-       if (!op->dev.of_match)
+       match = of_match_device(qpti_match, &op->dev);
+       if (!match)
                return -EINVAL;
-       tpnt = op->dev.of_match->data;
+       tpnt = match->data;
 
        /* Sometimes Antares cards come up not completely
         * setup, and we get a report of a zero IRQ.
index e9901b8f8443ec3cf6b75487cf36d57584bccd17..ec1803a48723dc950f56eb3fddb33456ea8d9a61 100644 (file)
@@ -74,8 +74,6 @@ struct kmem_cache *scsi_sdb_cache;
  */
 #define SCSI_QUEUE_DELAY       3
 
-static void scsi_run_queue(struct request_queue *q);
-
 /*
  * Function:   scsi_unprep_request()
  *
@@ -161,7 +159,7 @@ static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
        blk_requeue_request(q, cmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);
 
-       scsi_run_queue(q);
+       kblockd_schedule_work(q, &device->requeue_work);
 
        return 0;
 }
@@ -400,10 +398,15 @@ static inline int scsi_host_is_busy(struct Scsi_Host *shost)
 static void scsi_run_queue(struct request_queue *q)
 {
        struct scsi_device *sdev = q->queuedata;
-       struct Scsi_Host *shost = sdev->host;
+       struct Scsi_Host *shost;
        LIST_HEAD(starved_list);
        unsigned long flags;
 
+       /* if the device is dead, sdev will be NULL, so no queue to run */
+       if (!sdev)
+               return;
+
+       shost = sdev->host;
        if (scsi_target(sdev)->single_lun)
                scsi_single_lun_run(sdev);
 
@@ -433,7 +436,11 @@ static void scsi_run_queue(struct request_queue *q)
                        continue;
                }
 
-               blk_run_queue_async(sdev->request_queue);
+               spin_unlock(shost->host_lock);
+               spin_lock(sdev->request_queue->queue_lock);
+               __blk_run_queue(sdev->request_queue);
+               spin_unlock(sdev->request_queue->queue_lock);
+               spin_lock(shost->host_lock);
        }
        /* put any unprocessed entries back */
        list_splice(&starved_list, &shost->starved_list);
@@ -442,6 +449,16 @@ static void scsi_run_queue(struct request_queue *q)
        blk_run_queue(q);
 }
 
+void scsi_requeue_run_queue(struct work_struct *work)
+{
+       struct scsi_device *sdev;
+       struct request_queue *q;
+
+       sdev = container_of(work, struct scsi_device, requeue_work);
+       q = sdev->request_queue;
+       scsi_run_queue(q);
+}
+
 /*
  * Function:   scsi_requeue_command()
  *
index 087821fac8fe76d64b50b0beaad7b1dd3c2dff0c..58584dc0724ade4fb4b2d5fc12d48b962d2b91bd 100644 (file)
@@ -242,6 +242,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
        int display_failure_msg = 1, ret;
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        extern void scsi_evt_thread(struct work_struct *work);
+       extern void scsi_requeue_run_queue(struct work_struct *work);
 
        sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
                       GFP_ATOMIC);
@@ -264,6 +265,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
        INIT_LIST_HEAD(&sdev->event_list);
        spin_lock_init(&sdev->list_lock);
        INIT_WORK(&sdev->event_work, scsi_evt_thread);
+       INIT_WORK(&sdev->requeue_work, scsi_requeue_run_queue);
 
        sdev->sdev_gendev.parent = get_device(&starget->dev);
        sdev->sdev_target = starget;
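The __scsi_queue_insert() and scsi_alloc_sdev() hunks above replace the direct scsi_run_queue() call on the requeue path with a per-device work item, scsi_requeue_run_queue(), that kblockd runs later; scsi_run_queue() itself also learns to cope with a NULL queuedata. Reduced to its parts, the deferral pattern looks like the sketch below (foo_* names are placeholders, and kblockd_schedule_work() is shown with the request_queue argument it takes in this kernel):

#include <linux/blkdev.h>
#include <linux/workqueue.h>

struct foo_device {
        struct request_queue    *queue;
        struct work_struct      requeue_work;
};

static void foo_requeue_fn(struct work_struct *work)
{
        struct foo_device *fdev =
                container_of(work, struct foo_device, requeue_work);

        /* Runs later in process context, outside the submitter's locks. */
        blk_run_queue(fdev->queue);
}

static void foo_device_init(struct foo_device *fdev)
{
        INIT_WORK(&fdev->requeue_work, foo_requeue_fn);
}

static void foo_requeue(struct foo_device *fdev)
{
        /* Defer running the queue instead of doing it inline. */
        kblockd_schedule_work(fdev->queue, &fdev->requeue_work);
}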
index eeb7dd43f9a8ef9128487fd977761d1fae8fbdf7..830822f86e410183c77641bddc9fbe6a29e54194 100644 (file)
@@ -2288,7 +2288,3 @@ err_dev:
        free_netdev(dev);
        return NULL;
 }
-
-EXPORT_SYMBOL(init_ft1000_card);
-EXPORT_SYMBOL(stop_ft1000_card);
-EXPORT_SYMBOL(flarion_ft1000_cnt);
index 935608e72007e25cb075f10f2940ba0977ea3ff0..bdfb1aec58df8a52eb7e42b60d698086abe19825 100644 (file)
@@ -214,6 +214,3 @@ void ft1000CleanupProc(struct net_device *dev)
        remove_proc_entry(FT1000_PROC, init_net.proc_net);
        unregister_netdevice_notifier(&ft1000_netdev_notifier);
 }
-
-EXPORT_SYMBOL(ft1000InitProc);
-EXPORT_SYMBOL(ft1000CleanupProc);
index 5501eb9b3355050eb33526829c51e0e9c94d2fa3..ce8bedaeaac21589f84768e4788bbc4939b36488 100644 (file)
@@ -1,6 +1,6 @@
 config DRM_PSB
        tristate "Intel GMA500 KMS Framebuffer"
-       depends on DRM && PCI
+       depends on DRM && PCI && X86
        select FB_CFB_COPYAREA
         select FB_CFB_FILLRECT
         select FB_CFB_IMAGEBLIT
index 9cc15c1c18d48277f95d8debb8065e46ec541f59..1ea81421805940acca598367a5fe6d3bba43d945 100644 (file)
@@ -28,6 +28,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/pci.h>
+#include <linux/delay.h>
 #include <linux/file.h>
 #include <asm/mrst.h>
 #include <sound/pcm.h>
index 26d815a67eb836906cd72b3c7553b07fc85ab77b..3c6b3abff3c3e3757fadc8b9a0f43b2895ff8bb6 100644 (file)
@@ -29,6 +29,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/pci.h>
+#include <linux/delay.h>
 #include <linux/file.h>
 #include "intel_sst.h"
 #include "intelmid_snd_control.h"
index b5d21f6497f9ef14aa4176550615a6203c6a11c8..22c04eabed4116768959b10681c4c880c0940c7c 100644 (file)
@@ -12,6 +12,7 @@
  */
 #include <linux/cs5535.h>
 #include <linux/gpio.h>
+#include <linux/delay.h>
 #include <asm/olpc.h>
 
 #include "olpc_dcon.h"
index e1408b0e7ae4e33a90a49af8d202d51683f81cca..ab305be96fb5d34e78117b5f4ecddab263adc2a4 100644 (file)
@@ -28,7 +28,7 @@
 
 #define RTSX_STOR "rts_pstor: "
 
-#if CONFIG_RTS_PSTOR_DEBUG
+#ifdef CONFIG_RTS_PSTOR_DEBUG
 #define RTSX_DEBUGP(x...) printk(KERN_DEBUG RTSX_STOR x)
 #define RTSX_DEBUGPN(x...) printk(KERN_DEBUG x)
 #define RTSX_DEBUGPX(x...) printk(x)
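The switch from "#if CONFIG_RTS_PSTOR_DEBUG" to "#ifdef" here (and in the later rts_pstor hunks below) matters because the two are not equivalent in general: #if evaluates the symbol as an integer expression, silently treating an undefined macro as 0, while #ifdef tests whether the symbol is defined at all, which is exactly what a Kconfig bool provides. A small illustration with a placeholder symbol:

/* CONFIG_EXAMPLE_DEBUG stands in for any Kconfig bool option. */
#ifdef CONFIG_EXAMPLE_DEBUG
#define EX_DBG(fmt, ...)        printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define EX_DBG(fmt, ...)        do { } while (0)
#endif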
index 810e170894f58c3d3180047084dacc31014ec6cd..d89795c6a3ac2545be028198604443b3f37debe0 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/blkdev.h>
 #include <linux/kthread.h>
 #include <linux/sched.h>
+#include <linux/vmalloc.h>
 
 #include "rtsx.h"
 #include "rtsx_transport.h"
index d2f1c715a684201eae59e646ead22096bb154033..4e60780ea80412b7992e1a46b37da22b284ea214 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/kthread.h>
 #include <linux/sched.h>
 #include <linux/workqueue.h>
+#include <linux/vmalloc.h>
 
 #include "rtsx.h"
 #include "rtsx_transport.h"
@@ -1311,11 +1312,11 @@ void rtsx_polling_func(struct rtsx_chip *chip)
 
 #ifdef SUPPORT_OCP
        if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
-               #if CONFIG_RTS_PSTOR_DEBUG
+#ifdef CONFIG_RTS_PSTOR_DEBUG
                if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER | MS_OC_NOW | MS_OC_EVER)) {
                        RTSX_DEBUGP("Over current, OCPSTAT is 0x%x\n", chip->ocp_stat);
                }
-               #endif
+#endif
 
                if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
                        if (chip->card_exist & SD_CARD) {
index 20c2464a20f9cf7f421d600d7326f5ccb802b5b7..7de1fae443fce559780a97af49cf32352dc3205e 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/blkdev.h>
 #include <linux/kthread.h>
 #include <linux/sched.h>
+#include <linux/vmalloc.h>
 
 #include "rtsx.h"
 #include "rtsx_transport.h"
index 8d066bd428c4ad94e4785a974c2b98c2acceddb0..b1277a6c7a8b857dd602ee610a764ab7810ec099 100644 (file)
@@ -909,7 +909,7 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
                RTSX_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET);
                RTSX_WRITE_REG(chip, CLK_CTL, CHANGE_CLK, 0);
        } else {
-#if CONFIG_RTS_PSTOR_DEBUG
+#ifdef CONFIG_RTS_PSTOR_DEBUG
                rtsx_read_register(chip, SD_VP_CTL, &val);
                RTSX_DEBUGP("SD_VP_CTL: 0x%x\n", val);
                rtsx_read_register(chip, SD_DCMPS_CTL, &val);
@@ -958,7 +958,7 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
        return STATUS_SUCCESS;
 
 Fail:
-#if CONFIG_RTS_PSTOR_DEBUG
+#ifdef CONFIG_RTS_PSTOR_DEBUG
        rtsx_read_register(chip, SD_VP_CTL, &val);
        RTSX_DEBUGP("SD_VP_CTL: 0x%x\n", val);
        rtsx_read_register(chip, SD_DCMPS_CTL, &val);
index 2c668bae6ff47ea66afc3d74f7897ba06b4aafa6..bc83b49a4eb4a975cdc286c484b08e4d4698a39e 100644 (file)
@@ -82,7 +82,7 @@ do {                                                                                                  \
 #define TRACE_GOTO(chip, label)        goto label
 #endif
 
-#if CONFIG_RTS_PSTOR_DEBUG
+#ifdef CONFIG_RTS_PSTOR_DEBUG
 static inline void rtsx_dump(u8 *buf, int buf_len)
 {
        int i;
index 7bcd468b8f2c2f15fff7e53cae3ce63b322c0bf6..9f3add1e8f59d885564703688df6d76554d228c6 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/blkdev.h>
 #include <linux/kthread.h>
 #include <linux/sched.h>
+#include <linux/vmalloc.h>
 
 #include "rtsx.h"
 #include "rtsx_transport.h"
index 2cf77c9408602964a2c5dfe527bc7dc0d316da4f..03dcac4ea4d064a291d28476de2eb8c71b2d8cdb 100644 (file)
@@ -2,6 +2,7 @@ config SOLO6X10
        tristate "Softlogic 6x10 MPEG codec cards"
        depends on PCI && VIDEO_DEV && SND && I2C
        select VIDEOBUF_DMA_SG
+       select SND_PCM
        ---help---
          This driver supports the Softlogic based MPEG-4 and h.264 codec
          cards.
index 0f02a4b12ae4a29b05b68710d178686251acd713..4f4f13321f403866c579dfda8a10451b443ea73c 100644 (file)
@@ -876,8 +876,10 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
        }
 
        /* kill threads related to this sdev, if v.c. exists */
-       kthread_stop(vdev->ud.tcp_rx);
-       kthread_stop(vdev->ud.tcp_tx);
+       if (vdev->ud.tcp_rx)
+               kthread_stop(vdev->ud.tcp_rx);
+       if (vdev->ud.tcp_tx)
+               kthread_stop(vdev->ud.tcp_tx);
 
        usbip_uinfo("stop threads\n");
 
@@ -949,9 +951,6 @@ static void vhci_device_init(struct vhci_device *vdev)
 {
        memset(vdev, 0, sizeof(*vdev));
 
-       vdev->ud.tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx");
-       vdev->ud.tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx");
-
        vdev->ud.side   = USBIP_VHCI;
        vdev->ud.status = VDEV_ST_NULL;
        /* vdev->ud.lock   = SPIN_LOCK_UNLOCKED; */
@@ -1139,7 +1138,7 @@ static int vhci_hcd_probe(struct platform_device *pdev)
                usbip_uerr("create hcd failed\n");
                return -ENOMEM;
        }
-
+       hcd->has_tt = 1;
 
        /* this is private data for vhci_hcd */
        the_controller = hcd_to_vhci(hcd);
index 3f2459f30415ef5ec03e84eedf0417bb712d93e1..e2dadbd5ef1e2707956faa774f3c42ec44c039d4 100644 (file)
@@ -21,6 +21,7 @@
 #include "vhci.h"
 
 #include <linux/in.h>
+#include <linux/kthread.h>
 
 /* TODO: refine locking ?*/
 
@@ -220,13 +221,13 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
        vdev->ud.tcp_socket = socket;
        vdev->ud.status     = VDEV_ST_NOTASSIGNED;
 
-       wake_up_process(vdev->ud.tcp_rx);
-       wake_up_process(vdev->ud.tcp_tx);
-
        spin_unlock(&vdev->ud.lock);
        spin_unlock(&the_controller->lock);
        /* end the lock */
 
+       vdev->ud.tcp_rx = kthread_run(vhci_rx_loop, &vdev->ud, "vhci_rx");
+       vdev->ud.tcp_tx = kthread_run(vhci_tx_loop, &vdev->ud, "vhci_tx");
+
        rh_port_connect(rhport, speed);
 
        return count;
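The two usbip hunks above stop pre-creating the rx/tx threads in vhci_device_init() and instead create and start them with kthread_run() in store_attach(), once there is actually a connection; that is also why vhci_shutdown_connection() now checks the task pointers for NULL before kthread_stop(). A minimal sketch of the kthread_run()/kthread_stop() pairing, with placeholder names:

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *foo_task;

static int foo_thread_fn(void *data)
{
        while (!kthread_should_stop())
                msleep(100);            /* stand-in for the real work loop */
        return 0;
}

static int foo_attach(void *data)
{
        /* kthread_run() = kthread_create() + wake_up_process(). */
        foo_task = kthread_run(foo_thread_fn, data, "foo_worker");
        if (IS_ERR(foo_task)) {
                int err = PTR_ERR(foo_task);

                foo_task = NULL;
                return err;
        }
        return 0;
}

static void foo_detach(void)
{
        if (foo_task)                   /* only stop a thread that was started */
                kthread_stop(foo_task);
        foo_task = NULL;
}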
index 6a71f52c59b1f0a575b8ba69aeac20a9fd2c733a..76378397b763355c059ace7b5fe6bdd09abaf80c 100644 (file)
@@ -273,7 +273,7 @@ exit:
 }
 
 int prism2_set_default_key(struct wiphy *wiphy, struct net_device *dev,
-                          u8 key_index)
+                          u8 key_index, bool unicast, bool multicast)
 {
        wlandevice_t *wlandev = dev->ml_priv;
 
index 0e8eec516df4d94f0395842a3f4573cd036dbfdc..c911b2419abb8a664f3bfad7d258a94c1363fa5b 100644 (file)
@@ -80,14 +80,17 @@ static int __devinit of_platform_serial_setup(struct platform_device *ofdev,
 /*
  * Try to register a serial port
  */
+static struct of_device_id of_platform_serial_table[];
 static int __devinit of_platform_serial_probe(struct platform_device *ofdev)
 {
+       const struct of_device_id *match;
        struct of_serial_info *info;
        struct uart_port port;
        int port_type;
        int ret;
 
-       if (!ofdev->dev.of_match)
+       match = of_match_device(of_platform_serial_table, &ofdev->dev);
+       if (!match)
                return -EINVAL;
 
        if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL))
@@ -97,7 +100,7 @@ static int __devinit of_platform_serial_probe(struct platform_device *ofdev)
        if (info == NULL)
                return -ENOMEM;
 
-       port_type = (unsigned long)ofdev->dev.of_match->data;
+       port_type = (unsigned long)match->data;
        ret = of_platform_serial_setup(ofdev, port_type, &port);
        if (ret)
                goto out;
index 36613b37c50442c96a3bad47449365aaf31b1e89..3a68e09309f750107528451f7d2b4c40d645f1b1 100644 (file)
@@ -2539,15 +2539,18 @@ static void qe_udc_release(struct device *dev)
 }
 
 /* Driver probe functions */
+static const struct of_device_id qe_udc_match[];
 static int __devinit qe_udc_probe(struct platform_device *ofdev)
 {
+       const struct of_device_id *match;
        struct device_node *np = ofdev->dev.of_node;
        struct qe_ep *ep;
        unsigned int ret = 0;
        unsigned int i;
        const void *prop;
 
-       if (!ofdev->dev.of_match)
+       match = of_match_device(qe_udc_match, &ofdev->dev);
+       if (!match)
                return -EINVAL;
 
        prop = of_get_property(np, "mode", NULL);
@@ -2561,7 +2564,7 @@ static int __devinit qe_udc_probe(struct platform_device *ofdev)
                return -ENOMEM;
        }
 
-       udc_controller->soc_type = (unsigned long)ofdev->dev.of_match->data;
+       udc_controller->soc_type = (unsigned long)match->data;
        udc_controller->usb_regs = of_iomap(np, 0);
        if (!udc_controller->usb_regs) {
                ret = -ENOMEM;
index 7e41a95c5ceb6a9aa70ed437b737eabebf379a4a..627f3a678759a1e007a70a520262a41b48327339 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/slab.h>
 #include <linux/usb/ulpi.h>
 #include <plat/usb.h>
+#include <linux/regulator/consumer.h>
 
 /* EHCI Register Set */
 #define EHCI_INSNREG04                                 (0xA0)
@@ -118,6 +119,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
        struct ehci_hcd                         *omap_ehci;
        int                                     ret = -ENODEV;
        int                                     irq;
+       int                                     i;
+       char                                    supply[7];
 
        if (usb_disabled())
                return -ENODEV;
@@ -158,6 +161,23 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
        hcd->rsrc_len = resource_size(res);
        hcd->regs = regs;
 
+       /* get ehci regulator and enable */
+       for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
+               if (pdata->port_mode[i] != OMAP_EHCI_PORT_MODE_PHY) {
+                       pdata->regulator[i] = NULL;
+                       continue;
+               }
+               snprintf(supply, sizeof(supply), "hsusb%d", i);
+               pdata->regulator[i] = regulator_get(dev, supply);
+               if (IS_ERR(pdata->regulator[i])) {
+                       pdata->regulator[i] = NULL;
+                       dev_dbg(dev,
+                       "failed to get ehci port%d regulator\n", i);
+               } else {
+                       regulator_enable(pdata->regulator[i]);
+               }
+       }
+
        ret = omap_usbhs_enable(dev);
        if (ret) {
                dev_err(dev, "failed to start usbhs with err %d\n", ret);
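A small detail in the ehci_hcd_omap_probe() hunk above: the regulator name buffer is declared as char supply[7], which is exactly enough because "hsusb%d" expands to at most six characters plus the terminating NUL for single-digit port indices, and snprintf(supply, sizeof(supply), ...) truncates rather than overflows if that assumption ever stops holding.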
index 795345ad45e62881fb8e28a1abce639a3088cac8..7b2e69aa2e98fdf2e803bb9d1c26d25ba74cb63e 100644 (file)
@@ -1633,6 +1633,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                        ints[i].qh = NULL;
                        ints[i].qtd = NULL;
 
+                       urb->status = status;
                        isp1760_urb_done(hcd, urb);
                        if (qtd)
                                pe(hcd, qh, qtd);
index a78f2ebd11b785aabf235bf6dd4e76a529506319..73f75d26436c48635bca66c68a777891287b36c9 100644 (file)
@@ -777,7 +777,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
                if (t1 != t2)
                        xhci_writel(xhci, t2, port_array[port_index]);
 
-               if (DEV_HIGHSPEED(t1)) {
+               if (hcd->speed != HCD_USB3) {
                        /* enable remote wake up for USB 2.0 */
                        u32 __iomem *addr;
                        u32 tmp;
@@ -866,6 +866,21 @@ int xhci_bus_resume(struct usb_hcd *hcd)
                                temp |= PORT_LINK_STROBE | XDEV_U0;
                                xhci_writel(xhci, temp, port_array[port_index]);
                        }
+                       /* wait for the port to enter U0 and report port link
+                        * state change.
+                        */
+                       spin_unlock_irqrestore(&xhci->lock, flags);
+                       msleep(20);
+                       spin_lock_irqsave(&xhci->lock, flags);
+
+                       /* Clear PLC */
+                       temp = xhci_readl(xhci, port_array[port_index]);
+                       if (temp & PORT_PLC) {
+                               temp = xhci_port_state_to_neutral(temp);
+                               temp |= PORT_PLC;
+                               xhci_writel(xhci, temp, port_array[port_index]);
+                       }
+
                        slot_id = xhci_find_slot_id_by_port(hcd,
                                        xhci, port_index + 1);
                        if (slot_id)
@@ -873,7 +888,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
                } else
                        xhci_writel(xhci, temp, port_array[port_index]);
 
-               if (DEV_HIGHSPEED(temp)) {
+               if (hcd->speed != HCD_USB3) {
                        /* disable remote wake up for USB 2.0 */
                        u32 __iomem *addr;
                        u32 tmp;
index 6dfbf9ffd7a63660ef472b6b3a932ecf22fd0341..f47c20197c61aa3c8882f25a064c83e6890ff9cb 100644 (file)
@@ -1887,11 +1887,9 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
                        otg_set_vbus(musb->xceiv, 1);
 
                hcd->self.uses_pio_for_control = 1;
-
-               if (musb->xceiv->last_event == USB_EVENT_NONE)
-                       pm_runtime_put(musb->controller);
-
        }
+       if (musb->xceiv->last_event == USB_EVENT_NONE)
+               pm_runtime_put(musb->controller);
 
        return 0;
 
index 57a27fa954b41fdd7b0fba880fd3c9da2b64b19c..e9e60b6e058394c3bdcdad9af873e6f25e9c21d8 100644 (file)
@@ -270,7 +270,7 @@ static int musb_otg_notifications(struct notifier_block *nb,
                DBG(4, "VBUS Disconnect\n");
 
 #ifdef CONFIG_USB_GADGET_MUSB_HDRC
-               if (is_otg_enabled(musb))
+               if (is_otg_enabled(musb) || is_peripheral_enabled(musb))
                        if (musb->gadget_driver)
 #endif
                        {
index 82acb8dc4aa144dab7888e567238a1ce332e49c8..6183a57eb69d8b913abcde29af3bbdb24714e725 100644 (file)
@@ -66,7 +66,7 @@
  * have.  Allow 1% either way on the nominal for TVs.
  */
 #define NR_MONTYPES    6
-static struct fb_monspecs monspecs[NR_MONTYPES] __initdata = {
+static struct fb_monspecs monspecs[NR_MONTYPES] __devinitdata = {
        {       /* TV           */
                .hfmin  = 15469,
                .hfmax  = 15781,
@@ -873,7 +873,7 @@ static struct fb_ops acornfb_ops = {
 /*
  * Everything after here is initialisation!!!
  */
-static struct fb_videomode modedb[] __initdata = {
+static struct fb_videomode modedb[] __devinitdata = {
        {       /* 320x256 @ 50Hz */
                NULL, 50,  320,  256, 125000,  92,  62,  35, 19,  38, 2,
                FB_SYNC_COMP_HIGH_ACT,
@@ -925,8 +925,7 @@ static struct fb_videomode modedb[] __initdata = {
        }
 };
 
-static struct fb_videomode __initdata
-acornfb_default_mode = {
+static struct fb_videomode acornfb_default_mode __devinitdata = {
        .name =         NULL,
        .refresh =      60,
        .xres =         640,
@@ -942,7 +941,7 @@ acornfb_default_mode = {
        .vmode =        FB_VMODE_NONINTERLACED
 };
 
-static void __init acornfb_init_fbinfo(void)
+static void __devinit acornfb_init_fbinfo(void)
 {
        static int first = 1;
 
@@ -1018,8 +1017,7 @@ static void __init acornfb_init_fbinfo(void)
  *     size can optionally be followed by 'M' or 'K' for
  *     MB or KB respectively.
  */
-static void __init
-acornfb_parse_mon(char *opt)
+static void __devinit acornfb_parse_mon(char *opt)
 {
        char *p = opt;
 
@@ -1066,8 +1064,7 @@ bad:
        current_par.montype = -1;
 }
 
-static void __init
-acornfb_parse_montype(char *opt)
+static void __devinit acornfb_parse_montype(char *opt)
 {
        current_par.montype = -2;
 
@@ -1108,8 +1105,7 @@ acornfb_parse_montype(char *opt)
        }
 }
 
-static void __init
-acornfb_parse_dram(char *opt)
+static void __devinit acornfb_parse_dram(char *opt)
 {
        unsigned int size;
 
@@ -1134,15 +1130,14 @@ acornfb_parse_dram(char *opt)
 static struct options {
        char *name;
        void (*parse)(char *opt);
-} opt_table[] __initdata = {
+} opt_table[] __devinitdata = {
        { "mon",     acornfb_parse_mon     },
        { "montype", acornfb_parse_montype },
        { "dram",    acornfb_parse_dram    },
        { NULL, NULL }
 };
 
-int __init
-acornfb_setup(char *options)
+static int __devinit acornfb_setup(char *options)
 {
        struct options *optp;
        char *opt;
@@ -1179,8 +1174,7 @@ acornfb_setup(char *options)
  * Detect type of monitor connected
  *  For now, we just assume SVGA
  */
-static int __init
-acornfb_detect_monitortype(void)
+static int __devinit acornfb_detect_monitortype(void)
 {
        return 4;
 }
index e0c2284924b63b4a390aa8a4eb862fcfddac2609..5aac00eb1830c1724ed521f2bcc91693d9dbe646 100644 (file)
 
 #define FBPIXMAPSIZE   (1024 * 8)
 
+static DEFINE_MUTEX(registration_lock);
 struct fb_info *registered_fb[FB_MAX] __read_mostly;
 int num_registered_fb __read_mostly;
 
+static struct fb_info *get_fb_info(unsigned int idx)
+{
+       struct fb_info *fb_info;
+
+       if (idx >= FB_MAX)
+               return ERR_PTR(-ENODEV);
+
+       mutex_lock(&registration_lock);
+       fb_info = registered_fb[idx];
+       if (fb_info)
+               atomic_inc(&fb_info->count);
+       mutex_unlock(&registration_lock);
+
+       return fb_info;
+}
+
+static void put_fb_info(struct fb_info *fb_info)
+{
+       if (!atomic_dec_and_test(&fb_info->count))
+               return;
+       if (fb_info->fbops->fb_destroy)
+               fb_info->fbops->fb_destroy(fb_info);
+}
+
 int lock_fb_info(struct fb_info *info)
 {
        mutex_lock(&info->lock);
@@ -647,6 +672,7 @@ int fb_show_logo(struct fb_info *info, int rotate) { return 0; }
 
 static void *fb_seq_start(struct seq_file *m, loff_t *pos)
 {
+       mutex_lock(&registration_lock);
        return (*pos < FB_MAX) ? pos : NULL;
 }
 
@@ -658,6 +684,7 @@ static void *fb_seq_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void fb_seq_stop(struct seq_file *m, void *v)
 {
+       mutex_unlock(&registration_lock);
 }
 
 static int fb_seq_show(struct seq_file *m, void *v)
@@ -690,13 +717,30 @@ static const struct file_operations fb_proc_fops = {
        .release        = seq_release,
 };
 
-static ssize_t
-fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+/*
+ * We hold a reference to the fb_info in file->private_data,
+ * but if the current registered fb has changed, we don't
+ * actually want to use it.
+ *
+ * So look up the fb_info using the inode minor number,
+ * and just verify it against the reference we have.
+ */
+static struct fb_info *file_fb_info(struct file *file)
 {
-       unsigned long p = *ppos;
        struct inode *inode = file->f_path.dentry->d_inode;
        int fbidx = iminor(inode);
        struct fb_info *info = registered_fb[fbidx];
+
+       if (info != file->private_data)
+               info = NULL;
+       return info;
+}
+
+static ssize_t
+fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+       unsigned long p = *ppos;
+       struct fb_info *info = file_fb_info(file);
        u8 *buffer, *dst;
        u8 __iomem *src;
        int c, cnt = 0, err = 0;
@@ -761,9 +805,7 @@ static ssize_t
 fb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 {
        unsigned long p = *ppos;
-       struct inode *inode = file->f_path.dentry->d_inode;
-       int fbidx = iminor(inode);
-       struct fb_info *info = registered_fb[fbidx];
+       struct fb_info *info = file_fb_info(file);
        u8 *buffer, *src;
        u8 __iomem *dst;
        int c, cnt = 0, err = 0;
@@ -1141,10 +1183,10 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
 
 static long fb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
-       int fbidx = iminor(inode);
-       struct fb_info *info = registered_fb[fbidx];
+       struct fb_info *info = file_fb_info(file);
 
+       if (!info)
+               return -ENODEV;
        return do_fb_ioctl(info, cmd, arg);
 }
 
@@ -1265,12 +1307,13 @@ static int fb_get_fscreeninfo(struct fb_info *info, unsigned int cmd,
 static long fb_compat_ioctl(struct file *file, unsigned int cmd,
                            unsigned long arg)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
-       int fbidx = iminor(inode);
-       struct fb_info *info = registered_fb[fbidx];
-       struct fb_ops *fb = info->fbops;
+       struct fb_info *info = file_fb_info(file);
+       struct fb_ops *fb;
        long ret = -ENOIOCTLCMD;
 
+       if (!info)
+               return -ENODEV;
+       fb = info->fbops;
        switch(cmd) {
        case FBIOGET_VSCREENINFO:
        case FBIOPUT_VSCREENINFO:
@@ -1303,16 +1346,18 @@ static long fb_compat_ioctl(struct file *file, unsigned int cmd,
 static int
 fb_mmap(struct file *file, struct vm_area_struct * vma)
 {
-       int fbidx = iminor(file->f_path.dentry->d_inode);
-       struct fb_info *info = registered_fb[fbidx];
-       struct fb_ops *fb = info->fbops;
+       struct fb_info *info = file_fb_info(file);
+       struct fb_ops *fb;
        unsigned long off;
        unsigned long start;
        u32 len;
 
+       if (!info)
+               return -ENODEV;
        if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
                return -EINVAL;
        off = vma->vm_pgoff << PAGE_SHIFT;
+       fb = info->fbops;
        if (!fb)
                return -ENODEV;
        mutex_lock(&info->mm_lock);
@@ -1361,14 +1406,16 @@ __releases(&info->lock)
        struct fb_info *info;
        int res = 0;
 
-       if (fbidx >= FB_MAX)
-               return -ENODEV;
-       info = registered_fb[fbidx];
-       if (!info)
+       info = get_fb_info(fbidx);
+       if (!info) {
                request_module("fb%d", fbidx);
-       info = registered_fb[fbidx];
-       if (!info)
-               return -ENODEV;
+               info = get_fb_info(fbidx);
+               if (!info)
+                       return -ENODEV;
+       }
+       if (IS_ERR(info))
+               return PTR_ERR(info);
+
        mutex_lock(&info->lock);
        if (!try_module_get(info->fbops->owner)) {
                res = -ENODEV;
@@ -1386,6 +1433,8 @@ __releases(&info->lock)
 #endif
 out:
        mutex_unlock(&info->lock);
+       if (res)
+               put_fb_info(info);
        return res;
 }
 
@@ -1401,6 +1450,7 @@ __releases(&info->lock)
                info->fbops->fb_release(info,1);
        module_put(info->fbops->owner);
        mutex_unlock(&info->lock);
+       put_fb_info(info);
        return 0;
 }
 
@@ -1487,8 +1537,10 @@ static bool fb_do_apertures_overlap(struct apertures_struct *gena,
        return false;
 }
 
+static int do_unregister_framebuffer(struct fb_info *fb_info);
+
 #define VGA_FB_PHYS 0xA0000
-void remove_conflicting_framebuffers(struct apertures_struct *a,
+static void do_remove_conflicting_framebuffers(struct apertures_struct *a,
                                     const char *name, bool primary)
 {
        int i;
@@ -1510,43 +1562,32 @@ void remove_conflicting_framebuffers(struct apertures_struct *a,
                        printk(KERN_INFO "fb: conflicting fb hw usage "
                               "%s vs %s - removing generic driver\n",
                               name, registered_fb[i]->fix.id);
-                       unregister_framebuffer(registered_fb[i]);
+                       do_unregister_framebuffer(registered_fb[i]);
                }
        }
 }
-EXPORT_SYMBOL(remove_conflicting_framebuffers);
 
-/**
- *     register_framebuffer - registers a frame buffer device
- *     @fb_info: frame buffer info structure
- *
- *     Registers a frame buffer device @fb_info.
- *
- *     Returns negative errno on error, or zero for success.
- *
- */
-
-int
-register_framebuffer(struct fb_info *fb_info)
+static int do_register_framebuffer(struct fb_info *fb_info)
 {
        int i;
        struct fb_event event;
        struct fb_videomode mode;
 
-       if (num_registered_fb == FB_MAX)
-               return -ENXIO;
-
        if (fb_check_foreignness(fb_info))
                return -ENOSYS;
 
-       remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id,
+       do_remove_conflicting_framebuffers(fb_info->apertures, fb_info->fix.id,
                                         fb_is_primary_device(fb_info));
 
+       if (num_registered_fb == FB_MAX)
+               return -ENXIO;
+
        num_registered_fb++;
        for (i = 0 ; i < FB_MAX; i++)
                if (!registered_fb[i])
                        break;
        fb_info->node = i;
+       atomic_set(&fb_info->count, 1);
        mutex_init(&fb_info->lock);
        mutex_init(&fb_info->mm_lock);
 
@@ -1592,36 +1633,14 @@ register_framebuffer(struct fb_info *fb_info)
        return 0;
 }
 
-
-/**
- *     unregister_framebuffer - releases a frame buffer device
- *     @fb_info: frame buffer info structure
- *
- *     Unregisters a frame buffer device @fb_info.
- *
- *     Returns negative errno on error, or zero for success.
- *
- *      This function will also notify the framebuffer console
- *      to release the driver.
- *
- *      This is meant to be called within a driver's module_exit()
- *      function. If this is called outside module_exit(), ensure
- *      that the driver implements fb_open() and fb_release() to
- *      check that no processes are using the device.
- */
-
-int
-unregister_framebuffer(struct fb_info *fb_info)
+static int do_unregister_framebuffer(struct fb_info *fb_info)
 {
        struct fb_event event;
        int i, ret = 0;
 
        i = fb_info->node;
-       if (!registered_fb[i]) {
-               ret = -EINVAL;
-               goto done;
-       }
-
+       if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info)
+               return -EINVAL;
 
        if (!lock_fb_info(fb_info))
                return -ENODEV;
@@ -1629,16 +1648,14 @@ unregister_framebuffer(struct fb_info *fb_info)
        ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
        unlock_fb_info(fb_info);
 
-       if (ret) {
-               ret = -EINVAL;
-               goto done;
-       }
+       if (ret)
+               return -EINVAL;
 
        if (fb_info->pixmap.addr &&
            (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT))
                kfree(fb_info->pixmap.addr);
        fb_destroy_modelist(&fb_info->modelist);
-       registered_fb[i]=NULL;
+       registered_fb[i] = NULL;
        num_registered_fb--;
        fb_cleanup_device(fb_info);
        device_destroy(fb_class, MKDEV(FB_MAJOR, i));
@@ -1646,9 +1663,65 @@ unregister_framebuffer(struct fb_info *fb_info)
        fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event);
 
        /* this may free fb info */
-       if (fb_info->fbops->fb_destroy)
-               fb_info->fbops->fb_destroy(fb_info);
-done:
+       put_fb_info(fb_info);
+       return 0;
+}
+
+void remove_conflicting_framebuffers(struct apertures_struct *a,
+                                    const char *name, bool primary)
+{
+       mutex_lock(&registration_lock);
+       do_remove_conflicting_framebuffers(a, name, primary);
+       mutex_unlock(&registration_lock);
+}
+EXPORT_SYMBOL(remove_conflicting_framebuffers);
+
+/**
+ *     register_framebuffer - registers a frame buffer device
+ *     @fb_info: frame buffer info structure
+ *
+ *     Registers a frame buffer device @fb_info.
+ *
+ *     Returns negative errno on error, or zero for success.
+ *
+ */
+int
+register_framebuffer(struct fb_info *fb_info)
+{
+       int ret;
+
+       mutex_lock(&registration_lock);
+       ret = do_register_framebuffer(fb_info);
+       mutex_unlock(&registration_lock);
+
+       return ret;
+}
+
+/**
+ *     unregister_framebuffer - releases a frame buffer device
+ *     @fb_info: frame buffer info structure
+ *
+ *     Unregisters a frame buffer device @fb_info.
+ *
+ *     Returns negative errno on error, or zero for success.
+ *
+ *      This function will also notify the framebuffer console
+ *      to release the driver.
+ *
+ *      This is meant to be called within a driver's module_exit()
+ *      function. If this is called outside module_exit(), ensure
+ *      that the driver implements fb_open() and fb_release() to
+ *      check that no processes are using the device.
+ */
+int
+unregister_framebuffer(struct fb_info *fb_info)
+{
+       int ret;
+
+       mutex_lock(&registration_lock);
+       ret = do_unregister_framebuffer(fb_info);
+       mutex_unlock(&registration_lock);
+
        return ret;
 }
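The framebuffer-core hunks above combine two mechanisms: a registration_lock mutex that serializes registration, unregistration and lookup, and a per-fb_info reference count so that an open file keeps the structure alive until the last put, at which point fb_destroy is called. Stripped of the framebuffer specifics, the lookup-then-get / put-then-destroy shape is roughly the following sketch (obj, MAX_OBJS and the table are placeholders, not part of this patch):

#include <linux/mutex.h>
#include <linux/atomic.h>

#define MAX_OBJS 32                     /* placeholder table size */

struct obj {
        atomic_t count;
        void (*destroy)(struct obj *obj);
};

static struct obj *objects[MAX_OBJS];
static DEFINE_MUTEX(table_lock);

static struct obj *get_obj(unsigned int idx)
{
        struct obj *o = NULL;

        mutex_lock(&table_lock);
        if (idx < MAX_OBJS && objects[idx]) {
                o = objects[idx];
                atomic_inc(&o->count);  /* caller now holds a reference */
        }
        mutex_unlock(&table_lock);
        return o;
}

static void put_obj(struct obj *o)
{
        if (atomic_dec_and_test(&o->count))
                o->destroy(o);          /* last reference frees the object */
}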
 
index 528bceb220fd5783383d8575e99ff2099fdbdd95..eed5436ffb512dd75b54f3c10a97040f71a0c71d 100644 (file)
@@ -185,17 +185,20 @@ static struct miscdevice mpc8xxx_wdt_miscdev = {
        .fops   = &mpc8xxx_wdt_fops,
 };
 
+static const struct of_device_id mpc8xxx_wdt_match[];
 static int __devinit mpc8xxx_wdt_probe(struct platform_device *ofdev)
 {
        int ret;
+       const struct of_device_id *match;
        struct device_node *np = ofdev->dev.of_node;
        struct mpc8xxx_wdt_type *wdt_type;
        u32 freq = fsl_get_sys_freq();
        bool enabled;
 
-       if (!ofdev->dev.of_match)
+       match = of_match_device(mpc8xxx_wdt_match, &ofdev->dev);
+       if (!match)
                return -EINVAL;
-       wdt_type = ofdev->dev.of_match->data;
+       wdt_type = match->data;
 
        if (!freq || freq == -1)
                return -EINVAL;
index 5147bdd3b8e1cc0c86ae9ac4f32d1bcaa596d4b7..257b00e9842808721780d1af1c09aa2f35558d35 100644 (file)
@@ -1102,6 +1102,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                        if (!bdev->bd_part)
                                goto out_clear;
 
+                       ret = 0;
                        if (disk->fops->open) {
                                ret = disk->fops->open(bdev, mode);
                                if (ret == -ERESTARTSYS) {
@@ -1118,9 +1119,18 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                                        put_disk(disk);
                                        goto restart;
                                }
-                               if (ret)
-                                       goto out_clear;
                        }
+                       /*
+                        * If the device is invalidated, rescan partition
+                        * if open succeeded or failed with -ENOMEDIUM.
+                        * The latter is necessary to prevent ghost
+                        * partitions on a removed medium.
+                        */
+                       if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
+                               rescan_partitions(disk, bdev);
+                       if (ret)
+                               goto out_clear;
+
                        if (!bdev->bd_openers) {
                                bd_set_size(bdev,(loff_t)get_capacity(disk)<<9);
                                bdi = blk_get_backing_dev_info(bdev);
@@ -1128,8 +1138,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                                        bdi = &default_backing_dev_info;
                                bdev_inode_switch_bdi(bdev->bd_inode, bdi);
                        }
-                       if (bdev->bd_invalidated)
-                               rescan_partitions(disk, bdev);
                } else {
                        struct block_device *whole;
                        whole = bdget_disk(disk, 0);
@@ -1153,13 +1161,14 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
                }
        } else {
                if (bdev->bd_contains == bdev) {
-                       if (bdev->bd_disk->fops->open) {
+                       ret = 0;
+                       if (bdev->bd_disk->fops->open)
                                ret = bdev->bd_disk->fops->open(bdev, mode);
-                               if (ret)
-                                       goto out_unlock_bdev;
-                       }
-                       if (bdev->bd_invalidated)
+                       /* the same as first opener case, read comment there */
+                       if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
                                rescan_partitions(bdev->bd_disk, bdev);
+                       if (ret)
+                               goto out_unlock_bdev;
                }
                /* only one opener holds refs to the module and disk */
                module_put(disk->fops->owner);
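The __blkdev_get() hunks above move the rescan_partitions() call so that it also runs when ->open() fails with -ENOMEDIUM, which, as the added comment says, keeps ghost partitions from surviving a removed medium. Condensed to the essential control flow (illustrative, not the literal code):

        ret = 0;
        if (disk->fops->open)
                ret = disk->fops->open(bdev, mode);

        /* Rescan on success and on -ENOMEDIUM; any other error bails out. */
        if (bdev->bd_invalidated && (!ret || ret == -ENOMEDIUM))
                rescan_partitions(disk, bdev);
        if (ret)
                goto out_clear;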
index 31610ea73aec2bff3410ec7c65b3b52e2c7a14ba..a8411c22313d743c8ac222b03f7e58caf185d528 100644 (file)
@@ -7,4 +7,4 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
           extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
           extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
           export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \
-          compression.o delayed-ref.o relocation.o
+          compression.o delayed-ref.o relocation.o delayed-inode.o
index 1a21c99a91b8b437d81c44cb6a8edaafba155475..f66fc99597331383890ac4847329faf6a494ddb0 100644 (file)
@@ -178,12 +178,13 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
 
        if (value) {
                acl = posix_acl_from_xattr(value, size);
+               if (IS_ERR(acl))
+                       return PTR_ERR(acl);
+
                if (acl) {
                        ret = posix_acl_valid(acl);
                        if (ret)
                                goto out;
-               } else if (IS_ERR(acl)) {
-                       return PTR_ERR(acl);
                }
        }
 
index 57c3bb2884ceabd2ea4be939557a6c273c9e8626..d0b0e43a6a8b1f75923511e6a6624fc9c401fe82 100644 (file)
@@ -22,6 +22,7 @@
 #include "extent_map.h"
 #include "extent_io.h"
 #include "ordered-data.h"
+#include "delayed-inode.h"
 
 /* in memory btrfs inode */
 struct btrfs_inode {
@@ -158,14 +159,27 @@ struct btrfs_inode {
         */
        unsigned force_compress:4;
 
+       struct btrfs_delayed_node *delayed_node;
+
        struct inode vfs_inode;
 };
 
+extern unsigned char btrfs_filetype_table[];
+
 static inline struct btrfs_inode *BTRFS_I(struct inode *inode)
 {
        return container_of(inode, struct btrfs_inode, vfs_inode);
 }
 
+static inline u64 btrfs_ino(struct inode *inode)
+{
+       u64 ino = BTRFS_I(inode)->location.objectid;
+
+       if (ino <= BTRFS_FIRST_FREE_OBJECTID)
+               ino = inode->i_ino;
+       return ino;
+}
+
 static inline void btrfs_i_size_write(struct inode *inode, u64 size)
 {
        i_size_write(inode, size);
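The btrfs_ino() helper added above returns the objectid from the inode's location key and only falls back to i_ino for objectids at or below BTRFS_FIRST_FREE_OBJECTID; the compression.c hunk just below shows a caller being converted, including the cast to unsigned long long needed to print a u64 with %llu. A hypothetical caller, for illustration only:

/* Hypothetical helper, not part of this patch. */
static void foo_report_inode(struct inode *inode)
{
        printk(KERN_INFO "btrfs ino %llu (vfs ino %lu)\n",
               (unsigned long long)btrfs_ino(inode), inode->i_ino);
}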
index d4cd0f0cd695dd360702543ea0643d133956a652..bfe42b03eaf9b3cc0bdf14b3975300e9a30b8584 100644 (file)
@@ -125,9 +125,10 @@ static int check_compressed_csum(struct inode *inode,
                kunmap_atomic(kaddr, KM_USER0);
 
                if (csum != *cb_sum) {
-                       printk(KERN_INFO "btrfs csum failed ino %lu "
+                       printk(KERN_INFO "btrfs csum failed ino %llu "
                               "extent %llu csum %u "
-                              "wanted %u mirror %d\n", inode->i_ino,
+                              "wanted %u mirror %d\n",
+                              (unsigned long long)btrfs_ino(inode),
                               (unsigned long long)disk_start,
                               csum, *cb_sum, cb->mirror_num);
                        ret = -EIO;
index fad8f23d70f099d47e5b5fa9ca34f9ec08cbb713..b6cbeed226b1d4c4b60ee6c934e1453a31ba984f 100644 (file)
@@ -38,11 +38,6 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
                              struct extent_buffer *src_buf);
 static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                   struct btrfs_path *path, int level, int slot);
-static int setup_items_for_insert(struct btrfs_trans_handle *trans,
-                       struct btrfs_root *root, struct btrfs_path *path,
-                       struct btrfs_key *cpu_key, u32 *data_size,
-                       u32 total_data, u32 total_size, int nr);
-
 
 struct btrfs_path *btrfs_alloc_path(void)
 {
@@ -74,8 +69,8 @@ noinline void btrfs_set_path_blocking(struct btrfs_path *p)
  * retake all the spinlocks in the path.  You can safely use NULL
  * for held
  */
-static noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
-                                              struct extent_buffer *held)
+noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
+                                       struct extent_buffer *held)
 {
        int i;
 
@@ -3559,11 +3554,10 @@ out:
  * to save stack depth by doing the bulk of the work in a function
  * that doesn't call btrfs_search_slot
  */
-static noinline_for_stack int
-setup_items_for_insert(struct btrfs_trans_handle *trans,
-                     struct btrfs_root *root, struct btrfs_path *path,
-                     struct btrfs_key *cpu_key, u32 *data_size,
-                     u32 total_data, u32 total_size, int nr)
+int setup_items_for_insert(struct btrfs_trans_handle *trans,
+                          struct btrfs_root *root, struct btrfs_path *path,
+                          struct btrfs_key *cpu_key, u32 *data_size,
+                          u32 total_data, u32 total_size, int nr)
 {
        struct btrfs_item *item;
        int i;
index 343304dec6d10c399b34c633b5b6d7c55acb3c26..e7d40791ec9f26f84a41280262bb8056066a23e0 100644 (file)
@@ -105,6 +105,12 @@ struct btrfs_ordered_sum;
 /* For storing free space cache */
 #define BTRFS_FREE_SPACE_OBJECTID -11ULL
 
+/*
+ * The inode number assigned to the special inode for sotring
+ * free ino cache
+ */
+#define BTRFS_FREE_INO_OBJECTID -12ULL
+
 /* dummy objectid represents multiple objectids */
 #define BTRFS_MULTIPLE_OBJECTIDS -255ULL
 
@@ -830,9 +836,6 @@ struct btrfs_block_group_cache {
        u64 bytes_super;
        u64 flags;
        u64 sectorsize;
-       int extents_thresh;
-       int free_extents;
-       int total_bitmaps;
        unsigned int ro:1;
        unsigned int dirty:1;
        unsigned int iref:1;
@@ -847,9 +850,7 @@ struct btrfs_block_group_cache {
        struct btrfs_space_info *space_info;
 
        /* free space cache stuff */
-       spinlock_t tree_lock;
-       struct rb_root free_space_offset;
-       u64 free_space;
+       struct btrfs_free_space_ctl *free_space_ctl;
 
        /* block group cache stuff */
        struct rb_node cache_node;
@@ -869,6 +870,7 @@ struct btrfs_block_group_cache {
 struct reloc_control;
 struct btrfs_device;
 struct btrfs_fs_devices;
+struct btrfs_delayed_root;
 struct btrfs_fs_info {
        u8 fsid[BTRFS_FSID_SIZE];
        u8 chunk_tree_uuid[BTRFS_UUID_SIZE];
@@ -895,7 +897,10 @@ struct btrfs_fs_info {
        /* logical->physical extent mapping */
        struct btrfs_mapping_tree mapping_tree;
 
-       /* block reservation for extent, checksum and root tree */
+       /*
+        * block reservation for extent, checksum, root tree and
+        * delayed dir index item
+        */
        struct btrfs_block_rsv global_block_rsv;
        /* block reservation for delay allocation */
        struct btrfs_block_rsv delalloc_block_rsv;
@@ -1022,6 +1027,7 @@ struct btrfs_fs_info {
         * for the sys_munmap function call path
         */
        struct btrfs_workers fixup_workers;
+       struct btrfs_workers delayed_workers;
        struct task_struct *transaction_kthread;
        struct task_struct *cleaner_kthread;
        int thread_pool_size;
@@ -1079,6 +1085,8 @@ struct btrfs_fs_info {
 
        /* filesystem state */
        u64 fs_state;
+
+       struct btrfs_delayed_root *delayed_root;
 };
 
 /*
@@ -1107,6 +1115,16 @@ struct btrfs_root {
        spinlock_t accounting_lock;
        struct btrfs_block_rsv *block_rsv;
 
+       /* free ino cache stuff */
+       struct mutex fs_commit_mutex;
+       struct btrfs_free_space_ctl *free_ino_ctl;
+       enum btrfs_caching_type cached;
+       spinlock_t cache_lock;
+       wait_queue_head_t cache_wait;
+       struct btrfs_free_space_ctl *free_ino_pinned;
+       u64 cache_progress;
+       struct inode *cache_inode;
+
        struct mutex log_mutex;
        wait_queue_head_t log_writer_wait;
        wait_queue_head_t log_commit_wait[2];
@@ -1161,6 +1179,11 @@ struct btrfs_root {
        /* red-black tree that keeps track of in-memory inodes */
        struct rb_root inode_tree;
 
+       /*
+        * radix tree that keeps track of delayed nodes of every inode,
+        * protected by inode_lock
+        */
+       struct radix_tree_root delayed_nodes_tree;
        /*
         * right now this just gets used so that a root has its own devid
         * for stat.  It may be used for more later
@@ -2034,6 +2057,13 @@ static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info)
 }
 
 /* extent-tree.c */
+static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root,
+                                                int num_items)
+{
+       return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
+               3 * num_items;
+}
+
 void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, unsigned long count);
@@ -2226,6 +2256,8 @@ void btrfs_release_path(struct btrfs_path *p);
 struct btrfs_path *btrfs_alloc_path(void);
 void btrfs_free_path(struct btrfs_path *p);
 void btrfs_set_path_blocking(struct btrfs_path *p);
+void btrfs_clear_path_blocking(struct btrfs_path *p,
+                              struct extent_buffer *held);
 void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
 
 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
@@ -2237,6 +2269,10 @@ static inline int btrfs_del_item(struct btrfs_trans_handle *trans,
        return btrfs_del_items(trans, root, path, path->slots[0], 1);
 }
 
+int setup_items_for_insert(struct btrfs_trans_handle *trans,
+                          struct btrfs_root *root, struct btrfs_path *path,
+                          struct btrfs_key *cpu_key, u32 *data_size,
+                          u32 total_data, u32 total_size, int nr);
 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root, struct btrfs_key *key, void *data, u32 data_size);
 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
@@ -2293,7 +2329,7 @@ void btrfs_check_and_init_root_item(struct btrfs_root_item *item);
 /* dir-item.c */
 int btrfs_insert_dir_item(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, const char *name,
-                         int name_len, u64 dir,
+                         int name_len, struct inode *dir,
                          struct btrfs_key *location, u8 type, u64 index);
 struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
                                             struct btrfs_root *root,
@@ -2338,12 +2374,6 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, u64 offset);
 int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset);
 
-/* inode-map.c */
-int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
-                            struct btrfs_root *fs_root,
-                            u64 dirid, u64 *objectid);
-int btrfs_find_highest_inode(struct btrfs_root *fs_root, u64 *objectid);
-
 /* inode-item.c */
 int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
new file mode 100644 (file)
index 0000000..01e2950
--- /dev/null
@@ -0,0 +1,1695 @@
+/*
+ * Copyright (C) 2011 Fujitsu.  All rights reserved.
+ * Written by Miao Xie <miaox@cn.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#include <linux/slab.h>
+#include "delayed-inode.h"
+#include "disk-io.h"
+#include "transaction.h"
+
+#define BTRFS_DELAYED_WRITEBACK                400
+#define BTRFS_DELAYED_BACKGROUND       100
+
+static struct kmem_cache *delayed_node_cache;
+
+int __init btrfs_delayed_inode_init(void)
+{
+       delayed_node_cache = kmem_cache_create("delayed_node",
+                                       sizeof(struct btrfs_delayed_node),
+                                       0,
+                                       SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+                                       NULL);
+       if (!delayed_node_cache)
+               return -ENOMEM;
+       return 0;
+}
+
+void btrfs_delayed_inode_exit(void)
+{
+       if (delayed_node_cache)
+               kmem_cache_destroy(delayed_node_cache);
+}
+
+static inline void btrfs_init_delayed_node(
+                               struct btrfs_delayed_node *delayed_node,
+                               struct btrfs_root *root, u64 inode_id)
+{
+       delayed_node->root = root;
+       delayed_node->inode_id = inode_id;
+       atomic_set(&delayed_node->refs, 0);
+       delayed_node->count = 0;
+       delayed_node->in_list = 0;
+       delayed_node->inode_dirty = 0;
+       delayed_node->ins_root = RB_ROOT;
+       delayed_node->del_root = RB_ROOT;
+       mutex_init(&delayed_node->mutex);
+       delayed_node->index_cnt = 0;
+       INIT_LIST_HEAD(&delayed_node->n_list);
+       INIT_LIST_HEAD(&delayed_node->p_list);
+       delayed_node->bytes_reserved = 0;
+}
+
+static inline int btrfs_is_continuous_delayed_item(
+                                       struct btrfs_delayed_item *item1,
+                                       struct btrfs_delayed_item *item2)
+{
+       if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
+           item1->key.objectid == item2->key.objectid &&
+           item1->key.type == item2->key.type &&
+           item1->key.offset + 1 == item2->key.offset)
+               return 1;
+       return 0;
+}
+
+static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
+                                                       struct btrfs_root *root)
+{
+       return root->fs_info->delayed_root;
+}
+
+static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
+                                                       struct inode *inode)
+{
+       struct btrfs_delayed_node *node;
+       struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
+       struct btrfs_root *root = btrfs_inode->root;
+       u64 ino = btrfs_ino(inode);
+       int ret;
+
+again:
+       node = ACCESS_ONCE(btrfs_inode->delayed_node);
+       if (node) {
+               atomic_inc(&node->refs);        /* can be accessed */
+               return node;
+       }
+
+       spin_lock(&root->inode_lock);
+       node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
+       if (node) {
+               if (btrfs_inode->delayed_node) {
+                       spin_unlock(&root->inode_lock);
+                       goto again;
+               }
+               btrfs_inode->delayed_node = node;
+               atomic_inc(&node->refs);        /* can be accessed */
+               atomic_inc(&node->refs);        /* cached in the inode */
+               spin_unlock(&root->inode_lock);
+               return node;
+       }
+       spin_unlock(&root->inode_lock);
+
+       node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
+       if (!node)
+               return ERR_PTR(-ENOMEM);
+       btrfs_init_delayed_node(node, root, ino);
+
+       atomic_inc(&node->refs);        /* cached in the btrfs inode */
+       atomic_inc(&node->refs);        /* can be accessed */
+
+       ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
+       if (ret) {
+               kmem_cache_free(delayed_node_cache, node);
+               return ERR_PTR(ret);
+       }
+
+       spin_lock(&root->inode_lock);
+       ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
+       if (ret == -EEXIST) {
+               kmem_cache_free(delayed_node_cache, node);
+               spin_unlock(&root->inode_lock);
+               radix_tree_preload_end();
+               goto again;
+       }
+       btrfs_inode->delayed_node = node;
+       spin_unlock(&root->inode_lock);
+       radix_tree_preload_end();
+
+       return node;
+}
+
+/*
+ * Call it when holding delayed_node->mutex
+ *
+ * If mod = 1, add this node into the prepared list.
+ */
+static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
+                                    struct btrfs_delayed_node *node,
+                                    int mod)
+{
+       spin_lock(&root->lock);
+       if (node->in_list) {
+               if (!list_empty(&node->p_list))
+                       list_move_tail(&node->p_list, &root->prepare_list);
+               else if (mod)
+                       list_add_tail(&node->p_list, &root->prepare_list);
+       } else {
+               list_add_tail(&node->n_list, &root->node_list);
+               list_add_tail(&node->p_list, &root->prepare_list);
+               atomic_inc(&node->refs);        /* inserted into list */
+               root->nodes++;
+               node->in_list = 1;
+       }
+       spin_unlock(&root->lock);
+}
+
+/* Call it when holding delayed_node->mutex */
+static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
+                                      struct btrfs_delayed_node *node)
+{
+       spin_lock(&root->lock);
+       if (node->in_list) {
+               root->nodes--;
+               atomic_dec(&node->refs);        /* not in the list */
+               list_del_init(&node->n_list);
+               if (!list_empty(&node->p_list))
+                       list_del_init(&node->p_list);
+               node->in_list = 0;
+       }
+       spin_unlock(&root->lock);
+}
+
+struct btrfs_delayed_node *btrfs_first_delayed_node(
+                       struct btrfs_delayed_root *delayed_root)
+{
+       struct list_head *p;
+       struct btrfs_delayed_node *node = NULL;
+
+       spin_lock(&delayed_root->lock);
+       if (list_empty(&delayed_root->node_list))
+               goto out;
+
+       p = delayed_root->node_list.next;
+       node = list_entry(p, struct btrfs_delayed_node, n_list);
+       atomic_inc(&node->refs);
+out:
+       spin_unlock(&delayed_root->lock);
+
+       return node;
+}
+
+struct btrfs_delayed_node *btrfs_next_delayed_node(
+                                               struct btrfs_delayed_node *node)
+{
+       struct btrfs_delayed_root *delayed_root;
+       struct list_head *p;
+       struct btrfs_delayed_node *next = NULL;
+
+       delayed_root = node->root->fs_info->delayed_root;
+       spin_lock(&delayed_root->lock);
+       if (!node->in_list) {   /* not in the list */
+               if (list_empty(&delayed_root->node_list))
+                       goto out;
+               p = delayed_root->node_list.next;
+       } else if (list_is_last(&node->n_list, &delayed_root->node_list))
+               goto out;
+       else
+               p = node->n_list.next;
+
+       next = list_entry(p, struct btrfs_delayed_node, n_list);
+       atomic_inc(&next->refs);
+out:
+       spin_unlock(&delayed_root->lock);
+
+       return next;
+}
+
+static void __btrfs_release_delayed_node(
+                               struct btrfs_delayed_node *delayed_node,
+                               int mod)
+{
+       struct btrfs_delayed_root *delayed_root;
+
+       if (!delayed_node)
+               return;
+
+       delayed_root = delayed_node->root->fs_info->delayed_root;
+
+       mutex_lock(&delayed_node->mutex);
+       if (delayed_node->count)
+               btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
+       else
+               btrfs_dequeue_delayed_node(delayed_root, delayed_node);
+       mutex_unlock(&delayed_node->mutex);
+
+       if (atomic_dec_and_test(&delayed_node->refs)) {
+               struct btrfs_root *root = delayed_node->root;
+               spin_lock(&root->inode_lock);
+               if (atomic_read(&delayed_node->refs) == 0) {
+                       radix_tree_delete(&root->delayed_nodes_tree,
+                                         delayed_node->inode_id);
+                       kmem_cache_free(delayed_node_cache, delayed_node);
+               }
+               spin_unlock(&root->inode_lock);
+       }
+}
+
+static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
+{
+       __btrfs_release_delayed_node(node, 0);
+}
+
+struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
+                                       struct btrfs_delayed_root *delayed_root)
+{
+       struct list_head *p;
+       struct btrfs_delayed_node *node = NULL;
+
+       spin_lock(&delayed_root->lock);
+       if (list_empty(&delayed_root->prepare_list))
+               goto out;
+
+       p = delayed_root->prepare_list.next;
+       list_del_init(p);
+       node = list_entry(p, struct btrfs_delayed_node, p_list);
+       atomic_inc(&node->refs);
+out:
+       spin_unlock(&delayed_root->lock);
+
+       return node;
+}
+
+static inline void btrfs_release_prepared_delayed_node(
+                                       struct btrfs_delayed_node *node)
+{
+       __btrfs_release_delayed_node(node, 1);
+}
+
+struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
+{
+       struct btrfs_delayed_item *item;
+       item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
+       if (item) {
+               item->data_len = data_len;
+               item->ins_or_del = 0;
+               item->bytes_reserved = 0;
+               item->block_rsv = NULL;
+               item->delayed_node = NULL;
+               atomic_set(&item->refs, 1);
+       }
+       return item;
+}
+
+/*
+ * __btrfs_lookup_delayed_item - look up the delayed item by key
+ * @delayed_node: pointer to the delayed node
+ * @key:         the key to look up
+ * @prev:        used to store the prev item if the right item isn't found
+ * @next:        used to store the next item if the right item isn't found
+ *
+ * Note: if the exact item isn't found, NULL is returned and the previous
+ * and next items are stored in @prev and @next.
+ */
+static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
+                               struct rb_root *root,
+                               struct btrfs_key *key,
+                               struct btrfs_delayed_item **prev,
+                               struct btrfs_delayed_item **next)
+{
+       struct rb_node *node, *prev_node = NULL;
+       struct btrfs_delayed_item *delayed_item = NULL;
+       int ret = 0;
+
+       node = root->rb_node;
+
+       while (node) {
+               delayed_item = rb_entry(node, struct btrfs_delayed_item,
+                                       rb_node);
+               prev_node = node;
+               ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
+               if (ret < 0)
+                       node = node->rb_right;
+               else if (ret > 0)
+                       node = node->rb_left;
+               else
+                       return delayed_item;
+       }
+
+       if (prev) {
+               if (!prev_node)
+                       *prev = NULL;
+               else if (ret < 0)
+                       *prev = delayed_item;
+               else if ((node = rb_prev(prev_node)) != NULL) {
+                       *prev = rb_entry(node, struct btrfs_delayed_item,
+                                        rb_node);
+               } else
+                       *prev = NULL;
+       }
+
+       if (next) {
+               if (!prev_node)
+                       *next = NULL;
+               else if (ret > 0)
+                       *next = delayed_item;
+               else if ((node = rb_next(prev_node)) != NULL) {
+                       *next = rb_entry(node, struct btrfs_delayed_item,
+                                        rb_node);
+               } else
+                       *next = NULL;
+       }
+       return NULL;
+}
+
+struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
+                                       struct btrfs_delayed_node *delayed_node,
+                                       struct btrfs_key *key)
+{
+       struct btrfs_delayed_item *item;
+
+       item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
+                                          NULL, NULL);
+       return item;
+}
+
+struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
+                                       struct btrfs_delayed_node *delayed_node,
+                                       struct btrfs_key *key)
+{
+       struct btrfs_delayed_item *item;
+
+       item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
+                                          NULL, NULL);
+       return item;
+}
+
+struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
+                                       struct btrfs_delayed_node *delayed_node,
+                                       struct btrfs_key *key)
+{
+       struct btrfs_delayed_item *item, *next;
+
+       item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
+                                          NULL, &next);
+       if (!item)
+               item = next;
+
+       return item;
+}
+
+struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
+                                       struct btrfs_delayed_node *delayed_node,
+                                       struct btrfs_key *key)
+{
+       struct btrfs_delayed_item *item, *next;
+
+       item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
+                                          NULL, &next);
+       if (!item)
+               item = next;
+
+       return item;
+}
+
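+/*
+ * Link a delayed item into the insertion or deletion rb-tree of the delayed
+ * node (keyed by its btrfs key) and bump the item counters.  Returns -EEXIST
+ * if an item with the same key is already queued.
+ */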
+static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
+                                   struct btrfs_delayed_item *ins,
+                                   int action)
+{
+       struct rb_node **p, *node;
+       struct rb_node *parent_node = NULL;
+       struct rb_root *root;
+       struct btrfs_delayed_item *item;
+       int cmp;
+
+       if (action == BTRFS_DELAYED_INSERTION_ITEM)
+               root = &delayed_node->ins_root;
+       else if (action == BTRFS_DELAYED_DELETION_ITEM)
+               root = &delayed_node->del_root;
+       else
+               BUG();
+       p = &root->rb_node;
+       node = &ins->rb_node;
+
+       while (*p) {
+               parent_node = *p;
+               item = rb_entry(parent_node, struct btrfs_delayed_item,
+                                rb_node);
+
+               cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
+               if (cmp < 0)
+                       p = &(*p)->rb_right;
+               else if (cmp > 0)
+                       p = &(*p)->rb_left;
+               else
+                       return -EEXIST;
+       }
+
+       rb_link_node(node, parent_node, p);
+       rb_insert_color(node, root);
+       ins->delayed_node = delayed_node;
+       ins->ins_or_del = action;
+
+       if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
+           action == BTRFS_DELAYED_INSERTION_ITEM &&
+           ins->key.offset >= delayed_node->index_cnt)
+               delayed_node->index_cnt = ins->key.offset + 1;
+
+       delayed_node->count++;
+       atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
+       return 0;
+}
+
+static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
+                                             struct btrfs_delayed_item *item)
+{
+       return __btrfs_add_delayed_item(node, item,
+                                       BTRFS_DELAYED_INSERTION_ITEM);
+}
+
+static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
+                                            struct btrfs_delayed_item *item)
+{
+       return __btrfs_add_delayed_item(node, item,
+                                       BTRFS_DELAYED_DELETION_ITEM);
+}
+
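+/*
+ * Unlink a delayed item from its rb-tree, drop the item counters and wake
+ * up waiters once the number of delayed items falls below the background
+ * threshold.
+ */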
+static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
+{
+       struct rb_root *root;
+       struct btrfs_delayed_root *delayed_root;
+
+       delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;
+
+       BUG_ON(!delayed_root);
+       BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
+              delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);
+
+       if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
+               root = &delayed_item->delayed_node->ins_root;
+       else
+               root = &delayed_item->delayed_node->del_root;
+
+       rb_erase(&delayed_item->rb_node, root);
+       delayed_item->delayed_node->count--;
+       atomic_dec(&delayed_root->items);
+       if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
+           waitqueue_active(&delayed_root->wait))
+               wake_up(&delayed_root->wait);
+}
+
+static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
+{
+       if (item) {
+               __btrfs_remove_delayed_item(item);
+               if (atomic_dec_and_test(&item->refs))
+                       kfree(item);
+       }
+}
+
+struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
+                                       struct btrfs_delayed_node *delayed_node)
+{
+       struct rb_node *p;
+       struct btrfs_delayed_item *item = NULL;
+
+       p = rb_first(&delayed_node->ins_root);
+       if (p)
+               item = rb_entry(p, struct btrfs_delayed_item, rb_node);
+
+       return item;
+}
+
+struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
+                                       struct btrfs_delayed_node *delayed_node)
+{
+       struct rb_node *p;
+       struct btrfs_delayed_item *item = NULL;
+
+       p = rb_first(&delayed_node->del_root);
+       if (p)
+               item = rb_entry(p, struct btrfs_delayed_item, rb_node);
+
+       return item;
+}
+
+struct btrfs_delayed_item *__btrfs_next_delayed_item(
+                                               struct btrfs_delayed_item *item)
+{
+       struct rb_node *p;
+       struct btrfs_delayed_item *next = NULL;
+
+       p = rb_next(&item->rb_node);
+       if (p)
+               next = rb_entry(p, struct btrfs_delayed_item, rb_node);
+
+       return next;
+}
+
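+/* Return the inode's cached delayed node with an extra reference, or NULL. */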
+static inline struct btrfs_delayed_node *btrfs_get_delayed_node(
+                                                       struct inode *inode)
+{
+       struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
+       struct btrfs_delayed_node *delayed_node;
+
+       delayed_node = btrfs_inode->delayed_node;
+       if (delayed_node)
+               atomic_inc(&delayed_node->refs);
+
+       return delayed_node;
+}
+
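+/* Return @root if it matches @root_id, otherwise look the root up by key. */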
+static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
+                                                  u64 root_id)
+{
+       struct btrfs_key root_key;
+
+       if (root->objectid == root_id)
+               return root;
+
+       root_key.objectid = root_id;
+       root_key.type = BTRFS_ROOT_ITEM_KEY;
+       root_key.offset = (u64)-1;
+       return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
+}
+
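+/*
+ * Reserve metadata space for one delayed item by migrating it from the
+ * transaction's block reservation into the global block reservation.
+ */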
+static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
+                                              struct btrfs_root *root,
+                                              struct btrfs_delayed_item *item)
+{
+       struct btrfs_block_rsv *src_rsv;
+       struct btrfs_block_rsv *dst_rsv;
+       u64 num_bytes;
+       int ret;
+
+       if (!trans->bytes_reserved)
+               return 0;
+
+       src_rsv = trans->block_rsv;
+       dst_rsv = &root->fs_info->global_block_rsv;
+
+       num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+       ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
+       if (!ret) {
+               item->bytes_reserved = num_bytes;
+               item->block_rsv = dst_rsv;
+       }
+
+       return ret;
+}
+
+static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
+                                               struct btrfs_delayed_item *item)
+{
+       if (!item->bytes_reserved)
+               return;
+
+       btrfs_block_rsv_release(root, item->block_rsv,
+                               item->bytes_reserved);
+}
+
+static int btrfs_delayed_inode_reserve_metadata(
+                                       struct btrfs_trans_handle *trans,
+                                       struct btrfs_root *root,
+                                       struct btrfs_delayed_node *node)
+{
+       struct btrfs_block_rsv *src_rsv;
+       struct btrfs_block_rsv *dst_rsv;
+       u64 num_bytes;
+       int ret;
+
+       if (!trans->bytes_reserved)
+               return 0;
+
+       src_rsv = trans->block_rsv;
+       dst_rsv = &root->fs_info->global_block_rsv;
+
+       num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+       ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
+       if (!ret)
+               node->bytes_reserved = num_bytes;
+
+       return ret;
+}
+
+static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
+                                               struct btrfs_delayed_node *node)
+{
+       struct btrfs_block_rsv *rsv;
+
+       if (!node->bytes_reserved)
+               return;
+
+       rsv = &root->fs_info->global_block_rsv;
+       btrfs_block_rsv_release(root, rsv,
+                               node->bytes_reserved);
+       node->bytes_reserved = 0;
+}
+
+/*
+ * This helper inserts as many contiguous items as will fit into the same
+ * leaf, based on the leaf's free space.
+ */
+static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
+                               struct btrfs_root *root,
+                               struct btrfs_path *path,
+                               struct btrfs_delayed_item *item)
+{
+       struct btrfs_delayed_item *curr, *next;
+       int free_space;
+       int total_data_size = 0, total_size = 0;
+       struct extent_buffer *leaf;
+       char *data_ptr;
+       struct btrfs_key *keys;
+       u32 *data_size;
+       struct list_head head;
+       int slot;
+       int nitems;
+       int i;
+       int ret = 0;
+
+       BUG_ON(!path->nodes[0]);
+
+       leaf = path->nodes[0];
+       free_space = btrfs_leaf_free_space(root, leaf);
+       INIT_LIST_HEAD(&head);
+
+       next = item;
+       nitems = 0;
+
+       /*
+        * count the number of the continuous items that we can insert in batch
+        */
+       while (total_size + next->data_len + sizeof(struct btrfs_item) <=
+              free_space) {
+               total_data_size += next->data_len;
+               total_size += next->data_len + sizeof(struct btrfs_item);
+               list_add_tail(&next->tree_list, &head);
+               nitems++;
+
+               curr = next;
+               next = __btrfs_next_delayed_item(curr);
+               if (!next)
+                       break;
+
+               if (!btrfs_is_continuous_delayed_item(curr, next))
+                       break;
+       }
+
+       if (!nitems) {
+               ret = 0;
+               goto out;
+       }
+
+       /*
+        * we need to allocate some memory space, but that may cause the task
+        * to sleep, so we set all the locked nodes in the path to blocking
+        * locks first.
+        */
+       btrfs_set_path_blocking(path);
+
+       keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
+       if (!keys) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
+       if (!data_size) {
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       /* get keys of all the delayed items */
+       i = 0;
+       list_for_each_entry(next, &head, tree_list) {
+               keys[i] = next->key;
+               data_size[i] = next->data_len;
+               i++;
+       }
+
+       /* reset all the locked nodes in the path to spinning locks. */
+       btrfs_clear_path_blocking(path, NULL);
+
+       /* insert the keys of the items */
+       ret = setup_items_for_insert(trans, root, path, keys, data_size,
+                                    total_data_size, total_size, nitems);
+       if (ret)
+               goto error;
+
+       /* insert the dir index items */
+       slot = path->slots[0];
+       list_for_each_entry_safe(curr, next, &head, tree_list) {
+               data_ptr = btrfs_item_ptr(leaf, slot, char);
+               write_extent_buffer(leaf, &curr->data,
+                                   (unsigned long)data_ptr,
+                                   curr->data_len);
+               slot++;
+
+               btrfs_delayed_item_release_metadata(root, curr);
+
+               list_del(&curr->tree_list);
+               btrfs_release_delayed_item(curr);
+       }
+
+error:
+       kfree(data_size);
+       kfree(keys);
+out:
+       return ret;
+}
+
+/*
+ * This helper only handles simple insertions that don't need to extend an
+ * existing item with new data, such as directory name index insertion and
+ * inode insertion.
+ */
+static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
+                                    struct btrfs_root *root,
+                                    struct btrfs_path *path,
+                                    struct btrfs_delayed_item *delayed_item)
+{
+       struct extent_buffer *leaf;
+       struct btrfs_item *item;
+       char *ptr;
+       int ret;
+
+       ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
+                                     delayed_item->data_len);
+       if (ret < 0 && ret != -EEXIST)
+               return ret;
+
+       leaf = path->nodes[0];
+
+       item = btrfs_item_nr(leaf, path->slots[0]);
+       ptr = btrfs_item_ptr(leaf, path->slots[0], char);
+
+       write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
+                           delayed_item->data_len);
+       btrfs_mark_buffer_dirty(leaf);
+
+       btrfs_delayed_item_release_metadata(root, delayed_item);
+       return 0;
+}
+
+/*
+ * we insert an item first, and if contiguous items follow it, we try to
+ * insert those items into the same leaf.
+ */
+static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
+                                     struct btrfs_path *path,
+                                     struct btrfs_root *root,
+                                     struct btrfs_delayed_node *node)
+{
+       struct btrfs_delayed_item *curr, *prev;
+       int ret = 0;
+
+do_again:
+       mutex_lock(&node->mutex);
+       curr = __btrfs_first_delayed_insertion_item(node);
+       if (!curr)
+               goto insert_end;
+
+       ret = btrfs_insert_delayed_item(trans, root, path, curr);
+       if (ret < 0) {
+               btrfs_release_path(path);
+               goto insert_end;
+       }
+
+       prev = curr;
+       curr = __btrfs_next_delayed_item(prev);
+       if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
+               /* insert the continuous items into the same leaf */
+               path->slots[0]++;
+               btrfs_batch_insert_items(trans, root, path, curr);
+       }
+       btrfs_release_delayed_item(prev);
+       btrfs_mark_buffer_dirty(path->nodes[0]);
+
+       btrfs_release_path(path);
+       mutex_unlock(&node->mutex);
+       goto do_again;
+
+insert_end:
+       mutex_unlock(&node->mutex);
+       return ret;
+}
+
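+/*
+ * Delete the contiguous run of items in the leaf that match the queued
+ * delayed deletion items, then release those delayed items.
+ */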
+static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
+                                   struct btrfs_root *root,
+                                   struct btrfs_path *path,
+                                   struct btrfs_delayed_item *item)
+{
+       struct btrfs_delayed_item *curr, *next;
+       struct extent_buffer *leaf;
+       struct btrfs_key key;
+       struct list_head head;
+       int nitems, i, last_item;
+       int ret = 0;
+
+       BUG_ON(!path->nodes[0]);
+
+       leaf = path->nodes[0];
+
+       i = path->slots[0];
+       last_item = btrfs_header_nritems(leaf) - 1;
+       if (i > last_item)
+               return -ENOENT; /* FIXME: Is errno suitable? */
+
+       next = item;
+       INIT_LIST_HEAD(&head);
+       btrfs_item_key_to_cpu(leaf, &key, i);
+       nitems = 0;
+       /*
+        * count the number of the dir index items that we can delete in batch
+        */
+       while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
+               list_add_tail(&next->tree_list, &head);
+               nitems++;
+
+               curr = next;
+               next = __btrfs_next_delayed_item(curr);
+               if (!next)
+                       break;
+
+               if (!btrfs_is_continuous_delayed_item(curr, next))
+                       break;
+
+               i++;
+               if (i > last_item)
+                       break;
+               btrfs_item_key_to_cpu(leaf, &key, i);
+       }
+
+       if (!nitems)
+               return 0;
+
+       ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
+       if (ret)
+               goto out;
+
+       list_for_each_entry_safe(curr, next, &head, tree_list) {
+               btrfs_delayed_item_release_metadata(root, curr);
+               list_del(&curr->tree_list);
+               btrfs_release_delayed_item(curr);
+       }
+
+out:
+       return ret;
+}
+
+static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
+                                     struct btrfs_path *path,
+                                     struct btrfs_root *root,
+                                     struct btrfs_delayed_node *node)
+{
+       struct btrfs_delayed_item *curr, *prev;
+       int ret = 0;
+
+do_again:
+       mutex_lock(&node->mutex);
+       curr = __btrfs_first_delayed_deletion_item(node);
+       if (!curr)
+               goto delete_fail;
+
+       ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
+       if (ret < 0)
+               goto delete_fail;
+       else if (ret > 0) {
+               /*
+                * we can't find the item this delayed item points to, so it
+                * is stale; just drop it.
+                */
+               prev = curr;
+               curr = __btrfs_next_delayed_item(prev);
+               btrfs_release_delayed_item(prev);
+               ret = 0;
+               btrfs_release_path(path);
+               if (curr)
+                       goto do_again;
+               else
+                       goto delete_fail;
+       }
+
+       btrfs_batch_delete_items(trans, root, path, curr);
+       btrfs_release_path(path);
+       mutex_unlock(&node->mutex);
+       goto do_again;
+
+delete_fail:
+       btrfs_release_path(path);
+       mutex_unlock(&node->mutex);
+       return ret;
+}
+
+static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
+{
+       struct btrfs_delayed_root *delayed_root;
+
+       if (delayed_node && delayed_node->inode_dirty) {
+               BUG_ON(!delayed_node->root);
+               delayed_node->inode_dirty = 0;
+               delayed_node->count--;
+
+               delayed_root = delayed_node->root->fs_info->delayed_root;
+               atomic_dec(&delayed_root->items);
+               if (atomic_read(&delayed_root->items) <
+                   BTRFS_DELAYED_BACKGROUND &&
+                   waitqueue_active(&delayed_root->wait))
+                       wake_up(&delayed_root->wait);
+       }
+}
+
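+/*
+ * Write the delayed node's cached inode item back into the fs tree and
+ * release the metadata space that was reserved for it.
+ */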
+static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
+                                     struct btrfs_root *root,
+                                     struct btrfs_path *path,
+                                     struct btrfs_delayed_node *node)
+{
+       struct btrfs_key key;
+       struct btrfs_inode_item *inode_item;
+       struct extent_buffer *leaf;
+       int ret;
+
+       mutex_lock(&node->mutex);
+       if (!node->inode_dirty) {
+               mutex_unlock(&node->mutex);
+               return 0;
+       }
+
+       key.objectid = node->inode_id;
+       btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
+       key.offset = 0;
+       ret = btrfs_lookup_inode(trans, root, path, &key, 1);
+       if (ret > 0) {
+               btrfs_release_path(path);
+               mutex_unlock(&node->mutex);
+               return -ENOENT;
+       } else if (ret < 0) {
+               mutex_unlock(&node->mutex);
+               return ret;
+       }
+
+       btrfs_unlock_up_safe(path, 1);
+       leaf = path->nodes[0];
+       inode_item = btrfs_item_ptr(leaf, path->slots[0],
+                                   struct btrfs_inode_item);
+       write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
+                           sizeof(struct btrfs_inode_item));
+       btrfs_mark_buffer_dirty(leaf);
+       btrfs_release_path(path);
+
+       btrfs_delayed_inode_release_metadata(root, node);
+       btrfs_release_delayed_inode(node);
+       mutex_unlock(&node->mutex);
+
+       return 0;
+}
+
+/* Called when committing the transaction. */
+int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
+                           struct btrfs_root *root)
+{
+       struct btrfs_delayed_root *delayed_root;
+       struct btrfs_delayed_node *curr_node, *prev_node;
+       struct btrfs_path *path;
+       int ret = 0;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+       path->leave_spinning = 1;
+
+       delayed_root = btrfs_get_delayed_root(root);
+
+       curr_node = btrfs_first_delayed_node(delayed_root);
+       while (curr_node) {
+               root = curr_node->root;
+               ret = btrfs_insert_delayed_items(trans, path, root,
+                                                curr_node);
+               if (!ret)
+                       ret = btrfs_delete_delayed_items(trans, path, root,
+                                                        curr_node);
+               if (!ret)
+                       ret = btrfs_update_delayed_inode(trans, root, path,
+                                                        curr_node);
+               if (ret) {
+                       btrfs_release_delayed_node(curr_node);
+                       break;
+               }
+
+               prev_node = curr_node;
+               curr_node = btrfs_next_delayed_node(curr_node);
+               btrfs_release_delayed_node(prev_node);
+       }
+
+       btrfs_free_path(path);
+       return ret;
+}
+
+static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+                                             struct btrfs_delayed_node *node)
+{
+       struct btrfs_path *path;
+       int ret;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+       path->leave_spinning = 1;
+
+       ret = btrfs_insert_delayed_items(trans, path, node->root, node);
+       if (!ret)
+               ret = btrfs_delete_delayed_items(trans, path, node->root, node);
+       if (!ret)
+               ret = btrfs_update_delayed_inode(trans, node->root, path, node);
+       btrfs_free_path(path);
+
+       return ret;
+}
+
+int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+                                    struct inode *inode)
+{
+       struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
+       int ret;
+
+       if (!delayed_node)
+               return 0;
+
+       mutex_lock(&delayed_node->mutex);
+       if (!delayed_node->count) {
+               mutex_unlock(&delayed_node->mutex);
+               btrfs_release_delayed_node(delayed_node);
+               return 0;
+       }
+       mutex_unlock(&delayed_node->mutex);
+
+       ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
+       btrfs_release_delayed_node(delayed_node);
+       return ret;
+}
+
+void btrfs_remove_delayed_node(struct inode *inode)
+{
+       struct btrfs_delayed_node *delayed_node;
+
+       delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
+       if (!delayed_node)
+               return;
+
+       BTRFS_I(inode)->delayed_node = NULL;
+       btrfs_release_delayed_node(delayed_node);
+}
+
+struct btrfs_async_delayed_node {
+       struct btrfs_root *root;
+       struct btrfs_delayed_node *delayed_node;
+       struct btrfs_work work;
+};
+
+static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
+{
+       struct btrfs_async_delayed_node *async_node;
+       struct btrfs_trans_handle *trans;
+       struct btrfs_path *path;
+       struct btrfs_delayed_node *delayed_node = NULL;
+       struct btrfs_root *root;
+       unsigned long nr = 0;
+       int need_requeue = 0;
+       int ret;
+
+       async_node = container_of(work, struct btrfs_async_delayed_node, work);
+
+       path = btrfs_alloc_path();
+       if (!path)
+               goto out;
+       path->leave_spinning = 1;
+
+       delayed_node = async_node->delayed_node;
+       root = delayed_node->root;
+
+       trans = btrfs_join_transaction(root, 0);
+       if (IS_ERR(trans))
+               goto free_path;
+
+       ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
+       if (!ret)
+               ret = btrfs_delete_delayed_items(trans, path, root,
+                                                delayed_node);
+
+       if (!ret)
+               btrfs_update_delayed_inode(trans, root, path, delayed_node);
+
+       /*
+        * New delayed items may have been inserted, so we need to requeue
+        * the work. Besides that, we must dequeue empty delayed nodes to
+        * avoid a race between delayed item balancing and the worker.
+        * The race looks like this:
+        *      Task1                           Worker thread
+        *                                      count == 0, needn't requeue
+        *                                        also needn't insert the
+        *                                        delayed node into prepare
+        *                                        list again.
+        *      add lots of delayed items
+        *      queue the delayed node
+        *        already in the list,
+        *        and not in the prepare
+        *        list, it means the delayed
+        *        node is being dealt with
+        *        by the worker.
+        *      do delayed items balance
+        *        the delayed node is being
+        *        dealt with by the worker
+        *        now, just wait.
+        *                                      the worker goto idle.
+        * Task1 will sleep until the transaction is committed.
+        */
+       mutex_lock(&delayed_node->mutex);
+       if (delayed_node->count)
+               need_requeue = 1;
+       else
+               btrfs_dequeue_delayed_node(root->fs_info->delayed_root,
+                                          delayed_node);
+       mutex_unlock(&delayed_node->mutex);
+
+       nr = trans->blocks_used;
+
+       btrfs_end_transaction_dmeta(trans, root);
+       __btrfs_btree_balance_dirty(root, nr);
+free_path:
+       btrfs_free_path(path);
+out:
+       if (need_requeue)
+               btrfs_requeue_work(&async_node->work);
+       else {
+               btrfs_release_prepared_delayed_node(delayed_node);
+               kfree(async_node);
+       }
+}
+
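+/*
+ * Hand prepared delayed nodes off to the delayed-meta worker threads.  If
+ * @all is not set, stop after queuing a few of them.
+ */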
+static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
+                                    struct btrfs_root *root, int all)
+{
+       struct btrfs_async_delayed_node *async_node;
+       struct btrfs_delayed_node *curr;
+       int count = 0;
+
+again:
+       curr = btrfs_first_prepared_delayed_node(delayed_root);
+       if (!curr)
+               return 0;
+
+       async_node = kmalloc(sizeof(*async_node), GFP_NOFS);
+       if (!async_node) {
+               btrfs_release_prepared_delayed_node(curr);
+               return -ENOMEM;
+       }
+
+       async_node->root = root;
+       async_node->delayed_node = curr;
+
+       async_node->work.func = btrfs_async_run_delayed_node_done;
+       async_node->work.flags = 0;
+
+       btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work);
+       count++;
+
+       if (all || count < 4)
+               goto again;
+
+       return 0;
+}
+
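+/*
+ * Throttle the growth of the delayed item trees: kick the async workers once
+ * the number of delayed items passes the background threshold, and wait for
+ * them to catch up once it passes the writeback threshold.
+ */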
+void btrfs_balance_delayed_items(struct btrfs_root *root)
+{
+       struct btrfs_delayed_root *delayed_root;
+
+       delayed_root = btrfs_get_delayed_root(root);
+
+       if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
+               return;
+
+       if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
+               int ret;
+               ret = btrfs_wq_run_delayed_node(delayed_root, root, 1);
+               if (ret)
+                       return;
+
+               wait_event_interruptible_timeout(
+                               delayed_root->wait,
+                               (atomic_read(&delayed_root->items) <
+                                BTRFS_DELAYED_BACKGROUND),
+                               HZ);
+               return;
+       }
+
+       btrfs_wq_run_delayed_node(delayed_root, root, 0);
+}
+
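+/*
+ * Queue a delayed insertion of a dir index item instead of inserting it
+ * into the fs tree right away.
+ */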
+int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
+                                  struct btrfs_root *root, const char *name,
+                                  int name_len, struct inode *dir,
+                                  struct btrfs_disk_key *disk_key, u8 type,
+                                  u64 index)
+{
+       struct btrfs_delayed_node *delayed_node;
+       struct btrfs_delayed_item *delayed_item;
+       struct btrfs_dir_item *dir_item;
+       int ret;
+
+       delayed_node = btrfs_get_or_create_delayed_node(dir);
+       if (IS_ERR(delayed_node))
+               return PTR_ERR(delayed_node);
+
+       delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
+       if (!delayed_item) {
+               ret = -ENOMEM;
+               goto release_node;
+       }
+
+       ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
+       /*
+        * we reserved enough space when we started the transaction, so a
+        * metadata reservation failure here is impossible
+        */
+       BUG_ON(ret);
+
+       delayed_item->key.objectid = btrfs_ino(dir);
+       btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
+       delayed_item->key.offset = index;
+
+       dir_item = (struct btrfs_dir_item *)delayed_item->data;
+       dir_item->location = *disk_key;
+       dir_item->transid = cpu_to_le64(trans->transid);
+       dir_item->data_len = 0;
+       dir_item->name_len = cpu_to_le16(name_len);
+       dir_item->type = type;
+       memcpy((char *)(dir_item + 1), name, name_len);
+
+       mutex_lock(&delayed_node->mutex);
+       ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
+       if (unlikely(ret)) {
+               printk(KERN_ERR "error adding delayed dir index item (name: %s) "
+                               "into the insertion tree of the delayed node "
+                               "(root id: %llu, inode id: %llu, errno: %d)\n",
+                               name,
+                               (unsigned long long)delayed_node->root->objectid,
+                               (unsigned long long)delayed_node->inode_id,
+                               ret);
+               BUG();
+       }
+       mutex_unlock(&delayed_node->mutex);
+
+release_node:
+       btrfs_release_delayed_node(delayed_node);
+       return ret;
+}
+
+static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
+                                              struct btrfs_delayed_node *node,
+                                              struct btrfs_key *key)
+{
+       struct btrfs_delayed_item *item;
+
+       mutex_lock(&node->mutex);
+       item = __btrfs_lookup_delayed_insertion_item(node, key);
+       if (!item) {
+               mutex_unlock(&node->mutex);
+               return 1;
+       }
+
+       btrfs_delayed_item_release_metadata(root, item);
+       btrfs_release_delayed_item(item);
+       mutex_unlock(&node->mutex);
+       return 0;
+}
+
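+/*
+ * Queue a delayed deletion of a dir index item, cancelling a pending delayed
+ * insertion for the same key if one exists.
+ */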
+int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
+                                  struct btrfs_root *root, struct inode *dir,
+                                  u64 index)
+{
+       struct btrfs_delayed_node *node;
+       struct btrfs_delayed_item *item;
+       struct btrfs_key item_key;
+       int ret;
+
+       node = btrfs_get_or_create_delayed_node(dir);
+       if (IS_ERR(node))
+               return PTR_ERR(node);
+
+       item_key.objectid = btrfs_ino(dir);
+       btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
+       item_key.offset = index;
+
+       ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
+       if (!ret)
+               goto end;
+
+       item = btrfs_alloc_delayed_item(0);
+       if (!item) {
+               ret = -ENOMEM;
+               goto end;
+       }
+
+       item->key = item_key;
+
+       ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
+       /*
+        * we reserved enough space when we started the transaction, so a
+        * metadata reservation failure here is impossible.
+        */
+       BUG_ON(ret);
+
+       mutex_lock(&node->mutex);
+       ret = __btrfs_add_delayed_deletion_item(node, item);
+       if (unlikely(ret)) {
+               printk(KERN_ERR "error adding delayed dir index item (index: %llu) "
+                               "into the deletion tree of the delayed node "
+                               "(root id: %llu, inode id: %llu, errno: %d)\n",
+                               (unsigned long long)index,
+                               (unsigned long long)node->root->objectid,
+                               (unsigned long long)node->inode_id,
+                               ret);
+               BUG();
+       }
+       mutex_unlock(&node->mutex);
+end:
+       btrfs_release_delayed_node(node);
+       return ret;
+}
+
+int btrfs_inode_delayed_dir_index_count(struct inode *inode)
+{
+       struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node;
+       int ret = 0;
+
+       if (!delayed_node)
+               return -ENOENT;
+
+       /*
+        * Since we hold the i_mutex of this directory, no new directory index
+        * can be added to the delayed node and index_cnt cannot be updated
+        * now, so we don't need to lock the delayed node.
+        */
+       if (!delayed_node->index_cnt)
+               return -EINVAL;
+
+       BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
+       return ret;
+}
+
+void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
+                            struct list_head *del_list)
+{
+       struct btrfs_delayed_node *delayed_node;
+       struct btrfs_delayed_item *item;
+
+       delayed_node = btrfs_get_delayed_node(inode);
+       if (!delayed_node)
+               return;
+
+       mutex_lock(&delayed_node->mutex);
+       item = __btrfs_first_delayed_insertion_item(delayed_node);
+       while (item) {
+               atomic_inc(&item->refs);
+               list_add_tail(&item->readdir_list, ins_list);
+               item = __btrfs_next_delayed_item(item);
+       }
+
+       item = __btrfs_first_delayed_deletion_item(delayed_node);
+       while (item) {
+               atomic_inc(&item->refs);
+               list_add_tail(&item->readdir_list, del_list);
+               item = __btrfs_next_delayed_item(item);
+       }
+       mutex_unlock(&delayed_node->mutex);
+       /*
+        * This delayed node is still cached in the btrfs inode, so refs
+        * must be > 1 now, and we don't need to check whether it is about
+        * to be freed.
+        *
+        * Besides that, this function is used for readdir, and we do not
+        * insert/delete delayed items during that period, so we don't need
+        * to requeue or dequeue this delayed node either.
+        */
+       atomic_dec(&delayed_node->refs);
+}
+
+void btrfs_put_delayed_items(struct list_head *ins_list,
+                            struct list_head *del_list)
+{
+       struct btrfs_delayed_item *curr, *next;
+
+       list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
+               list_del(&curr->readdir_list);
+               if (atomic_dec_and_test(&curr->refs))
+                       kfree(curr);
+       }
+
+       list_for_each_entry_safe(curr, next, del_list, readdir_list) {
+               list_del(&curr->readdir_list);
+               if (atomic_dec_and_test(&curr->refs))
+                       kfree(curr);
+       }
+}
+
+int btrfs_should_delete_dir_index(struct list_head *del_list,
+                                 u64 index)
+{
+       struct btrfs_delayed_item *curr, *next;
+       int ret;
+
+       if (list_empty(del_list))
+               return 0;
+
+       list_for_each_entry_safe(curr, next, del_list, readdir_list) {
+               if (curr->key.offset > index)
+                       break;
+
+               list_del(&curr->readdir_list);
+               ret = (curr->key.offset == index);
+
+               if (atomic_dec_and_test(&curr->refs))
+                       kfree(curr);
+
+               if (ret)
+                       return 1;
+               else
+                       continue;
+       }
+       return 0;
+}
+
+/*
+ * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
+ */
+int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
+                                   filldir_t filldir,
+                                   struct list_head *ins_list)
+{
+       struct btrfs_dir_item *di;
+       struct btrfs_delayed_item *curr, *next;
+       struct btrfs_key location;
+       char *name;
+       int name_len;
+       int over = 0;
+       unsigned char d_type;
+
+       if (list_empty(ins_list))
+               return 0;
+
+       /*
+        * The data of a delayed item never changes, so we don't need to
+        * lock the items. And since we hold the i_mutex of the directory,
+        * nobody can delete any directory index now.
+        */
+       list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
+               list_del(&curr->readdir_list);
+
+               if (curr->key.offset < filp->f_pos) {
+                       if (atomic_dec_and_test(&curr->refs))
+                               kfree(curr);
+                       continue;
+               }
+
+               filp->f_pos = curr->key.offset;
+
+               di = (struct btrfs_dir_item *)curr->data;
+               name = (char *)(di + 1);
+               name_len = le16_to_cpu(di->name_len);
+
+               d_type = btrfs_filetype_table[di->type];
+               btrfs_disk_key_to_cpu(&location, &di->location);
+
+               over = filldir(dirent, name, name_len, curr->key.offset,
+                              location.objectid, d_type);
+
+               if (atomic_dec_and_test(&curr->refs))
+                       kfree(curr);
+
+               if (over)
+                       return 1;
+       }
+       return 0;
+}
+
+BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
+                        generation, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
+                        sequence, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
+                        transid, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item,
+                        nbytes, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
+                        block_group, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);
+
+BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
+BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);
+
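+/* Copy the VFS inode fields into a CPU-order (stack) btrfs_inode_item. */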
+static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
+                                 struct btrfs_inode_item *inode_item,
+                                 struct inode *inode)
+{
+       btrfs_set_stack_inode_uid(inode_item, inode->i_uid);
+       btrfs_set_stack_inode_gid(inode_item, inode->i_gid);
+       btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
+       btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
+       btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
+       btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
+       btrfs_set_stack_inode_generation(inode_item,
+                                        BTRFS_I(inode)->generation);
+       btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence);
+       btrfs_set_stack_inode_transid(inode_item, trans->transid);
+       btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
+       btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
+       btrfs_set_stack_inode_block_group(inode_item,
+                                         BTRFS_I(inode)->block_group);
+
+       btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
+                                    inode->i_atime.tv_sec);
+       btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
+                                     inode->i_atime.tv_nsec);
+
+       btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
+                                    inode->i_mtime.tv_sec);
+       btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
+                                     inode->i_mtime.tv_nsec);
+
+       btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
+                                    inode->i_ctime.tv_sec);
+       btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
+                                     inode->i_ctime.tv_nsec);
+}
+
+int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
+                              struct btrfs_root *root, struct inode *inode)
+{
+       struct btrfs_delayed_node *delayed_node;
+       int ret;
+
+       delayed_node = btrfs_get_or_create_delayed_node(inode);
+       if (IS_ERR(delayed_node))
+               return PTR_ERR(delayed_node);
+
+       mutex_lock(&delayed_node->mutex);
+       if (delayed_node->inode_dirty) {
+               fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
+               goto release_node;
+       }
+
+       ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node);
+       /*
+        * we reserved enough space when we started the transaction, so a
+        * metadata reservation failure here is impossible
+        */
+       BUG_ON(ret);
+
+       fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
+       delayed_node->inode_dirty = 1;
+       delayed_node->count++;
+       atomic_inc(&root->fs_info->delayed_root->items);
+release_node:
+       mutex_unlock(&delayed_node->mutex);
+       btrfs_release_delayed_node(delayed_node);
+       return ret;
+}
+
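+/*
+ * Throw away every pending insertion/deletion item and the dirty inode item
+ * of a delayed node, releasing the metadata space reserved for them.
+ */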
+static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
+{
+       struct btrfs_root *root = delayed_node->root;
+       struct btrfs_delayed_item *curr_item, *prev_item;
+
+       mutex_lock(&delayed_node->mutex);
+       curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
+       while (curr_item) {
+               btrfs_delayed_item_release_metadata(root, curr_item);
+               prev_item = curr_item;
+               curr_item = __btrfs_next_delayed_item(prev_item);
+               btrfs_release_delayed_item(prev_item);
+       }
+
+       curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
+       while (curr_item) {
+               btrfs_delayed_item_release_metadata(root, curr_item);
+               prev_item = curr_item;
+               curr_item = __btrfs_next_delayed_item(prev_item);
+               btrfs_release_delayed_item(prev_item);
+       }
+
+       if (delayed_node->inode_dirty) {
+               btrfs_delayed_inode_release_metadata(root, delayed_node);
+               btrfs_release_delayed_inode(delayed_node);
+       }
+       mutex_unlock(&delayed_node->mutex);
+}
+
+void btrfs_kill_delayed_inode_items(struct inode *inode)
+{
+       struct btrfs_delayed_node *delayed_node;
+
+       delayed_node = btrfs_get_delayed_node(inode);
+       if (!delayed_node)
+               return;
+
+       __btrfs_kill_delayed_node(delayed_node);
+       btrfs_release_delayed_node(delayed_node);
+}
+
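+/*
+ * Kill the delayed nodes of every inode in the root, walking the radix tree
+ * in small batches.  Used when dropping a dead root.
+ */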
+void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
+{
+       u64 inode_id = 0;
+       struct btrfs_delayed_node *delayed_nodes[8];
+       int i, n;
+
+       while (1) {
+               spin_lock(&root->inode_lock);
+               n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
+                                          (void **)delayed_nodes, inode_id,
+                                          ARRAY_SIZE(delayed_nodes));
+               if (!n) {
+                       spin_unlock(&root->inode_lock);
+                       break;
+               }
+
+               inode_id = delayed_nodes[n - 1]->inode_id + 1;
+
+               for (i = 0; i < n; i++)
+                       atomic_inc(&delayed_nodes[i]->refs);
+               spin_unlock(&root->inode_lock);
+
+               for (i = 0; i < n; i++) {
+                       __btrfs_kill_delayed_node(delayed_nodes[i]);
+                       btrfs_release_delayed_node(delayed_nodes[i]);
+               }
+       }
+}
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
new file mode 100644 (file)
index 0000000..eb7d240
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2011 Fujitsu.  All rights reserved.
+ * Written by Miao Xie <miaox@cn.fujitsu.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __DELAYED_TREE_OPERATION_H
+#define __DELAYED_TREE_OPERATION_H
+
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <asm/atomic.h>
+
+#include "ctree.h"
+
+/* types of the delayed item */
+#define BTRFS_DELAYED_INSERTION_ITEM   1
+#define BTRFS_DELAYED_DELETION_ITEM    2
+
+struct btrfs_delayed_root {
+       spinlock_t lock;
+       struct list_head node_list;
+       /*
+        * Used for delayed nodes which are waiting to be dealt with by the
+        * worker. Once a delayed node is inserted into the work queue, we
+        * drop it from this list.
+        */
+       struct list_head prepare_list;
+       atomic_t items;         /* for delayed items */
+       int nodes;              /* for delayed nodes */
+       wait_queue_head_t wait;
+};
+
+struct btrfs_delayed_node {
+       u64 inode_id;
+       u64 bytes_reserved;
+       struct btrfs_root *root;
+       /* Used to add the node into the delayed root's node list. */
+       struct list_head n_list;
+       /*
+        * Used to add the node into the prepare list; the nodes in this list
+        * are waiting to be dealt with by the async worker.
+        */
+       struct list_head p_list;
+       struct rb_root ins_root;
+       struct rb_root del_root;
+       struct mutex mutex;
+       struct btrfs_inode_item inode_item;
+       atomic_t refs;
+       u64 index_cnt;
+       bool in_list;
+       bool inode_dirty;
+       int count;
+};
+
+struct btrfs_delayed_item {
+       struct rb_node rb_node;
+       struct btrfs_key key;
+       struct list_head tree_list;     /* used for batch insert/delete items */
+       struct list_head readdir_list;  /* used for readdir items */
+       u64 bytes_reserved;
+       struct btrfs_block_rsv *block_rsv;
+       struct btrfs_delayed_node *delayed_node;
+       atomic_t refs;
+       int ins_or_del;
+       u32 data_len;
+       char data[0];
+};
+
+static inline void btrfs_init_delayed_root(
+                               struct btrfs_delayed_root *delayed_root)
+{
+       atomic_set(&delayed_root->items, 0);
+       delayed_root->nodes = 0;
+       spin_lock_init(&delayed_root->lock);
+       init_waitqueue_head(&delayed_root->wait);
+       INIT_LIST_HEAD(&delayed_root->node_list);
+       INIT_LIST_HEAD(&delayed_root->prepare_list);
+}
+
+int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
+                                  struct btrfs_root *root, const char *name,
+                                  int name_len, struct inode *dir,
+                                  struct btrfs_disk_key *disk_key, u8 type,
+                                  u64 index);
+
+int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
+                                  struct btrfs_root *root, struct inode *dir,
+                                  u64 index);
+
+int btrfs_inode_delayed_dir_index_count(struct inode *inode);
+
+int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
+                           struct btrfs_root *root);
+
+void btrfs_balance_delayed_items(struct btrfs_root *root);
+
+int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+                                    struct inode *inode);
+/* Used for evicting the inode. */
+void btrfs_remove_delayed_node(struct inode *inode);
+void btrfs_kill_delayed_inode_items(struct inode *inode);
+
+
+int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
+                              struct btrfs_root *root, struct inode *inode);
+
+/* Used for dropping a dead root */
+void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
+
+/* Used for readdir() */
+void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
+                            struct list_head *del_list);
+void btrfs_put_delayed_items(struct list_head *ins_list,
+                            struct list_head *del_list);
+int btrfs_should_delete_dir_index(struct list_head *del_list,
+                                 u64 index);
+int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
+                                   filldir_t filldir,
+                                   struct list_head *ins_list);
+
+/* for init */
+int __init btrfs_delayed_inode_init(void);
+void btrfs_delayed_inode_exit(void);
+#endif
index ab8afed671a0db0a917e6f5875c9c90642bd80f4..1ddfca78e9109e484c8227d5c6022b907dd51438 100644 (file)
@@ -124,8 +124,9 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans,
  * to use for the second index (if one is created).
  */
 int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
-                         *root, const char *name, int name_len, u64 dir,
-                         struct btrfs_key *location, u8 type, u64 index)
+                         *root, const char *name, int name_len,
+                         struct inode *dir, struct btrfs_key *location,
+                         u8 type, u64 index)
 {
        int ret = 0;
        int ret2 = 0;
@@ -137,13 +138,17 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
        struct btrfs_disk_key disk_key;
        u32 data_size;
 
-       key.objectid = dir;
+       key.objectid = btrfs_ino(dir);
        btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY);
        key.offset = btrfs_name_hash(name, name_len);
 
        path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
        path->leave_spinning = 1;
 
+       btrfs_cpu_key_to_disk(&disk_key, location);
+
        data_size = sizeof(*dir_item) + name_len;
        dir_item = insert_with_overflow(trans, root, path, &key, data_size,
                                        name, name_len);
@@ -155,7 +160,6 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root
        }
 
        leaf = path->nodes[0];
-       btrfs_cpu_key_to_disk(&disk_key, location);
        btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
        btrfs_set_dir_type(leaf, dir_item, type);
        btrfs_set_dir_data_len(leaf, dir_item, 0);
@@ -174,27 +178,9 @@ second_insert:
        }
        btrfs_release_path(path);
 
-       btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
-       key.offset = index;
-       dir_item = insert_with_overflow(trans, root, path, &key, data_size,
-                                       name, name_len);
-       if (IS_ERR(dir_item)) {
-               ret2 = PTR_ERR(dir_item);
-               goto out_free;
-       }
-       leaf = path->nodes[0];
-       btrfs_cpu_key_to_disk(&disk_key, location);
-       btrfs_set_dir_item_key(leaf, dir_item, &disk_key);
-       btrfs_set_dir_type(leaf, dir_item, type);
-       btrfs_set_dir_data_len(leaf, dir_item, 0);
-       btrfs_set_dir_name_len(leaf, dir_item, name_len);
-       btrfs_set_dir_transid(leaf, dir_item, trans->transid);
-       name_ptr = (unsigned long)(dir_item + 1);
-       write_extent_buffer(leaf, name, name_ptr, name_len);
-       btrfs_mark_buffer_dirty(leaf);
-
+       ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir,
+                                             &disk_key, type, index);
 out_free:
-
        btrfs_free_path(path);
        if (ret)
                return ret;
index cb9d1b8bfe745291edb1bc5c7a937011a570bdc9..a2eb3a3755db5fef162093b4e2d58652f2b25bc0 100644 (file)
@@ -42,6 +42,7 @@
 #include "locking.h"
 #include "tree-log.h"
 #include "free-space-cache.h"
+#include "inode-map.h"
 
 static struct extent_io_ops btree_extent_io_ops;
 static void end_workqueue_fn(struct btrfs_work *work);
@@ -1045,6 +1046,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
        root->name = NULL;
        root->in_sysfs = 0;
        root->inode_tree = RB_ROOT;
+       INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
        root->block_rsv = NULL;
        root->orphan_block_rsv = NULL;
 
@@ -1298,6 +1300,19 @@ again:
        if (IS_ERR(root))
                return root;
 
+       root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
+       if (!root->free_ino_ctl)
+               goto fail;
+       root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
+                                       GFP_NOFS);
+       if (!root->free_ino_pinned)
+               goto fail;
+
+       btrfs_init_free_ino_ctl(root);
+       mutex_init(&root->fs_commit_mutex);
+       spin_lock_init(&root->cache_lock);
+       init_waitqueue_head(&root->cache_wait);
+
        set_anon_super(&root->anon_super, NULL);
 
        if (btrfs_root_refs(&root->root_item) == 0) {
@@ -1631,6 +1646,13 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 
        INIT_LIST_HEAD(&fs_info->ordered_extents);
        spin_lock_init(&fs_info->ordered_extent_lock);
+       fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
+                                       GFP_NOFS);
+       if (!fs_info->delayed_root) {
+               err = -ENOMEM;
+               goto fail_iput;
+       }
+       btrfs_init_delayed_root(fs_info->delayed_root);
 
        sb->s_blocksize = 4096;
        sb->s_blocksize_bits = blksize_bits(4096);
@@ -1696,7 +1718,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        bh = btrfs_read_dev_super(fs_devices->latest_bdev);
        if (!bh) {
                err = -EINVAL;
-               goto fail_iput;
+               goto fail_alloc;
        }
 
        memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
@@ -1708,7 +1730,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 
        disk_super = &fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
-               goto fail_iput;
+               goto fail_alloc;
 
        /* check FS state, whether FS is broken. */
        fs_info->fs_state |= btrfs_super_flags(disk_super);
@@ -1724,7 +1746,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        ret = btrfs_parse_options(tree_root, options);
        if (ret) {
                err = ret;
-               goto fail_iput;
+               goto fail_alloc;
        }
 
        features = btrfs_super_incompat_flags(disk_super) &
@@ -1734,7 +1756,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                       "unsupported optional features (%Lx).\n",
                       (unsigned long long)features);
                err = -EINVAL;
-               goto fail_iput;
+               goto fail_alloc;
        }
 
        features = btrfs_super_incompat_flags(disk_super);
@@ -1750,7 +1772,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                       "unsupported option features (%Lx).\n",
                       (unsigned long long)features);
                err = -EINVAL;
-               goto fail_iput;
+               goto fail_alloc;
        }
 
        btrfs_init_workers(&fs_info->generic_worker,
@@ -1797,6 +1819,9 @@ struct btrfs_root *open_ctree(struct super_block *sb,
                           &fs_info->generic_worker);
        btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
                           1, &fs_info->generic_worker);
+       btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
+                          fs_info->thread_pool_size,
+                          &fs_info->generic_worker);
 
        /*
         * endios are largely parallel and should have a very
@@ -1818,6 +1843,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
        btrfs_start_workers(&fs_info->endio_write_workers, 1);
        btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
+       btrfs_start_workers(&fs_info->delayed_workers, 1);
 
        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
        fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
@@ -2074,6 +2100,9 @@ fail_sb_buffer:
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->endio_freespace_worker);
        btrfs_stop_workers(&fs_info->submit_workers);
+       btrfs_stop_workers(&fs_info->delayed_workers);
+fail_alloc:
+       kfree(fs_info->delayed_root);
 fail_iput:
        invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
        iput(fs_info->btree_inode);
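
Note on the error-path hunks above: every failure that can happen after fs_info->delayed_root is allocated is retargeted from fail_iput to the new fail_alloc label, so the allocation is freed before falling through to the original cleanup. A small, self-contained sketch of this goto-unwind pattern, with plain malloc() standing in for the delayed root and the superblock read:

    #include <stdlib.h>

    /* Sketch only: each allocation gets an unwind label; later failures jump
     * to the deepest label so everything allocated so far is released. */
    static int open_ctree_like(void)
    {
        void *delayed_root, *super;
        int err = 0;

        delayed_root = malloc(32);      /* stands in for btrfs_delayed_root */
        if (!delayed_root) {
            err = -1;
            goto fail;                  /* nothing else to free yet */
        }

        super = malloc(64);             /* stands in for the super block read */
        if (!super) {
            err = -1;
            goto fail_alloc;            /* must free delayed_root first */
        }

        free(super);
        free(delayed_root);
        return 0;

    fail_alloc:
        free(delayed_root);
    fail:
        return err;
    }

    int main(void)
    {
        return open_ctree_like() ? 1 : 0;
    }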
@@ -2338,12 +2367,15 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
        if (btrfs_root_refs(&root->root_item) == 0)
                synchronize_srcu(&fs_info->subvol_srcu);
 
+       __btrfs_remove_free_space_cache(root->free_ino_pinned);
+       __btrfs_remove_free_space_cache(root->free_ino_ctl);
        free_fs_root(root);
        return 0;
 }
 
 static void free_fs_root(struct btrfs_root *root)
 {
+       iput(root->cache_inode);
        WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
        if (root->anon_super.s_dev) {
                down_write(&root->anon_super.s_umount);
@@ -2351,6 +2383,8 @@ static void free_fs_root(struct btrfs_root *root)
        }
        free_extent_buffer(root->node);
        free_extent_buffer(root->commit_root);
+       kfree(root->free_ino_ctl);
+       kfree(root->free_ino_pinned);
        kfree(root->name);
        kfree(root);
 }
@@ -2512,6 +2546,7 @@ int close_ctree(struct btrfs_root *root)
        del_fs_roots(fs_info);
 
        iput(fs_info->btree_inode);
+       kfree(fs_info->delayed_root);
 
        btrfs_stop_workers(&fs_info->generic_worker);
        btrfs_stop_workers(&fs_info->fixup_workers);
@@ -2523,6 +2558,7 @@ int close_ctree(struct btrfs_root *root)
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->endio_freespace_worker);
        btrfs_stop_workers(&fs_info->submit_workers);
+       btrfs_stop_workers(&fs_info->delayed_workers);
 
        btrfs_close_devices(fs_info->fs_devices);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);
@@ -2596,6 +2632,29 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
        u64 num_dirty;
        unsigned long thresh = 32 * 1024 * 1024;
 
+       if (current->flags & PF_MEMALLOC)
+               return;
+
+       btrfs_balance_delayed_items(root);
+
+       num_dirty = root->fs_info->dirty_metadata_bytes;
+
+       if (num_dirty > thresh) {
+               balance_dirty_pages_ratelimited_nr(
+                                  root->fs_info->btree_inode->i_mapping, 1);
+       }
+       return;
+}
+
+void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
+{
+       /*
+        * looks as though older kernels can get into trouble with
+        * this code, they end up stuck in balance_dirty_pages forever
+        */
+       u64 num_dirty;
+       unsigned long thresh = 32 * 1024 * 1024;
+
        if (current->flags & PF_MEMALLOC)
                return;
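
Note on the hunk above: btrfs_btree_balance_dirty() now calls btrfs_balance_delayed_items() before throttling on dirty metadata, while the new __btrfs_btree_balance_dirty() appears to keep the old body for callers that should not re-enter the delayed-item balancing (per the comment about older kernels getting stuck in balance_dirty_pages). A trivial, self-contained sketch of the 32 MiB threshold test both variants use; the sample byte counts are invented:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* throttle writers only once dirty metadata passes a fixed threshold */
    static bool should_throttle(uint64_t dirty_metadata_bytes)
    {
        const uint64_t thresh = 32ULL * 1024 * 1024;    /* 32 MiB, as above */
        return dirty_metadata_bytes > thresh;
    }

    int main(void)
    {
        printf("10 MiB dirty -> %d\n", should_throttle(10ULL << 20));   /* 0 */
        printf("64 MiB dirty -> %d\n", should_throttle(64ULL << 20));   /* 1 */
        return 0;
    }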
 
index 2d75f9e896f6115e117d53ae1e4adfc7dd58b7e6..a0b610a67aaeadf1f564d03e218305997c021a85 100644 (file)
@@ -61,6 +61,7 @@ struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
                                              struct btrfs_key *location);
 int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info);
 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
+void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr);
 int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root);
 void btrfs_mark_buffer_dirty(struct extent_buffer *buf);
 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid);
index b4ffad859adb31a5ad5d8628a1fd6a10adae06ec..1b8dc33778f9c411206cc6ad9467546412431947 100644 (file)
@@ -32,7 +32,7 @@ static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
        len  = BTRFS_FID_SIZE_NON_CONNECTABLE;
        type = FILEID_BTRFS_WITHOUT_PARENT;
 
-       fid->objectid = inode->i_ino;
+       fid->objectid = btrfs_ino(inode);
        fid->root_objectid = BTRFS_I(inode)->root->objectid;
        fid->gen = inode->i_generation;
 
@@ -178,13 +178,13 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
        if (!path)
                return ERR_PTR(-ENOMEM);
 
-       if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+       if (btrfs_ino(dir) == BTRFS_FIRST_FREE_OBJECTID) {
                key.objectid = root->root_key.objectid;
                key.type = BTRFS_ROOT_BACKREF_KEY;
                key.offset = (u64)-1;
                root = root->fs_info->tree_root;
        } else {
-               key.objectid = dir->i_ino;
+               key.objectid = btrfs_ino(dir);
                key.type = BTRFS_INODE_REF_KEY;
                key.offset = (u64)-1;
        }
@@ -244,6 +244,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
        struct btrfs_key key;
        int name_len;
        int ret;
+       u64 ino;
 
        if (!dir || !inode)
                return -EINVAL;
@@ -251,19 +252,21 @@ static int btrfs_get_name(struct dentry *parent, char *name,
        if (!S_ISDIR(dir->i_mode))
                return -EINVAL;
 
+       ino = btrfs_ino(inode);
+
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        path->leave_spinning = 1;
 
-       if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+       if (ino == BTRFS_FIRST_FREE_OBJECTID) {
                key.objectid = BTRFS_I(inode)->root->root_key.objectid;
                key.type = BTRFS_ROOT_BACKREF_KEY;
                key.offset = (u64)-1;
                root = root->fs_info->tree_root;
        } else {
-               key.objectid = inode->i_ino;
-               key.offset = dir->i_ino;
+               key.objectid = ino;
+               key.offset = btrfs_ino(dir);
                key.type = BTRFS_INODE_REF_KEY;
        }
 
@@ -272,7 +275,7 @@ static int btrfs_get_name(struct dentry *parent, char *name,
                btrfs_free_path(path);
                return ret;
        } else if (ret > 0) {
-               if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+               if (ino == BTRFS_FIRST_FREE_OBJECTID) {
                        path->slots[0]--;
                } else {
                        btrfs_free_path(path);
@@ -281,11 +284,11 @@ static int btrfs_get_name(struct dentry *parent, char *name,
        }
        leaf = path->nodes[0];
 
-       if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
-              rref = btrfs_item_ptr(leaf, path->slots[0],
+       if (ino == BTRFS_FIRST_FREE_OBJECTID) {
+               rref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_root_ref);
-              name_ptr = (unsigned long)(rref + 1);
-              name_len = btrfs_root_ref_name_len(leaf, rref);
+               name_ptr = (unsigned long)(rref + 1);
+               name_len = btrfs_root_ref_name_len(leaf, rref);
        } else {
                iref = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_inode_ref);
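
Note on this file (and the rest of the series): reads of inode->i_ino are replaced with btrfs_ino(inode). A userspace-only sketch of what that helper is assumed to provide, namely the full 64-bit per-root object id stored alongside the VFS inode rather than the unsigned long i_ino, which can truncate on 32-bit hosts; the objectid field name below is an assumption:

    #include <stdio.h>
    #include <stdint.h>

    struct vfs_inode   { unsigned long i_ino; };
    struct btrfs_inode { struct vfs_inode vfs; uint64_t objectid; };

    /* assumed shape of btrfs_ino(): return the btrfs object id, not i_ino */
    static inline uint64_t btrfs_ino_sketch(const struct btrfs_inode *bi)
    {
        return bi->objectid;
    }

    int main(void)
    {
        struct btrfs_inode bi = { .vfs = { .i_ino = 257 }, .objectid = 257 };
        printf("ino=%llu\n", (unsigned long long)btrfs_ino_sketch(&bi));
        return 0;
    }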
index b457f195636eb478408a8db8abca5123896df3b9..98ca149bdbc890ef1e911012e2b1c98285e200b0 100644 (file)
@@ -105,6 +105,7 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
                WARN_ON(cache->pinned > 0);
                WARN_ON(cache->reserved > 0);
                WARN_ON(cache->reserved_pinned > 0);
+               kfree(cache->free_space_ctl);
                kfree(cache);
        }
 }
@@ -3036,7 +3037,8 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
        /* make sure bytes are sectorsize aligned */
        bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
 
-       if (root == root->fs_info->tree_root) {
+       if (root == root->fs_info->tree_root ||
+           BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
                alloc_chunk = 0;
                committed = 1;
        }
@@ -3834,12 +3836,6 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
        WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
 }
 
-static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items)
-{
-       return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
-               3 * num_items;
-}
-
 int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 int num_items)
@@ -3850,7 +3846,7 @@ int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
        if (num_items == 0 || root->fs_info->chunk_root == root)
                return 0;
 
-       num_bytes = calc_trans_metadata_size(root, num_items);
+       num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
        ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
                                  num_bytes);
        if (!ret) {
@@ -3889,14 +3885,14 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
         * If all of the metadata space is used, we can commit
         * transaction and use space it freed.
         */
-       u64 num_bytes = calc_trans_metadata_size(root, 4);
+       u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4);
        return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
 }
 
 void btrfs_orphan_release_metadata(struct inode *inode)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
-       u64 num_bytes = calc_trans_metadata_size(root, 4);
+       u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4);
        btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
 }
 
@@ -3910,7 +3906,7 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
         * two for root back/forward refs, two for directory entries
         * and one for root of the snapshot.
         */
-       u64 num_bytes = calc_trans_metadata_size(root, 5);
+       u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5);
        dst_rsv->space_info = src_rsv->space_info;
        return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
 }
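
Note on the reservation hunks: the static calc_trans_metadata_size() removed above is replaced at every call site by btrfs_calc_trans_metadata_size(), presumably keeping the same formula, (leafsize + nodesize * (BTRFS_MAX_LEVEL - 1)) * 3 * num_items. A worked, self-contained example of that formula; the 4 KiB node and leaf sizes are assumptions for illustration, and num_items = 5 matches the snapshot reservation in the hunk above:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint64_t leafsize = 4096, nodesize = 4096;    /* assumed sizes */
        const int BTRFS_MAX_LEVEL = 8;
        int num_items = 5;                                  /* snapshot case */
        uint64_t bytes = (leafsize + nodesize * (BTRFS_MAX_LEVEL - 1))
                         * 3 * num_items;

        /* (4096 + 4096 * 7) * 3 * 5 = 491520 bytes reserved */
        printf("%d items -> %llu bytes reserved\n", num_items,
               (unsigned long long)bytes);
        return 0;
    }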
@@ -3939,7 +3935,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 
        if (nr_extents > reserved_extents) {
                nr_extents -= reserved_extents;
-               to_reserve = calc_trans_metadata_size(root, nr_extents);
+               to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
        } else {
                nr_extents = 0;
                to_reserve = 0;
@@ -3993,7 +3989,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
 
        to_free = calc_csum_metadata_size(inode, num_bytes);
        if (nr_extents > 0)
-               to_free += calc_trans_metadata_size(root, nr_extents);
+               to_free += btrfs_calc_trans_metadata_size(root, nr_extents);
 
        btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
                                to_free);
@@ -4754,7 +4750,7 @@ wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
                return 0;
 
        wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
-                  (cache->free_space >= num_bytes));
+                  (cache->free_space_ctl->free_space >= num_bytes));
 
        put_caching_control(caching_ctl);
        return 0;
@@ -6908,10 +6904,16 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                        ret = -ENOMEM;
                        goto error;
                }
+               cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
+                                               GFP_NOFS);
+               if (!cache->free_space_ctl) {
+                       kfree(cache);
+                       ret = -ENOMEM;
+                       goto error;
+               }
 
                atomic_set(&cache->count, 1);
                spin_lock_init(&cache->lock);
-               spin_lock_init(&cache->tree_lock);
                cache->fs_info = info;
                INIT_LIST_HEAD(&cache->list);
                INIT_LIST_HEAD(&cache->cluster_list);
@@ -6919,14 +6921,6 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                if (need_clear)
                        cache->disk_cache_state = BTRFS_DC_CLEAR;
 
-               /*
-                * we only want to have 32k of ram per block group for keeping
-                * track of free space, and if we pass 1/2 of that we want to
-                * start converting things over to using bitmaps
-                */
-               cache->extents_thresh = ((1024 * 32) / 2) /
-                       sizeof(struct btrfs_free_space);
-
                read_extent_buffer(leaf, &cache->item,
                                   btrfs_item_ptr_offset(leaf, path->slots[0]),
                                   sizeof(cache->item));
@@ -6937,6 +6931,8 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                cache->flags = btrfs_block_group_flags(&cache->item);
                cache->sectorsize = root->sectorsize;
 
+               btrfs_init_free_space_ctl(cache);
+
                /*
                 * We need to exclude the super stripes now so that the space
                 * info has super bytes accounted for, otherwise we'll think
@@ -7023,6 +7019,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
        cache = kzalloc(sizeof(*cache), GFP_NOFS);
        if (!cache)
                return -ENOMEM;
+       cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
+                                       GFP_NOFS);
+       if (!cache->free_space_ctl) {
+               kfree(cache);
+               return -ENOMEM;
+       }
 
        cache->key.objectid = chunk_offset;
        cache->key.offset = size;
@@ -7030,19 +7032,13 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
        cache->sectorsize = root->sectorsize;
        cache->fs_info = root->fs_info;
 
-       /*
-        * we only want to have 32k of ram per block group for keeping track
-        * of free space, and if we pass 1/2 of that we want to start
-        * converting things over to using bitmaps
-        */
-       cache->extents_thresh = ((1024 * 32) / 2) /
-               sizeof(struct btrfs_free_space);
        atomic_set(&cache->count, 1);
        spin_lock_init(&cache->lock);
-       spin_lock_init(&cache->tree_lock);
        INIT_LIST_HEAD(&cache->list);
        INIT_LIST_HEAD(&cache->cluster_list);
 
+       btrfs_init_free_space_ctl(cache);
+
        btrfs_set_block_group_used(&cache->item, bytes_used);
        btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
        cache->flags = type;
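
Note on the two hunks above: the in-place extents_thresh computation and its comment are dropped from both btrfs_read_block_groups() and btrfs_make_block_group(); the sizing rule (at most 32 KiB of RAM per block group for extent entries, switching to bitmaps once half of that is used) is presumably carried into btrfs_init_free_space_ctl(). A self-contained rendering of that rule; the 40-byte entry size is an assumed sizeof(struct btrfs_free_space):

    #include <stdio.h>

    int main(void)
    {
        const unsigned ram_per_group = 32 * 1024;   /* 32 KiB budget */
        const unsigned entry_size = 40;             /* assumed entry size */
        unsigned extents_thresh = (ram_per_group / 2) / entry_size;

        printf("switch to bitmaps after %u extent entries\n", extents_thresh);
        return 0;
    }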
@@ -7209,23 +7205,38 @@ out:
 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
 {
        struct btrfs_space_info *space_info;
+       struct btrfs_super_block *disk_super;
+       u64 features;
+       u64 flags;
+       int mixed = 0;
        int ret;
 
-       ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM, 0, 0,
-                                                                &space_info);
-       if (ret)
-               return ret;
+       disk_super = &fs_info->super_copy;
+       if (!btrfs_super_root(disk_super))
+               return 1;
 
-       ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA, 0, 0,
-                                                                &space_info);
-       if (ret)
-               return ret;
+       features = btrfs_super_incompat_flags(disk_super);
+       if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
+               mixed = 1;
 
-       ret = update_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA, 0, 0,
-                                                                &space_info);
+       flags = BTRFS_BLOCK_GROUP_SYSTEM;
+       ret = update_space_info(fs_info, flags, 0, 0, &space_info);
        if (ret)
-               return ret;
+               goto out;
 
+       if (mixed) {
+               flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
+               ret = update_space_info(fs_info, flags, 0, 0, &space_info);
+       } else {
+               flags = BTRFS_BLOCK_GROUP_METADATA;
+               ret = update_space_info(fs_info, flags, 0, 0, &space_info);
+               if (ret)
+                       goto out;
+
+               flags = BTRFS_BLOCK_GROUP_DATA;
+               ret = update_space_info(fs_info, flags, 0, 0, &space_info);
+       }
+out:
        return ret;
 }
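
Note on the hunk above: btrfs_init_space_info() now inspects the superblock itself; when BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS is set, data and metadata share a single space_info, otherwise separate ones are created. A minimal sketch of the flag test; the bit positions below are placeholders, only the bitwise pattern is the point:

    #include <stdio.h>
    #include <stdint.h>

    #define INCOMPAT_MIXED_GROUPS   (1ULL << 2)     /* placeholder bit */
    #define GROUP_DATA              (1ULL << 0)     /* placeholder bit */
    #define GROUP_METADATA          (1ULL << 1)     /* placeholder bit */

    int main(void)
    {
        uint64_t features = INCOMPAT_MIXED_GROUPS;  /* pretend superblock flags */
        uint64_t flags;

        if (features & INCOMPAT_MIXED_GROUPS)
            flags = GROUP_METADATA | GROUP_DATA;    /* one shared space_info */
        else
            flags = GROUP_METADATA;                 /* separate pools */

        printf("space_info flags: 0x%llx\n", (unsigned long long)flags);
        return 0;
    }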
 
index 91208296ff2bff8717b05824525ec31bf0216fcf..64c8b361b5395fdb867d86876f06f9cea4731123 100644 (file)
@@ -2810,7 +2810,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
         * because there might be preallocation past i_size
         */
        ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
-                                      path, inode->i_ino, -1, 0);
+                                      path, btrfs_ino(inode), -1, 0);
        if (ret < 0) {
                btrfs_free_path(path);
                return ret;
@@ -2823,7 +2823,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
        found_type = btrfs_key_type(&found_key);
 
        /* No extents, but there might be delalloc bits */
-       if (found_key.objectid != inode->i_ino ||
+       if (found_key.objectid != btrfs_ino(inode) ||
            found_type != BTRFS_EXTENT_DATA_KEY) {
                /* have to trust i_size as the end */
                last = (u64)-1;
index f47e43d855aa811cb09d418f2e4f9b7a3991d8e5..29e014984f60b250af967f618897a9a26ccf3bef 100644 (file)
@@ -208,8 +208,9 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
                                                EXTENT_NODATASUM, GFP_NOFS);
                                } else {
                                        printk(KERN_INFO "btrfs no csum found "
-                                              "for inode %lu start %llu\n",
-                                              inode->i_ino,
+                                              "for inode %llu start %llu\n",
+                                              (unsigned long long)
+                                              btrfs_ino(inode),
                                               (unsigned long long)offset);
                                }
                                item = NULL;
index 566bdf298ea873140f45e7d263aee3c89b17ba5c..58ddc4442159c59a0e646b33ed17919321c61302 100644 (file)
@@ -298,6 +298,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_key new_key;
+       u64 ino = btrfs_ino(inode);
        u64 search_start = start;
        u64 disk_bytenr = 0;
        u64 num_bytes = 0;
@@ -318,14 +319,14 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
 
        while (1) {
                recow = 0;
-               ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+               ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               search_start, -1);
                if (ret < 0)
                        break;
                if (ret > 0 && path->slots[0] > 0 && search_start == start) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
-                       if (key.objectid == inode->i_ino &&
+                       if (key.objectid == ino &&
                            key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
@@ -346,7 +347,7 @@ next_slot:
                }
 
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-               if (key.objectid > inode->i_ino ||
+               if (key.objectid > ino ||
                    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
                        break;
 
@@ -592,6 +593,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
        int del_slot = 0;
        int recow;
        int ret;
+       u64 ino = btrfs_ino(inode);
 
        btrfs_drop_extent_cache(inode, start, end - 1, 0);
 
@@ -600,7 +602,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
 again:
        recow = 0;
        split = start;
-       key.objectid = inode->i_ino;
+       key.objectid = ino;
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = split;
 
@@ -612,8 +614,7 @@ again:
 
        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
-       BUG_ON(key.objectid != inode->i_ino ||
-              key.type != BTRFS_EXTENT_DATA_KEY);
+       BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
        fi = btrfs_item_ptr(leaf, path->slots[0],
                            struct btrfs_file_extent_item);
        BUG_ON(btrfs_file_extent_type(leaf, fi) !=
@@ -630,7 +631,7 @@ again:
                other_start = 0;
                other_end = start;
                if (extent_mergeable(leaf, path->slots[0] - 1,
-                                    inode->i_ino, bytenr, orig_offset,
+                                    ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        new_key.offset = end;
                        btrfs_set_item_key_safe(trans, root, path, &new_key);
@@ -653,7 +654,7 @@ again:
                other_start = end;
                other_end = 0;
                if (extent_mergeable(leaf, path->slots[0] + 1,
-                                    inode->i_ino, bytenr, orig_offset,
+                                    ino, bytenr, orig_offset,
                                     &other_start, &other_end)) {
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
@@ -702,7 +703,7 @@ again:
 
                ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                           root->root_key.objectid,
-                                          inode->i_ino, orig_offset);
+                                          ino, orig_offset);
                BUG_ON(ret);
 
                if (split == start) {
@@ -718,7 +719,7 @@ again:
        other_start = end;
        other_end = 0;
        if (extent_mergeable(leaf, path->slots[0] + 1,
-                            inode->i_ino, bytenr, orig_offset,
+                            ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
@@ -729,13 +730,13 @@ again:
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
-                                       inode->i_ino, orig_offset);
+                                       ino, orig_offset);
                BUG_ON(ret);
        }
        other_start = 0;
        other_end = start;
        if (extent_mergeable(leaf, path->slots[0] - 1,
-                            inode->i_ino, bytenr, orig_offset,
+                            ino, bytenr, orig_offset,
                             &other_start, &other_end)) {
                if (recow) {
                        btrfs_release_path(path);
@@ -746,7 +747,7 @@ again:
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
-                                       inode->i_ino, orig_offset);
+                                       ino, orig_offset);
                BUG_ON(ret);
        }
        if (del_nr == 0) {
index 0290b0c7b0038a3d05c724a22037cd7ed2d6e489..70d45795d758e63cd6303327245e80aeffcfa7ae 100644 (file)
 #include "transaction.h"
 #include "disk-io.h"
 #include "extent_io.h"
+#include "inode-map.h"
 
 #define BITS_PER_BITMAP                (PAGE_CACHE_SIZE * 8)
 #define MAX_CACHE_BYTES_PER_GIG        (32 * 1024)
 
-static void recalculate_thresholds(struct btrfs_block_group_cache
-                                  *block_group);
-static int link_free_space(struct btrfs_block_group_cache *block_group,
+static int link_free_space(struct btrfs_free_space_ctl *ctl,
                           struct btrfs_free_space *info);
 
-struct inode *lookup_free_space_inode(struct btrfs_root *root,
-                                     struct btrfs_block_group_cache
-                                     *block_group, struct btrfs_path *path)
+static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
+                                              struct btrfs_path *path,
+                                              u64 offset)
 {
        struct btrfs_key key;
        struct btrfs_key location;
@@ -46,15 +45,8 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
        struct inode *inode = NULL;
        int ret;
 
-       spin_lock(&block_group->lock);
-       if (block_group->inode)
-               inode = igrab(block_group->inode);
-       spin_unlock(&block_group->lock);
-       if (inode)
-               return inode;
-
        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
-       key.offset = block_group->key.objectid;
+       key.offset = offset;
        key.type = 0;
 
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@@ -84,6 +76,27 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
 
        inode->i_mapping->flags &= ~__GFP_FS;
 
+       return inode;
+}
+
+struct inode *lookup_free_space_inode(struct btrfs_root *root,
+                                     struct btrfs_block_group_cache
+                                     *block_group, struct btrfs_path *path)
+{
+       struct inode *inode = NULL;
+
+       spin_lock(&block_group->lock);
+       if (block_group->inode)
+               inode = igrab(block_group->inode);
+       spin_unlock(&block_group->lock);
+       if (inode)
+               return inode;
+
+       inode = __lookup_free_space_inode(root, path,
+                                         block_group->key.objectid);
+       if (IS_ERR(inode))
+               return inode;
+
        spin_lock(&block_group->lock);
        if (!root->fs_info->closing) {
                block_group->inode = igrab(inode);
@@ -94,24 +107,18 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
        return inode;
 }
 
-int create_free_space_inode(struct btrfs_root *root,
-                           struct btrfs_trans_handle *trans,
-                           struct btrfs_block_group_cache *block_group,
-                           struct btrfs_path *path)
+int __create_free_space_inode(struct btrfs_root *root,
+                             struct btrfs_trans_handle *trans,
+                             struct btrfs_path *path, u64 ino, u64 offset)
 {
        struct btrfs_key key;
        struct btrfs_disk_key disk_key;
        struct btrfs_free_space_header *header;
        struct btrfs_inode_item *inode_item;
        struct extent_buffer *leaf;
-       u64 objectid;
        int ret;
 
-       ret = btrfs_find_free_objectid(trans, root, 0, &objectid);
-       if (ret < 0)
-               return ret;
-
-       ret = btrfs_insert_empty_inode(trans, root, path, objectid);
+       ret = btrfs_insert_empty_inode(trans, root, path, ino);
        if (ret)
                return ret;
 
@@ -131,13 +138,12 @@ int create_free_space_inode(struct btrfs_root *root,
                              BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM);
        btrfs_set_inode_nlink(leaf, inode_item, 1);
        btrfs_set_inode_transid(leaf, inode_item, trans->transid);
-       btrfs_set_inode_block_group(leaf, inode_item,
-                                   block_group->key.objectid);
+       btrfs_set_inode_block_group(leaf, inode_item, offset);
        btrfs_mark_buffer_dirty(leaf);
        btrfs_release_path(path);
 
        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
-       key.offset = block_group->key.objectid;
+       key.offset = offset;
        key.type = 0;
 
        ret = btrfs_insert_empty_item(trans, root, path, &key,
@@ -157,6 +163,22 @@ int create_free_space_inode(struct btrfs_root *root,
        return 0;
 }
 
+int create_free_space_inode(struct btrfs_root *root,
+                           struct btrfs_trans_handle *trans,
+                           struct btrfs_block_group_cache *block_group,
+                           struct btrfs_path *path)
+{
+       int ret;
+       u64 ino;
+
+       ret = btrfs_find_free_objectid(root, &ino);
+       if (ret < 0)
+               return ret;
+
+       return __create_free_space_inode(root, trans, path, ino,
+                                        block_group->key.objectid);
+}
+
 int btrfs_truncate_free_space_cache(struct btrfs_root *root,
                                    struct btrfs_trans_handle *trans,
                                    struct btrfs_path *path,
@@ -187,7 +209,8 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
                return ret;
        }
 
-       return btrfs_update_inode(trans, root, inode);
+       ret = btrfs_update_inode(trans, root, inode);
+       return ret;
 }
 
 static int readahead_cache(struct inode *inode)
@@ -209,15 +232,13 @@ static int readahead_cache(struct inode *inode)
        return 0;
 }
 
-int load_free_space_cache(struct btrfs_fs_info *fs_info,
-                         struct btrfs_block_group_cache *block_group)
+int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+                           struct btrfs_free_space_ctl *ctl,
+                           struct btrfs_path *path, u64 offset)
 {
-       struct btrfs_root *root = fs_info->tree_root;
-       struct inode *inode;
        struct btrfs_free_space_header *header;
        struct extent_buffer *leaf;
        struct page *page;
-       struct btrfs_path *path;
        u32 *checksums = NULL, *crc;
        char *disk_crcs = NULL;
        struct btrfs_key key;
@@ -225,76 +246,47 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
        u64 num_entries;
        u64 num_bitmaps;
        u64 generation;
-       u64 used = btrfs_block_group_used(&block_group->item);
        u32 cur_crc = ~(u32)0;
        pgoff_t index = 0;
        unsigned long first_page_offset;
        int num_checksums;
-       int ret = 0;
-
-       /*
-        * If we're unmounting then just return, since this does a search on the
-        * normal root and not the commit root and we could deadlock.
-        */
-       smp_mb();
-       if (fs_info->closing)
-               return 0;
-
-       /*
-        * If this block group has been marked to be cleared for one reason or
-        * another then we can't trust the on disk cache, so just return.
-        */
-       spin_lock(&block_group->lock);
-       if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
-               spin_unlock(&block_group->lock);
-               return 0;
-       }
-       spin_unlock(&block_group->lock);
+       int ret = 0, ret2;
 
        INIT_LIST_HEAD(&bitmaps);
 
-       path = btrfs_alloc_path();
-       if (!path)
-               return 0;
-
-       inode = lookup_free_space_inode(root, block_group, path);
-       if (IS_ERR(inode)) {
-               btrfs_free_path(path);
-               return 0;
-       }
-
        /* Nothing in the space cache, goodbye */
-       if (!i_size_read(inode)) {
-               btrfs_free_path(path);
+       if (!i_size_read(inode))
                goto out;
-       }
 
        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
-       key.offset = block_group->key.objectid;
+       key.offset = offset;
        key.type = 0;
 
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-       if (ret) {
-               btrfs_free_path(path);
+       if (ret < 0)
+               goto out;
+       else if (ret > 0) {
+               btrfs_release_path(path);
+               ret = 0;
                goto out;
        }
 
+       ret = -1;
+
        leaf = path->nodes[0];
        header = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_free_space_header);
        num_entries = btrfs_free_space_entries(leaf, header);
        num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
        generation = btrfs_free_space_generation(leaf, header);
-       btrfs_free_path(path);
+       btrfs_release_path(path);
 
        if (BTRFS_I(inode)->generation != generation) {
                printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
-                      " not match free space cache generation (%llu) for "
-                      "block group %llu\n",
+                      " not match free space cache generation (%llu)\n",
                       (unsigned long long)BTRFS_I(inode)->generation,
-                      (unsigned long long)generation,
-                      (unsigned long long)block_group->key.objectid);
-               goto free_cache;
+                      (unsigned long long)generation);
+               goto out;
        }
 
        if (!num_entries)
@@ -311,10 +303,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
                goto out;
 
        ret = readahead_cache(inode);
-       if (ret) {
-               ret = 0;
+       if (ret)
                goto out;
-       }
 
        while (1) {
                struct btrfs_free_space_entry *entry;
@@ -333,10 +323,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
                }
 
                page = grab_cache_page(inode->i_mapping, index);
-               if (!page) {
-                       ret = 0;
+               if (!page)
                        goto free_cache;
-               }
 
                if (!PageUptodate(page)) {
                        btrfs_readpage(NULL, page);
@@ -345,9 +333,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
                                unlock_page(page);
                                page_cache_release(page);
                                printk(KERN_ERR "btrfs: error reading free "
-                                      "space cache: %llu\n",
-                                      (unsigned long long)
-                                      block_group->key.objectid);
+                                      "space cache\n");
                                goto free_cache;
                        }
                }
@@ -360,13 +346,10 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
                        gen = addr + (sizeof(u32) * num_checksums);
                        if (*gen != BTRFS_I(inode)->generation) {
                                printk(KERN_ERR "btrfs: space cache generation"
-                                      " (%llu) does not match inode (%llu) "
-                                      "for block group %llu\n",
+                                      " (%llu) does not match inode (%llu)\n",
                                       (unsigned long long)*gen,
                                       (unsigned long long)
-                                      BTRFS_I(inode)->generation,
-                                      (unsigned long long)
-                                      block_group->key.objectid);
+                                      BTRFS_I(inode)->generation);
                                kunmap(page);
                                unlock_page(page);
                                page_cache_release(page);
@@ -382,9 +365,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
                                          PAGE_CACHE_SIZE - start_offset);
                btrfs_csum_final(cur_crc, (char *)&cur_crc);
                if (cur_crc != *crc) {
-                       printk(KERN_ERR "btrfs: crc mismatch for page %lu in "
-                              "block group %llu\n", index,
-                              (unsigned long long)block_group->key.objectid);
+                       printk(KERN_ERR "btrfs: crc mismatch for page %lu\n",
+                              index);
                        kunmap(page);
                        unlock_page(page);
                        page_cache_release(page);
@@ -417,9 +399,9 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
                        }
 
                        if (entry->type == BTRFS_FREE_SPACE_EXTENT) {
-                               spin_lock(&block_group->tree_lock);
-                               ret = link_free_space(block_group, e);
-                               spin_unlock(&block_group->tree_lock);
+                               spin_lock(&ctl->tree_lock);
+                               ret = link_free_space(ctl, e);
+                               spin_unlock(&ctl->tree_lock);
                                BUG_ON(ret);
                        } else {
                                e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
@@ -431,11 +413,11 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
                                        page_cache_release(page);
                                        goto free_cache;
                                }
-                               spin_lock(&block_group->tree_lock);
-                               ret = link_free_space(block_group, e);
-                               block_group->total_bitmaps++;
-                               recalculate_thresholds(block_group);
-                               spin_unlock(&block_group->tree_lock);
+                               spin_lock(&ctl->tree_lock);
+                               ret2 = link_free_space(ctl, e);
+                               ctl->total_bitmaps++;
+                               ctl->op->recalc_thresholds(ctl);
+                               spin_unlock(&ctl->tree_lock);
                                list_add_tail(&e->list, &bitmaps);
                        }
 
@@ -471,41 +453,97 @@ next:
                index++;
        }
 
-       spin_lock(&block_group->tree_lock);
-       if (block_group->free_space != (block_group->key.offset - used -
-                                       block_group->bytes_super)) {
-               spin_unlock(&block_group->tree_lock);
-               printk(KERN_ERR "block group %llu has an wrong amount of free "
-                      "space\n", block_group->key.objectid);
-               ret = 0;
-               goto free_cache;
-       }
-       spin_unlock(&block_group->tree_lock);
-
        ret = 1;
 out:
        kfree(checksums);
        kfree(disk_crcs);
-       iput(inode);
        return ret;
-
 free_cache:
-       /* This cache is bogus, make sure it gets cleared */
+       __btrfs_remove_free_space_cache(ctl);
+       goto out;
+}
+
+int load_free_space_cache(struct btrfs_fs_info *fs_info,
+                         struct btrfs_block_group_cache *block_group)
+{
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+       struct btrfs_root *root = fs_info->tree_root;
+       struct inode *inode;
+       struct btrfs_path *path;
+       int ret;
+       bool matched;
+       u64 used = btrfs_block_group_used(&block_group->item);
+
+       /*
+        * If we're unmounting then just return, since this does a search on the
+        * normal root and not the commit root and we could deadlock.
+        */
+       smp_mb();
+       if (fs_info->closing)
+               return 0;
+
+       /*
+        * If this block group has been marked to be cleared for one reason or
+        * another then we can't trust the on disk cache, so just return.
+        */
        spin_lock(&block_group->lock);
-       block_group->disk_cache_state = BTRFS_DC_CLEAR;
+       if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
+               spin_unlock(&block_group->lock);
+               return 0;
+       }
        spin_unlock(&block_group->lock);
-       btrfs_remove_free_space_cache(block_group);
-       goto out;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return 0;
+
+       inode = lookup_free_space_inode(root, block_group, path);
+       if (IS_ERR(inode)) {
+               btrfs_free_path(path);
+               return 0;
+       }
+
+       ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
+                                     path, block_group->key.objectid);
+       btrfs_free_path(path);
+       if (ret <= 0)
+               goto out;
+
+       spin_lock(&ctl->tree_lock);
+       matched = (ctl->free_space == (block_group->key.offset - used -
+                                      block_group->bytes_super));
+       spin_unlock(&ctl->tree_lock);
+
+       if (!matched) {
+               __btrfs_remove_free_space_cache(ctl);
+               printk(KERN_ERR "block group %llu has a wrong amount of free "
+                      "space\n", block_group->key.objectid);
+               ret = -1;
+       }
+out:
+       if (ret < 0) {
+               /* This cache is bogus, make sure it gets cleared */
+               spin_lock(&block_group->lock);
+               block_group->disk_cache_state = BTRFS_DC_CLEAR;
+               spin_unlock(&block_group->lock);
+               ret = 0;
+
+               printk(KERN_ERR "btrfs: failed to load free space cache "
+                      "for block group %llu\n", block_group->key.objectid);
+       }
+
+       iput(inode);
+       return ret;
 }
 
-int btrfs_write_out_cache(struct btrfs_root *root,
-                         struct btrfs_trans_handle *trans,
-                         struct btrfs_block_group_cache *block_group,
-                         struct btrfs_path *path)
+int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
+                           struct btrfs_free_space_ctl *ctl,
+                           struct btrfs_block_group_cache *block_group,
+                           struct btrfs_trans_handle *trans,
+                           struct btrfs_path *path, u64 offset)
 {
        struct btrfs_free_space_header *header;
        struct extent_buffer *leaf;
-       struct inode *inode;
        struct rb_node *node;
        struct list_head *pos, *n;
        struct page **pages;
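
Note on the hunk above: cache loading is split in two. __load_free_space_cache() only parses the cache inode into a btrfs_free_space_ctl and returns a negative value on error, 0 when there is nothing to load, and 1 on success, while load_free_space_cache() keeps the block-group checks and now verifies that the loaded free space equals key.offset - used - bytes_super before accepting the cache. A self-contained sketch of that consistency check with invented numbers:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    int main(void)
    {
        uint64_t group_size  = 1ULL << 30;      /* 1 GiB block group */
        uint64_t used        = 600ULL << 20;    /* 600 MiB allocated */
        uint64_t bytes_super = 0;               /* no super stripes here */
        uint64_t cached_free = 424ULL << 20;    /* what the cache file claimed */

        bool matched = cached_free == group_size - used - bytes_super;
        printf(matched ? "cache accepted\n" : "cache rejected, rebuilding\n");
        return 0;
    }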
@@ -522,35 +560,18 @@ int btrfs_write_out_cache(struct btrfs_root *root,
        int index = 0, num_pages = 0;
        int entries = 0;
        int bitmaps = 0;
-       int ret = 0;
+       int ret = -1;
        bool next_page = false;
        bool out_of_space = false;
 
-       root = root->fs_info->tree_root;
-
        INIT_LIST_HEAD(&bitmap_list);
 
-       spin_lock(&block_group->lock);
-       if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
-               spin_unlock(&block_group->lock);
-               return 0;
-       }
-       spin_unlock(&block_group->lock);
-
-       inode = lookup_free_space_inode(root, block_group, path);
-       if (IS_ERR(inode))
-               return 0;
-
-       if (!i_size_read(inode)) {
-               iput(inode);
+       node = rb_first(&ctl->free_space_offset);
+       if (!node)
                return 0;
-       }
 
-       node = rb_first(&block_group->free_space_offset);
-       if (!node) {
-               iput(inode);
-               return 0;
-       }
+       if (!i_size_read(inode))
+               return -1;
 
        num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
                PAGE_CACHE_SHIFT;
@@ -560,16 +581,13 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 
        /* We need a checksum per page. */
        crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
-       if (!crc) {
-               iput(inode);
-               return 0;
-       }
+       if (!crc)
+               return -1;
 
        pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
        if (!pages) {
                kfree(crc);
-               iput(inode);
-               return 0;
+               return -1;
        }
 
        /* Since the first page has all of our checksums and our generation we
@@ -579,7 +597,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
        first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
 
        /* Get the cluster for this block_group if it exists */
-       if (!list_empty(&block_group->cluster_list))
+       if (block_group && !list_empty(&block_group->cluster_list))
                cluster = list_entry(block_group->cluster_list.next,
                                     struct btrfs_free_cluster,
                                     block_group_list);
@@ -621,7 +639,8 @@ int btrfs_write_out_cache(struct btrfs_root *root,
         * When searching for pinned extents, we need to start at our start
         * offset.
         */
-       start = block_group->key.objectid;
+       if (block_group)
+               start = block_group->key.objectid;
 
        /* Write out the extent entries */
        do {
@@ -679,8 +698,9 @@ int btrfs_write_out_cache(struct btrfs_root *root,
                 * We want to add any pinned extents to our free space cache
                 * so we don't leak the space
                 */
-               while (!next_page && (start < block_group->key.objectid +
-                                     block_group->key.offset)) {
+               while (block_group && !next_page &&
+                      (start < block_group->key.objectid +
+                       block_group->key.offset)) {
                        ret = find_first_extent_bit(unpin, start, &start, &end,
                                                    EXTENT_DIRTY);
                        if (ret) {
@@ -798,12 +818,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
        filemap_write_and_wait(inode->i_mapping);
 
        key.objectid = BTRFS_FREE_SPACE_OBJECTID;
-       key.offset = block_group->key.objectid;
+       key.offset = offset;
        key.type = 0;
 
        ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
        if (ret < 0) {
-               ret = 0;
+               ret = -1;
                clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
                                 EXTENT_DIRTY | EXTENT_DELALLOC |
                                 EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS);
@@ -816,8 +836,8 @@ int btrfs_write_out_cache(struct btrfs_root *root,
                path->slots[0]--;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
-                   found_key.offset != block_group->key.objectid) {
-                       ret = 0;
+                   found_key.offset != offset) {
+                       ret = -1;
                        clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1,
                                         EXTENT_DIRTY | EXTENT_DELALLOC |
                                         EXTENT_DO_ACCOUNTING, 0, 0, NULL,
@@ -837,44 +857,78 @@ int btrfs_write_out_cache(struct btrfs_root *root,
        ret = 1;
 
 out_free:
-       if (ret == 0) {
+       if (ret != 1) {
                invalidate_inode_pages2_range(inode->i_mapping, 0, index);
-               spin_lock(&block_group->lock);
-               block_group->disk_cache_state = BTRFS_DC_ERROR;
-               spin_unlock(&block_group->lock);
                BTRFS_I(inode)->generation = 0;
        }
        kfree(checksums);
        kfree(pages);
        btrfs_update_inode(trans, root, inode);
+       return ret;
+}
+
+int btrfs_write_out_cache(struct btrfs_root *root,
+                         struct btrfs_trans_handle *trans,
+                         struct btrfs_block_group_cache *block_group,
+                         struct btrfs_path *path)
+{
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+       struct inode *inode;
+       int ret = 0;
+
+       root = root->fs_info->tree_root;
+
+       spin_lock(&block_group->lock);
+       if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
+               spin_unlock(&block_group->lock);
+               return 0;
+       }
+       spin_unlock(&block_group->lock);
+
+       inode = lookup_free_space_inode(root, block_group, path);
+       if (IS_ERR(inode))
+               return 0;
+
+       ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
+                                     path, block_group->key.objectid);
+       if (ret < 0) {
+               spin_lock(&block_group->lock);
+               block_group->disk_cache_state = BTRFS_DC_ERROR;
+               spin_unlock(&block_group->lock);
+               ret = 0;
+
+               printk(KERN_ERR "btrfs: failed to write free space cache "
+                      "for block group %llu\n", block_group->key.objectid);
+       }
+
        iput(inode);
        return ret;
 }
 
-static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize,
+static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
                                          u64 offset)
 {
        BUG_ON(offset < bitmap_start);
        offset -= bitmap_start;
-       return (unsigned long)(div64_u64(offset, sectorsize));
+       return (unsigned long)(div_u64(offset, unit));
 }
 
-static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize)
+static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
 {
-       return (unsigned long)(div64_u64(bytes, sectorsize));
+       return (unsigned long)(div_u64(bytes, unit));
 }
 
-static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group,
+static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
                                   u64 offset)
 {
        u64 bitmap_start;
        u64 bytes_per_bitmap;
 
-       bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize;
-       bitmap_start = offset - block_group->key.objectid;
+       bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
+       bitmap_start = offset - ctl->start;
        bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
        bitmap_start *= bytes_per_bitmap;
-       bitmap_start += block_group->key.objectid;
+       bitmap_start += ctl->start;
 
        return bitmap_start;
 }
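
Note on the hunks above: the bitmap helpers now work on the generic ctl, using ctl->unit in place of the block group sector size, ctl->start in place of key.objectid, and div_u64() since the unit fits in 32 bits. A worked, self-contained example of the rounding offset_to_bitmap() performs; the 4 KiB unit and page-sized bitmap (32768 bits) are assumptions matching BITS_PER_BITMAP above:

    #include <stdio.h>
    #include <stdint.h>

    #define BITS_PER_BITMAP (4096 * 8)          /* one page of bits, assumed */

    /* round offset down to the start of its bitmap window, measured from start */
    static uint64_t offset_to_bitmap_sketch(uint64_t start, uint32_t unit,
                                            uint64_t offset)
    {
        uint64_t bytes_per_bitmap = (uint64_t)BITS_PER_BITMAP * unit;
        uint64_t window = (offset - start) / bytes_per_bitmap;

        return window * bytes_per_bitmap + start;
    }

    int main(void)
    {
        /* offset 300 MiB into a region starting at 256 MiB, 4 KiB unit:
         * each bitmap covers 128 MiB, so the window starts at 256 MiB */
        uint64_t r = offset_to_bitmap_sketch(256ULL << 20, 4096, 300ULL << 20);
        printf("bitmap window starts at %llu MiB\n",
               (unsigned long long)(r >> 20));
        return 0;
    }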
@@ -932,10 +986,10 @@ static int tree_insert_offset(struct rb_root *root, u64 offset,
  * offset.
  */
 static struct btrfs_free_space *
-tree_search_offset(struct btrfs_block_group_cache *block_group,
+tree_search_offset(struct btrfs_free_space_ctl *ctl,
                   u64 offset, int bitmap_only, int fuzzy)
 {
-       struct rb_node *n = block_group->free_space_offset.rb_node;
+       struct rb_node *n = ctl->free_space_offset.rb_node;
        struct btrfs_free_space *entry, *prev = NULL;
 
        /* find entry that is closest to the 'offset' */
@@ -1031,8 +1085,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
                                break;
                        }
                }
-               if (entry->offset + BITS_PER_BITMAP *
-                   block_group->sectorsize > offset)
+               if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
                        return entry;
        } else if (entry->offset + entry->bytes > offset)
                return entry;
@@ -1043,7 +1096,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
        while (1) {
                if (entry->bitmap) {
                        if (entry->offset + BITS_PER_BITMAP *
-                           block_group->sectorsize > offset)
+                           ctl->unit > offset)
                                break;
                } else {
                        if (entry->offset + entry->bytes > offset)
@@ -1059,42 +1112,47 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
 }
 
 static inline void
-__unlink_free_space(struct btrfs_block_group_cache *block_group,
+__unlink_free_space(struct btrfs_free_space_ctl *ctl,
                    struct btrfs_free_space *info)
 {
-       rb_erase(&info->offset_index, &block_group->free_space_offset);
-       block_group->free_extents--;
+       rb_erase(&info->offset_index, &ctl->free_space_offset);
+       ctl->free_extents--;
 }
 
-static void unlink_free_space(struct btrfs_block_group_cache *block_group,
+static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *info)
 {
-       __unlink_free_space(block_group, info);
-       block_group->free_space -= info->bytes;
+       __unlink_free_space(ctl, info);
+       ctl->free_space -= info->bytes;
 }
 
-static int link_free_space(struct btrfs_block_group_cache *block_group,
+static int link_free_space(struct btrfs_free_space_ctl *ctl,
                           struct btrfs_free_space *info)
 {
        int ret = 0;
 
        BUG_ON(!info->bitmap && !info->bytes);
-       ret = tree_insert_offset(&block_group->free_space_offset, info->offset,
+       ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
                                 &info->offset_index, (info->bitmap != NULL));
        if (ret)
                return ret;
 
-       block_group->free_space += info->bytes;
-       block_group->free_extents++;
+       ctl->free_space += info->bytes;
+       ctl->free_extents++;
        return ret;
 }
 
-static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
+static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
 {
+       struct btrfs_block_group_cache *block_group = ctl->private;
        u64 max_bytes;
        u64 bitmap_bytes;
        u64 extent_bytes;
        u64 size = block_group->key.offset;
+       u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
+       int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
+
+       BUG_ON(ctl->total_bitmaps > max_bitmaps);
 
        /*
         * The goal is to keep the total amount of memory used per 1gb of space
@@ -1112,10 +1170,10 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
         * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
         * we add more bitmaps.
         */
-       bitmap_bytes = (block_group->total_bitmaps + 1) * PAGE_CACHE_SIZE;
+       bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;
 
        if (bitmap_bytes >= max_bytes) {
-               block_group->extents_thresh = 0;
+               ctl->extents_thresh = 0;
                return;
        }
 
@@ -1126,47 +1184,43 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
        extent_bytes = max_bytes - bitmap_bytes;
        extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));
 
-       block_group->extents_thresh =
+       ctl->extents_thresh =
                div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
 }
 
-static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group,
+static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *info, u64 offset,
                              u64 bytes)
 {
-       unsigned long start, end;
-       unsigned long i;
+       unsigned long start, count;
 
-       start = offset_to_bit(info->offset, block_group->sectorsize, offset);
-       end = start + bytes_to_bits(bytes, block_group->sectorsize);
-       BUG_ON(end > BITS_PER_BITMAP);
+       start = offset_to_bit(info->offset, ctl->unit, offset);
+       count = bytes_to_bits(bytes, ctl->unit);
+       BUG_ON(start + count > BITS_PER_BITMAP);
 
-       for (i = start; i < end; i++)
-               clear_bit(i, info->bitmap);
+       bitmap_clear(info->bitmap, start, count);
 
        info->bytes -= bytes;
-       block_group->free_space -= bytes;
+       ctl->free_space -= bytes;
 }
 
-static void bitmap_set_bits(struct btrfs_block_group_cache *block_group,
+static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
                            struct btrfs_free_space *info, u64 offset,
                            u64 bytes)
 {
-       unsigned long start, end;
-       unsigned long i;
+       unsigned long start, count;
 
-       start = offset_to_bit(info->offset, block_group->sectorsize, offset);
-       end = start + bytes_to_bits(bytes, block_group->sectorsize);
-       BUG_ON(end > BITS_PER_BITMAP);
+       start = offset_to_bit(info->offset, ctl->unit, offset);
+       count = bytes_to_bits(bytes, ctl->unit);
+       BUG_ON(start + count > BITS_PER_BITMAP);
 
-       for (i = start; i < end; i++)
-               set_bit(i, info->bitmap);
+       bitmap_set(info->bitmap, start, count);
 
        info->bytes += bytes;
-       block_group->free_space += bytes;
+       ctl->free_space += bytes;
 }
 
-static int search_bitmap(struct btrfs_block_group_cache *block_group,
+static int search_bitmap(struct btrfs_free_space_ctl *ctl,
                         struct btrfs_free_space *bitmap_info, u64 *offset,
                         u64 *bytes)
 {
@@ -1174,9 +1228,9 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group,
        unsigned long bits, i;
        unsigned long next_zero;
 
-       i = offset_to_bit(bitmap_info->offset, block_group->sectorsize,
+       i = offset_to_bit(bitmap_info->offset, ctl->unit,
                          max_t(u64, *offset, bitmap_info->offset));
-       bits = bytes_to_bits(*bytes, block_group->sectorsize);
+       bits = bytes_to_bits(*bytes, ctl->unit);
 
        for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
             i < BITS_PER_BITMAP;
@@ -1191,29 +1245,25 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group,
        }
 
        if (found_bits) {
-               *offset = (u64)(i * block_group->sectorsize) +
-                       bitmap_info->offset;
-               *bytes = (u64)(found_bits) * block_group->sectorsize;
+               *offset = (u64)(i * ctl->unit) + bitmap_info->offset;
+               *bytes = (u64)(found_bits) * ctl->unit;
                return 0;
        }
 
        return -1;
 }
 
-static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
-                                               *block_group, u64 *offset,
-                                               u64 *bytes, int debug)
+static struct btrfs_free_space *
+find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
 {
        struct btrfs_free_space *entry;
        struct rb_node *node;
        int ret;
 
-       if (!block_group->free_space_offset.rb_node)
+       if (!ctl->free_space_offset.rb_node)
                return NULL;
 
-       entry = tree_search_offset(block_group,
-                                  offset_to_bitmap(block_group, *offset),
-                                  0, 1);
+       entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
        if (!entry)
                return NULL;
 
@@ -1223,7 +1273,7 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
                        continue;
 
                if (entry->bitmap) {
-                       ret = search_bitmap(block_group, entry, offset, bytes);
+                       ret = search_bitmap(ctl, entry, offset, bytes);
                        if (!ret)
                                return entry;
                        continue;
@@ -1237,33 +1287,28 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache
        return NULL;
 }
 
-static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
+static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
                           struct btrfs_free_space *info, u64 offset)
 {
-       u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
-       int max_bitmaps = (int)div64_u64(block_group->key.offset +
-                                        bytes_per_bg - 1, bytes_per_bg);
-       BUG_ON(block_group->total_bitmaps >= max_bitmaps);
-
-       info->offset = offset_to_bitmap(block_group, offset);
+       info->offset = offset_to_bitmap(ctl, offset);
        info->bytes = 0;
-       link_free_space(block_group, info);
-       block_group->total_bitmaps++;
+       link_free_space(ctl, info);
+       ctl->total_bitmaps++;
 
-       recalculate_thresholds(block_group);
+       ctl->op->recalc_thresholds(ctl);
 }
 
-static void free_bitmap(struct btrfs_block_group_cache *block_group,
+static void free_bitmap(struct btrfs_free_space_ctl *ctl,
                        struct btrfs_free_space *bitmap_info)
 {
-       unlink_free_space(block_group, bitmap_info);
+       unlink_free_space(ctl, bitmap_info);
        kfree(bitmap_info->bitmap);
        kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
-       block_group->total_bitmaps--;
-       recalculate_thresholds(block_group);
+       ctl->total_bitmaps--;
+       ctl->op->recalc_thresholds(ctl);
 }
 
-static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
+static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
                              struct btrfs_free_space *bitmap_info,
                              u64 *offset, u64 *bytes)
 {
@@ -1272,8 +1317,7 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro
        int ret;
 
 again:
-       end = bitmap_info->offset +
-               (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1;
+       end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;
 
        /*
         * XXX - this can go away after a few releases.
@@ -1288,24 +1332,22 @@ again:
        search_start = *offset;
        search_bytes = *bytes;
        search_bytes = min(search_bytes, end - search_start + 1);
-       ret = search_bitmap(block_group, bitmap_info, &search_start,
-                           &search_bytes);
+       ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
        BUG_ON(ret < 0 || search_start != *offset);
 
        if (*offset > bitmap_info->offset && *offset + *bytes > end) {
-               bitmap_clear_bits(block_group, bitmap_info, *offset,
-                                 end - *offset + 1);
+               bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
                *bytes -= end - *offset + 1;
                *offset = end + 1;
        } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
-               bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes);
+               bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
                *bytes = 0;
        }
 
        if (*bytes) {
                struct rb_node *next = rb_next(&bitmap_info->offset_index);
                if (!bitmap_info->bytes)
-                       free_bitmap(block_group, bitmap_info);
+                       free_bitmap(ctl, bitmap_info);
 
                /*
                 * no entry after this bitmap, but we still have bytes to
@@ -1332,31 +1374,28 @@ again:
                 */
                search_start = *offset;
                search_bytes = *bytes;
-               ret = search_bitmap(block_group, bitmap_info, &search_start,
+               ret = search_bitmap(ctl, bitmap_info, &search_start,
                                    &search_bytes);
                if (ret < 0 || search_start != *offset)
                        return -EAGAIN;
 
                goto again;
        } else if (!bitmap_info->bytes)
-               free_bitmap(block_group, bitmap_info);
+               free_bitmap(ctl, bitmap_info);
 
        return 0;
 }
 
-static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
-                             struct btrfs_free_space *info)
+static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
+                     struct btrfs_free_space *info)
 {
-       struct btrfs_free_space *bitmap_info;
-       int added = 0;
-       u64 bytes, offset, end;
-       int ret;
+       struct btrfs_block_group_cache *block_group = ctl->private;
 
        /*
         * If we are below the extents threshold then we can add this as an
         * extent, and don't have to deal with the bitmap
         */
-       if (block_group->free_extents < block_group->extents_thresh) {
+       if (ctl->free_extents < ctl->extents_thresh) {
                /*
                 * If this block group has some small extents we don't want to
                 * use up all of our free slots in the cache with them, we want
@@ -1365,11 +1404,10 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
                 * the overhead of a bitmap if we don't have to.
                 */
                if (info->bytes <= block_group->sectorsize * 4) {
-                       if (block_group->free_extents * 2 <=
-                           block_group->extents_thresh)
-                               return 0;
+                       if (ctl->free_extents * 2 <= ctl->extents_thresh)
+                               return false;
                } else {
-                       return 0;
+                       return false;
                }
        }
 
@@ -1379,31 +1417,42 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group,
         */
        if (BITS_PER_BITMAP * block_group->sectorsize >
            block_group->key.offset)
-               return 0;
+               return false;
+
+       return true;
+}
+
+static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
+                             struct btrfs_free_space *info)
+{
+       struct btrfs_free_space *bitmap_info;
+       int added = 0;
+       u64 bytes, offset, end;
+       int ret;
 
        bytes = info->bytes;
        offset = info->offset;
 
+       if (!ctl->op->use_bitmap(ctl, info))
+               return 0;
+
 again:
-       bitmap_info = tree_search_offset(block_group,
-                                        offset_to_bitmap(block_group, offset),
+       bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
                                         1, 0);
        if (!bitmap_info) {
                BUG_ON(added);
                goto new_bitmap;
        }
 
-       end = bitmap_info->offset +
-               (u64)(BITS_PER_BITMAP * block_group->sectorsize);
+       end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
 
        if (offset >= bitmap_info->offset && offset + bytes > end) {
-               bitmap_set_bits(block_group, bitmap_info, offset,
-                               end - offset);
+               bitmap_set_bits(ctl, bitmap_info, offset, end - offset);
                bytes -= end - offset;
                offset = end;
                added = 0;
        } else if (offset >= bitmap_info->offset && offset + bytes <= end) {
-               bitmap_set_bits(block_group, bitmap_info, offset, bytes);
+               bitmap_set_bits(ctl, bitmap_info, offset, bytes);
                bytes = 0;
        } else {
                BUG();
@@ -1417,19 +1466,19 @@ again:
 
 new_bitmap:
        if (info && info->bitmap) {
-               add_new_bitmap(block_group, info, offset);
+               add_new_bitmap(ctl, info, offset);
                added = 1;
                info = NULL;
                goto again;
        } else {
-               spin_unlock(&block_group->tree_lock);
+               spin_unlock(&ctl->tree_lock);
 
                /* no pre-allocated info, allocate a new one */
                if (!info) {
                        info = kmem_cache_zalloc(btrfs_free_space_cachep,
                                                 GFP_NOFS);
                        if (!info) {
-                               spin_lock(&block_group->tree_lock);
+                               spin_lock(&ctl->tree_lock);
                                ret = -ENOMEM;
                                goto out;
                        }
@@ -1437,7 +1486,7 @@ new_bitmap:
 
                /* allocate the bitmap */
                info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
-               spin_lock(&block_group->tree_lock);
+               spin_lock(&ctl->tree_lock);
                if (!info->bitmap) {
                        ret = -ENOMEM;
                        goto out;
@@ -1455,8 +1504,8 @@ out:
        return ret;
 }
 
-static bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
-                                struct btrfs_free_space *info, bool update_stat)
+static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
+                         struct btrfs_free_space *info, bool update_stat)
 {
        struct btrfs_free_space *left_info;
        struct btrfs_free_space *right_info;
@@ -1469,18 +1518,18 @@ static bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
         * are adding, if there is remove that struct and add a new one to
         * cover the entire range
         */
-       right_info = tree_search_offset(block_group, offset + bytes, 0, 0);
+       right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
        if (right_info && rb_prev(&right_info->offset_index))
                left_info = rb_entry(rb_prev(&right_info->offset_index),
                                     struct btrfs_free_space, offset_index);
        else
-               left_info = tree_search_offset(block_group, offset - 1, 0, 0);
+               left_info = tree_search_offset(ctl, offset - 1, 0, 0);
 
        if (right_info && !right_info->bitmap) {
                if (update_stat)
-                       unlink_free_space(block_group, right_info);
+                       unlink_free_space(ctl, right_info);
                else
-                       __unlink_free_space(block_group, right_info);
+                       __unlink_free_space(ctl, right_info);
                info->bytes += right_info->bytes;
                kmem_cache_free(btrfs_free_space_cachep, right_info);
                merged = true;
@@ -1489,9 +1538,9 @@ static bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
        if (left_info && !left_info->bitmap &&
            left_info->offset + left_info->bytes == offset) {
                if (update_stat)
-                       unlink_free_space(block_group, left_info);
+                       unlink_free_space(ctl, left_info);
                else
-                       __unlink_free_space(block_group, left_info);
+                       __unlink_free_space(ctl, left_info);
                info->offset = left_info->offset;
                info->bytes += left_info->bytes;
                kmem_cache_free(btrfs_free_space_cachep, left_info);
@@ -1501,8 +1550,8 @@ static bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
        return merged;
 }
 
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-                        u64 offset, u64 bytes)
+int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
+                          u64 offset, u64 bytes)
 {
        struct btrfs_free_space *info;
        int ret = 0;
@@ -1514,9 +1563,9 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
        info->offset = offset;
        info->bytes = bytes;
 
-       spin_lock(&block_group->tree_lock);
+       spin_lock(&ctl->tree_lock);
 
-       if (try_merge_free_space(block_group, info, true))
+       if (try_merge_free_space(ctl, info, true))
                goto link;
 
        /*
@@ -1524,7 +1573,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
         * extent then we know we're going to have to allocate a new extent, so
         * before we do that see if we need to drop this into a bitmap
         */
-       ret = insert_into_bitmap(block_group, info);
+       ret = insert_into_bitmap(ctl, info);
        if (ret < 0) {
                goto out;
        } else if (ret) {
@@ -1532,11 +1581,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
                goto out;
        }
 link:
-       ret = link_free_space(block_group, info);
+       ret = link_free_space(ctl, info);
        if (ret)
                kmem_cache_free(btrfs_free_space_cachep, info);
 out:
-       spin_unlock(&block_group->tree_lock);
+       spin_unlock(&ctl->tree_lock);
 
        if (ret) {
                printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
@@ -1549,21 +1598,21 @@ out:
 int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                            u64 offset, u64 bytes)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *info;
        struct btrfs_free_space *next_info = NULL;
        int ret = 0;
 
-       spin_lock(&block_group->tree_lock);
+       spin_lock(&ctl->tree_lock);
 
 again:
-       info = tree_search_offset(block_group, offset, 0, 0);
+       info = tree_search_offset(ctl, offset, 0, 0);
        if (!info) {
                /*
                 * oops didn't find an extent that matched the space we wanted
                 * to remove, look for a bitmap instead
                 */
-               info = tree_search_offset(block_group,
-                                         offset_to_bitmap(block_group, offset),
+               info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
                                          1, 0);
                if (!info) {
                        WARN_ON(1);
@@ -1578,8 +1627,8 @@ again:
                                             offset_index);
 
                if (next_info->bitmap)
-                       end = next_info->offset + BITS_PER_BITMAP *
-                               block_group->sectorsize - 1;
+                       end = next_info->offset +
+                             BITS_PER_BITMAP * ctl->unit - 1;
                else
                        end = next_info->offset + next_info->bytes;
 
@@ -1599,20 +1648,20 @@ again:
        }
 
        if (info->bytes == bytes) {
-               unlink_free_space(block_group, info);
+               unlink_free_space(ctl, info);
                if (info->bitmap) {
                        kfree(info->bitmap);
-                       block_group->total_bitmaps--;
+                       ctl->total_bitmaps--;
                }
                kmem_cache_free(btrfs_free_space_cachep, info);
                goto out_lock;
        }
 
        if (!info->bitmap && info->offset == offset) {
-               unlink_free_space(block_group, info);
+               unlink_free_space(ctl, info);
                info->offset += bytes;
                info->bytes -= bytes;
-               link_free_space(block_group, info);
+               link_free_space(ctl, info);
                goto out_lock;
        }
 
@@ -1626,13 +1675,13 @@ again:
                 * first unlink the old info and then
                 * insert it again after the hole we're creating
                 */
-               unlink_free_space(block_group, info);
+               unlink_free_space(ctl, info);
                if (offset + bytes < info->offset + info->bytes) {
                        u64 old_end = info->offset + info->bytes;
 
                        info->offset = offset + bytes;
                        info->bytes = old_end - info->offset;
-                       ret = link_free_space(block_group, info);
+                       ret = link_free_space(ctl, info);
                        WARN_ON(ret);
                        if (ret)
                                goto out_lock;
@@ -1642,7 +1691,7 @@ again:
                         */
                        kmem_cache_free(btrfs_free_space_cachep, info);
                }
-               spin_unlock(&block_group->tree_lock);
+               spin_unlock(&ctl->tree_lock);
 
                /* step two, insert a new info struct to cover
                 * anything before the hole
@@ -1653,12 +1702,12 @@ again:
                goto out;
        }
 
-       ret = remove_from_bitmap(block_group, info, &offset, &bytes);
+       ret = remove_from_bitmap(ctl, info, &offset, &bytes);
        if (ret == -EAGAIN)
                goto again;
        BUG_ON(ret);
 out_lock:
-       spin_unlock(&block_group->tree_lock);
+       spin_unlock(&ctl->tree_lock);
 out:
        return ret;
 }
@@ -1666,11 +1715,12 @@ out:
 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
                           u64 bytes)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *info;
        struct rb_node *n;
        int count = 0;
 
-       for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) {
+       for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
                info = rb_entry(n, struct btrfs_free_space, offset_index);
                if (info->bytes >= bytes)
                        count++;
@@ -1685,6 +1735,30 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
               "\n", count);
 }
 
+static struct btrfs_free_space_op free_space_op = {
+       .recalc_thresholds      = recalculate_thresholds,
+       .use_bitmap             = use_bitmap,
+};
+
+void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
+{
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+
+       spin_lock_init(&ctl->tree_lock);
+       ctl->unit = block_group->sectorsize;
+       ctl->start = block_group->key.objectid;
+       ctl->private = block_group;
+       ctl->op = &free_space_op;
+
+       /*
+        * we only want to have 32k of ram per block group for keeping
+        * track of free space, and if we pass 1/2 of that we want to
+        * start converting things over to using bitmaps
+        */
+       ctl->extents_thresh = ((1024 * 32) / 2) /
+                               sizeof(struct btrfs_free_space);
+}
+
 /*
  * for a given cluster, put all of its extents back into the free
  * space cache.  If the block group passed doesn't match the block group
@@ -1696,6 +1770,7 @@ __btrfs_return_cluster_to_free_space(
                             struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry;
        struct rb_node *node;
 
@@ -1717,8 +1792,8 @@ __btrfs_return_cluster_to_free_space(
 
                bitmap = (entry->bitmap != NULL);
                if (!bitmap)
-                       try_merge_free_space(block_group, entry, false);
-               tree_insert_offset(&block_group->free_space_offset,
+                       try_merge_free_space(ctl, entry, false);
+               tree_insert_offset(&ctl->free_space_offset,
                                   entry->offset, &entry->offset_index, bitmap);
        }
        cluster->root = RB_ROOT;
@@ -1729,14 +1804,38 @@ out:
        return 0;
 }
 
-void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
+void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
 {
        struct btrfs_free_space *info;
        struct rb_node *node;
+
+       while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
+               info = rb_entry(node, struct btrfs_free_space, offset_index);
+               unlink_free_space(ctl, info);
+               kfree(info->bitmap);
+               kmem_cache_free(btrfs_free_space_cachep, info);
+               if (need_resched()) {
+                       spin_unlock(&ctl->tree_lock);
+                       cond_resched();
+                       spin_lock(&ctl->tree_lock);
+               }
+       }
+}
+
+void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
+{
+       spin_lock(&ctl->tree_lock);
+       __btrfs_remove_free_space_cache_locked(ctl);
+       spin_unlock(&ctl->tree_lock);
+}
+
+void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
+{
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_cluster *cluster;
        struct list_head *head;
 
-       spin_lock(&block_group->tree_lock);
+       spin_lock(&ctl->tree_lock);
        while ((head = block_group->cluster_list.next) !=
               &block_group->cluster_list) {
                cluster = list_entry(head, struct btrfs_free_cluster,
@@ -1745,60 +1844,46 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
                WARN_ON(cluster->block_group != block_group);
                __btrfs_return_cluster_to_free_space(block_group, cluster);
                if (need_resched()) {
-                       spin_unlock(&block_group->tree_lock);
+                       spin_unlock(&ctl->tree_lock);
                        cond_resched();
-                       spin_lock(&block_group->tree_lock);
+                       spin_lock(&ctl->tree_lock);
                }
        }
+       __btrfs_remove_free_space_cache_locked(ctl);
+       spin_unlock(&ctl->tree_lock);
 
-       while ((node = rb_last(&block_group->free_space_offset)) != NULL) {
-               info = rb_entry(node, struct btrfs_free_space, offset_index);
-               if (!info->bitmap) {
-                       unlink_free_space(block_group, info);
-                       kmem_cache_free(btrfs_free_space_cachep, info);
-               } else {
-                       free_bitmap(block_group, info);
-               }
-
-               if (need_resched()) {
-                       spin_unlock(&block_group->tree_lock);
-                       cond_resched();
-                       spin_lock(&block_group->tree_lock);
-               }
-       }
-
-       spin_unlock(&block_group->tree_lock);
 }
 
 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
                               u64 offset, u64 bytes, u64 empty_size)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry = NULL;
        u64 bytes_search = bytes + empty_size;
        u64 ret = 0;
 
-       spin_lock(&block_group->tree_lock);
-       entry = find_free_space(block_group, &offset, &bytes_search, 0);
+       spin_lock(&ctl->tree_lock);
+       entry = find_free_space(ctl, &offset, &bytes_search);
        if (!entry)
                goto out;
 
        ret = offset;
        if (entry->bitmap) {
-               bitmap_clear_bits(block_group, entry, offset, bytes);
+               bitmap_clear_bits(ctl, entry, offset, bytes);
                if (!entry->bytes)
-                       free_bitmap(block_group, entry);
+                       free_bitmap(ctl, entry);
        } else {
-               unlink_free_space(block_group, entry);
+               unlink_free_space(ctl, entry);
                entry->offset += bytes;
                entry->bytes -= bytes;
                if (!entry->bytes)
                        kmem_cache_free(btrfs_free_space_cachep, entry);
                else
-                       link_free_space(block_group, entry);
+                       link_free_space(ctl, entry);
        }
 
 out:
-       spin_unlock(&block_group->tree_lock);
+       spin_unlock(&ctl->tree_lock);
 
        return ret;
 }
@@ -1815,6 +1900,7 @@ int btrfs_return_cluster_to_free_space(
                               struct btrfs_block_group_cache *block_group,
                               struct btrfs_free_cluster *cluster)
 {
+       struct btrfs_free_space_ctl *ctl;
        int ret;
 
        /* first, get a safe pointer to the block group */
@@ -1833,10 +1919,12 @@ int btrfs_return_cluster_to_free_space(
        atomic_inc(&block_group->count);
        spin_unlock(&cluster->lock);
 
+       ctl = block_group->free_space_ctl;
+
        /* now return any extents the cluster had on it */
-       spin_lock(&block_group->tree_lock);
+       spin_lock(&ctl->tree_lock);
        ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
-       spin_unlock(&block_group->tree_lock);
+       spin_unlock(&ctl->tree_lock);
 
        /* finally drop our ref */
        btrfs_put_block_group(block_group);
@@ -1848,6 +1936,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
                                   struct btrfs_free_space *entry,
                                   u64 bytes, u64 min_start)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        int err;
        u64 search_start = cluster->window_start;
        u64 search_bytes = bytes;
@@ -1856,13 +1945,12 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
        search_start = min_start;
        search_bytes = bytes;
 
-       err = search_bitmap(block_group, entry, &search_start,
-                           &search_bytes);
+       err = search_bitmap(ctl, entry, &search_start, &search_bytes);
        if (err)
                return 0;
 
        ret = search_start;
-       bitmap_clear_bits(block_group, entry, ret, bytes);
+       bitmap_clear_bits(ctl, entry, ret, bytes);
 
        return ret;
 }
@@ -1876,6 +1964,7 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                             struct btrfs_free_cluster *cluster, u64 bytes,
                             u64 min_start)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry = NULL;
        struct rb_node *node;
        u64 ret = 0;
@@ -1933,20 +2022,20 @@ out:
        if (!ret)
                return 0;
 
-       spin_lock(&block_group->tree_lock);
+       spin_lock(&ctl->tree_lock);
 
-       block_group->free_space -= bytes;
+       ctl->free_space -= bytes;
        if (entry->bytes == 0) {
-               block_group->free_extents--;
+               ctl->free_extents--;
                if (entry->bitmap) {
                        kfree(entry->bitmap);
-                       block_group->total_bitmaps--;
-                       recalculate_thresholds(block_group);
+                       ctl->total_bitmaps--;
+                       ctl->op->recalc_thresholds(ctl);
                }
                kmem_cache_free(btrfs_free_space_cachep, entry);
        }
 
-       spin_unlock(&block_group->tree_lock);
+       spin_unlock(&ctl->tree_lock);
 
        return ret;
 }
@@ -1956,6 +2045,7 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
                                struct btrfs_free_cluster *cluster,
                                u64 offset, u64 bytes, u64 min_bytes)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        unsigned long next_zero;
        unsigned long i;
        unsigned long search_bits;
@@ -2010,7 +2100,7 @@ again:
 
        cluster->window_start = start * block_group->sectorsize +
                entry->offset;
-       rb_erase(&entry->offset_index, &block_group->free_space_offset);
+       rb_erase(&entry->offset_index, &ctl->free_space_offset);
        ret = tree_insert_offset(&cluster->root, entry->offset,
                                 &entry->offset_index, 1);
        BUG_ON(ret);
@@ -2025,6 +2115,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
                                   struct btrfs_free_cluster *cluster,
                                   u64 offset, u64 bytes, u64 min_bytes)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *first = NULL;
        struct btrfs_free_space *entry = NULL;
        struct btrfs_free_space *prev = NULL;
@@ -2035,7 +2126,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
        u64 max_extent;
        u64 max_gap = 128 * 1024;
 
-       entry = tree_search_offset(block_group, offset, 0, 1);
+       entry = tree_search_offset(ctl, offset, 0, 1);
        if (!entry)
                return -ENOSPC;
 
@@ -2101,7 +2192,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
                if (entry->bitmap)
                        continue;
 
-               rb_erase(&entry->offset_index, &block_group->free_space_offset);
+               rb_erase(&entry->offset_index, &ctl->free_space_offset);
                ret = tree_insert_offset(&cluster->root, entry->offset,
                                         &entry->offset_index, 0);
                BUG_ON(ret);
@@ -2120,16 +2211,15 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
                                struct btrfs_free_cluster *cluster,
                                u64 offset, u64 bytes, u64 min_bytes)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry;
        struct rb_node *node;
        int ret = -ENOSPC;
 
-       if (block_group->total_bitmaps == 0)
+       if (ctl->total_bitmaps == 0)
                return -ENOSPC;
 
-       entry = tree_search_offset(block_group,
-                                  offset_to_bitmap(block_group, offset),
-                                  0, 1);
+       entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
        if (!entry)
                return -ENOSPC;
 
@@ -2162,6 +2252,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
                             struct btrfs_free_cluster *cluster,
                             u64 offset, u64 bytes, u64 empty_size)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        u64 min_bytes;
        int ret;
 
@@ -2181,14 +2272,14 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
        } else
                min_bytes = max(bytes, (bytes + empty_size) >> 2);
 
-       spin_lock(&block_group->tree_lock);
+       spin_lock(&ctl->tree_lock);
 
        /*
         * If we know we don't have enough space to make a cluster don't even
         * bother doing all the work to try and find one.
         */
-       if (block_group->free_space < min_bytes) {
-               spin_unlock(&block_group->tree_lock);
+       if (ctl->free_space < min_bytes) {
+               spin_unlock(&ctl->tree_lock);
                return -ENOSPC;
        }
 
@@ -2214,7 +2305,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
        }
 out:
        spin_unlock(&cluster->lock);
-       spin_unlock(&block_group->tree_lock);
+       spin_unlock(&ctl->tree_lock);
 
        return ret;
 }
@@ -2235,6 +2326,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
 int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
                           u64 *trimmed, u64 start, u64 end, u64 minlen)
 {
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry = NULL;
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        u64 bytes = 0;
@@ -2244,52 +2336,50 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
        *trimmed = 0;
 
        while (start < end) {
-               spin_lock(&block_group->tree_lock);
+               spin_lock(&ctl->tree_lock);
 
-               if (block_group->free_space < minlen) {
-                       spin_unlock(&block_group->tree_lock);
+               if (ctl->free_space < minlen) {
+                       spin_unlock(&ctl->tree_lock);
                        break;
                }
 
-               entry = tree_search_offset(block_group, start, 0, 1);
+               entry = tree_search_offset(ctl, start, 0, 1);
                if (!entry)
-                       entry = tree_search_offset(block_group,
-                                                  offset_to_bitmap(block_group,
-                                                                   start),
+                       entry = tree_search_offset(ctl,
+                                                  offset_to_bitmap(ctl, start),
                                                   1, 1);
 
                if (!entry || entry->offset >= end) {
-                       spin_unlock(&block_group->tree_lock);
+                       spin_unlock(&ctl->tree_lock);
                        break;
                }
 
                if (entry->bitmap) {
-                       ret = search_bitmap(block_group, entry, &start, &bytes);
+                       ret = search_bitmap(ctl, entry, &start, &bytes);
                        if (!ret) {
                                if (start >= end) {
-                                       spin_unlock(&block_group->tree_lock);
+                                       spin_unlock(&ctl->tree_lock);
                                        break;
                                }
                                bytes = min(bytes, end - start);
-                               bitmap_clear_bits(block_group, entry,
-                                                 start, bytes);
+                               bitmap_clear_bits(ctl, entry, start, bytes);
                                if (entry->bytes == 0)
-                                       free_bitmap(block_group, entry);
+                                       free_bitmap(ctl, entry);
                        } else {
                                start = entry->offset + BITS_PER_BITMAP *
                                        block_group->sectorsize;
-                               spin_unlock(&block_group->tree_lock);
+                               spin_unlock(&ctl->tree_lock);
                                ret = 0;
                                continue;
                        }
                } else {
                        start = entry->offset;
                        bytes = min(entry->bytes, end - start);
-                       unlink_free_space(block_group, entry);
+                       unlink_free_space(ctl, entry);
                        kmem_cache_free(btrfs_free_space_cachep, entry);
                }
 
-               spin_unlock(&block_group->tree_lock);
+               spin_unlock(&ctl->tree_lock);
 
                if (bytes >= minlen) {
                        int update_ret;
@@ -2301,8 +2391,7 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
                                                         bytes,
                                                         &actually_trimmed);
 
-                       btrfs_add_free_space(block_group,
-                                            start, bytes);
+                       btrfs_add_free_space(block_group, start, bytes);
                        if (!update_ret)
                                btrfs_update_reserved_bytes(block_group,
                                                            bytes, 0, 1);
@@ -2324,3 +2413,145 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
 
        return ret;
 }
+
+/*
+ * Find the left-most item in the cache tree, and then return the
+ * smallest inode number in the item.
+ *
+ * Note: the returned inode number may not be the smallest one in
+ * the tree, if the left-most item is a bitmap.
+ */
+u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
+{
+       struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
+       struct btrfs_free_space *entry = NULL;
+       u64 ino = 0;
+
+       spin_lock(&ctl->tree_lock);
+
+       if (RB_EMPTY_ROOT(&ctl->free_space_offset))
+               goto out;
+
+       entry = rb_entry(rb_first(&ctl->free_space_offset),
+                        struct btrfs_free_space, offset_index);
+
+       if (!entry->bitmap) {
+               ino = entry->offset;
+
+               unlink_free_space(ctl, entry);
+               entry->offset++;
+               entry->bytes--;
+               if (!entry->bytes)
+                       kmem_cache_free(btrfs_free_space_cachep, entry);
+               else
+                       link_free_space(ctl, entry);
+       } else {
+               u64 offset = 0;
+               u64 count = 1;
+               int ret;
+
+               ret = search_bitmap(ctl, entry, &offset, &count);
+               BUG_ON(ret);
+
+               ino = offset;
+               bitmap_clear_bits(ctl, entry, offset, 1);
+               if (entry->bytes == 0)
+                       free_bitmap(ctl, entry);
+       }
+out:
+       spin_unlock(&ctl->tree_lock);
+
+       return ino;
+}
+
+struct inode *lookup_free_ino_inode(struct btrfs_root *root,
+                                   struct btrfs_path *path)
+{
+       struct inode *inode = NULL;
+
+       spin_lock(&root->cache_lock);
+       if (root->cache_inode)
+               inode = igrab(root->cache_inode);
+       spin_unlock(&root->cache_lock);
+       if (inode)
+               return inode;
+
+       inode = __lookup_free_space_inode(root, path, 0);
+       if (IS_ERR(inode))
+               return inode;
+
+       spin_lock(&root->cache_lock);
+       if (!root->fs_info->closing)
+               root->cache_inode = igrab(inode);
+       spin_unlock(&root->cache_lock);
+
+       return inode;
+}
+
+int create_free_ino_inode(struct btrfs_root *root,
+                         struct btrfs_trans_handle *trans,
+                         struct btrfs_path *path)
+{
+       return __create_free_space_inode(root, trans, path,
+                                        BTRFS_FREE_INO_OBJECTID, 0);
+}
+
+int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
+{
+       struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+       struct btrfs_path *path;
+       struct inode *inode;
+       int ret = 0;
+       u64 root_gen = btrfs_root_generation(&root->root_item);
+
+       /*
+        * If we're unmounting then just return, since this does a search on the
+        * normal root and not the commit root and we could deadlock.
+        */
+       smp_mb();
+       if (fs_info->closing)
+               return 0;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return 0;
+
+       inode = lookup_free_ino_inode(root, path);
+       if (IS_ERR(inode))
+               goto out;
+
+       if (root_gen != BTRFS_I(inode)->generation)
+               goto out_put;
+
+       ret = __load_free_space_cache(root, inode, ctl, path, 0);
+
+       if (ret < 0)
+               printk(KERN_ERR "btrfs: failed to load free ino cache for "
+                      "root %llu\n", root->root_key.objectid);
+out_put:
+       iput(inode);
+out:
+       btrfs_free_path(path);
+       return ret;
+}
+
+int btrfs_write_out_ino_cache(struct btrfs_root *root,
+                             struct btrfs_trans_handle *trans,
+                             struct btrfs_path *path)
+{
+       struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+       struct inode *inode;
+       int ret;
+
+       inode = lookup_free_ino_inode(root, path);
+       if (IS_ERR(inode))
+               return 0;
+
+       ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
+       if (ret < 0)
+               printk(KERN_ERR "btrfs: failed to write free ino cache "
+                      "for root %llu\n", root->root_key.objectid);
+
+       iput(inode);
+       return ret;
+}
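
With these changes, the policy decisions (when to fall back to a bitmap, how to recompute the extent threshold) are no longer hard-wired to the block group: insert_into_bitmap(), add_new_bitmap() and free_bitmap() dispatch through ctl->op. The sketch below shows that indirection in miniature; the callback names mirror the patch, but the two policies are simplified stand-ins, not the real heuristics, and the code is an illustration rather than part of this commit.

    #include <stdbool.h>
    #include <stdio.h>

    struct ctl;

    /* mirrors struct btrfs_free_space_op from the header */
    struct free_space_op {
            void (*recalc_thresholds)(struct ctl *ctl);
            bool (*use_bitmap)(struct ctl *ctl, unsigned long long bytes);
    };

    struct ctl {
            int free_extents;
            int extents_thresh;
            const struct free_space_op *op;
    };

    /* block-group-like policy: switch to bitmaps once the extent cache is full */
    static bool bg_use_bitmap(struct ctl *ctl, unsigned long long bytes)
    {
            (void)bytes;                    /* the real heuristic also looks at the size */
            return ctl->free_extents >= ctl->extents_thresh;
    }

    static void bg_recalc(struct ctl *ctl)
    {
            ctl->extents_thresh = 128;      /* placeholder for the real calculation */
    }

    /* pinned-ino-like policy: never use bitmaps, keep everything as extents */
    static bool pinned_use_bitmap(struct ctl *ctl, unsigned long long bytes)
    {
            (void)ctl;
            (void)bytes;
            return false;
    }

    static void pinned_recalc(struct ctl *ctl)
    {
            (void)ctl;
    }

    static const struct free_space_op bg_op = { bg_recalc, bg_use_bitmap };
    static const struct free_space_op pinned_op = { pinned_recalc, pinned_use_bitmap };

    /* the generic code only ever dispatches through ctl->op, as
     * insert_into_bitmap() and add_new_bitmap() do after this patch */
    static void add_free_space(struct ctl *ctl, unsigned long long bytes)
    {
            if (ctl->op->use_bitmap(ctl, bytes))
                    printf("%llu bytes -> bitmap\n", bytes);
            else
                    printf("%llu bytes -> extent entry\n", bytes);
            ctl->free_extents++;
            ctl->op->recalc_thresholds(ctl);
    }

    int main(void)
    {
            struct ctl bg = { .free_extents = 200, .extents_thresh = 100, .op = &bg_op };
            struct ctl pinned = { .op = &pinned_op };

            add_free_space(&bg, 4096);      /* over threshold: goes to a bitmap */
            add_free_space(&pinned, 1);     /* pinned ino tree: always an extent */
            return 0;
    }
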
index 12b2b5165f8ad530f6b35abe6dc38ddd291aa634..8f2613f779edc6bfb2dfcac1e6884f2d7ad156b7 100644 (file)
@@ -27,6 +27,25 @@ struct btrfs_free_space {
        struct list_head list;
 };
 
+struct btrfs_free_space_ctl {
+       spinlock_t tree_lock;
+       struct rb_root free_space_offset;
+       u64 free_space;
+       int extents_thresh;
+       int free_extents;
+       int total_bitmaps;
+       int unit;
+       u64 start;
+       struct btrfs_free_space_op *op;
+       void *private;
+};
+
+struct btrfs_free_space_op {
+       void (*recalc_thresholds)(struct btrfs_free_space_ctl *ctl);
+       bool (*use_bitmap)(struct btrfs_free_space_ctl *ctl,
+                          struct btrfs_free_space *info);
+};
+
 struct inode *lookup_free_space_inode(struct btrfs_root *root,
                                      struct btrfs_block_group_cache
                                      *block_group, struct btrfs_path *path);
@@ -45,14 +64,36 @@ int btrfs_write_out_cache(struct btrfs_root *root,
                          struct btrfs_trans_handle *trans,
                          struct btrfs_block_group_cache *block_group,
                          struct btrfs_path *path);
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-                        u64 bytenr, u64 size);
+
+struct inode *lookup_free_ino_inode(struct btrfs_root *root,
+                                   struct btrfs_path *path);
+int create_free_ino_inode(struct btrfs_root *root,
+                         struct btrfs_trans_handle *trans,
+                         struct btrfs_path *path);
+int load_free_ino_cache(struct btrfs_fs_info *fs_info,
+                       struct btrfs_root *root);
+int btrfs_write_out_ino_cache(struct btrfs_root *root,
+                             struct btrfs_trans_handle *trans,
+                             struct btrfs_path *path);
+
+void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group);
+int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
+                          u64 bytenr, u64 size);
+static inline int
+btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+                    u64 bytenr, u64 size)
+{
+       return __btrfs_add_free_space(block_group->free_space_ctl,
+                                     bytenr, size);
+}
 int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
                            u64 bytenr, u64 size);
+void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl);
 void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
-                                  *block_group);
+                                    *block_group);
 u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
                               u64 offset, u64 bytes, u64 empty_size);
+u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
 void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
                           u64 bytes);
 int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
index c05a08f4c4111fdfa4157e9447005523f256fb67..000970512624030e68dde82de63f68ae9cb8c36c 100644 (file)
  * Boston, MA 021110-1307, USA.
  */
 
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/pagemap.h>
+
 #include "ctree.h"
 #include "disk-io.h"
+#include "free-space-cache.h"
+#include "inode-map.h"
 #include "transaction.h"
 
-int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid)
+static int caching_kthread(void *data)
+{
+       struct btrfs_root *root = data;
+       struct btrfs_fs_info *fs_info = root->fs_info;
+       struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+       struct btrfs_key key;
+       struct btrfs_path *path;
+       struct extent_buffer *leaf;
+       u64 last = (u64)-1;
+       int slot;
+       int ret;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       /* Since the commit root is read-only, we can safely skip locking. */
+       path->skip_locking = 1;
+       path->search_commit_root = 1;
+       path->reada = 2;
+
+       key.objectid = BTRFS_FIRST_FREE_OBJECTID;
+       key.offset = 0;
+       key.type = BTRFS_INODE_ITEM_KEY;
+again:
+       /* need to make sure the commit_root doesn't disappear */
+       mutex_lock(&root->fs_commit_mutex);
+
+       ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+       if (ret < 0)
+               goto out;
+
+       while (1) {
+               smp_mb();
+               if (fs_info->closing > 1)
+                       goto out;
+
+               leaf = path->nodes[0];
+               slot = path->slots[0];
+               if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+                       ret = btrfs_next_leaf(root, path);
+                       if (ret < 0)
+                               goto out;
+                       else if (ret > 0)
+                               break;
+
+                       if (need_resched() ||
+                           btrfs_transaction_in_commit(fs_info)) {
+                               leaf = path->nodes[0];
+
+                               if (btrfs_header_nritems(leaf) == 0) {
+                                       WARN_ON(1);
+                                       break;
+                               }
+
+                               /*
+                                * Save the key so we can advance forward
+                                * in the next search.
+                                */
+                               btrfs_item_key_to_cpu(leaf, &key, 0);
+                               btrfs_release_path(path);
+                               root->cache_progress = last;
+                               mutex_unlock(&root->fs_commit_mutex);
+                               schedule_timeout(1);
+                               goto again;
+                       } else
+                               continue;
+               }
+
+               btrfs_item_key_to_cpu(leaf, &key, slot);
+
+               if (key.type != BTRFS_INODE_ITEM_KEY)
+                       goto next;
+
+               if (key.objectid >= BTRFS_LAST_FREE_OBJECTID)
+                       break;
+
+               if (last != (u64)-1 && last + 1 != key.objectid) {
+                       __btrfs_add_free_space(ctl, last + 1,
+                                              key.objectid - last - 1);
+                       wake_up(&root->cache_wait);
+               }
+
+               last = key.objectid;
+next:
+               path->slots[0]++;
+       }
+
+       if (last < BTRFS_LAST_FREE_OBJECTID - 1) {
+               __btrfs_add_free_space(ctl, last + 1,
+                                      BTRFS_LAST_FREE_OBJECTID - last - 1);
+       }
+
+       spin_lock(&root->cache_lock);
+       root->cached = BTRFS_CACHE_FINISHED;
+       spin_unlock(&root->cache_lock);
+
+       root->cache_progress = (u64)-1;
+       btrfs_unpin_free_ino(root);
+out:
+       wake_up(&root->cache_wait);
+       mutex_unlock(&root->fs_commit_mutex);
+
+       btrfs_free_path(path);
+
+       return ret;
+}
+
+static void start_caching(struct btrfs_root *root)
+{
+       struct task_struct *tsk;
+       int ret;
+
+       spin_lock(&root->cache_lock);
+       if (root->cached != BTRFS_CACHE_NO) {
+               spin_unlock(&root->cache_lock);
+               return;
+       }
+
+       root->cached = BTRFS_CACHE_STARTED;
+       spin_unlock(&root->cache_lock);
+
+       ret = load_free_ino_cache(root->fs_info, root);
+       if (ret == 1) {
+               spin_lock(&root->cache_lock);
+               root->cached = BTRFS_CACHE_FINISHED;
+               spin_unlock(&root->cache_lock);
+               return;
+       }
+
+       tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n",
+                         root->root_key.objectid);
+       BUG_ON(IS_ERR(tsk));
+}
+
+int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
+{
+again:
+       *objectid = btrfs_find_ino_for_alloc(root);
+
+       if (*objectid != 0)
+               return 0;
+
+       start_caching(root);
+
+       wait_event(root->cache_wait,
+                  root->cached == BTRFS_CACHE_FINISHED ||
+                  root->free_ino_ctl->free_space > 0);
+
+       if (root->cached == BTRFS_CACHE_FINISHED &&
+           root->free_ino_ctl->free_space == 0)
+               return -ENOSPC;
+       else
+               goto again;
+}
+
+void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
+{
+       struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+       struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
+again:
+       if (root->cached == BTRFS_CACHE_FINISHED) {
+               __btrfs_add_free_space(ctl, objectid, 1);
+       } else {
+               /*
+                * If we are in the process of caching free ino chunks, we
+                * leave the number in the pinned tree until a transaction is
+                * committed or the caching work is done, to avoid adding the
+                * same inode number to the free_ino tree twice across
+                * transactions.
+                */
+
+               mutex_lock(&root->fs_commit_mutex);
+               spin_lock(&root->cache_lock);
+               if (root->cached == BTRFS_CACHE_FINISHED) {
+                       spin_unlock(&root->cache_lock);
+                       mutex_unlock(&root->fs_commit_mutex);
+                       goto again;
+               }
+               spin_unlock(&root->cache_lock);
+
+               start_caching(root);
+
+               if (objectid <= root->cache_progress)
+                       __btrfs_add_free_space(ctl, objectid, 1);
+               else
+                       __btrfs_add_free_space(pinned, objectid, 1);
+
+               mutex_unlock(&root->fs_commit_mutex);
+       }
+}
+
+/*
+ * When a transaction is committed, we'll move those inode numbers which
+ * are smaller than root->cache_progress from the pinned tree to the free_ino
+ * tree, and the others will simply be dropped, because the commit root we
+ * were searching has changed.
+ *
+ * Must be called with root->fs_commit_mutex held.
+ */
+void btrfs_unpin_free_ino(struct btrfs_root *root)
+{
+       struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+       struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset;
+       struct btrfs_free_space *info;
+       struct rb_node *n;
+       u64 count;
+
+       while (1) {
+               n = rb_first(rbroot);
+               if (!n)
+                       break;
+
+               info = rb_entry(n, struct btrfs_free_space, offset_index);
+               BUG_ON(info->bitmap);
+
+               if (info->offset > root->cache_progress)
+                       goto free;
+               else if (info->offset + info->bytes > root->cache_progress)
+                       count = root->cache_progress - info->offset + 1;
+               else
+                       count = info->bytes;
+
+               __btrfs_add_free_space(ctl, info->offset, count);
+free:
+               rb_erase(&info->offset_index, rbroot);
+               kfree(info);
+       }
+}
+
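+/*
+ * INIT_THRESHOLD is roughly how many extent entries fit in 16K of
+ * memory; INODES_PER_BITMAP is how many inode numbers a single
+ * page-sized bitmap can track (one bit per number).
+ */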
+#define INIT_THRESHOLD (((1024 * 32) / 2) / sizeof(struct btrfs_free_space))
+#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8)
+
+/*
+ * The goal is to keep the memory used by the free_ino tree from
+ * exceeding what we would use if we tracked everything with bitmaps.
+ */
+static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
+{
+       struct btrfs_free_space *info;
+       struct rb_node *n;
+       int max_ino;
+       int max_bitmaps;
+
+       n = rb_last(&ctl->free_space_offset);
+       if (!n) {
+               ctl->extents_thresh = INIT_THRESHOLD;
+               return;
+       }
+       info = rb_entry(n, struct btrfs_free_space, offset_index);
+
+       /*
+        * Find the maximum inode number in the filesystem. Note we
+        * ignore the fact that this can be a bitmap, because we are
+        * not doing a precise calculation.
+        */
+       max_ino = info->bytes - 1;
+
+       max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
+       if (max_bitmaps <= ctl->total_bitmaps) {
+               ctl->extents_thresh = 0;
+               return;
+       }
+
+       ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) *
+                               PAGE_CACHE_SIZE / sizeof(*info);
+}
+
+/*
+ * We don't fall back to a bitmap if we are below the extents threshold
+ * or if this chunk of inode numbers is a big one.
+ */
+static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
+                      struct btrfs_free_space *info)
+{
+       if (ctl->free_extents < ctl->extents_thresh ||
+           info->bytes > INODES_PER_BITMAP / 10)
+               return false;
+
+       return true;
+}
+
+static struct btrfs_free_space_op free_ino_op = {
+       .recalc_thresholds      = recalculate_thresholds,
+       .use_bitmap             = use_bitmap,
+};
+
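+/*
+ * The pinned tree never converts extents to bitmaps (see
+ * pinned_use_bitmap() below), so there is no threshold to recalculate.
+ */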
+static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl)
+{
+}
+
+static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl,
+                             struct btrfs_free_space *info)
+{
+       /*
+        * We always use extents for two reasons:
+        *
+        * - The pinned tree is only used while the caching work is in
+        *   progress.
+        * - It keeps the code simpler. See btrfs_unpin_free_ino().
+        */
+       return false;
+}
+
+static struct btrfs_free_space_op pinned_free_ino_op = {
+       .recalc_thresholds      = pinned_recalc_thresholds,
+       .use_bitmap             = pinned_use_bitmap,
+};
+
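+/*
+ * Set up the two free-space controls used for inode number allocation:
+ * free_ino_ctl caches numbers that are ready to be handed out, while
+ * free_ino_pinned holds numbers returned while the caching work is
+ * still in progress.
+ */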
+void btrfs_init_free_ino_ctl(struct btrfs_root *root)
+{
+       struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+       struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
+
+       spin_lock_init(&ctl->tree_lock);
+       ctl->unit = 1;
+       ctl->start = 0;
+       ctl->private = NULL;
+       ctl->op = &free_ino_op;
+
+       /*
+        * Initially we allow 16K of RAM to be used to cache chunks of
+        * inode numbers before we resort to bitmaps. This is somewhat
+        * arbitrary, but it will be adjusted at runtime.
+        */
+       ctl->extents_thresh = INIT_THRESHOLD;
+
+       spin_lock_init(&pinned->tree_lock);
+       pinned->unit = 1;
+       pinned->start = 0;
+       pinned->private = NULL;
+       pinned->extents_thresh = 0;
+       pinned->op = &pinned_free_ino_op;
+}
+
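+/*
+ * Write the in-memory free ino cache to its cache inode at transaction
+ * commit time: look up or create the cache inode, truncate any stale
+ * contents, preallocate room for the current extents and bitmaps, and
+ * finally write the cache out.
+ */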
+int btrfs_save_ino_cache(struct btrfs_root *root,
+                        struct btrfs_trans_handle *trans)
+{
+       struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
+       struct btrfs_path *path;
+       struct inode *inode;
+       u64 alloc_hint = 0;
+       int ret;
+       int prealloc;
+       bool retry = false;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+again:
+       inode = lookup_free_ino_inode(root, path);
+       if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
+               ret = PTR_ERR(inode);
+               goto out;
+       }
+
+       if (IS_ERR(inode)) {
+               BUG_ON(retry);
+               retry = true;
+
+               ret = create_free_ino_inode(root, trans, path);
+               if (ret)
+                       goto out;
+               goto again;
+       }
+
+       BTRFS_I(inode)->generation = 0;
+       ret = btrfs_update_inode(trans, root, inode);
+       WARN_ON(ret);
+
+       if (i_size_read(inode) > 0) {
+               ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
+               if (ret)
+                       goto out_put;
+       }
+
+       spin_lock(&root->cache_lock);
+       if (root->cached != BTRFS_CACHE_FINISHED) {
+               ret = -1;
+               spin_unlock(&root->cache_lock);
+               goto out_put;
+       }
+       spin_unlock(&root->cache_lock);
+
+       spin_lock(&ctl->tree_lock);
+       prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents;
+       prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE);
+       prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE;
+       spin_unlock(&ctl->tree_lock);
+
+       /* Just to make sure we have enough space */
+       prealloc += 8 * PAGE_CACHE_SIZE;
+
+       ret = btrfs_check_data_free_space(inode, prealloc);
+       if (ret)
+               goto out_put;
+
+       ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
+                                             prealloc, prealloc, &alloc_hint);
+       if (ret)
+               goto out_put;
+       btrfs_free_reserved_data_space(inode, prealloc);
+
+out_put:
+       iput(inode);
+out:
+       if (ret == 0)
+               ret = btrfs_write_out_ino_cache(root, trans, path);
+
+       btrfs_free_path(path);
+       return ret;
+}
+
+static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
 {
        struct btrfs_path *path;
        int ret;
@@ -55,15 +474,14 @@ error:
        return ret;
 }
 
-int btrfs_find_free_objectid(struct btrfs_trans_handle *trans,
-                            struct btrfs_root *root,
-                            u64 dirid, u64 *objectid)
+int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
 {
        int ret;
        mutex_lock(&root->objectid_mutex);
 
        if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
-               ret = btrfs_find_highest_inode(root, &root->highest_objectid);
+               ret = btrfs_find_highest_objectid(root,
+                                                 &root->highest_objectid);
                if (ret)
                        goto out;
        }
diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h
new file mode 100644 (file)
index 0000000..ddb347b
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef __BTRFS_INODE_MAP
+#define __BTRFS_INODE_MAP
+
+void btrfs_init_free_ino_ctl(struct btrfs_root *root);
+void btrfs_unpin_free_ino(struct btrfs_root *root);
+void btrfs_return_ino(struct btrfs_root *root, u64 objectid);
+int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid);
+int btrfs_save_ino_cache(struct btrfs_root *root,
+                        struct btrfs_trans_handle *trans);
+
+int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid);
+
+#endif
index 1d1017f915586e0b60923ba289783a02dd919e11..8ae72c3eedb16d1bf26bd008151eb57a567eb723 100644 (file)
@@ -52,6 +52,7 @@
 #include "compression.h"
 #include "locking.h"
 #include "free-space-cache.h"
+#include "inode-map.h"
 
 struct btrfs_iget_args {
        u64 ino;
@@ -139,7 +140,7 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
        path->leave_spinning = 1;
        btrfs_set_trans_block_group(trans, inode);
 
-       key.objectid = inode->i_ino;
+       key.objectid = btrfs_ino(inode);
        key.offset = start;
        btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
        datasize = btrfs_file_extent_calc_inline_size(cur_size);
@@ -746,6 +747,15 @@ static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
        return alloc_hint;
 }
 
+static inline bool is_free_space_inode(struct btrfs_root *root,
+                                      struct inode *inode)
+{
+       if (root == root->fs_info->tree_root ||
+           BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID)
+               return true;
+       return false;
+}
+
 /*
  * when extent_io.c finds a delayed allocation range in the file,
  * the call backs end up in this code.  The basic idea is to
@@ -778,7 +788,7 @@ static noinline int cow_file_range(struct inode *inode,
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        int ret = 0;
 
-       BUG_ON(root == root->fs_info->tree_root);
+       BUG_ON(is_free_space_inode(root, inode));
        trans = btrfs_join_transaction(root, 1);
        BUG_ON(IS_ERR(trans));
        btrfs_set_trans_block_group(trans, inode);
@@ -1050,29 +1060,31 @@ static noinline int run_delalloc_nocow(struct inode *inode,
        int type;
        int nocow;
        int check_prev = 1;
-       bool nolock = false;
+       bool nolock;
+       u64 ino = btrfs_ino(inode);
 
        path = btrfs_alloc_path();
        BUG_ON(!path);
-       if (root == root->fs_info->tree_root) {
-               nolock = true;
+
+       nolock = is_free_space_inode(root, inode);
+
+       if (nolock)
                trans = btrfs_join_transaction_nolock(root, 1);
-       } else {
+       else
                trans = btrfs_join_transaction(root, 1);
-       }
        BUG_ON(IS_ERR(trans));
 
        cow_start = (u64)-1;
        cur_offset = start;
        while (1) {
-               ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+               ret = btrfs_lookup_file_extent(trans, root, path, ino,
                                               cur_offset, 0);
                BUG_ON(ret < 0);
                if (ret > 0 && path->slots[0] > 0 && check_prev) {
                        leaf = path->nodes[0];
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0] - 1);
-                       if (found_key.objectid == inode->i_ino &&
+                       if (found_key.objectid == ino &&
                            found_key.type == BTRFS_EXTENT_DATA_KEY)
                                path->slots[0]--;
                }
@@ -1093,7 +1105,7 @@ next_slot:
                num_bytes = 0;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
 
-               if (found_key.objectid > inode->i_ino ||
+               if (found_key.objectid > ino ||
                    found_key.type > BTRFS_EXTENT_DATA_KEY ||
                    found_key.offset > end)
                        break;
@@ -1128,7 +1140,7 @@ next_slot:
                                goto out_check;
                        if (btrfs_extent_readonly(root, disk_bytenr))
                                goto out_check;
-                       if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
+                       if (btrfs_cross_ref_exist(trans, root, ino,
                                                  found_key.offset -
                                                  extent_offset, disk_bytenr))
                                goto out_check;
@@ -1317,8 +1329,7 @@ static int btrfs_set_bit_hook(struct inode *inode,
        if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
                struct btrfs_root *root = BTRFS_I(inode)->root;
                u64 len = state->end + 1 - state->start;
-               int do_list = (root->root_key.objectid !=
-                              BTRFS_ROOT_TREE_OBJECTID);
+               bool do_list = !is_free_space_inode(root, inode);
 
                if (*bits & EXTENT_FIRST_DELALLOC)
                        *bits &= ~EXTENT_FIRST_DELALLOC;
@@ -1351,8 +1362,7 @@ static int btrfs_clear_bit_hook(struct inode *inode,
        if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
                struct btrfs_root *root = BTRFS_I(inode)->root;
                u64 len = state->end + 1 - state->start;
-               int do_list = (root->root_key.objectid !=
-                              BTRFS_ROOT_TREE_OBJECTID);
+               bool do_list = !is_free_space_inode(root, inode);
 
                if (*bits & EXTENT_FIRST_DELALLOC)
                        *bits &= ~EXTENT_FIRST_DELALLOC;
@@ -1459,7 +1469,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 
        skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
-       if (root == root->fs_info->tree_root)
+       if (is_free_space_inode(root, inode))
                ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2);
        else
                ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
@@ -1645,7 +1655,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
                                 &hint, 0);
        BUG_ON(ret);
 
-       ins.objectid = inode->i_ino;
+       ins.objectid = btrfs_ino(inode);
        ins.offset = file_pos;
        ins.type = BTRFS_EXTENT_DATA_KEY;
        ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
@@ -1676,7 +1686,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
        ins.type = BTRFS_EXTENT_ITEM_KEY;
        ret = btrfs_alloc_reserved_file_extent(trans, root,
                                        root->root_key.objectid,
-                                       inode->i_ino, file_pos, &ins);
+                                       btrfs_ino(inode), file_pos, &ins);
        BUG_ON(ret);
        btrfs_free_path(path);
 
@@ -1702,7 +1712,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
        struct extent_state *cached_state = NULL;
        int compress_type = 0;
        int ret;
-       bool nolock = false;
+       bool nolock;
 
        ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
                                             end - start + 1);
@@ -1710,7 +1720,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
                return 0;
        BUG_ON(!ordered_extent);
 
-       nolock = (root == root->fs_info->tree_root);
+       nolock = is_free_space_inode(root, inode);
 
        if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
                BUG_ON(!list_empty(&ordered_extent->list));
@@ -2005,8 +2015,9 @@ good:
        return 0;
 
 zeroit:
-       printk_ratelimited(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
-                      "private %llu\n", page->mapping->host->i_ino,
+       printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u "
+                      "private %llu\n",
+                      (unsigned long long)btrfs_ino(page->mapping->host),
                       (unsigned long long)start, csum,
                       (unsigned long long)private);
        memset(kaddr + offset, 1, end - start + 1);
@@ -2243,7 +2254,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
 
        /* insert an orphan item to track this unlinked/truncated file */
        if (insert >= 1) {
-               ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
+               ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
                BUG_ON(ret);
        }
 
@@ -2280,7 +2291,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
        spin_unlock(&root->orphan_lock);
 
        if (trans && delete_item) {
-               ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
+               ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode));
                BUG_ON(ret);
        }
 
@@ -2542,7 +2553,8 @@ static void btrfs_read_locked_inode(struct inode *inode)
         * try to precache a NULL acl entry for files that don't have
         * any xattrs or acls
         */
-       maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
+       maybe_acls = acls_after_inode_item(leaf, path->slots[0],
+                                          btrfs_ino(inode));
        if (!maybe_acls)
                cache_no_acl(inode);
 
@@ -2646,11 +2658,26 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
        struct extent_buffer *leaf;
        int ret;
 
+       /*
+        * If this is a free space inode, it is used to store free space
+        * information. These inodes are updated while the transaction is
+        * being committed, so their updates must not go through the
+        * delayed path, or a deadlock will occur.
+        */
+       if (!is_free_space_inode(root, inode)) {
+               ret = btrfs_delayed_update_inode(trans, root, inode);
+               if (!ret)
+                       btrfs_set_inode_last_trans(trans, inode);
+               return ret;
+       }
+
        path = btrfs_alloc_path();
-       BUG_ON(!path);
+       if (!path)
+               return -ENOMEM;
+
        path->leave_spinning = 1;
-       ret = btrfs_lookup_inode(trans, root, path,
-                                &BTRFS_I(inode)->location, 1);
+       ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
+                                1);
        if (ret) {
                if (ret > 0)
                        ret = -ENOENT;
@@ -2660,7 +2687,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
        btrfs_unlock_up_safe(path, 1);
        leaf = path->nodes[0];
        inode_item = btrfs_item_ptr(leaf, path->slots[0],
-                                 struct btrfs_inode_item);
+                                   struct btrfs_inode_item);
 
        fill_inode_item(trans, leaf, inode_item, inode);
        btrfs_mark_buffer_dirty(leaf);
@@ -2671,7 +2698,6 @@ failed:
        return ret;
 }
 
-
 /*
  * unlink helper that gets used here in inode.c and in the tree logging
  * recovery code.  It remove a link in a directory with a given name, and
@@ -2688,6 +2714,8 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
        struct btrfs_dir_item *di;
        struct btrfs_key key;
        u64 index;
+       u64 ino = btrfs_ino(inode);
+       u64 dir_ino = btrfs_ino(dir);
 
        path = btrfs_alloc_path();
        if (!path) {
@@ -2696,7 +2724,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
        }
 
        path->leave_spinning = 1;
-       di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
+       di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
                                    name, name_len, -1);
        if (IS_ERR(di)) {
                ret = PTR_ERR(di);
@@ -2713,31 +2741,21 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
                goto err;
        btrfs_release_path(path);
 
-       ret = btrfs_del_inode_ref(trans, root, name, name_len,
-                                 inode->i_ino,
-                                 dir->i_ino, &index);
+       ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
+                                 dir_ino, &index);
        if (ret) {
                printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
-                      "inode %lu parent %lu\n", name_len, name,
-                      inode->i_ino, dir->i_ino);
+                      "inode %llu parent %llu\n", name_len, name,
+                      (unsigned long long)ino, (unsigned long long)dir_ino);
                goto err;
        }
 
-       di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
-                                        index, name, name_len, -1);
-       if (IS_ERR(di)) {
-               ret = PTR_ERR(di);
-               goto err;
-       }
-       if (!di) {
-               ret = -ENOENT;
+       ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
+       if (ret)
                goto err;
-       }
-       ret = btrfs_delete_one_dir_name(trans, root, path, di);
-       btrfs_release_path(path);
 
        ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
-                                        inode, dir->i_ino);
+                                        inode, dir_ino);
        BUG_ON(ret != 0 && ret != -ENOENT);
 
        ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
@@ -2815,12 +2833,14 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
        int check_link = 1;
        int err = -ENOSPC;
        int ret;
+       u64 ino = btrfs_ino(inode);
+       u64 dir_ino = btrfs_ino(dir);
 
        trans = btrfs_start_transaction(root, 10);
        if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC)
                return trans;
 
-       if (inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
+       if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
                return ERR_PTR(-ENOSPC);
 
        /* check if there is someone else holds reference */
@@ -2879,7 +2899,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
 
        if (ret == 0 && S_ISREG(inode->i_mode)) {
                ret = btrfs_lookup_file_extent(trans, root, path,
-                                              inode->i_ino, (u64)-1, 0);
+                                              ino, (u64)-1, 0);
                if (ret < 0) {
                        err = ret;
                        goto out;
@@ -2895,7 +2915,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
                goto out;
        }
 
-       di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
+       di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
                                dentry->d_name.name, dentry->d_name.len, 0);
        if (IS_ERR(di)) {
                err = PTR_ERR(di);
@@ -2912,7 +2932,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
 
        ref = btrfs_lookup_inode_ref(trans, root, path,
                                dentry->d_name.name, dentry->d_name.len,
-                               inode->i_ino, dir->i_ino, 0);
+                               ino, dir_ino, 0);
        if (IS_ERR(ref)) {
                err = PTR_ERR(ref);
                goto out;
@@ -2923,7 +2943,15 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir,
        index = btrfs_inode_ref_index(path->nodes[0], ref);
        btrfs_release_path(path);
 
-       di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index,
+       /*
+        * This is a commit root search. If we can look up the inode item and
+        * the other related items in the commit root, the transaction that
+        * created the dir/file has been committed, and the dir index item
+        * whose insertion we delayed has also made it into the commit root.
+        * So we needn't worry about the delayed insertion of the dir index
+        * item here.
+        */
+       di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index,
                                dentry->d_name.name, dentry->d_name.len, 0);
        if (IS_ERR(di)) {
                err = PTR_ERR(di);
@@ -2998,12 +3026,13 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
        struct btrfs_key key;
        u64 index;
        int ret;
+       u64 dir_ino = btrfs_ino(dir);
 
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
 
-       di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
+       di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
                                   name, name_len, -1);
        BUG_ON(IS_ERR_OR_NULL(di));
 
@@ -3016,10 +3045,10 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
 
        ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
                                 objectid, root->root_key.objectid,
-                                dir->i_ino, &index, name, name_len);
+                                dir_ino, &index, name, name_len);
        if (ret < 0) {
                BUG_ON(ret != -ENOENT);
-               di = btrfs_search_dir_index_item(root, path, dir->i_ino,
+               di = btrfs_search_dir_index_item(root, path, dir_ino,
                                                 name, name_len);
                BUG_ON(IS_ERR_OR_NULL(di));
 
@@ -3028,24 +3057,16 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
                btrfs_release_path(path);
                index = key.offset;
        }
+       btrfs_release_path(path);
 
-       di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
-                                        index, name, name_len, -1);
-       BUG_ON(IS_ERR_OR_NULL(di));
-
-       leaf = path->nodes[0];
-       btrfs_dir_item_key_to_cpu(leaf, di, &key);
-       WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
-       ret = btrfs_delete_one_dir_name(trans, root, path, di);
+       ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
        BUG_ON(ret);
-       btrfs_release_path(path);
 
        btrfs_i_size_write(dir, dir->i_size - name_len * 2);
        dir->i_mtime = dir->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode(trans, root, dir);
        BUG_ON(ret);
 
-       btrfs_free_path(path);
        return 0;
 }
 
@@ -3058,7 +3079,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
        unsigned long nr = 0;
 
        if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
-           inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
+           btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID)
                return -ENOTEMPTY;
 
        trans = __unlink_start_trans(dir, dentry);
@@ -3067,7 +3088,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
 
        btrfs_set_trans_block_group(trans, dir);
 
-       if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
+       if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
                err = btrfs_unlink_subvol(trans, root, dir,
                                          BTRFS_I(inode)->location.objectid,
                                          dentry->d_name.name,
@@ -3127,17 +3148,27 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
        int encoding;
        int ret;
        int err = 0;
+       u64 ino = btrfs_ino(inode);
 
        BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
 
        if (root->ref_cows || root == root->fs_info->tree_root)
                btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
 
+       /*
+        * This function is also used to drop the items in the log tree before
+        * we relog the inode, so if root != BTRFS_I(inode)->root, it means
+        * we are dropping the logged items, and we shouldn't kill the delayed
+        * items.
+        */
+       if (min_type == 0 && root == BTRFS_I(inode)->root)
+               btrfs_kill_delayed_inode_items(inode);
+
        path = btrfs_alloc_path();
        BUG_ON(!path);
        path->reada = -1;
 
-       key.objectid = inode->i_ino;
+       key.objectid = ino;
        key.offset = (u64)-1;
        key.type = (u8)-1;
 
@@ -3165,7 +3196,7 @@ search_again:
                found_type = btrfs_key_type(&found_key);
                encoding = 0;
 
-               if (found_key.objectid != inode->i_ino)
+               if (found_key.objectid != ino)
                        break;
 
                if (found_type < min_type)
@@ -3284,7 +3315,7 @@ delete:
                        ret = btrfs_free_extent(trans, root, extent_start,
                                                extent_num_bytes, 0,
                                                btrfs_header_owner(leaf),
-                                               inode->i_ino, extent_offset);
+                                               ino, extent_offset);
                        BUG_ON(ret);
                }
 
@@ -3293,7 +3324,9 @@ delete:
 
                if (path->slots[0] == 0 ||
                    path->slots[0] != pending_del_slot) {
-                       if (root->ref_cows) {
+                       if (root->ref_cows &&
+                           BTRFS_I(inode)->location.objectid !=
+                                               BTRFS_FREE_INO_OBJECTID) {
                                err = -EAGAIN;
                                goto out;
                        }
@@ -3483,7 +3516,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
                                break;
 
                        err = btrfs_insert_file_extent(trans, root,
-                                       inode->i_ino, cur_offset, 0,
+                                       btrfs_ino(inode), cur_offset, 0,
                                        0, hole_size, 0, hole_size,
                                        0, 0, 0);
                        if (err)
@@ -3585,7 +3618,7 @@ void btrfs_evict_inode(struct inode *inode)
 
        truncate_inode_pages(&inode->i_data, 0);
        if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 ||
-                              root == root->fs_info->tree_root))
+                              is_free_space_inode(root, inode)))
                goto no_delete;
 
        if (is_bad_inode(inode)) {
@@ -3638,6 +3671,10 @@ void btrfs_evict_inode(struct inode *inode)
                BUG_ON(ret);
        }
 
+       if (!(root == root->fs_info->tree_root ||
+             root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
+               btrfs_return_ino(root, btrfs_ino(inode));
+
        nr = trans->blocks_used;
        btrfs_end_transaction(trans, root);
        btrfs_btree_balance_dirty(root, nr);
@@ -3663,7 +3700,7 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
        path = btrfs_alloc_path();
        BUG_ON(!path);
 
-       di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
+       di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name,
                                    namelen, 0);
        if (IS_ERR(di))
                ret = PTR_ERR(di);
@@ -3716,7 +3753,7 @@ static int fixup_tree_root_location(struct btrfs_root *root,
 
        leaf = path->nodes[0];
        ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
-       if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino ||
+       if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
            btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
                goto out;
 
@@ -3755,6 +3792,7 @@ static void inode_tree_add(struct inode *inode)
        struct btrfs_inode *entry;
        struct rb_node **p;
        struct rb_node *parent;
+       u64 ino = btrfs_ino(inode);
 again:
        p = &root->inode_tree.rb_node;
        parent = NULL;
@@ -3767,9 +3805,9 @@ again:
                parent = *p;
                entry = rb_entry(parent, struct btrfs_inode, rb_node);
 
-               if (inode->i_ino < entry->vfs_inode.i_ino)
+               if (ino < btrfs_ino(&entry->vfs_inode))
                        p = &parent->rb_left;
-               else if (inode->i_ino > entry->vfs_inode.i_ino)
+               else if (ino > btrfs_ino(&entry->vfs_inode))
                        p = &parent->rb_right;
                else {
                        WARN_ON(!(entry->vfs_inode.i_state &
@@ -3833,9 +3871,9 @@ again:
                prev = node;
                entry = rb_entry(node, struct btrfs_inode, rb_node);
 
-               if (objectid < entry->vfs_inode.i_ino)
+               if (objectid < btrfs_ino(&entry->vfs_inode))
                        node = node->rb_left;
-               else if (objectid > entry->vfs_inode.i_ino)
+               else if (objectid > btrfs_ino(&entry->vfs_inode))
                        node = node->rb_right;
                else
                        break;
@@ -3843,7 +3881,7 @@ again:
        if (!node) {
                while (prev) {
                        entry = rb_entry(prev, struct btrfs_inode, rb_node);
-                       if (objectid <= entry->vfs_inode.i_ino) {
+                       if (objectid <= btrfs_ino(&entry->vfs_inode)) {
                                node = prev;
                                break;
                        }
@@ -3852,7 +3890,7 @@ again:
        }
        while (node) {
                entry = rb_entry(node, struct btrfs_inode, rb_node);
-               objectid = entry->vfs_inode.i_ino + 1;
+               objectid = btrfs_ino(&entry->vfs_inode) + 1;
                inode = igrab(&entry->vfs_inode);
                if (inode) {
                        spin_unlock(&root->inode_lock);
@@ -3890,7 +3928,7 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p)
 static int btrfs_find_actor(struct inode *inode, void *opaque)
 {
        struct btrfs_iget_args *args = opaque;
-       return args->ino == inode->i_ino &&
+       return args->ino == btrfs_ino(inode) &&
                args->root == BTRFS_I(inode)->root;
 }
 
@@ -4035,7 +4073,7 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
        return d_splice_alias(inode, dentry);
 }
 
-static unsigned char btrfs_filetype_table[] = {
+unsigned char btrfs_filetype_table[] = {
        DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
 };
 
@@ -4049,6 +4087,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_path *path;
+       struct list_head ins_list;
+       struct list_head del_list;
        int ret;
        struct extent_buffer *leaf;
        int slot;
@@ -4061,6 +4101,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
        char tmp_name[32];
        char *name_ptr;
        int name_len;
+       int is_curr = 0;        /* filp->f_pos points to the current index? */
 
        /* FIXME, use a real flag for deciding about the key type */
        if (root->fs_info->tree_root == root)
@@ -4068,9 +4109,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
 
        /* special case for "." */
        if (filp->f_pos == 0) {
-               over = filldir(dirent, ".", 1,
-                              1, inode->i_ino,
-                              DT_DIR);
+               over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR);
                if (over)
                        return 0;
                filp->f_pos = 1;
@@ -4085,11 +4124,19 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
                filp->f_pos = 2;
        }
        path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
        path->reada = 2;
 
+       if (key_type == BTRFS_DIR_INDEX_KEY) {
+               INIT_LIST_HEAD(&ins_list);
+               INIT_LIST_HEAD(&del_list);
+               btrfs_get_delayed_items(inode, &ins_list, &del_list);
+       }
+
        btrfs_set_key_type(&key, key_type);
        key.offset = filp->f_pos;
-       key.objectid = inode->i_ino;
+       key.objectid = btrfs_ino(inode);
 
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
@@ -4116,8 +4163,13 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
                        break;
                if (found_key.offset < filp->f_pos)
                        goto next;
+               if (key_type == BTRFS_DIR_INDEX_KEY &&
+                   btrfs_should_delete_dir_index(&del_list,
+                                                 found_key.offset))
+                       goto next;
 
                filp->f_pos = found_key.offset;
+               is_curr = 1;
 
                di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
                di_cur = 0;
@@ -4172,6 +4224,15 @@ next:
                path->slots[0]++;
        }
 
+       if (key_type == BTRFS_DIR_INDEX_KEY) {
+               if (is_curr)
+                       filp->f_pos++;
+               ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
+                                                     &ins_list);
+               if (ret)
+                       goto nopos;
+       }
+
        /* Reached end of directory/root. Bump pos past the last item. */
        if (key_type == BTRFS_DIR_INDEX_KEY)
                /*
@@ -4184,6 +4245,8 @@ next:
 nopos:
        ret = 0;
 err:
+       if (key_type == BTRFS_DIR_INDEX_KEY)
+               btrfs_put_delayed_items(&ins_list, &del_list);
        btrfs_free_path(path);
        return ret;
 }
@@ -4199,7 +4262,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
                return 0;
 
        smp_mb();
-       nolock = (root->fs_info->closing && root == root->fs_info->tree_root);
+       if (root->fs_info->closing && is_free_space_inode(root, inode))
+               nolock = true;
 
        if (wbc->sync_mode == WB_SYNC_ALL) {
                if (nolock)
@@ -4243,8 +4307,9 @@ void btrfs_dirty_inode(struct inode *inode)
                trans = btrfs_start_transaction(root, 1);
                if (IS_ERR(trans)) {
                        printk_ratelimited(KERN_ERR "btrfs: fail to "
-                                      "dirty  inode %lu error %ld\n",
-                                      inode->i_ino, PTR_ERR(trans));
+                                      "dirty  inode %llu error %ld\n",
+                                      (unsigned long long)btrfs_ino(inode),
+                                      PTR_ERR(trans));
                        return;
                }
                btrfs_set_trans_block_group(trans, inode);
@@ -4252,11 +4317,14 @@ void btrfs_dirty_inode(struct inode *inode)
                ret = btrfs_update_inode(trans, root, inode);
                if (ret) {
                        printk_ratelimited(KERN_ERR "btrfs: fail to "
-                                      "dirty  inode %lu error %d\n",
-                                      inode->i_ino, ret);
+                                      "dirty  inode %llu error %d\n",
+                                      (unsigned long long)btrfs_ino(inode),
+                                      ret);
                }
        }
        btrfs_end_transaction(trans, root);
+       if (BTRFS_I(inode)->delayed_node)
+               btrfs_balance_delayed_items(root);
 }
 
 /*
@@ -4272,7 +4340,7 @@ static int btrfs_set_inode_index_count(struct inode *inode)
        struct extent_buffer *leaf;
        int ret;
 
-       key.objectid = inode->i_ino;
+       key.objectid = btrfs_ino(inode);
        btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
        key.offset = (u64)-1;
 
@@ -4304,7 +4372,7 @@ static int btrfs_set_inode_index_count(struct inode *inode)
        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
 
-       if (found_key.objectid != inode->i_ino ||
+       if (found_key.objectid != btrfs_ino(inode) ||
            btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
                BTRFS_I(inode)->index_cnt = 2;
                goto out;
@@ -4325,9 +4393,12 @@ int btrfs_set_inode_index(struct inode *dir, u64 *index)
        int ret = 0;
 
        if (BTRFS_I(dir)->index_cnt == (u64)-1) {
-               ret = btrfs_set_inode_index_count(dir);
-               if (ret)
-                       return ret;
+               ret = btrfs_inode_delayed_dir_index_count(dir);
+               if (ret) {
+                       ret = btrfs_set_inode_index_count(dir);
+                       if (ret)
+                               return ret;
+               }
        }
 
        *index = BTRFS_I(dir)->index_cnt;
@@ -4363,6 +4434,12 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                return ERR_PTR(-ENOMEM);
        }
 
+       /*
+        * we have to initialize this early, so we can reclaim the inode
+        * number if we fail afterwards in this function.
+        */
+       inode->i_ino = objectid;
+
        if (dir) {
                trace_btrfs_inode_request(dir);
 
@@ -4408,7 +4485,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
                goto fail;
 
        inode_init_owner(inode, dir, mode);
-       inode->i_ino = objectid;
        inode_set_bytes(inode, 0);
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
        inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
@@ -4472,29 +4548,29 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
        int ret = 0;
        struct btrfs_key key;
        struct btrfs_root *root = BTRFS_I(parent_inode)->root;
+       u64 ino = btrfs_ino(inode);
+       u64 parent_ino = btrfs_ino(parent_inode);
 
-       if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+       if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
                memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
        } else {
-               key.objectid = inode->i_ino;
+               key.objectid = ino;
                btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
                key.offset = 0;
        }
 
-       if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+       if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
                ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
                                         key.objectid, root->root_key.objectid,
-                                        parent_inode->i_ino,
-                                        index, name, name_len);
+                                        parent_ino, index, name, name_len);
        } else if (add_backref) {
-               ret = btrfs_insert_inode_ref(trans, root,
-                                            name, name_len, inode->i_ino,
-                                            parent_inode->i_ino, index);
+               ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
+                                            parent_ino, index);
        }
 
        if (ret == 0) {
                ret = btrfs_insert_dir_item(trans, root, name, name_len,
-                                           parent_inode->i_ino, &key,
+                                           parent_inode, &key,
                                            btrfs_inode_type(inode), index);
                BUG_ON(ret);
 
@@ -4537,10 +4613,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
        if (!new_valid_dev(rdev))
                return -EINVAL;
 
-       err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
-       if (err)
-               return err;
-
        /*
         * 2 for inode item and ref
         * 2 for dir items
@@ -4552,8 +4624,12 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
 
        btrfs_set_trans_block_group(trans, dir);
 
+       err = btrfs_find_free_ino(root, &objectid);
+       if (err)
+               goto out_unlock;
+
        inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
-                               dentry->d_name.len, dir->i_ino, objectid,
+                               dentry->d_name.len, btrfs_ino(dir), objectid,
                                BTRFS_I(dir)->block_group, mode, &index);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
@@ -4600,9 +4676,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
        u64 objectid;
        u64 index = 0;
 
-       err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
-       if (err)
-               return err;
        /*
         * 2 for inode item and ref
         * 2 for dir items
@@ -4614,8 +4687,12 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
 
        btrfs_set_trans_block_group(trans, dir);
 
+       err = btrfs_find_free_ino(root, &objectid);
+       if (err)
+               goto out_unlock;
+
        inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
-                               dentry->d_name.len, dir->i_ino, objectid,
+                               dentry->d_name.len, btrfs_ino(dir), objectid,
                                BTRFS_I(dir)->block_group, mode, &index);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
@@ -4726,10 +4803,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        u64 index = 0;
        unsigned long nr = 1;
 
-       err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
-       if (err)
-               return err;
-
        /*
         * 2 items for inode and ref
         * 2 items for dir items
@@ -4740,8 +4813,12 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
                return PTR_ERR(trans);
        btrfs_set_trans_block_group(trans, dir);
 
+       err = btrfs_find_free_ino(root, &objectid);
+       if (err)
+               goto out_fail;
+
        inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
-                               dentry->d_name.len, dir->i_ino, objectid,
+                               dentry->d_name.len, btrfs_ino(dir), objectid,
                                BTRFS_I(dir)->block_group, S_IFDIR | mode,
                                &index);
        if (IS_ERR(inode)) {
@@ -4864,7 +4941,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
        u64 bytenr;
        u64 extent_start = 0;
        u64 extent_end = 0;
-       u64 objectid = inode->i_ino;
+       u64 objectid = btrfs_ino(inode);
        u32 found_type;
        struct btrfs_path *path = NULL;
        struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -5372,7 +5449,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
        if (!path)
                return -ENOMEM;
 
-       ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+       ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
                                       offset, 0);
        if (ret < 0)
                goto out;
@@ -5389,7 +5466,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
        ret = 0;
        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, slot);
-       if (key.objectid != inode->i_ino ||
+       if (key.objectid != btrfs_ino(inode) ||
            key.type != BTRFS_EXTENT_DATA_KEY) {
                /* not our file or wrong item type, must cow */
                goto out;
@@ -5423,7 +5500,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
         * look for other files referencing this extent, if we
         * find any we must cow
         */
-       if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
+       if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode),
                                  key.offset - backref_offset, disk_bytenr))
                goto out;
 
@@ -5613,9 +5690,10 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
 
                        flush_dcache_page(bvec->bv_page);
                        if (csum != *private) {
-                               printk(KERN_ERR "btrfs csum failed ino %lu off"
+                               printk(KERN_ERR "btrfs csum failed ino %llu off"
                                      " %llu csum %u private %u\n",
-                                     inode->i_ino, (unsigned long long)start,
+                                     (unsigned long long)btrfs_ino(inode),
+                                     (unsigned long long)start,
                                      csum, *private);
                                err = -EIO;
                        }
@@ -5762,9 +5840,9 @@ static void btrfs_end_dio_bio(struct bio *bio, int err)
        struct btrfs_dio_private *dip = bio->bi_private;
 
        if (err) {
-               printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu "
+               printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
                      "sector %#Lx len %u err no %d\n",
-                     dip->inode->i_ino, bio->bi_rw,
+                     (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw,
                      (unsigned long long)bio->bi_sector, bio->bi_size, err);
                dip->errors = 1;
 
@@ -6607,6 +6685,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
        ei->dummy_inode = 0;
        ei->force_compress = BTRFS_COMPRESS_NONE;
 
+       ei->delayed_node = NULL;
+
        inode = &ei->vfs_inode;
        extent_map_tree_init(&ei->extent_tree);
        extent_io_tree_init(&ei->io_tree, &inode->i_data);
@@ -6674,8 +6754,8 @@ void btrfs_destroy_inode(struct inode *inode)
 
        spin_lock(&root->orphan_lock);
        if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
-               printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n",
-                      inode->i_ino);
+               printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
+                      (unsigned long long)btrfs_ino(inode));
                list_del_init(&BTRFS_I(inode)->i_orphan);
        }
        spin_unlock(&root->orphan_lock);
@@ -6697,6 +6777,7 @@ void btrfs_destroy_inode(struct inode *inode)
        inode_tree_del(inode);
        btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
 free:
+       btrfs_remove_delayed_node(inode);
        call_rcu(&inode->i_rcu, btrfs_i_callback);
 }
 
@@ -6705,7 +6786,7 @@ int btrfs_drop_inode(struct inode *inode)
        struct btrfs_root *root = BTRFS_I(inode)->root;
 
        if (btrfs_root_refs(&root->root_item) == 0 &&
-           root != root->fs_info->tree_root)
+           !is_free_space_inode(root, inode))
                return 1;
        else
                return generic_drop_inode(inode);
@@ -6808,38 +6889,39 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        struct btrfs_trans_handle *trans;
        struct btrfs_root *root = BTRFS_I(old_dir)->root;
        struct btrfs_root *dest = BTRFS_I(new_dir)->root;
-       struct inode *newinode = new_dentry->d_inode;
+       struct inode *new_inode = new_dentry->d_inode;
        struct inode *old_inode = old_dentry->d_inode;
        struct timespec ctime = CURRENT_TIME;
        u64 index = 0;
        u64 root_objectid;
        int ret;
+       u64 old_ino = btrfs_ino(old_inode);
 
-       if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
+       if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
                return -EPERM;
 
        /* we only allow rename subvolume link between subvolumes */
-       if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
+       if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
                return -EXDEV;
 
-       if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
-           (newinode && newinode->i_ino == BTRFS_FIRST_FREE_OBJECTID))
+       if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
+           (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID))
                return -ENOTEMPTY;
 
-       if (S_ISDIR(old_inode->i_mode) && newinode &&
-           newinode->i_size > BTRFS_EMPTY_DIR_SIZE)
+       if (S_ISDIR(old_inode->i_mode) && new_inode &&
+           new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
                return -ENOTEMPTY;
        /*
         * we're using rename to replace one file with another.
         * and the replacement file is large.  Start IO on it now so
         * we don't add too much work to the end of the transaction
         */
-       if (newinode && S_ISREG(old_inode->i_mode) && newinode->i_size &&
+       if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
            old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
                filemap_flush(old_inode->i_mapping);
 
        /* close the racy window with snapshot create/destroy ioctl */
-       if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
+       if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
                down_read(&root->fs_info->subvol_sem);
        /*
         * We want to reserve the absolute worst case amount of items.  So if
@@ -6864,15 +6946,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (ret)
                goto out_fail;
 
-       if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+       if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
                /* force full log commit if subvolume involved. */
                root->fs_info->last_trans_log_full_commit = trans->transid;
        } else {
                ret = btrfs_insert_inode_ref(trans, dest,
                                             new_dentry->d_name.name,
                                             new_dentry->d_name.len,
-                                            old_inode->i_ino,
-                                            new_dir->i_ino, index);
+                                            old_ino,
+                                            btrfs_ino(new_dir), index);
                if (ret)
                        goto out_fail;
                /*
@@ -6888,10 +6970,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
         * make sure the inode gets flushed if it is replacing
         * something.
         */
-       if (newinode && newinode->i_size &&
-           old_inode && S_ISREG(old_inode->i_mode)) {
+       if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
                btrfs_add_ordered_operation(trans, root, old_inode);
-       }
 
        old_dir->i_ctime = old_dir->i_mtime = ctime;
        new_dir->i_ctime = new_dir->i_mtime = ctime;
@@ -6900,7 +6980,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        if (old_dentry->d_parent != new_dentry->d_parent)
                btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
 
-       if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
+       if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
                root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
                ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
                                        old_dentry->d_name.name,
@@ -6915,16 +6995,16 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        }
        BUG_ON(ret);
 
-       if (newinode) {
-               newinode->i_ctime = CURRENT_TIME;
-               if (unlikely(newinode->i_ino ==
+       if (new_inode) {
+               new_inode->i_ctime = CURRENT_TIME;
+               if (unlikely(btrfs_ino(new_inode) ==
                             BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
-                       root_objectid = BTRFS_I(newinode)->location.objectid;
+                       root_objectid = BTRFS_I(new_inode)->location.objectid;
                        ret = btrfs_unlink_subvol(trans, dest, new_dir,
                                                root_objectid,
                                                new_dentry->d_name.name,
                                                new_dentry->d_name.len);
-                       BUG_ON(newinode->i_nlink == 0);
+                       BUG_ON(new_inode->i_nlink == 0);
                } else {
                        ret = btrfs_unlink_inode(trans, dest, new_dir,
                                                 new_dentry->d_inode,
@@ -6932,7 +7012,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                                                 new_dentry->d_name.len);
                }
                BUG_ON(ret);
-               if (newinode->i_nlink == 0) {
+               if (new_inode->i_nlink == 0) {
                        ret = btrfs_orphan_add(trans, new_dentry->d_inode);
                        BUG_ON(ret);
                }
@@ -6945,7 +7025,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                             new_dentry->d_name.len, 0, index);
        BUG_ON(ret);
 
-       if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
+       if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
                struct dentry *parent = dget_parent(new_dentry);
                btrfs_log_new_name(trans, old_inode, old_dir, parent);
                dput(parent);
@@ -6954,7 +7034,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 out_fail:
        btrfs_end_transaction_throttle(trans, root);
 out_notrans:
-       if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
+       if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&root->fs_info->subvol_sem);
 
        return ret;
@@ -7031,9 +7111,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
        if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
                return -ENAMETOOLONG;
 
-       err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid);
-       if (err)
-               return err;
        /*
         * 2 items for inode item and ref
         * 2 items for dir items
@@ -7045,8 +7122,12 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 
        btrfs_set_trans_block_group(trans, dir);
 
+       err = btrfs_find_free_ino(root, &objectid);
+       if (err)
+               goto out_unlock;
+
        inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
-                               dentry->d_name.len, dir->i_ino, objectid,
+                               dentry->d_name.len, btrfs_ino(dir), objectid,
                                BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
                                &index);
        if (IS_ERR(inode)) {
@@ -7078,7 +7159,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 
        path = btrfs_alloc_path();
        BUG_ON(!path);
-       key.objectid = inode->i_ino;
+       key.objectid = btrfs_ino(inode);
        key.offset = 0;
        btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
        datasize = btrfs_file_extent_calc_inline_size(name_len);
index d11fc6548e15c0ff65c9092bc94b37d148fb4892..ed8c055ab70f06bec725a49cf06d068d7ec0aaa9 100644 (file)
@@ -50,6 +50,7 @@
 #include "print-tree.h"
 #include "volumes.h"
 #include "locking.h"
+#include "inode-map.h"
 
 /* Mask out flags that are inappropriate for the given type of inode. */
 static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -81,6 +82,13 @@ static unsigned int btrfs_flags_to_ioctl(unsigned int flags)
                iflags |= FS_NOATIME_FL;
        if (flags & BTRFS_INODE_DIRSYNC)
                iflags |= FS_DIRSYNC_FL;
+       if (flags & BTRFS_INODE_NODATACOW)
+               iflags |= FS_NOCOW_FL;
+
+       if ((flags & BTRFS_INODE_COMPRESS) && !(flags & BTRFS_INODE_NOCOMPRESS))
+               iflags |= FS_COMPR_FL;
+       else if (flags & BTRFS_INODE_NOCOMPRESS)
+               iflags |= FS_NOCOMP_FL;
 
        return iflags;
 }
@@ -144,16 +152,13 @@ static int check_flags(unsigned int flags)
        if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \
                      FS_NOATIME_FL | FS_NODUMP_FL | \
                      FS_SYNC_FL | FS_DIRSYNC_FL | \
-                     FS_NOCOMP_FL | FS_COMPR_FL | \
-                     FS_NOCOW_FL | FS_COW_FL))
+                     FS_NOCOMP_FL | FS_COMPR_FL |
+                     FS_NOCOW_FL))
                return -EOPNOTSUPP;
 
        if ((flags & FS_NOCOMP_FL) && (flags & FS_COMPR_FL))
                return -EINVAL;
 
-       if ((flags & FS_NOCOW_FL) && (flags & FS_COW_FL))
-               return -EINVAL;
-
        return 0;
 }
 
@@ -218,6 +223,10 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
                ip->flags |= BTRFS_INODE_DIRSYNC;
        else
                ip->flags &= ~BTRFS_INODE_DIRSYNC;
+       if (flags & FS_NOCOW_FL)
+               ip->flags |= BTRFS_INODE_NODATACOW;
+       else
+               ip->flags &= ~BTRFS_INODE_NODATACOW;
 
        /*
         * The COMPRESS flag can only be changed by users, while the NOCOMPRESS
@@ -230,11 +239,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
        } else if (flags & FS_COMPR_FL) {
                ip->flags |= BTRFS_INODE_COMPRESS;
                ip->flags &= ~BTRFS_INODE_NOCOMPRESS;
+       } else {
+               ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
        }
-       if (flags & FS_NOCOW_FL)
-               ip->flags |= BTRFS_INODE_NODATACOW;
-       else if (flags & FS_COW_FL)
-               ip->flags &= ~BTRFS_INODE_NODATACOW;
 
        trans = btrfs_join_transaction(root, 1);
        BUG_ON(IS_ERR(trans));
@@ -323,8 +330,7 @@ static noinline int create_subvol(struct btrfs_root *root,
        u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
        u64 index = 0;
 
-       ret = btrfs_find_free_objectid(NULL, root->fs_info->tree_root,
-                                      0, &objectid);
+       ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid);
        if (ret) {
                dput(parent);
                return ret;
@@ -416,7 +422,7 @@ static noinline int create_subvol(struct btrfs_root *root,
        BUG_ON(ret);
 
        ret = btrfs_insert_dir_item(trans, root,
-                                   name, namelen, dir->i_ino, &key,
+                                   name, namelen, dir, &key,
                                    BTRFS_FT_DIR, index);
        if (ret)
                goto fail;
@@ -427,7 +433,7 @@ static noinline int create_subvol(struct btrfs_root *root,
 
        ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
                                 objectid, root->root_key.objectid,
-                                dir->i_ino, index, name, namelen);
+                                btrfs_ino(dir), index, name, namelen);
 
        BUG_ON(ret);
 
@@ -1123,7 +1129,7 @@ static noinline int btrfs_ioctl_subvol_getflags(struct file *file,
        int ret = 0;
        u64 flags = 0;
 
-       if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID)
+       if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
                return -EINVAL;
 
        down_read(&root->fs_info->subvol_sem);
@@ -1150,7 +1156,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
        if (root->fs_info->sb->s_flags & MS_RDONLY)
                return -EROFS;
 
-       if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID)
+       if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID)
                return -EINVAL;
 
        if (copy_from_user(&flags, arg, sizeof(flags)))
@@ -1633,7 +1639,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
                        goto out_dput;
        }
 
-       if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
+       if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) {
                err = -EINVAL;
                goto out_dput;
        }
@@ -1919,7 +1925,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
        }
 
        /* clone data */
-       key.objectid = src->i_ino;
+       key.objectid = btrfs_ino(src);
        key.type = BTRFS_EXTENT_DATA_KEY;
        key.offset = 0;
 
@@ -1946,7 +1952,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 
                btrfs_item_key_to_cpu(leaf, &key, slot);
                if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY ||
-                   key.objectid != src->i_ino)
+                   key.objectid != btrfs_ino(src))
                        break;
 
                if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
@@ -1989,7 +1995,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                                goto next;
 
                        memcpy(&new_key, &key, sizeof(new_key));
-                       new_key.objectid = inode->i_ino;
+                       new_key.objectid = btrfs_ino(inode);
                        if (off <= key.offset)
                                new_key.offset = key.offset + destoff - off;
                        else
@@ -2043,7 +2049,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                                        ret = btrfs_inc_extent_ref(trans, root,
                                                        disko, diskl, 0,
                                                        root->root_key.objectid,
-                                                       inode->i_ino,
+                                                       btrfs_ino(inode),
                                                        new_key.offset - datao);
                                        BUG_ON(ret);
                                }
index f726e72dd36279efdc0675b51e54f0949b694cc0..051992c7fcc9529f151fabce88b3a5d73f361bda 100644 (file)
@@ -30,6 +30,7 @@
 #include "btrfs_inode.h"
 #include "async-thread.h"
 #include "free-space-cache.h"
+#include "inode-map.h"
 
 /*
  * backref_node, mapping_node and tree_block start with this
@@ -1409,9 +1410,9 @@ again:
                prev = node;
                entry = rb_entry(node, struct btrfs_inode, rb_node);
 
-               if (objectid < entry->vfs_inode.i_ino)
+               if (objectid < btrfs_ino(&entry->vfs_inode))
                        node = node->rb_left;
-               else if (objectid > entry->vfs_inode.i_ino)
+               else if (objectid > btrfs_ino(&entry->vfs_inode))
                        node = node->rb_right;
                else
                        break;
@@ -1419,7 +1420,7 @@ again:
        if (!node) {
                while (prev) {
                        entry = rb_entry(prev, struct btrfs_inode, rb_node);
-                       if (objectid <= entry->vfs_inode.i_ino) {
+                       if (objectid <= btrfs_ino(&entry->vfs_inode)) {
                                node = prev;
                                break;
                        }
@@ -1434,7 +1435,7 @@ again:
                        return inode;
                }
 
-               objectid = entry->vfs_inode.i_ino + 1;
+               objectid = btrfs_ino(&entry->vfs_inode) + 1;
                if (cond_resched_lock(&root->inode_lock))
                        goto again;
 
@@ -1470,7 +1471,7 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr,
                return -ENOMEM;
 
        bytenr -= BTRFS_I(reloc_inode)->index_cnt;
-       ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
+       ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode),
                                       bytenr, 0);
        if (ret < 0)
                goto out;
@@ -1558,11 +1559,11 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
                        if (first) {
                                inode = find_next_inode(root, key.objectid);
                                first = 0;
-                       } else if (inode && inode->i_ino < key.objectid) {
+                       } else if (inode && btrfs_ino(inode) < key.objectid) {
                                btrfs_add_delayed_iput(inode);
                                inode = find_next_inode(root, key.objectid);
                        }
-                       if (inode && inode->i_ino == key.objectid) {
+                       if (inode && btrfs_ino(inode) == key.objectid) {
                                end = key.offset +
                                      btrfs_file_extent_num_bytes(leaf, fi);
                                WARN_ON(!IS_ALIGNED(key.offset,
@@ -1893,6 +1894,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
        struct inode *inode = NULL;
        u64 objectid;
        u64 start, end;
+       u64 ino;
 
        objectid = min_key->objectid;
        while (1) {
@@ -1905,17 +1907,18 @@ static int invalidate_extent_cache(struct btrfs_root *root,
                inode = find_next_inode(root, objectid);
                if (!inode)
                        break;
+               ino = btrfs_ino(inode);
 
-               if (inode->i_ino > max_key->objectid) {
+               if (ino > max_key->objectid) {
                        iput(inode);
                        break;
                }
 
-               objectid = inode->i_ino + 1;
+               objectid = ino + 1;
                if (!S_ISREG(inode->i_mode))
                        continue;
 
-               if (unlikely(min_key->objectid == inode->i_ino)) {
+               if (unlikely(min_key->objectid == ino)) {
                        if (min_key->type > BTRFS_EXTENT_DATA_KEY)
                                continue;
                        if (min_key->type < BTRFS_EXTENT_DATA_KEY)
@@ -1928,7 +1931,7 @@ static int invalidate_extent_cache(struct btrfs_root *root,
                        start = 0;
                }
 
-               if (unlikely(max_key->objectid == inode->i_ino)) {
+               if (unlikely(max_key->objectid == ino)) {
                        if (max_key->type < BTRFS_EXTENT_DATA_KEY)
                                continue;
                        if (max_key->type > BTRFS_EXTENT_DATA_KEY) {
@@ -3897,7 +3900,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
        if (IS_ERR(trans))
                return ERR_CAST(trans);
 
-       err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
+       err = btrfs_find_free_objectid(root, &objectid);
        if (err)
                goto out;
 
index 3e28521643fb24c23babdde2e1228cb7690339ac..fb72e2bea882ff7d9fc5e56176770ae7b8854497 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/magic.h>
 #include <linux/slab.h>
 #include "compat.h"
+#include "delayed-inode.h"
 #include "ctree.h"
 #include "disk-io.h"
 #include "transaction.h"
@@ -1206,10 +1207,14 @@ static int __init init_btrfs_fs(void)
        if (err)
                goto free_extent_io;
 
-       err = btrfs_interface_init();
+       err = btrfs_delayed_inode_init();
        if (err)
                goto free_extent_map;
 
+       err = btrfs_interface_init();
+       if (err)
+               goto free_delayed_inode;
+
        err = register_filesystem(&btrfs_fs_type);
        if (err)
                goto unregister_ioctl;
@@ -1219,6 +1224,8 @@ static int __init init_btrfs_fs(void)
 
 unregister_ioctl:
        btrfs_interface_exit();
+free_delayed_inode:
+       btrfs_delayed_inode_exit();
 free_extent_map:
        extent_map_exit();
 free_extent_io:
@@ -1235,6 +1242,7 @@ free_sysfs:
 static void __exit exit_btrfs_fs(void)
 {
        btrfs_destroy_cachep();
+       btrfs_delayed_inode_exit();
        extent_map_exit();
        extent_io_exit();
        btrfs_interface_exit();
index ab9633fd72a40c7cbdc8dc91b9b13cc44fdef6f6..c3c223ae66918d9e244fc909c4d6b7836490ef95 100644 (file)
@@ -174,18 +174,6 @@ static const struct sysfs_ops btrfs_root_attr_ops = {
        .store  = btrfs_root_attr_store,
 };
 
-static struct kobj_type btrfs_root_ktype = {
-       .default_attrs  = btrfs_root_attrs,
-       .sysfs_ops      = &btrfs_root_attr_ops,
-       .release        = btrfs_root_release,
-};
-
-static struct kobj_type btrfs_super_ktype = {
-       .default_attrs  = btrfs_super_attrs,
-       .sysfs_ops      = &btrfs_super_attr_ops,
-       .release        = btrfs_super_release,
-};
-
 /* /sys/fs/btrfs/ entry */
 static struct kset *btrfs_kset;
 
index 211aceeb9ea05bd1ba08eb6dc13d37d3885eded2..33679fc710c6048a5e2737f7b683e9d6b90b7901 100644 (file)
@@ -27,6 +27,7 @@
 #include "transaction.h"
 #include "locking.h"
 #include "tree-log.h"
+#include "inode-map.h"
 
 #define BTRFS_ROOT_TRANS_TAG 0
 
@@ -443,19 +444,40 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 int btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root)
 {
-       return __btrfs_end_transaction(trans, root, 0, 1);
+       int ret;
+
+       ret = __btrfs_end_transaction(trans, root, 0, 1);
+       if (ret)
+               return ret;
+       return 0;
 }
 
 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
 {
-       return __btrfs_end_transaction(trans, root, 1, 1);
+       int ret;
+
+       ret = __btrfs_end_transaction(trans, root, 1, 1);
+       if (ret)
+               return ret;
+       return 0;
 }
 
 int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root)
 {
-       return __btrfs_end_transaction(trans, root, 0, 0);
+       int ret;
+
+       ret = __btrfs_end_transaction(trans, root, 0, 0);
+       if (ret)
+               return ret;
+       return 0;
+}
+
+int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
+                               struct btrfs_root *root)
+{
+       return __btrfs_end_transaction(trans, root, 1, 1);
 }
 
 /*
@@ -716,8 +738,14 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
                        btrfs_update_reloc_root(trans, root);
                        btrfs_orphan_commit_root(trans, root);
 
+                       btrfs_save_ino_cache(root, trans);
+
                        if (root->commit_root != root->node) {
+                               mutex_lock(&root->fs_commit_mutex);
                                switch_commit_root(root);
+                               btrfs_unpin_free_ino(root);
+                               mutex_unlock(&root->fs_commit_mutex);
+
                                btrfs_set_root_node(&root->root_item,
                                                    root->node);
                        }
@@ -795,7 +823,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                goto fail;
        }
 
-       ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
+       ret = btrfs_find_free_objectid(tree_root, &objectid);
        if (ret) {
                pending->error = ret;
                goto fail;
@@ -832,7 +860,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        BUG_ON(ret);
        ret = btrfs_insert_dir_item(trans, parent_root,
                                dentry->d_name.name, dentry->d_name.len,
-                               parent_inode->i_ino, &key,
+                               parent_inode, &key,
                                BTRFS_FT_DIR, index);
        BUG_ON(ret);
 
@@ -874,7 +902,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
         */
        ret = btrfs_add_root_ref(trans, tree_root, objectid,
                                 parent_root->root_key.objectid,
-                                parent_inode->i_ino, index,
+                                btrfs_ino(parent_inode), index,
                                 dentry->d_name.name, dentry->d_name.len);
        BUG_ON(ret);
        dput(parent);
@@ -902,6 +930,14 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
        int ret;
 
        list_for_each_entry(pending, head, list) {
+               /*
+                * We must deal with the delayed items before creating
+                * snapshots, or we will create a snapshot with inconsistent
+                * information.
+                */
+               ret = btrfs_run_delayed_items(trans, fs_info->fs_root);
+               BUG_ON(ret);
+
                ret = create_pending_snapshot(trans, fs_info, pending);
                BUG_ON(ret);
        }
@@ -1155,6 +1191,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                        BUG_ON(ret);
                }
 
+               ret = btrfs_run_delayed_items(trans, root);
+               BUG_ON(ret);
+
                /*
                 * rename don't use btrfs_join_transaction, so, once we
                 * set the transaction to blocked above, we aren't going
@@ -1181,6 +1220,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
        ret = create_pending_snapshots(trans, root->fs_info);
        BUG_ON(ret);
 
+       ret = btrfs_run_delayed_items(trans, root);
+       BUG_ON(ret);
+
        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        BUG_ON(ret);
 
@@ -1297,6 +1339,8 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
                root = list_entry(list.next, struct btrfs_root, root_list);
                list_del(&root->root_list);
 
+               btrfs_kill_all_delayed_nodes(root);
+
                if (btrfs_header_backref_rev(root->node) <
                    BTRFS_MIXED_BACKREF_REV)
                        btrfs_drop_snapshot(root, NULL, 0);
index 000a41008c3b4a38d5e5f8da96c4f006f107e729..804c88639e5de00a4065267aaf52d050dd88303c 100644 (file)
@@ -112,6 +112,8 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
                                   int wait_for_unblock);
 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root);
+int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
+                               struct btrfs_root *root);
 int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root);
 void btrfs_throttle(struct btrfs_root *root);
index c599e8c2a53c22e74475e95f750cfc4877f2ebce..a794b9f60138f8a04075268648cb739481635006 100644 (file)
@@ -519,7 +519,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
         * file.  This must be done before the btrfs_drop_extents run
         * so we don't try to drop this extent.
         */
-       ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
+       ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
                                       start, 0);
 
        if (ret == 0 &&
@@ -832,7 +832,7 @@ again:
        read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen);
 
        /* if we already have a perfect match, we're done */
-       if (inode_in_dir(root, path, dir->i_ino, inode->i_ino,
+       if (inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
                         btrfs_inode_ref_index(eb, ref),
                         name, namelen)) {
                goto out;
@@ -960,8 +960,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
        unsigned long ptr;
        unsigned long ptr_end;
        int name_len;
+       u64 ino = btrfs_ino(inode);
 
-       key.objectid = inode->i_ino;
+       key.objectid = ino;
        key.type = BTRFS_INODE_REF_KEY;
        key.offset = (u64)-1;
 
@@ -980,7 +981,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
                }
                btrfs_item_key_to_cpu(path->nodes[0], &key,
                                      path->slots[0]);
-               if (key.objectid != inode->i_ino ||
+               if (key.objectid != ino ||
                    key.type != BTRFS_INODE_REF_KEY)
                        break;
                ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
@@ -1011,10 +1012,10 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
        if (inode->i_nlink == 0) {
                if (S_ISDIR(inode->i_mode)) {
                        ret = replay_dir_deletes(trans, root, NULL, path,
-                                                inode->i_ino, 1);
+                                                ino, 1);
                        BUG_ON(ret);
                }
-               ret = insert_orphan_item(trans, root, inode->i_ino);
+               ret = insert_orphan_item(trans, root, ino);
                BUG_ON(ret);
        }
        btrfs_free_path(path);
@@ -2197,6 +2198,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
        int ret;
        int err = 0;
        int bytes_del = 0;
+       u64 dir_ino = btrfs_ino(dir);
 
        if (BTRFS_I(dir)->logged_trans < trans->transid)
                return 0;
@@ -2214,7 +2216,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
                goto out_unlock;
        }
 
-       di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino,
+       di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
                                   name, name_len, -1);
        if (IS_ERR(di)) {
                err = PTR_ERR(di);
@@ -2226,7 +2228,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
                BUG_ON(ret);
        }
        btrfs_release_path(path);
-       di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino,
+       di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
                                         index, name, name_len, -1);
        if (IS_ERR(di)) {
                err = PTR_ERR(di);
@@ -2244,7 +2246,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
        if (bytes_del) {
                struct btrfs_key key;
 
-               key.objectid = dir->i_ino;
+               key.objectid = dir_ino;
                key.offset = 0;
                key.type = BTRFS_INODE_ITEM_KEY;
                btrfs_release_path(path);
@@ -2303,7 +2305,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
        log = root->log_root;
        mutex_lock(&BTRFS_I(inode)->log_mutex);
 
-       ret = btrfs_del_inode_ref(trans, log, name, name_len, inode->i_ino,
+       ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
                                  dirid, &index);
        mutex_unlock(&BTRFS_I(inode)->log_mutex);
        if (ret == -ENOSPC) {
@@ -2369,13 +2371,14 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
        int nritems;
        u64 first_offset = min_offset;
        u64 last_offset = (u64)-1;
+       u64 ino = btrfs_ino(inode);
 
        log = root->log_root;
-       max_key.objectid = inode->i_ino;
+       max_key.objectid = ino;
        max_key.offset = (u64)-1;
        max_key.type = key_type;
 
-       min_key.objectid = inode->i_ino;
+       min_key.objectid = ino;
        min_key.type = key_type;
        min_key.offset = min_offset;
 
@@ -2388,9 +2391,8 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
         * we didn't find anything from this transaction, see if there
         * is anything at all
         */
-       if (ret != 0 || min_key.objectid != inode->i_ino ||
-           min_key.type != key_type) {
-               min_key.objectid = inode->i_ino;
+       if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
+               min_key.objectid = ino;
                min_key.type = key_type;
                min_key.offset = (u64)-1;
                btrfs_release_path(path);
@@ -2399,7 +2401,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
                        btrfs_release_path(path);
                        return ret;
                }
-               ret = btrfs_previous_item(root, path, inode->i_ino, key_type);
+               ret = btrfs_previous_item(root, path, ino, key_type);
 
                /* if ret == 0 there are items for this type,
                 * create a range to tell us the last key of this type.
@@ -2417,7 +2419,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
        }
 
        /* go backward to find any previous key */
-       ret = btrfs_previous_item(root, path, inode->i_ino, key_type);
+       ret = btrfs_previous_item(root, path, ino, key_type);
        if (ret == 0) {
                struct btrfs_key tmp;
                btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
@@ -2452,8 +2454,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
                for (i = path->slots[0]; i < nritems; i++) {
                        btrfs_item_key_to_cpu(src, &min_key, i);
 
-                       if (min_key.objectid != inode->i_ino ||
-                           min_key.type != key_type)
+                       if (min_key.objectid != ino || min_key.type != key_type)
                                goto done;
                        ret = overwrite_item(trans, log, dst_path, src, i,
                                             &min_key);
@@ -2474,7 +2475,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
                        goto done;
                }
                btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
-               if (tmp.objectid != inode->i_ino || tmp.type != key_type) {
+               if (tmp.objectid != ino || tmp.type != key_type) {
                        last_offset = (u64)-1;
                        goto done;
                }
@@ -2500,8 +2501,7 @@ done:
                 * is valid
                 */
                ret = insert_dir_log_key(trans, log, path, key_type,
-                                        inode->i_ino, first_offset,
-                                        last_offset);
+                                        ino, first_offset, last_offset);
                if (ret)
                        err = ret;
        }
@@ -2745,6 +2745,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        int nritems;
        int ins_start_slot = 0;
        int ins_nr;
+       u64 ino = btrfs_ino(inode);
 
        log = root->log_root;
 
@@ -2757,11 +2758,11 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                return -ENOMEM;
        }
 
-       min_key.objectid = inode->i_ino;
+       min_key.objectid = ino;
        min_key.type = BTRFS_INODE_ITEM_KEY;
        min_key.offset = 0;
 
-       max_key.objectid = inode->i_ino;
+       max_key.objectid = ino;
 
        /* today the code can only do partial logging of directories */
        if (!S_ISDIR(inode->i_mode))
@@ -2773,6 +2774,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                max_key.type = (u8)-1;
        max_key.offset = (u64)-1;
 
+       ret = btrfs_commit_inode_delayed_items(trans, inode);
+       if (ret) {
+               btrfs_free_path(path);
+               btrfs_free_path(dst_path);
+               return ret;
+       }
+
        mutex_lock(&BTRFS_I(inode)->log_mutex);
 
        /*
@@ -2784,8 +2792,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
 
                if (inode_only == LOG_INODE_EXISTS)
                        max_key_type = BTRFS_XATTR_ITEM_KEY;
-               ret = drop_objectid_items(trans, log, path,
-                                         inode->i_ino, max_key_type);
+               ret = drop_objectid_items(trans, log, path, ino, max_key_type);
        } else {
                ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0);
        }
@@ -2803,7 +2810,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                        break;
 again:
                /* note, ins_nr might be > 0 here, cleanup outside the loop */
-               if (min_key.objectid != inode->i_ino)
+               if (min_key.objectid != ino)
                        break;
                if (min_key.type > max_key.type)
                        break;
index 4ca88d1e18e20badb72a0133d6d2c8a6d15fbd99..f3107e4b4d56a3d4b31a5f889ff321328e881978 100644 (file)
@@ -44,7 +44,7 @@ ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
                return -ENOMEM;
 
        /* lookup the xattr by name */
-       di = btrfs_lookup_xattr(NULL, root, path, inode->i_ino, name,
+       di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name,
                                strlen(name), 0);
        if (!di) {
                ret = -ENODATA;
@@ -103,7 +103,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
                return -ENOMEM;
 
        /* first let's see if we already have this xattr */
-       di = btrfs_lookup_xattr(trans, root, path, inode->i_ino, name,
+       di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name,
                                strlen(name), -1);
        if (IS_ERR(di)) {
                ret = PTR_ERR(di);
@@ -136,7 +136,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
        }
 
        /* ok we have to create a completely new xattr */
-       ret = btrfs_insert_xattr_item(trans, root, path, inode->i_ino,
+       ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
                                      name, name_len, value, size);
        BUG_ON(ret);
 out:
@@ -190,7 +190,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size)
         * NOTE: we set key.offset = 0; because we want to start with the
         * first xattr that we find and walk forward
         */
-       key.objectid = inode->i_ino;
+       key.objectid = btrfs_ino(inode);
        btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY);
        key.offset = 0;
 
index e159c529fd2bbc845bbd893f645ac7633d91b9eb..38b8ab554924cd2d4a9ddf11ce3a79efe3d411cd 100644 (file)
@@ -775,6 +775,13 @@ get_more_pages:
                                            ci->i_truncate_seq,
                                            ci->i_truncate_size,
                                            &inode->i_mtime, true, 1, 0);
+
+                               if (!req) {
+                                       rc = -ENOMEM;
+                                       unlock_page(page);
+                                       break;
+                               }
+
                                max_pages = req->r_num_pages;
 
                                alloc_page_vec(fsc, req);
index 5323c330bbf3d4acad87fe6a48701b9ee7f27589..2a5404c1c42f4efd1be78f5d24eed337c24429e3 100644 (file)
@@ -819,7 +819,7 @@ int __ceph_caps_used(struct ceph_inode_info *ci)
                used |= CEPH_CAP_FILE_CACHE;
        if (ci->i_wr_ref)
                used |= CEPH_CAP_FILE_WR;
-       if (ci->i_wrbuffer_ref)
+       if (ci->i_wb_ref || ci->i_wrbuffer_ref)
                used |= CEPH_CAP_FILE_BUFFER;
        return used;
 }
@@ -1331,10 +1331,11 @@ static void ceph_flush_snaps(struct ceph_inode_info *ci)
 }
 
 /*
- * Mark caps dirty.  If inode is newly dirty, add to the global dirty
- * list.
+ * Mark caps dirty.  If inode is newly dirty, return the dirty flags.
+ * Caller is then responsible for calling __mark_inode_dirty with the
+ * returned flags value.
  */
-void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
+int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
 {
        struct ceph_mds_client *mdsc =
                ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
@@ -1357,7 +1358,7 @@ void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
                list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
                spin_unlock(&mdsc->cap_dirty_lock);
                if (ci->i_flushing_caps == 0) {
-                       igrab(inode);
+                       ihold(inode);
                        dirty |= I_DIRTY_SYNC;
                }
        }
@@ -1365,9 +1366,8 @@ void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
        if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
            (mask & CEPH_CAP_FILE_BUFFER))
                dirty |= I_DIRTY_DATASYNC;
-       if (dirty)
-               __mark_inode_dirty(inode, dirty);
        __cap_delay_requeue(mdsc, ci);
+       return dirty;
 }
 
 /*
@@ -1990,11 +1990,11 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got)
        if (got & CEPH_CAP_FILE_WR)
                ci->i_wr_ref++;
        if (got & CEPH_CAP_FILE_BUFFER) {
-               if (ci->i_wrbuffer_ref == 0)
-                       igrab(&ci->vfs_inode);
-               ci->i_wrbuffer_ref++;
-               dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n",
-                    &ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref);
+               if (ci->i_wb_ref == 0)
+                       ihold(&ci->vfs_inode);
+               ci->i_wb_ref++;
+               dout("__take_cap_refs %p wb %d -> %d (?)\n",
+                    &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
        }
 }
 
@@ -2169,12 +2169,12 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
                if (--ci->i_rdcache_ref == 0)
                        last++;
        if (had & CEPH_CAP_FILE_BUFFER) {
-               if (--ci->i_wrbuffer_ref == 0) {
+               if (--ci->i_wb_ref == 0) {
                        last++;
                        put++;
                }
-               dout("put_cap_refs %p wrbuffer %d -> %d (?)\n",
-                    inode, ci->i_wrbuffer_ref+1, ci->i_wrbuffer_ref);
+               dout("put_cap_refs %p wb %d -> %d (?)\n",
+                    inode, ci->i_wb_ref+1, ci->i_wb_ref);
        }
        if (had & CEPH_CAP_FILE_WR)
                if (--ci->i_wr_ref == 0) {
index 159b512d5a271379abf0bde217fd46feeb38c79b..203252d88d9fa6509d1dd7bfed3e4198c414d906 100644 (file)
@@ -734,9 +734,12 @@ retry_snap:
                }
        }
        if (ret >= 0) {
+               int dirty;
                spin_lock(&inode->i_lock);
-               __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
+               dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
                spin_unlock(&inode->i_lock);
+               if (dirty)
+                       __mark_inode_dirty(inode, dirty);
        }
 
 out:
index b54c97da1c43fd8a4d52238d469c0aa6cb904772..70b6a4839c386be5fa4b40b48c3bf6521eda7ca7 100644 (file)
@@ -355,6 +355,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
        ci->i_rd_ref = 0;
        ci->i_rdcache_ref = 0;
        ci->i_wr_ref = 0;
+       ci->i_wb_ref = 0;
        ci->i_wrbuffer_ref = 0;
        ci->i_wrbuffer_ref_head = 0;
        ci->i_shared_gen = 0;
@@ -1567,6 +1568,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
        int release = 0, dirtied = 0;
        int mask = 0;
        int err = 0;
+       int inode_dirty_flags = 0;
 
        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;
@@ -1725,13 +1727,16 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
                dout("setattr %p ATTR_FILE ... hrm!\n", inode);
 
        if (dirtied) {
-               __ceph_mark_dirty_caps(ci, dirtied);
+               inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied);
                inode->i_ctime = CURRENT_TIME;
        }
 
        release &= issued;
        spin_unlock(&inode->i_lock);
 
+       if (inode_dirty_flags)
+               __mark_inode_dirty(inode, inode_dirty_flags);
+
        if (mask) {
                req->r_inode = igrab(inode);
                req->r_inode_drop = release;
index f60b07b0feb075042e91c3cdabea569bd01ee158..d0fae4ce9ba55b704ccedadbfd3f298e3a00cfb4 100644 (file)
@@ -3304,8 +3304,8 @@ static void con_put(struct ceph_connection *con)
 {
        struct ceph_mds_session *s = con->private;
 
+       dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1);
        ceph_put_mds_session(s);
-       dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref));
 }
 
 /*
index e86ec1155f8f401ec2722f6b8ae005f48c6d9a42..24067d68a5549769df4be2c2a2574c36d6dcf702 100644 (file)
@@ -206,7 +206,7 @@ void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
                up_write(&mdsc->snap_rwsem);
        } else {
                spin_lock(&mdsc->snap_empty_lock);
-               list_add(&mdsc->snap_empty, &realm->empty_item);
+               list_add(&realm->empty_item, &mdsc->snap_empty);
                spin_unlock(&mdsc->snap_empty_lock);
        }
 }
index 619fe719968ff124771de1811b80ba165a54c2b3..f5cabefa98dc03cee37b12e8c961a4f208e970a4 100644 (file)
@@ -293,7 +293,7 @@ struct ceph_inode_info {
 
        /* held references to caps */
        int i_pin_ref;
-       int i_rd_ref, i_rdcache_ref, i_wr_ref;
+       int i_rd_ref, i_rdcache_ref, i_wr_ref, i_wb_ref;
        int i_wrbuffer_ref, i_wrbuffer_ref_head;
        u32 i_shared_gen;       /* increment each time we get FILE_SHARED */
        u32 i_rdcache_gen;      /* incremented each time we get FILE_CACHE. */
@@ -506,7 +506,7 @@ static inline int __ceph_caps_dirty(struct ceph_inode_info *ci)
 {
        return ci->i_dirty_caps | ci->i_flushing_caps;
 }
-extern void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask);
+extern int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask);
 
 extern int ceph_caps_revoking(struct ceph_inode_info *ci, int mask);
 extern int __ceph_caps_used(struct ceph_inode_info *ci);
index 8c9eba6ef9df4999ce4894893900984d943695e5..f2b628696180e93d69bae9c72464ed4b795d694e 100644 (file)
@@ -703,6 +703,7 @@ int ceph_setxattr(struct dentry *dentry, const char *name,
        struct ceph_inode_xattr *xattr = NULL;
        int issued;
        int required_blob_size;
+       int dirty;
 
        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;
@@ -763,11 +764,12 @@ retry:
        dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
        err = __set_xattr(ci, newname, name_len, newval,
                          val_len, 1, 1, 1, &xattr);
-       __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
+       dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
        ci->i_xattrs.dirty = true;
        inode->i_ctime = CURRENT_TIME;
        spin_unlock(&inode->i_lock);
-
+       if (dirty)
+               __mark_inode_dirty(inode, dirty);
        return err;
 
 do_sync:
@@ -810,6 +812,7 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
        struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
        int issued;
        int err;
+       int dirty;
 
        if (ceph_snap(inode) != CEPH_NOSNAP)
                return -EROFS;
@@ -833,12 +836,13 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
                goto do_sync;
 
        err = __remove_xattr_by_name(ceph_inode(inode), name);
-       __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
+       dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
        ci->i_xattrs.dirty = true;
        inode->i_ctime = CURRENT_TIME;
 
        spin_unlock(&inode->i_lock);
-
+       if (dirty)
+               __mark_inode_dirty(inode, dirty);
        return err;
 do_sync:
        spin_unlock(&inode->i_lock);
index 23d43cde4306042407dcd3df76d78ab81b793db7..1b2e180b018dd01e9d65041c6eb8cc288974b2c3 100644 (file)
@@ -277,6 +277,7 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen,
 
        for (i = 0, j = 0; i < srclen; j++) {
                src_char = source[i];
+               charlen = 1;
                switch (src_char) {
                case 0:
                        put_unaligned(0, &target[j]);
@@ -316,16 +317,13 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen,
                                dst_char = cpu_to_le16(0x003f);
                                charlen = 1;
                        }
-                       /*
-                        * character may take more than one byte in the source
-                        * string, but will take exactly two bytes in the
-                        * target string
-                        */
-                       i += charlen;
-                       continue;
                }
+               /*
+                * character may take more than one byte in the source string,
+                * but will take exactly two bytes in the target string
+                */
+               i += charlen;
                put_unaligned(dst_char, &target[j]);
-               i++; /* move to next char in source string */
        }
 
 ctoUCS_out:
index 4bc862a80efa5a722ab8f6088a95c30b040fcd7b..277262a8e82f282d4a32b0ddd4a244bc55e99472 100644 (file)
@@ -274,7 +274,8 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
        char *data_area_of_target;
        char *data_area_of_buf2;
        int remaining;
-       __u16 byte_count, total_data_size, total_in_buf, total_in_buf2;
+       unsigned int byte_count, total_in_buf;
+       __u16 total_data_size, total_in_buf2;
 
        total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
 
@@ -287,7 +288,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
        remaining = total_data_size - total_in_buf;
 
        if (remaining < 0)
-               return -EINVAL;
+               return -EPROTO;
 
        if (remaining == 0) /* nothing to do, ignore */
                return 0;
@@ -308,20 +309,29 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
        data_area_of_target += total_in_buf;
 
        /* copy second buffer into end of first buffer */
-       memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2);
        total_in_buf += total_in_buf2;
+       /* is the result too big for the field? */
+       if (total_in_buf > USHRT_MAX)
+               return -EPROTO;
        put_unaligned_le16(total_in_buf, &pSMBt->t2_rsp.DataCount);
+
+       /* fix up the BCC */
        byte_count = get_bcc_le(pTargetSMB);
        byte_count += total_in_buf2;
+       /* is the result too big for the field? */
+       if (byte_count > USHRT_MAX)
+               return -EPROTO;
        put_bcc_le(byte_count, pTargetSMB);
 
        byte_count = pTargetSMB->smb_buf_length;
        byte_count += total_in_buf2;
-
-       /* BB also add check that we are not beyond maximum buffer size */
-
+       /* don't allow buffer to overflow */
+       if (byte_count > CIFSMaxBufSize)
+               return -ENOBUFS;
        pTargetSMB->smb_buf_length = byte_count;
 
+       memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2);
+
        if (remaining == total_in_buf2) {
                cFYI(1, "found the last secondary response");
                return 0; /* we are done */
@@ -607,59 +617,63 @@ incomplete_rcv:
                list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
                        mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
 
-                       if ((mid_entry->mid == smb_buffer->Mid) &&
-                           (mid_entry->midState == MID_REQUEST_SUBMITTED) &&
-                           (mid_entry->command == smb_buffer->Command)) {
-                               if (length == 0 &&
-                                  check2ndT2(smb_buffer, server->maxBuf) > 0) {
-                                       /* We have a multipart transact2 resp */
-                                       isMultiRsp = true;
-                                       if (mid_entry->resp_buf) {
-                                               /* merge response - fix up 1st*/
-                                               if (coalesce_t2(smb_buffer,
-                                                       mid_entry->resp_buf)) {
-                                                       mid_entry->multiRsp =
-                                                                true;
-                                                       break;
-                                               } else {
-                                                       /* all parts received */
-                                                       mid_entry->multiEnd =
-                                                                true;
-                                                       goto multi_t2_fnd;
-                                               }
+                       if (mid_entry->mid != smb_buffer->Mid ||
+                           mid_entry->midState != MID_REQUEST_SUBMITTED ||
+                           mid_entry->command != smb_buffer->Command) {
+                               mid_entry = NULL;
+                               continue;
+                       }
+
+                       if (length == 0 &&
+                           check2ndT2(smb_buffer, server->maxBuf) > 0) {
+                               /* We have a multipart transact2 resp */
+                               isMultiRsp = true;
+                               if (mid_entry->resp_buf) {
+                                       /* merge response - fix up 1st*/
+                                       length = coalesce_t2(smb_buffer,
+                                                       mid_entry->resp_buf);
+                                       if (length > 0) {
+                                               length = 0;
+                                               mid_entry->multiRsp = true;
+                                               break;
                                        } else {
-                                               if (!isLargeBuf) {
-                                                       cERROR(1, "1st trans2 resp needs bigbuf");
-                                       /* BB maybe we can fix this up,  switch
-                                          to already allocated large buffer? */
-                                               } else {
-                                                       /* Have first buffer */
-                                                       mid_entry->resp_buf =
-                                                                smb_buffer;
-                                                       mid_entry->largeBuf =
-                                                                true;
-                                                       bigbuf = NULL;
-                                               }
+                                               /* all parts received or
+                                                * packet is malformed
+                                                */
+                                               mid_entry->multiEnd = true;
+                                               goto multi_t2_fnd;
+                                       }
+                               } else {
+                                       if (!isLargeBuf) {
+                                               /*
+                                                * FIXME: switch to already
+                                                *        allocated largebuf?
+                                                */
+                                               cERROR(1, "1st trans2 resp "
+                                                         "needs bigbuf");
+                                       } else {
+                                               /* Have first buffer */
+                                               mid_entry->resp_buf =
+                                                        smb_buffer;
+                                               mid_entry->largeBuf = true;
+                                               bigbuf = NULL;
                                        }
-                                       break;
                                }
-                               mid_entry->resp_buf = smb_buffer;
-                               mid_entry->largeBuf = isLargeBuf;
+                               break;
+                       }
+                       mid_entry->resp_buf = smb_buffer;
+                       mid_entry->largeBuf = isLargeBuf;
 multi_t2_fnd:
-                               if (length == 0)
-                                       mid_entry->midState =
-                                                       MID_RESPONSE_RECEIVED;
-                               else
-                                       mid_entry->midState =
-                                                       MID_RESPONSE_MALFORMED;
+                       if (length == 0)
+                               mid_entry->midState = MID_RESPONSE_RECEIVED;
+                       else
+                               mid_entry->midState = MID_RESPONSE_MALFORMED;
 #ifdef CONFIG_CIFS_STATS2
-                               mid_entry->when_received = jiffies;
+                       mid_entry->when_received = jiffies;
 #endif
-                               list_del_init(&mid_entry->qhead);
-                               mid_entry->callback(mid_entry);
-                               break;
-                       }
-                       mid_entry = NULL;
+                       list_del_init(&mid_entry->qhead);
+                       mid_entry->callback(mid_entry);
+                       break;
                }
                spin_unlock(&GlobalMid_Lock);
 
@@ -2659,6 +2673,11 @@ is_path_accessible(int xid, struct cifsTconInfo *tcon,
                              0 /* not legacy */, cifs_sb->local_nls,
                              cifs_sb->mnt_cifs_flags &
                                CIFS_MOUNT_MAP_SPECIAL_CHR);
+
+       if (rc == -EOPNOTSUPP || rc == -EINVAL)
+               rc = SMBQueryInformation(xid, tcon, full_path, pfile_info,
+                               cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
+                                 CIFS_MOUNT_MAP_SPECIAL_CHR);
        kfree(pfile_info);
        return rc;
 }
index f6728eb6f4b94774431136a8700ce9e96b4d2940..645114ad0a10031e08ad9beaa815a720b9fe50b0 100644 (file)
@@ -276,7 +276,7 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
 }
 
 static void
-decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses,
+decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
                      const struct nls_table *nls_cp)
 {
        int len;
@@ -284,19 +284,6 @@ decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses,
 
        cFYI(1, "bleft %d", bleft);
 
-       /*
-        * Windows servers do not always double null terminate their final
-        * Unicode string. Check to see if there are an uneven number of bytes
-        * left. If so, then add an extra NULL pad byte to the end of the
-        * response.
-        *
-        * See section 2.7.2 in "Implementing CIFS" for details
-        */
-       if (bleft % 2) {
-               data[bleft] = 0;
-               ++bleft;
-       }
-
        kfree(ses->serverOS);
        ses->serverOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp);
        cFYI(1, "serverOS=%s", ses->serverOS);
@@ -929,7 +916,9 @@ ssetup_ntlmssp_authenticate:
        }
 
        /* BB check if Unicode and decode strings */
-       if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
+       if (bytes_remaining == 0) {
+               /* no string area to decode, do nothing */
+       } else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
                /* unicode string area must be word-aligned */
                if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) {
                        ++bcc_ptr;
index 3313dd19f543841d7e4689cfd18da028e4522892..9a37a9b6de3a23f026f6dc0d9c2906e07b1f8bf0 100644 (file)
@@ -53,11 +53,14 @@ DEFINE_SPINLOCK(configfs_dirent_lock);
 static void configfs_d_iput(struct dentry * dentry,
                            struct inode * inode)
 {
-       struct configfs_dirent * sd = dentry->d_fsdata;
+       struct configfs_dirent *sd = dentry->d_fsdata;
 
        if (sd) {
                BUG_ON(sd->s_dentry != dentry);
+               /* Coordinate with configfs_readdir */
+               spin_lock(&configfs_dirent_lock);
                sd->s_dentry = NULL;
+               spin_unlock(&configfs_dirent_lock);
                configfs_put(sd);
        }
        iput(inode);
@@ -689,7 +692,8 @@ static int create_default_group(struct config_group *parent_group,
                        sd = child->d_fsdata;
                        sd->s_type |= CONFIGFS_USET_DEFAULT;
                } else {
-                       d_delete(child);
+                       BUG_ON(child->d_inode);
+                       d_drop(child);
                        dput(child);
                }
        }
@@ -1545,7 +1549,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
        struct configfs_dirent * parent_sd = dentry->d_fsdata;
        struct configfs_dirent *cursor = filp->private_data;
        struct list_head *p, *q = &cursor->s_sibling;
-       ino_t ino;
+       ino_t ino = 0;
        int i = filp->f_pos;
 
        switch (i) {
@@ -1573,6 +1577,7 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
                                struct configfs_dirent *next;
                                const char * name;
                                int len;
+                               struct inode *inode = NULL;
 
                                next = list_entry(p, struct configfs_dirent,
                                                   s_sibling);
@@ -1581,9 +1586,28 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
 
                                name = configfs_get_name(next);
                                len = strlen(name);
-                               if (next->s_dentry)
-                                       ino = next->s_dentry->d_inode->i_ino;
-                               else
+
+                               /*
+                                * We'll have a dentry and an inode for
+                                * PINNED items and for open attribute
+                                * files.  We lock here to prevent a race
+                                * with configfs_d_iput() clearing
+                                * s_dentry before calling iput().
+                                *
+                                * Why do we go to the trouble?  If
+                                * someone has an attribute file open,
+                                * the inode number should match until
+                                * they close it.  Beyond that, we don't
+                                * care.
+                                */
+                               spin_lock(&configfs_dirent_lock);
+                               dentry = next->s_dentry;
+                               if (dentry)
+                                       inode = dentry->d_inode;
+                               if (inode)
+                                       ino = inode->i_ino;
+                               spin_unlock(&configfs_dirent_lock);
+                               if (!inode)
                                        ino = iunique(configfs_sb, 2);
 
                                if (filldir(dirent, name, len, filp->f_pos, ino,
@@ -1683,7 +1707,8 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
                err = configfs_attach_group(sd->s_element, &group->cg_item,
                                            dentry);
                if (err) {
-                       d_delete(dentry);
+                       BUG_ON(dentry->d_inode);
+                       d_drop(dentry);
                        dput(dentry);
                } else {
                        spin_lock(&configfs_dirent_lock);
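
The configfs hunks above close a race on sd->s_dentry: configfs_d_iput() now clears the pointer under configfs_dirent_lock, and configfs_readdir() samples the dentry and its inode number under the same lock, falling back to iunique() when no inode exists. A minimal userspace sketch of that writer-clears / reader-samples pattern, with invented names (record, record_lock) and a pthread mutex standing in for the spinlock:

	#include <pthread.h>
	#include <stddef.h>

	struct object { unsigned long id; };
	struct record { struct object *obj; };

	static pthread_mutex_t record_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Writer side (compare configfs_d_iput): clear the pointer under the lock. */
	static void record_detach(struct record *r)
	{
		pthread_mutex_lock(&record_lock);
		r->obj = NULL;
		pthread_mutex_unlock(&record_lock);
	}

	/* Reader side (compare configfs_readdir): read pointer and id in one
	 * critical section, so the writer cannot clear and release the object
	 * between the two loads. */
	static unsigned long record_sample_id(struct record *r, unsigned long fallback)
	{
		unsigned long id = fallback;

		pthread_mutex_lock(&record_lock);
		if (r->obj)
			id = r->obj->id;
		pthread_mutex_unlock(&record_lock);
		return id;
	}
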
index c6ba49bd95b34136d04bae30b83342933c35ce7a..b32eb29a4e6ff4f8dd892b1af9ea74d712f148ee 100644 (file)
@@ -174,7 +174,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
                if (!inode)
                        return 0;
 
-               if (nd->flags & LOOKUP_RCU)
+               if (nd && (nd->flags & LOOKUP_RCU))
                        return -ECHILD;
 
                fc = get_fuse_conn(inode);
index 0c39dc3ef7d71473a0a951612fd54e82a63cf3a2..56bd15c5bf6cffee6cdd2e36c379eeb42c22d604 100644 (file)
@@ -1,7 +1,6 @@
 config HPFS_FS
        tristate "OS/2 HPFS file system support"
        depends on BLOCK
-       depends on BROKEN || !PREEMPT
        help
          OS/2 is IBM's operating system for PC's, the same as Warp, and HPFS
          is the file system used for organizing files on OS/2 hard disk
index 5503e2c28910102bb0a2a22b97d8e2a7bf20cbad..7a5eb2c718c854206d6db419abe4ce7bc61d12c7 100644 (file)
@@ -8,8 +8,6 @@
 
 #include "hpfs_fn.h"
 
-static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec);
-
 /*
  * Check if a sector is allocated in bitmap
  * This is really slow. Turned on only if chk==2
@@ -18,9 +16,9 @@ static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec);
 static int chk_if_allocated(struct super_block *s, secno sec, char *msg)
 {
        struct quad_buffer_head qbh;
-       unsigned *bmp;
+       u32 *bmp;
        if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail;
-       if ((bmp[(sec & 0x3fff) >> 5] >> (sec & 0x1f)) & 1) {
+       if ((cpu_to_le32(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) {
                hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec);
                goto fail1;
        }
@@ -28,7 +26,7 @@ static int chk_if_allocated(struct super_block *s, secno sec, char *msg)
        if (sec >= hpfs_sb(s)->sb_dirband_start && sec < hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) {
                unsigned ssec = (sec - hpfs_sb(s)->sb_dirband_start) / 4;
                if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto fail;
-               if ((bmp[ssec >> 5] >> (ssec & 0x1f)) & 1) {
+               if ((le32_to_cpu(bmp[ssec >> 5]) >> (ssec & 0x1f)) & 1) {
                        hpfs_error(s, "sector '%s' - %08x not allocated in directory bitmap", msg, sec);
                        goto fail1;
                }
@@ -75,7 +73,6 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne
                hpfs_error(s, "Bad allocation size: %d", n);
                return 0;
        }
-       lock_super(s);
        if (bs != ~0x3fff) {
                if (!(bmp = hpfs_map_bitmap(s, near >> 14, &qbh, "aib"))) goto uls;
        } else {
@@ -85,10 +82,6 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne
                ret = bs + nr;
                goto rt;
        }
-       /*if (!tstbits(bmp, nr + n, n + forward)) {
-               ret = bs + nr + n;
-               goto rt;
-       }*/
        q = nr + n; b = 0;
        while ((a = tstbits(bmp, q, n + forward)) != 0) {
                q += a;
@@ -105,14 +98,14 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne
                goto rt;
        }
        nr >>= 5;
-       /*for (i = nr + 1; i != nr; i++, i &= 0x1ff) {*/
+       /*for (i = nr + 1; i != nr; i++, i &= 0x1ff) */
        i = nr;
        do {
-               if (!bmp[i]) goto cont;
-               if (n + forward >= 0x3f && bmp[i] != -1) goto cont;
+               if (!le32_to_cpu(bmp[i])) goto cont;
+               if (n + forward >= 0x3f && le32_to_cpu(bmp[i]) != 0xffffffff) goto cont;
                q = i<<5;
                if (i > 0) {
-                       unsigned k = bmp[i-1];
+                       unsigned k = le32_to_cpu(bmp[i-1]);
                        while (k & 0x80000000) {
                                q--; k <<= 1;
                        }
@@ -132,18 +125,17 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne
        } while (i != nr);
        rt:
        if (ret) {
-               if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (bmp[(ret & 0x3fff) >> 5] | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) {
+               if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (le32_to_cpu(bmp[(ret & 0x3fff) >> 5]) | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) {
                        hpfs_error(s, "Allocation doesn't work! Wanted %d, allocated at %08x", n, ret);
                        ret = 0;
                        goto b;
                }
-               bmp[(ret & 0x3fff) >> 5] &= ~(((1 << n) - 1) << (ret & 0x1f));
+               bmp[(ret & 0x3fff) >> 5] &= cpu_to_le32(~(((1 << n) - 1) << (ret & 0x1f)));
                hpfs_mark_4buffers_dirty(&qbh);
        }
        b:
        hpfs_brelse4(&qbh);
        uls:
-       unlock_super(s);
        return ret;
 }
 
@@ -155,7 +147,7 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne
  *                             sectors
  */
 
-secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward, int lock)
+secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward)
 {
        secno sec;
        int i;
@@ -167,7 +159,6 @@ secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forwa
                forward = -forward;
                f_p = 1;
        }
-       if (lock) hpfs_lock_creation(s);
        n_bmps = (sbi->sb_fs_size + 0x4000 - 1) >> 14;
        if (near && near < sbi->sb_fs_size) {
                if ((sec = alloc_in_bmp(s, near, n, f_p ? forward : forward/4))) goto ret;
@@ -214,18 +205,17 @@ secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forwa
        ret:
        if (sec && f_p) {
                for (i = 0; i < forward; i++) {
-                       if (!hpfs_alloc_if_possible_nolock(s, sec + i + 1)) {
+                       if (!hpfs_alloc_if_possible(s, sec + i + 1)) {
                                hpfs_error(s, "Prealloc doesn't work! Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i);
                                sec = 0;
                                break;
                        }
                }
        }
-       if (lock) hpfs_unlock_creation(s);
        return sec;
 }
 
-static secno alloc_in_dirband(struct super_block *s, secno near, int lock)
+static secno alloc_in_dirband(struct super_block *s, secno near)
 {
        unsigned nr = near;
        secno sec;
@@ -236,49 +226,35 @@ static secno alloc_in_dirband(struct super_block *s, secno near, int lock)
                nr = sbi->sb_dirband_start + sbi->sb_dirband_size - 4;
        nr -= sbi->sb_dirband_start;
        nr >>= 2;
-       if (lock) hpfs_lock_creation(s);
        sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0);
-       if (lock) hpfs_unlock_creation(s);
        if (!sec) return 0;
        return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start;
 }
 
 /* Alloc sector if it's free */
 
-static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec)
+int hpfs_alloc_if_possible(struct super_block *s, secno sec)
 {
        struct quad_buffer_head qbh;
-       unsigned *bmp;
-       lock_super(s);
+       u32 *bmp;
        if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end;
-       if (bmp[(sec & 0x3fff) >> 5] & (1 << (sec & 0x1f))) {
-               bmp[(sec & 0x3fff) >> 5] &= ~(1 << (sec & 0x1f));
+       if (le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) & (1 << (sec & 0x1f))) {
+               bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f)));
                hpfs_mark_4buffers_dirty(&qbh);
                hpfs_brelse4(&qbh);
-               unlock_super(s);
                return 1;
        }
        hpfs_brelse4(&qbh);
        end:
-       unlock_super(s);
        return 0;
 }
 
-int hpfs_alloc_if_possible(struct super_block *s, secno sec)
-{
-       int r;
-       hpfs_lock_creation(s);
-       r = hpfs_alloc_if_possible_nolock(s, sec);
-       hpfs_unlock_creation(s);
-       return r;
-}
-
 /* Free sectors in bitmaps */
 
 void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
 {
        struct quad_buffer_head qbh;
-       unsigned *bmp;
+       u32 *bmp;
        struct hpfs_sb_info *sbi = hpfs_sb(s);
        /*printk("2 - ");*/
        if (!n) return;
@@ -286,26 +262,22 @@ void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
                hpfs_error(s, "Trying to free reserved sector %08x", sec);
                return;
        }
-       lock_super(s);
        sbi->sb_max_fwd_alloc += n > 0xffff ? 0xffff : n;
        if (sbi->sb_max_fwd_alloc > 0xffffff) sbi->sb_max_fwd_alloc = 0xffffff;
        new_map:
        if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "free"))) {
-               unlock_super(s);
                return;
        }       
        new_tst:
-       if ((bmp[(sec & 0x3fff) >> 5] >> (sec & 0x1f) & 1)) {
+       if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f) & 1)) {
                hpfs_error(s, "sector %08x not allocated", sec);
                hpfs_brelse4(&qbh);
-               unlock_super(s);
                return;
        }
-       bmp[(sec & 0x3fff) >> 5] |= 1 << (sec & 0x1f);
+       bmp[(sec & 0x3fff) >> 5] |= cpu_to_le32(1 << (sec & 0x1f));
        if (!--n) {
                hpfs_mark_4buffers_dirty(&qbh);
                hpfs_brelse4(&qbh);
-               unlock_super(s);
                return;
        }       
        if (!(++sec & 0x3fff)) {
@@ -327,13 +299,13 @@ int hpfs_check_free_dnodes(struct super_block *s, int n)
        int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14;
        int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff;
        int i, j;
-       unsigned *bmp;
+       u32 *bmp;
        struct quad_buffer_head qbh;
        if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
                for (j = 0; j < 512; j++) {
                        unsigned k;
-                       if (!bmp[j]) continue;
-                       for (k = bmp[j]; k; k >>= 1) if (k & 1) if (!--n) {
+                       if (!le32_to_cpu(bmp[j])) continue;
+                       for (k = le32_to_cpu(bmp[j]); k; k >>= 1) if (k & 1) if (!--n) {
                                hpfs_brelse4(&qbh);
                                return 0;
                        }
@@ -352,10 +324,10 @@ int hpfs_check_free_dnodes(struct super_block *s, int n)
        chk_bmp:
        if (bmp) {
                for (j = 0; j < 512; j++) {
-                       unsigned k;
-                       if (!bmp[j]) continue;
+                       u32 k;
+                       if (!le32_to_cpu(bmp[j])) continue;
                        for (k = 0xf; k; k <<= 4)
-                               if ((bmp[j] & k) == k) {
+                               if ((le32_to_cpu(bmp[j]) & k) == k) {
                                        if (!--n) {
                                                hpfs_brelse4(&qbh);
                                                return 0;
@@ -379,44 +351,40 @@ void hpfs_free_dnode(struct super_block *s, dnode_secno dno)
                hpfs_free_sectors(s, dno, 4);
        } else {
                struct quad_buffer_head qbh;
-               unsigned *bmp;
+               u32 *bmp;
                unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4;
-               lock_super(s);
                if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
-                       unlock_super(s);
                        return;
                }
-               bmp[ssec >> 5] |= 1 << (ssec & 0x1f);
+               bmp[ssec >> 5] |= cpu_to_le32(1 << (ssec & 0x1f));
                hpfs_mark_4buffers_dirty(&qbh);
                hpfs_brelse4(&qbh);
-               unlock_super(s);
        }
 }
 
 struct dnode *hpfs_alloc_dnode(struct super_block *s, secno near,
-                        dnode_secno *dno, struct quad_buffer_head *qbh,
-                        int lock)
+                        dnode_secno *dno, struct quad_buffer_head *qbh)
 {
        struct dnode *d;
        if (hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_dmap) > FREE_DNODES_ADD) {
-               if (!(*dno = alloc_in_dirband(s, near, lock)))
-                       if (!(*dno = hpfs_alloc_sector(s, near, 4, 0, lock))) return NULL;
+               if (!(*dno = alloc_in_dirband(s, near)))
+                       if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) return NULL;
        } else {
-               if (!(*dno = hpfs_alloc_sector(s, near, 4, 0, lock)))
-                       if (!(*dno = alloc_in_dirband(s, near, lock))) return NULL;
+               if (!(*dno = hpfs_alloc_sector(s, near, 4, 0)))
+                       if (!(*dno = alloc_in_dirband(s, near))) return NULL;
        }
        if (!(d = hpfs_get_4sectors(s, *dno, qbh))) {
                hpfs_free_dnode(s, *dno);
                return NULL;
        }
        memset(d, 0, 2048);
-       d->magic = DNODE_MAGIC;
-       d->first_free = 52;
+       d->magic = cpu_to_le32(DNODE_MAGIC);
+       d->first_free = cpu_to_le32(52);
        d->dirent[0] = 32;
        d->dirent[2] = 8;
        d->dirent[30] = 1;
        d->dirent[31] = 255;
-       d->self = *dno;
+       d->self = cpu_to_le32(*dno);
        return d;
 }
 
@@ -424,16 +392,16 @@ struct fnode *hpfs_alloc_fnode(struct super_block *s, secno near, fnode_secno *f
                          struct buffer_head **bh)
 {
        struct fnode *f;
-       if (!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD, 1))) return NULL;
+       if (!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD))) return NULL;
        if (!(f = hpfs_get_sector(s, *fno, bh))) {
                hpfs_free_sectors(s, *fno, 1);
                return NULL;
        }       
        memset(f, 0, 512);
-       f->magic = FNODE_MAGIC;
-       f->ea_offs = 0xc4;
+       f->magic = cpu_to_le32(FNODE_MAGIC);
+       f->ea_offs = cpu_to_le16(0xc4);
        f->btree.n_free_nodes = 8;
-       f->btree.first_free = 8;
+       f->btree.first_free = cpu_to_le16(8);
        return f;
 }
 
@@ -441,16 +409,16 @@ struct anode *hpfs_alloc_anode(struct super_block *s, secno near, anode_secno *a
                          struct buffer_head **bh)
 {
        struct anode *a;
-       if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD, 1))) return NULL;
+       if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD))) return NULL;
        if (!(a = hpfs_get_sector(s, *ano, bh))) {
                hpfs_free_sectors(s, *ano, 1);
                return NULL;
        }
        memset(a, 0, 512);
-       a->magic = ANODE_MAGIC;
-       a->self = *ano;
+       a->magic = cpu_to_le32(ANODE_MAGIC);
+       a->self = cpu_to_le32(*ano);
        a->btree.n_free_nodes = 40;
        a->btree.n_used_nodes = 0;
-       a->btree.first_free = 8;
+       a->btree.first_free = cpu_to_le16(8);
        return a;
 }
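
Two patterns from the alloc.c hunks above repeat through the rest of the HPFS diff: the per-operation lock_super()/hpfs_lock_creation() calls are removed, and every access to an on-disk bitmap word goes through le32_to_cpu()/cpu_to_le32(), i.e. the bitmap is treated as little-endian regardless of host byte order. A hedged, userspace-only sketch of the bitmap read-modify-write (swap32, le32_load, le32_store and bitmap_take are invented names; the kernel uses its own helpers):

	#include <stdint.h>

	static uint32_t swap32(uint32_t v)
	{
		return (v >> 24) | ((v >> 8) & 0xff00) |
		       ((v << 8) & 0xff0000) | (v << 24);
	}

	/* Byte-swap only on big-endian hosts, mimicking le32_to_cpu()/cpu_to_le32(). */
	#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	#define le32_load(v)  swap32(v)
	#define le32_store(v) swap32(v)
	#else
	#define le32_load(v)  (v)
	#define le32_store(v) (v)
	#endif

	/* HPFS bitmaps use 1 = free.  Test the bit in CPU order, then clear it
	 * by ANDing with the mask converted back to on-disk byte order (bitwise
	 * NOT and AND commute with a byte swap), as alloc_in_bmp() and
	 * hpfs_alloc_if_possible() do above. */
	static int bitmap_take(uint32_t *bmp, unsigned int sec)
	{
		if (!((le32_load(bmp[sec >> 5]) >> (sec & 0x1f)) & 1))
			return 0;	/* already allocated */
		bmp[sec >> 5] &= le32_store(~(1u << (sec & 0x1f)));
		return 1;
	}
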
index 6a2f04bf3df007249047523e10a098fd2b43c603..08b503e8ed29ec610a098cb9658e1a2ecaa1779c 100644 (file)
@@ -22,8 +22,8 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
        if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1;
        if (btree->internal) {
                for (i = 0; i < btree->n_used_nodes; i++)
-                       if (btree->u.internal[i].file_secno > sec) {
-                               a = btree->u.internal[i].down;
+                       if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {
+                               a = le32_to_cpu(btree->u.internal[i].down);
                                brelse(bh);
                                if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
                                btree = &anode->btree;
@@ -34,18 +34,18 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
                return -1;
        }
        for (i = 0; i < btree->n_used_nodes; i++)
-               if (btree->u.external[i].file_secno <= sec &&
-                   btree->u.external[i].file_secno + btree->u.external[i].length > sec) {
-                       a = btree->u.external[i].disk_secno + sec - btree->u.external[i].file_secno;
+               if (le32_to_cpu(btree->u.external[i].file_secno) <= sec &&
+                   le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) {
+                       a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno);
                        if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) {
                                brelse(bh);
                                return -1;
                        }
                        if (inode) {
                                struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
-                               hpfs_inode->i_file_sec = btree->u.external[i].file_secno;
-                               hpfs_inode->i_disk_sec = btree->u.external[i].disk_secno;
-                               hpfs_inode->i_n_secs = btree->u.external[i].length;
+                               hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno);
+                               hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno);
+                               hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length);
                        }
                        brelse(bh);
                        return a;
@@ -83,8 +83,8 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
                return -1;
        }
        if (btree->internal) {
-               a = btree->u.internal[n].down;
-               btree->u.internal[n].file_secno = -1;
+               a = le32_to_cpu(btree->u.internal[n].down);
+               btree->u.internal[n].file_secno = cpu_to_le32(-1);
                mark_buffer_dirty(bh);
                brelse(bh);
                if (hpfs_sb(s)->sb_chk)
@@ -94,15 +94,15 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
                goto go_down;
        }
        if (n >= 0) {
-               if (btree->u.external[n].file_secno + btree->u.external[n].length != fsecno) {
+               if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) {
                        hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x",
-                               btree->u.external[n].file_secno + btree->u.external[n].length, fsecno,
+                               le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno,
                                fnod?'f':'a', node);
                        brelse(bh);
                        return -1;
                }
-               if (hpfs_alloc_if_possible(s, se = btree->u.external[n].disk_secno + btree->u.external[n].length)) {
-                       btree->u.external[n].length++;
+               if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) {
+                       btree->u.external[n].length = cpu_to_le32(le32_to_cpu(btree->u.external[n].length) + 1);
                        mark_buffer_dirty(bh);
                        brelse(bh);
                        return se;
@@ -115,20 +115,20 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
                }
                se = !fnod ? node : (node + 16384) & ~16383;
        }       
-       if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M, 1))) {
+       if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M))) {
                brelse(bh);
                return -1;
        }
-       fs = n < 0 ? 0 : btree->u.external[n].file_secno + btree->u.external[n].length;
+       fs = n < 0 ? 0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length);
        if (!btree->n_free_nodes) {
-               up = a != node ? anode->up : -1;
+               up = a != node ? le32_to_cpu(anode->up) : -1;
                if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) {
                        brelse(bh);
                        hpfs_free_sectors(s, se, 1);
                        return -1;
                }
                if (a == node && fnod) {
-                       anode->up = node;
+                       anode->up = cpu_to_le32(node);
                        anode->btree.fnode_parent = 1;
                        anode->btree.n_used_nodes = btree->n_used_nodes;
                        anode->btree.first_free = btree->first_free;
@@ -137,9 +137,9 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
                        btree->internal = 1;
                        btree->n_free_nodes = 11;
                        btree->n_used_nodes = 1;
-                       btree->first_free = (char *)&(btree->u.internal[1]) - (char *)btree;
-                       btree->u.internal[0].file_secno = -1;
-                       btree->u.internal[0].down = na;
+                       btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree);
+                       btree->u.internal[0].file_secno = cpu_to_le32(-1);
+                       btree->u.internal[0].down = cpu_to_le32(na);
                        mark_buffer_dirty(bh);
                } else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) {
                        brelse(bh);
@@ -153,15 +153,15 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
                btree = &anode->btree;
        }
        btree->n_free_nodes--; n = btree->n_used_nodes++;
-       btree->first_free += 12;
-       btree->u.external[n].disk_secno = se;
-       btree->u.external[n].file_secno = fs;
-       btree->u.external[n].length = 1;
+       btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 12);
+       btree->u.external[n].disk_secno = cpu_to_le32(se);
+       btree->u.external[n].file_secno = cpu_to_le32(fs);
+       btree->u.external[n].length = cpu_to_le32(1);
        mark_buffer_dirty(bh);
        brelse(bh);
        if ((a == node && fnod) || na == -1) return se;
        c2 = 0;
-       while (up != -1) {
+       while (up != (anode_secno)-1) {
                struct anode *new_anode;
                if (hpfs_sb(s)->sb_chk)
                        if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1;
@@ -174,47 +174,47 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
                }
                if (btree->n_free_nodes) {
                        btree->n_free_nodes--; n = btree->n_used_nodes++;
-                       btree->first_free += 8;
-                       btree->u.internal[n].file_secno = -1;
-                       btree->u.internal[n].down = na;
-                       btree->u.internal[n-1].file_secno = fs;
+                       btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 8);
+                       btree->u.internal[n].file_secno = cpu_to_le32(-1);
+                       btree->u.internal[n].down = cpu_to_le32(na);
+                       btree->u.internal[n-1].file_secno = cpu_to_le32(fs);
                        mark_buffer_dirty(bh);
                        brelse(bh);
                        brelse(bh2);
                        hpfs_free_sectors(s, ra, 1);
                        if ((anode = hpfs_map_anode(s, na, &bh))) {
-                               anode->up = up;
+                               anode->up = cpu_to_le32(up);
                                anode->btree.fnode_parent = up == node && fnod;
                                mark_buffer_dirty(bh);
                                brelse(bh);
                        }
                        return se;
                }
-               up = up != node ? anode->up : -1;
-               btree->u.internal[btree->n_used_nodes - 1].file_secno = /*fs*/-1;
+               up = up != node ? le32_to_cpu(anode->up) : -1;
+               btree->u.internal[btree->n_used_nodes - 1].file_secno = cpu_to_le32(/*fs*/-1);
                mark_buffer_dirty(bh);
                brelse(bh);
                a = na;
                if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) {
                        anode = new_anode;
-                       /*anode->up = up != -1 ? up : ra;*/
+                       /*anode->up = cpu_to_le32(up != -1 ? up : ra);*/
                        anode->btree.internal = 1;
                        anode->btree.n_used_nodes = 1;
                        anode->btree.n_free_nodes = 59;
-                       anode->btree.first_free = 16;
-                       anode->btree.u.internal[0].down = a;
-                       anode->btree.u.internal[0].file_secno = -1;
+                       anode->btree.first_free = cpu_to_le16(16);
+                       anode->btree.u.internal[0].down = cpu_to_le32(a);
+                       anode->btree.u.internal[0].file_secno = cpu_to_le32(-1);
                        mark_buffer_dirty(bh);
                        brelse(bh);
                        if ((anode = hpfs_map_anode(s, a, &bh))) {
-                               anode->up = na;
+                               anode->up = cpu_to_le32(na);
                                mark_buffer_dirty(bh);
                                brelse(bh);
                        }
                } else na = a;
        }
        if ((anode = hpfs_map_anode(s, na, &bh))) {
-               anode->up = node;
+               anode->up = cpu_to_le32(node);
                if (fnod) anode->btree.fnode_parent = 1;
                mark_buffer_dirty(bh);
                brelse(bh);
@@ -232,14 +232,14 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
                }
                btree = &fnode->btree;
        }
-       ranode->up = node;
-       memcpy(&ranode->btree, btree, btree->first_free);
+       ranode->up = cpu_to_le32(node);
+       memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
        if (fnod) ranode->btree.fnode_parent = 1;
        ranode->btree.n_free_nodes = (ranode->btree.internal ? 60 : 40) - ranode->btree.n_used_nodes;
        if (ranode->btree.internal) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
                struct anode *unode;
-               if ((unode = hpfs_map_anode(s, ranode->u.internal[n].down, &bh1))) {
-                       unode->up = ra;
+               if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
+                       unode->up = cpu_to_le32(ra);
                        unode->btree.fnode_parent = 0;
                        mark_buffer_dirty(bh1);
                        brelse(bh1);
@@ -248,11 +248,11 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
        btree->internal = 1;
        btree->n_free_nodes = fnod ? 10 : 58;
        btree->n_used_nodes = 2;
-       btree->first_free = (char *)&btree->u.internal[2] - (char *)btree;
-       btree->u.internal[0].file_secno = fs;
-       btree->u.internal[0].down = ra;
-       btree->u.internal[1].file_secno = -1;
-       btree->u.internal[1].down = na;
+       btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree);
+       btree->u.internal[0].file_secno = cpu_to_le32(fs);
+       btree->u.internal[0].down = cpu_to_le32(ra);
+       btree->u.internal[1].file_secno = cpu_to_le32(-1);
+       btree->u.internal[1].down = cpu_to_le32(na);
        mark_buffer_dirty(bh);
        brelse(bh);
        mark_buffer_dirty(bh2);
@@ -279,7 +279,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
        go_down:
        d2 = 0;
        while (btree1->internal) {
-               ano = btree1->u.internal[pos].down;
+               ano = le32_to_cpu(btree1->u.internal[pos].down);
                if (level) brelse(bh);
                if (hpfs_sb(s)->sb_chk)
                        if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1"))
@@ -290,7 +290,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
                pos = 0;
        }
        for (i = 0; i < btree1->n_used_nodes; i++)
-               hpfs_free_sectors(s, btree1->u.external[i].disk_secno, btree1->u.external[i].length);
+               hpfs_free_sectors(s, le32_to_cpu(btree1->u.external[i].disk_secno), le32_to_cpu(btree1->u.external[i].length));
        go_up:
        if (!level) return;
        brelse(bh);
@@ -298,13 +298,13 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
                if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return;
        hpfs_free_sectors(s, ano, 1);
        oano = ano;
-       ano = anode->up;
+       ano = le32_to_cpu(anode->up);
        if (--level) {
                if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
                btree1 = &anode->btree;
        } else btree1 = btree;
        for (i = 0; i < btree1->n_used_nodes; i++) {
-               if (btree1->u.internal[i].down == oano) {
+               if (le32_to_cpu(btree1->u.internal[i].down) == oano) {
                        if ((pos = i + 1) < btree1->n_used_nodes)
                                goto go_down;
                        else
@@ -411,7 +411,7 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
                if (fno) {
                        btree->n_free_nodes = 8;
                        btree->n_used_nodes = 0;
-                       btree->first_free = 8;
+                       btree->first_free = cpu_to_le16(8);
                        btree->internal = 0;
                        mark_buffer_dirty(bh);
                } else hpfs_free_sectors(s, f, 1);
@@ -421,22 +421,22 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
        while (btree->internal) {
                nodes = btree->n_used_nodes + btree->n_free_nodes;
                for (i = 0; i < btree->n_used_nodes; i++)
-                       if (btree->u.internal[i].file_secno >= secs) goto f;
+                       if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f;
                brelse(bh);
                hpfs_error(s, "internal btree %08x doesn't end with -1", node);
                return;
                f:
                for (j = i + 1; j < btree->n_used_nodes; j++)
-                       hpfs_ea_remove(s, btree->u.internal[j].down, 1, 0);
+                       hpfs_ea_remove(s, le32_to_cpu(btree->u.internal[j].down), 1, 0);
                btree->n_used_nodes = i + 1;
                btree->n_free_nodes = nodes - btree->n_used_nodes;
-               btree->first_free = 8 + 8 * btree->n_used_nodes;
+               btree->first_free = cpu_to_le16(8 + 8 * btree->n_used_nodes);
                mark_buffer_dirty(bh);
-               if (btree->u.internal[i].file_secno == secs) {
+               if (btree->u.internal[i].file_secno == cpu_to_le32(secs)) {
                        brelse(bh);
                        return;
                }
-               node = btree->u.internal[i].down;
+               node = le32_to_cpu(btree->u.internal[i].down);
                brelse(bh);
                if (hpfs_sb(s)->sb_chk)
                        if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree"))
@@ -446,25 +446,25 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
        }       
        nodes = btree->n_used_nodes + btree->n_free_nodes;
        for (i = 0; i < btree->n_used_nodes; i++)
-               if (btree->u.external[i].file_secno + btree->u.external[i].length >= secs) goto ff;
+               if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) >= secs) goto ff;
        brelse(bh);
        return;
        ff:
-       if (secs <= btree->u.external[i].file_secno) {
+       if (secs <= le32_to_cpu(btree->u.external[i].file_secno)) {
                hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs);
                if (i) i--;
        }
-       else if (btree->u.external[i].file_secno + btree->u.external[i].length > secs) {
-               hpfs_free_sectors(s, btree->u.external[i].disk_secno + secs -
-                       btree->u.external[i].file_secno, btree->u.external[i].length
-                       - secs + btree->u.external[i].file_secno); /* I hope gcc optimizes this :-) */
-               btree->u.external[i].length = secs - btree->u.external[i].file_secno;
+       else if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > secs) {
+               hpfs_free_sectors(s, le32_to_cpu(btree->u.external[i].disk_secno) + secs -
+                       le32_to_cpu(btree->u.external[i].file_secno), le32_to_cpu(btree->u.external[i].length)
+                       - secs + le32_to_cpu(btree->u.external[i].file_secno)); /* I hope gcc optimizes this :-) */
+               btree->u.external[i].length = cpu_to_le32(secs - le32_to_cpu(btree->u.external[i].file_secno));
        }
        for (j = i + 1; j < btree->n_used_nodes; j++)
-               hpfs_free_sectors(s, btree->u.external[j].disk_secno, btree->u.external[j].length);
+               hpfs_free_sectors(s, le32_to_cpu(btree->u.external[j].disk_secno), le32_to_cpu(btree->u.external[j].length));
        btree->n_used_nodes = i + 1;
        btree->n_free_nodes = nodes - btree->n_used_nodes;
-       btree->first_free = 8 + 12 * btree->n_used_nodes;
+       btree->first_free = cpu_to_le16(8 + 12 * btree->n_used_nodes);
        mark_buffer_dirty(bh);
        brelse(bh);
 }
@@ -480,12 +480,12 @@ void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
        struct extended_attribute *ea_end;
        if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
        if (!fnode->dirflag) hpfs_remove_btree(s, &fnode->btree);
-       else hpfs_remove_dtree(s, fnode->u.external[0].disk_secno);
+       else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
        ea_end = fnode_end_ea(fnode);
        for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
                if (ea->indirect)
                        hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea));
-       hpfs_ea_ext_remove(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l);
+       hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l));
        brelse(bh);
        hpfs_free_sectors(s, fno, 1);
 }
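
The anode.c hunks extend the same byte-order discipline to the B+-tree headers: on-disk fields such as file_secno, disk_secno, length and first_free are only ever touched through le16/le32 conversions, so even a simple in-place increment becomes a load-in-CPU-order, modify, store-back-little-endian round trip (see the new cpu_to_le32(le32_to_cpu(...) + 1) lines). A self-contained sketch of that idiom with an invented extent record; load_le32()/store_le32() stand in for the kernel helpers:

	#include <stdint.h>

	/* Invented on-disk record; the real structs live in the HPFS headers. */
	struct extent {
		unsigned char file_secno[4];	/* little-endian on disk */
		unsigned char length[4];	/* little-endian on disk */
	};

	static uint32_t load_le32(const unsigned char *p)
	{
		return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
		       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
	}

	static void store_le32(unsigned char *p, uint32_t v)
	{
		p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
	}

	/* Grow an extent by one sector: never poke the raw bytes directly,
	 * always round-trip through CPU byte order. */
	static void extent_grow(struct extent *e)
	{
		store_le32(e->length, load_le32(e->length) + 1);
	}
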
index 793cb9d943d21a70584958c27fba6b2f97b37f93..9ecde27d1e297ed3d42a742352f469f1bf2dfb9a 100644 (file)
@@ -9,22 +9,6 @@
 #include <linux/slab.h>
 #include "hpfs_fn.h"
 
-void hpfs_lock_creation(struct super_block *s)
-{
-#ifdef DEBUG_LOCKS
-       printk("lock creation\n");
-#endif
-       mutex_lock(&hpfs_sb(s)->hpfs_creation_de);
-}
-
-void hpfs_unlock_creation(struct super_block *s)
-{
-#ifdef DEBUG_LOCKS
-       printk("unlock creation\n");
-#endif
-       mutex_unlock(&hpfs_sb(s)->hpfs_creation_de);
-}
-
 /* Map a sector into a buffer and return pointers to it and to the buffer. */
 
 void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp,
@@ -32,6 +16,8 @@ void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head
 {
        struct buffer_head *bh;
 
+       hpfs_lock_assert(s);
+
        cond_resched();
 
        *bhp = bh = sb_bread(s, secno);
@@ -50,6 +36,8 @@ void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head
        struct buffer_head *bh;
        /*return hpfs_map_sector(s, secno, bhp, 0);*/
 
+       hpfs_lock_assert(s);
+
        cond_resched();
 
        if ((*bhp = bh = sb_getblk(s, secno)) != NULL) {
@@ -70,6 +58,8 @@ void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffe
        struct buffer_head *bh;
        char *data;
 
+       hpfs_lock_assert(s);
+
        cond_resched();
 
        if (secno & 3) {
@@ -125,6 +115,8 @@ void *hpfs_get_4sectors(struct super_block *s, unsigned secno,
 {
        cond_resched();
 
+       hpfs_lock_assert(s);
+
        if (secno & 3) {
                printk("HPFS: hpfs_get_4sectors: unaligned read\n");
                return NULL;
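
The buffer.c hunks drop hpfs_lock_creation()/hpfs_unlock_creation() and instead make every sector-mapping helper call hpfs_lock_assert(s), documenting at the lowest layer that the caller must already hold the filesystem lock. The assert itself is not shown in this section; a hedged sketch of what such a check usually looks like, assuming the per-superblock lock is a mutex (the sbi field name below is an assumption, while mutex_is_locked() and WARN_ON() are existing kernel primitives):

	/* Kernel-context fragment, not standalone: warn if the caller reached
	 * the buffer layer without the assumed per-superblock mutex held. */
	static inline void example_lock_assert(struct super_block *s)
	{
		WARN_ON(!mutex_is_locked(&hpfs_sb(s)->hpfs_mutex));	/* field name assumed */
	}
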
index b3d7c0ddb60971e2cb4217afb0bf22d4683c2988..f46ae025bfb58bc172716efcb66772add1daac43 100644 (file)
@@ -88,9 +88,9 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        hpfs_error(inode->i_sb, "not a directory, fnode %08lx",
                                        (unsigned long)inode->i_ino);
                }
-               if (hpfs_inode->i_dno != fno->u.external[0].disk_secno) {
+               if (hpfs_inode->i_dno != le32_to_cpu(fno->u.external[0].disk_secno)) {
                        e = 1;
-                       hpfs_error(inode->i_sb, "corrupted inode: i_dno == %08x, fnode -> dnode == %08x", hpfs_inode->i_dno, fno->u.external[0].disk_secno);
+                       hpfs_error(inode->i_sb, "corrupted inode: i_dno == %08x, fnode -> dnode == %08x", hpfs_inode->i_dno, le32_to_cpu(fno->u.external[0].disk_secno));
                }
                brelse(bh);
                if (e) {
@@ -156,7 +156,7 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                        goto again;
                }
                tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3);
-               if (filldir(dirent, tempname, de->namelen, old_pos, de->fnode, DT_UNKNOWN) < 0) {
+               if (filldir(dirent, tempname, de->namelen, old_pos, le32_to_cpu(de->fnode), DT_UNKNOWN) < 0) {
                        filp->f_pos = old_pos;
                        if (tempname != de->name) kfree(tempname);
                        hpfs_brelse4(&qbh);
@@ -221,7 +221,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
         * Get inode number, what we're after.
         */
 
-       ino = de->fnode;
+       ino = le32_to_cpu(de->fnode);
 
        /*
         * Go find or make an inode.
@@ -236,7 +236,7 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
                hpfs_init_inode(result);
                if (de->directory)
                        hpfs_read_inode(result);
-               else if (de->ea_size && hpfs_sb(dir->i_sb)->sb_eas)
+               else if (le32_to_cpu(de->ea_size) && hpfs_sb(dir->i_sb)->sb_eas)
                        hpfs_read_inode(result);
                else {
                        result->i_mode |= S_IFREG;
@@ -250,8 +250,6 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
        hpfs_result = hpfs_i(result);
        if (!de->directory) hpfs_result->i_parent_dir = dir->i_ino;
 
-       hpfs_decide_conv(result, name, len);
-
        if (de->has_acl || de->has_xtd_perm) if (!(dir->i_sb->s_flags & MS_RDONLY)) {
                hpfs_error(result->i_sb, "ACLs or XPERM found. This is probably HPFS386. This driver doesn't support it now. Send me some info on these structures");
                goto bail1;
@@ -263,19 +261,19 @@ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct name
         */
 
        if (!result->i_ctime.tv_sec) {
-               if (!(result->i_ctime.tv_sec = local_to_gmt(dir->i_sb, de->creation_date)))
+               if (!(result->i_ctime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->creation_date))))
                        result->i_ctime.tv_sec = 1;
                result->i_ctime.tv_nsec = 0;
-               result->i_mtime.tv_sec = local_to_gmt(dir->i_sb, de->write_date);
+               result->i_mtime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->write_date));
                result->i_mtime.tv_nsec = 0;
-               result->i_atime.tv_sec = local_to_gmt(dir->i_sb, de->read_date);
+               result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->read_date));
                result->i_atime.tv_nsec = 0;
-               hpfs_result->i_ea_size = de->ea_size;
+               hpfs_result->i_ea_size = le32_to_cpu(de->ea_size);
                if (!hpfs_result->i_ea_mode && de->read_only)
                        result->i_mode &= ~0222;
                if (!de->directory) {
                        if (result->i_size == -1) {
-                               result->i_size = de->file_size;
+                               result->i_size = le32_to_cpu(de->file_size);
                                result->i_data.a_ops = &hpfs_aops;
                                hpfs_i(result)->mmu_private = result->i_size;
                        /*
index 9b2ffadfc8c42dd9a39f86a0bdedb6a8dbbd0df5..1e0e2ac30fd3be93f8e5b7a97618f19a52220ec4 100644 (file)
@@ -14,11 +14,11 @@ static loff_t get_pos(struct dnode *d, struct hpfs_dirent *fde)
        struct hpfs_dirent *de_end = dnode_end_de(d);
        int i = 1;
        for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) {
-               if (de == fde) return ((loff_t) d->self << 4) | (loff_t)i;
+               if (de == fde) return ((loff_t) le32_to_cpu(d->self) << 4) | (loff_t)i;
                i++;
        }
        printk("HPFS: get_pos: not_found\n");
-       return ((loff_t)d->self << 4) | (loff_t)1;
+       return ((loff_t)le32_to_cpu(d->self) << 4) | (loff_t)1;
 }
 
 void hpfs_add_pos(struct inode *inode, loff_t *pos)
@@ -130,29 +130,30 @@ static void set_last_pointer(struct super_block *s, struct dnode *d, dnode_secno
 {
        struct hpfs_dirent *de;
        if (!(de = dnode_last_de(d))) {
-               hpfs_error(s, "set_last_pointer: empty dnode %08x", d->self);
+               hpfs_error(s, "set_last_pointer: empty dnode %08x", le32_to_cpu(d->self));
                return;
        }
        if (hpfs_sb(s)->sb_chk) {
                if (de->down) {
                        hpfs_error(s, "set_last_pointer: dnode %08x has already last pointer %08x",
-                               d->self, de_down_pointer(de));
+                               le32_to_cpu(d->self), de_down_pointer(de));
                        return;
                }
-               if (de->length != 32) {
-                       hpfs_error(s, "set_last_pointer: bad last dirent in dnode %08x", d->self);
+               if (le16_to_cpu(de->length) != 32) {
+                       hpfs_error(s, "set_last_pointer: bad last dirent in dnode %08x", le32_to_cpu(d->self));
                        return;
                }
        }
        if (ptr) {
-               if ((d->first_free += 4) > 2048) {
-                       hpfs_error(s,"set_last_pointer: too long dnode %08x", d->self);
-                       d->first_free -= 4;
+               d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) + 4);
+               if (le32_to_cpu(d->first_free) > 2048) {
+                       hpfs_error(s, "set_last_pointer: too long dnode %08x", le32_to_cpu(d->self));
+                       d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) - 4);
                        return;
                }
-               de->length = 36;
+               de->length = cpu_to_le16(36);
                de->down = 1;
-               *(dnode_secno *)((char *)de + 32) = ptr;
+               *(dnode_secno *)((char *)de + 32) = cpu_to_le32(ptr);
        }
 }
 
@@ -168,7 +169,7 @@ struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d,
        for (de = dnode_first_de(d); de < de_end; de = de_next_de(de)) {
                int c = hpfs_compare_names(s, name, namelen, de->name, de->namelen, de->last);
                if (!c) {
-                       hpfs_error(s, "name (%c,%d) already exists in dnode %08x", *name, namelen, d->self);
+                       hpfs_error(s, "name (%c,%d) already exists in dnode %08x", *name, namelen, le32_to_cpu(d->self));
                        return NULL;
                }
                if (c < 0) break;
@@ -176,15 +177,14 @@ struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d,
        memmove((char *)de + d_size, de, (char *)de_end - (char *)de);
        memset(de, 0, d_size);
        if (down_ptr) {
-               *(int *)((char *)de + d_size - 4) = down_ptr;
+               *(dnode_secno *)((char *)de + d_size - 4) = cpu_to_le32(down_ptr);
                de->down = 1;
        }
-       de->length = d_size;
-       if (down_ptr) de->down = 1;
+       de->length = cpu_to_le16(d_size);
        de->not_8x3 = hpfs_is_name_long(name, namelen);
        de->namelen = namelen;
        memcpy(de->name, name, namelen);
-       d->first_free += d_size;
+       d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) + d_size);
        return de;
 }
 
@@ -194,25 +194,25 @@ static void hpfs_delete_de(struct super_block *s, struct dnode *d,
                           struct hpfs_dirent *de)
 {
        if (de->last) {
-               hpfs_error(s, "attempt to delete last dirent in dnode %08x", d->self);
+               hpfs_error(s, "attempt to delete last dirent in dnode %08x", le32_to_cpu(d->self));
                return;
        }
-       d->first_free -= de->length;
-       memmove(de, de_next_de(de), d->first_free + (char *)d - (char *)de);
+       d->first_free = cpu_to_le32(le32_to_cpu(d->first_free) - le16_to_cpu(de->length));
+       memmove(de, de_next_de(de), le32_to_cpu(d->first_free) + (char *)d - (char *)de);
 }
 
 static void fix_up_ptrs(struct super_block *s, struct dnode *d)
 {
        struct hpfs_dirent *de;
        struct hpfs_dirent *de_end = dnode_end_de(d);
-       dnode_secno dno = d->self;
+       dnode_secno dno = le32_to_cpu(d->self);
        for (de = dnode_first_de(d); de < de_end; de = de_next_de(de))
                if (de->down) {
                        struct quad_buffer_head qbh;
                        struct dnode *dd;
                        if ((dd = hpfs_map_dnode(s, de_down_pointer(de), &qbh))) {
-                               if (dd->up != dno || dd->root_dnode) {
-                                       dd->up = dno;
+                               if (le32_to_cpu(dd->up) != dno || dd->root_dnode) {
+                                       dd->up = cpu_to_le32(dno);
                                        dd->root_dnode = 0;
                                        hpfs_mark_4buffers_dirty(&qbh);
                                }
@@ -262,7 +262,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
                        kfree(nname);
                        return 1;
                }
-       if (d->first_free + de_size(namelen, down_ptr) <= 2048) {
+       if (le32_to_cpu(d->first_free) + de_size(namelen, down_ptr) <= 2048) {
                loff_t t;
                copy_de(de=hpfs_add_de(i->i_sb, d, name, namelen, down_ptr), new_de);
                t = get_pos(d, de);
@@ -286,11 +286,11 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
                kfree(nname);
                return 1;
        }       
-       memcpy(nd, d, d->first_free);
+       memcpy(nd, d, le32_to_cpu(d->first_free));
        copy_de(de = hpfs_add_de(i->i_sb, nd, name, namelen, down_ptr), new_de);
        for_all_poss(i, hpfs_pos_ins, get_pos(nd, de), 1);
        h = ((char *)dnode_last_de(nd) - (char *)nd) / 2 + 10;
-       if (!(ad = hpfs_alloc_dnode(i->i_sb, d->up, &adno, &qbh1, 0))) {
+       if (!(ad = hpfs_alloc_dnode(i->i_sb, le32_to_cpu(d->up), &adno, &qbh1))) {
                hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted");
                hpfs_brelse4(&qbh);
                kfree(nd);
@@ -313,20 +313,21 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
        down_ptr = adno;
        set_last_pointer(i->i_sb, ad, de->down ? de_down_pointer(de) : 0);
        de = de_next_de(de);
-       memmove((char *)nd + 20, de, nd->first_free + (char *)nd - (char *)de);
-       nd->first_free -= (char *)de - (char *)nd - 20;
-       memcpy(d, nd, nd->first_free);
+       memmove((char *)nd + 20, de, le32_to_cpu(nd->first_free) + (char *)nd - (char *)de);
+       nd->first_free = cpu_to_le32(le32_to_cpu(nd->first_free) - ((char *)de - (char *)nd - 20));
+       memcpy(d, nd, le32_to_cpu(nd->first_free));
        for_all_poss(i, hpfs_pos_del, (loff_t)dno << 4, pos);
        fix_up_ptrs(i->i_sb, ad);
        if (!d->root_dnode) {
-               dno = ad->up = d->up;
+               ad->up = d->up;
+               dno = le32_to_cpu(ad->up);
                hpfs_mark_4buffers_dirty(&qbh);
                hpfs_brelse4(&qbh);
                hpfs_mark_4buffers_dirty(&qbh1);
                hpfs_brelse4(&qbh1);
                goto go_up;
        }
-       if (!(rd = hpfs_alloc_dnode(i->i_sb, d->up, &rdno, &qbh2, 0))) {
+       if (!(rd = hpfs_alloc_dnode(i->i_sb, le32_to_cpu(d->up), &rdno, &qbh2))) {
                hpfs_error(i->i_sb, "unable to alloc dnode - dnode tree will be corrupted");
                hpfs_brelse4(&qbh);
                hpfs_brelse4(&qbh1);
@@ -338,7 +339,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
        i->i_blocks += 4;
        rd->root_dnode = 1;
        rd->up = d->up;
-       if (!(fnode = hpfs_map_fnode(i->i_sb, d->up, &bh))) {
+       if (!(fnode = hpfs_map_fnode(i->i_sb, le32_to_cpu(d->up), &bh))) {
                hpfs_free_dnode(i->i_sb, rdno);
                hpfs_brelse4(&qbh);
                hpfs_brelse4(&qbh1);
@@ -347,10 +348,11 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
                kfree(nname);
                return 1;
        }
-       fnode->u.external[0].disk_secno = rdno;
+       fnode->u.external[0].disk_secno = cpu_to_le32(rdno);
        mark_buffer_dirty(bh);
        brelse(bh);
-       d->up = ad->up = hpfs_i(i)->i_dno = rdno;
+       hpfs_i(i)->i_dno = rdno;
+       d->up = ad->up = cpu_to_le32(rdno);
        d->root_dnode = ad->root_dnode = 0;
        hpfs_mark_4buffers_dirty(&qbh);
        hpfs_brelse4(&qbh);
@@ -373,7 +375,7 @@ static int hpfs_add_to_dnode(struct inode *i, dnode_secno dno,
 
 int hpfs_add_dirent(struct inode *i,
                    const unsigned char *name, unsigned namelen,
-                   struct hpfs_dirent *new_de, int cdepth)
+                   struct hpfs_dirent *new_de)
 {
        struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
        struct dnode *d;
@@ -403,7 +405,6 @@ int hpfs_add_dirent(struct inode *i,
                }
        }
        hpfs_brelse4(&qbh);
-       if (!cdepth) hpfs_lock_creation(i->i_sb);
        if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_ADD)) {
                c = 1;
                goto ret;
@@ -411,7 +412,6 @@ int hpfs_add_dirent(struct inode *i,
        i->i_version++;
        c = hpfs_add_to_dnode(i, dno, name, namelen, new_de, 0);
        ret:
-       if (!cdepth) hpfs_unlock_creation(i->i_sb);
        return c;
 }
 
@@ -437,9 +437,9 @@ static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to)
                                return 0;
                if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return 0;
                if (hpfs_sb(i->i_sb)->sb_chk) {
-                       if (dnode->up != chk_up) {
+                       if (le32_to_cpu(dnode->up) != chk_up) {
                                hpfs_error(i->i_sb, "move_to_top: up pointer from %08x should be %08x, is %08x",
-                                       dno, chk_up, dnode->up);
+                                       dno, chk_up, le32_to_cpu(dnode->up));
                                hpfs_brelse4(&qbh);
                                return 0;
                        }
@@ -455,7 +455,7 @@ static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to)
                hpfs_brelse4(&qbh);
        }
        while (!(de = dnode_pre_last_de(dnode))) {
-               dnode_secno up = dnode->up;
+               dnode_secno up = le32_to_cpu(dnode->up);
                hpfs_brelse4(&qbh);
                hpfs_free_dnode(i->i_sb, dno);
                i->i_size -= 2048;
@@ -474,8 +474,8 @@ static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to)
                        hpfs_brelse4(&qbh);
                        return 0;
                }
-               dnode->first_free -= 4;
-               de->length -= 4;
+               dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) - 4);
+               de->length = cpu_to_le16(le16_to_cpu(de->length) - 4);
                de->down = 0;
                hpfs_mark_4buffers_dirty(&qbh);
                dno = up;
@@ -483,12 +483,12 @@ static secno move_to_top(struct inode *i, dnode_secno from, dnode_secno to)
        t = get_pos(dnode, de);
        for_all_poss(i, hpfs_pos_subst, t, 4);
        for_all_poss(i, hpfs_pos_subst, t + 1, 5);
-       if (!(nde = kmalloc(de->length, GFP_NOFS))) {
+       if (!(nde = kmalloc(le16_to_cpu(de->length), GFP_NOFS))) {
                hpfs_error(i->i_sb, "out of memory for dirent - directory will be corrupted");
                hpfs_brelse4(&qbh);
                return 0;
        }
-       memcpy(nde, de, de->length);
+       memcpy(nde, de, le16_to_cpu(de->length));
        ddno = de->down ? de_down_pointer(de) : 0;
        hpfs_delete_de(i->i_sb, dnode, de);
        set_last_pointer(i->i_sb, dnode, ddno);
@@ -517,11 +517,11 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
        try_it_again:
        if (hpfs_stop_cycles(i->i_sb, dno, &c1, &c2, "delete_empty_dnode")) return;
        if (!(dnode = hpfs_map_dnode(i->i_sb, dno, &qbh))) return;
-       if (dnode->first_free > 56) goto end;
-       if (dnode->first_free == 52 || dnode->first_free == 56) {
+       if (le32_to_cpu(dnode->first_free) > 56) goto end;
+       if (le32_to_cpu(dnode->first_free) == 52 || le32_to_cpu(dnode->first_free) == 56) {
                struct hpfs_dirent *de_end;
                int root = dnode->root_dnode;
-               up = dnode->up;
+               up = le32_to_cpu(dnode->up);
                de = dnode_first_de(dnode);
                down = de->down ? de_down_pointer(de) : 0;
                if (hpfs_sb(i->i_sb)->sb_chk) if (root && !down) {
@@ -545,13 +545,13 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
                                return;
                            }
                        if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) {
-                               d1->up = up;
+                               d1->up = cpu_to_le32(up);
                                d1->root_dnode = 1;
                                hpfs_mark_4buffers_dirty(&qbh1);
                                hpfs_brelse4(&qbh1);
                        }
                        if ((fnode = hpfs_map_fnode(i->i_sb, up, &bh))) {
-                               fnode->u.external[0].disk_secno = down;
+                               fnode->u.external[0].disk_secno = cpu_to_le32(down);
                                mark_buffer_dirty(bh);
                                brelse(bh);
                        }
@@ -570,22 +570,22 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
                for_all_poss(i, hpfs_pos_subst, ((loff_t)dno << 4) | 1, ((loff_t)up << 4) | p);
                if (!down) {
                        de->down = 0;
-                       de->length -= 4;
-                       dnode->first_free -= 4;
+                       de->length = cpu_to_le16(le16_to_cpu(de->length) - 4);
+                       dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) - 4);
                        memmove(de_next_de(de), (char *)de_next_de(de) + 4,
-                               (char *)dnode + dnode->first_free - (char *)de_next_de(de));
+                               (char *)dnode + le32_to_cpu(dnode->first_free) - (char *)de_next_de(de));
                } else {
                        struct dnode *d1;
                        struct quad_buffer_head qbh1;
-                       *(dnode_secno *) ((void *) de + de->length - 4) = down;
+                       *(dnode_secno *) ((void *) de + le16_to_cpu(de->length) - 4) = down;
                        if ((d1 = hpfs_map_dnode(i->i_sb, down, &qbh1))) {
-                               d1->up = up;
+                               d1->up = cpu_to_le32(up);
                                hpfs_mark_4buffers_dirty(&qbh1);
                                hpfs_brelse4(&qbh1);
                        }
                }
        } else {
-               hpfs_error(i->i_sb, "delete_empty_dnode: dnode %08x, first_free == %03x", dno, dnode->first_free);
+               hpfs_error(i->i_sb, "delete_empty_dnode: dnode %08x, first_free == %03x", dno, le32_to_cpu(dnode->first_free));
                goto end;
        }
 
@@ -596,18 +596,18 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
                struct quad_buffer_head qbh1;
                if (!de_next->down) goto endm;
                ndown = de_down_pointer(de_next);
-               if (!(de_cp = kmalloc(de->length, GFP_NOFS))) {
+               if (!(de_cp = kmalloc(le16_to_cpu(de->length), GFP_NOFS))) {
                        printk("HPFS: out of memory for dtree balancing\n");
                        goto endm;
                }
-               memcpy(de_cp, de, de->length);
+               memcpy(de_cp, de, le16_to_cpu(de->length));
                hpfs_delete_de(i->i_sb, dnode, de);
                hpfs_mark_4buffers_dirty(&qbh);
                hpfs_brelse4(&qbh);
                for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, 4);
                for_all_poss(i, hpfs_pos_del, ((loff_t)up << 4) | p, 1);
                if (de_cp->down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de_cp), &qbh1))) {
-                       d1->up = ndown;
+                       d1->up = cpu_to_le32(ndown);
                        hpfs_mark_4buffers_dirty(&qbh1);
                        hpfs_brelse4(&qbh1);
                }
@@ -635,7 +635,7 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
                        struct hpfs_dirent *del = dnode_last_de(d1);
                        dlp = del->down ? de_down_pointer(del) : 0;
                        if (!dlp && down) {
-                               if (d1->first_free > 2044) {
+                               if (le32_to_cpu(d1->first_free) > 2044) {
                                        if (hpfs_sb(i->i_sb)->sb_chk >= 2) {
                                                printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n");
                                                printk("HPFS: warning: terminating balancing operation\n");
@@ -647,38 +647,38 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
                                        printk("HPFS: warning: unbalanced dnode tree, see hpfs.txt 4 more info\n");
                                        printk("HPFS: warning: goin'on\n");
                                }
-                               del->length += 4;
+                               del->length = cpu_to_le16(le16_to_cpu(del->length) + 4);
                                del->down = 1;
-                               d1->first_free += 4;
+                               d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) + 4);
                        }
                        if (dlp && !down) {
-                               del->length -= 4;
+                               del->length = cpu_to_le16(le16_to_cpu(del->length) - 4);
                                del->down = 0;
-                               d1->first_free -= 4;
+                               d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) - 4);
                        } else if (down)
-                               *(dnode_secno *) ((void *) del + del->length - 4) = down;
+                               *(dnode_secno *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down);
                } else goto endm;
-               if (!(de_cp = kmalloc(de_prev->length, GFP_NOFS))) {
+               if (!(de_cp = kmalloc(le16_to_cpu(de_prev->length), GFP_NOFS))) {
                        printk("HPFS: out of memory for dtree balancing\n");
                        hpfs_brelse4(&qbh1);
                        goto endm;
                }
                hpfs_mark_4buffers_dirty(&qbh1);
                hpfs_brelse4(&qbh1);
-               memcpy(de_cp, de_prev, de_prev->length);
+               memcpy(de_cp, de_prev, le16_to_cpu(de_prev->length));
                hpfs_delete_de(i->i_sb, dnode, de_prev);
                if (!de_prev->down) {
-                       de_prev->length += 4;
+                       de_prev->length = cpu_to_le16(le16_to_cpu(de_prev->length) + 4);
                        de_prev->down = 1;
-                       dnode->first_free += 4;
+                       dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) + 4);
                }
-               *(dnode_secno *) ((void *) de_prev + de_prev->length - 4) = ndown;
+               *(dnode_secno *) ((void *) de_prev + le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown);
                hpfs_mark_4buffers_dirty(&qbh);
                hpfs_brelse4(&qbh);
                for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | (p - 1), 4);
                for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | p, ((loff_t)up << 4) | (p - 1));
                if (down) if ((d1 = hpfs_map_dnode(i->i_sb, de_down_pointer(de), &qbh1))) {
-                       d1->up = ndown;
+                       d1->up = cpu_to_le32(ndown);
                        hpfs_mark_4buffers_dirty(&qbh1);
                        hpfs_brelse4(&qbh1);
                }
@@ -701,7 +701,6 @@ int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de,
 {
        struct dnode *dnode = qbh->data;
        dnode_secno down = 0;
-       int lock = 0;
        loff_t t;
        if (de->first || de->last) {
                hpfs_error(i->i_sb, "hpfs_remove_dirent: attempt to delete first or last dirent in dnode %08x", dno);
@@ -710,11 +709,8 @@ int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de,
        }
        if (de->down) down = de_down_pointer(de);
        if (depth && (de->down || (de == dnode_first_de(dnode) && de_next_de(de)->last))) {
-               lock = 1;
-               hpfs_lock_creation(i->i_sb);
                if (hpfs_check_free_dnodes(i->i_sb, FREE_DNODES_DEL)) {
                        hpfs_brelse4(qbh);
-                       hpfs_unlock_creation(i->i_sb);
                        return 2;
                }
        }
@@ -727,11 +723,9 @@ int hpfs_remove_dirent(struct inode *i, dnode_secno dno, struct hpfs_dirent *de,
                dnode_secno a = move_to_top(i, down, dno);
                for_all_poss(i, hpfs_pos_subst, 5, t);
                if (a) delete_empty_dnode(i, a);
-               if (lock) hpfs_unlock_creation(i->i_sb);
                return !a;
        }
        delete_empty_dnode(i, dno);
-       if (lock) hpfs_unlock_creation(i->i_sb);
        return 0;
 }
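
The hpfs_lock_creation()/hpfs_unlock_creation() pair removed in this hunk is made redundant by the single per-superblock mutex the rest of this series serializes everything under. The diff never shows those helpers, so the following is only a minimal sketch of the assumed shape of the lock primitives used throughout (struct and accessor names here are illustrative, not taken from the patch):

#include <linux/fs.h>
#include <linux/mutex.h>

/* Assumed per-superblock info holding the one big HPFS lock. */
struct hpfs_sbi_sketch {
	struct mutex hpfs_mutex;	/* serializes all HPFS operations */
};

static inline struct hpfs_sbi_sketch *hpfs_sbi_sketch(struct super_block *s)
{
	return s->s_fs_info;
}

static inline void hpfs_lock(struct super_block *s)
{
	mutex_lock(&hpfs_sbi_sketch(s)->hpfs_mutex);
}

static inline void hpfs_unlock(struct super_block *s)
{
	mutex_unlock(&hpfs_sbi_sketch(s)->hpfs_mutex);
}

static inline void hpfs_lock_assert(struct super_block *s)
{
	WARN_ON(!mutex_is_locked(&hpfs_sbi_sketch(s)->hpfs_mutex));
}

With such a lock held for the whole directory operation, a separate creation lock around dnode allocation no longer buys anything, which is why the local "lock" flag and the unlock calls on the exit paths can simply go away.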
 
@@ -751,8 +745,8 @@ void hpfs_count_dnodes(struct super_block *s, dnode_secno dno, int *n_dnodes,
        ptr = 0;
        go_up:
        if (!(dnode = hpfs_map_dnode(s, dno, &qbh))) return;
-       if (hpfs_sb(s)->sb_chk) if (odno && odno != -1 && dnode->up != odno)
-               hpfs_error(s, "hpfs_count_dnodes: bad up pointer; dnode %08x, down %08x points to %08x", odno, dno, dnode->up);
+       if (hpfs_sb(s)->sb_chk) if (odno && odno != -1 && le32_to_cpu(dnode->up) != odno)
+               hpfs_error(s, "hpfs_count_dnodes: bad up pointer; dnode %08x, down %08x points to %08x", odno, dno, le32_to_cpu(dnode->up));
        de = dnode_first_de(dnode);
        if (ptr) while(1) {
                if (de->down) if (de_down_pointer(de) == ptr) goto process_de;
@@ -776,7 +770,7 @@ void hpfs_count_dnodes(struct super_block *s, dnode_secno dno, int *n_dnodes,
        if (!de->first && !de->last && n_items) (*n_items)++;
        if ((de = de_next_de(de)) < dnode_end_de(dnode)) goto next_de;
        ptr = dno;
-       dno = dnode->up;
+       dno = le32_to_cpu(dnode->up);
        if (dnode->root_dnode) {
                hpfs_brelse4(&qbh);
                return;
@@ -824,8 +818,8 @@ dnode_secno hpfs_de_as_down_as_possible(struct super_block *s, dnode_secno dno)
                        return d;
        if (!(de = map_nth_dirent(s, d, 1, &qbh, NULL))) return dno;
        if (hpfs_sb(s)->sb_chk)
-               if (up && ((struct dnode *)qbh.data)->up != up)
-                       hpfs_error(s, "hpfs_de_as_down_as_possible: bad up pointer; dnode %08x, down %08x points to %08x", up, d, ((struct dnode *)qbh.data)->up);
+               if (up && le32_to_cpu(((struct dnode *)qbh.data)->up) != up)
+                       hpfs_error(s, "hpfs_de_as_down_as_possible: bad up pointer; dnode %08x, down %08x points to %08x", up, d, le32_to_cpu(((struct dnode *)qbh.data)->up));
        if (!de->down) {
                hpfs_brelse4(&qbh);
                return d;
@@ -874,7 +868,7 @@ struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp,
        /* Going up */
        if (dnode->root_dnode) goto bail;
 
-       if (!(up_dnode = hpfs_map_dnode(inode->i_sb, dnode->up, &qbh0)))
+       if (!(up_dnode = hpfs_map_dnode(inode->i_sb, le32_to_cpu(dnode->up), &qbh0)))
                goto bail;
 
        end_up_de = dnode_end_de(up_dnode);
@@ -882,16 +876,16 @@ struct hpfs_dirent *map_pos_dirent(struct inode *inode, loff_t *posp,
        for (up_de = dnode_first_de(up_dnode); up_de < end_up_de;
             up_de = de_next_de(up_de)) {
                if (!(++c & 077)) hpfs_error(inode->i_sb,
-                       "map_pos_dirent: pos crossed dnode boundary; dnode = %08x", dnode->up);
+                       "map_pos_dirent: pos crossed dnode boundary; dnode = %08x", le32_to_cpu(dnode->up));
                if (up_de->down && de_down_pointer(up_de) == dno) {
-                       *posp = ((loff_t) dnode->up << 4) + c;
+                       *posp = ((loff_t) le32_to_cpu(dnode->up) << 4) + c;
                        hpfs_brelse4(&qbh0);
                        return de;
                }
        }
        
        hpfs_error(inode->i_sb, "map_pos_dirent: pointer to dnode %08x not found in parent dnode %08x",
-               dno, dnode->up);
+               dno, le32_to_cpu(dnode->up));
        hpfs_brelse4(&qbh0);
        
        bail:
@@ -1017,17 +1011,17 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno,
                /*name2[15] = 0xff;*/
                name1len = 15; name2len = 256;
        }
-       if (!(upf = hpfs_map_fnode(s, f->up, &bh))) {
+       if (!(upf = hpfs_map_fnode(s, le32_to_cpu(f->up), &bh))) {
                kfree(name2);
                return NULL;
        }       
        if (!upf->dirflag) {
                brelse(bh);
-               hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, f->up);
+               hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, le32_to_cpu(f->up));
                kfree(name2);
                return NULL;
        }
-       dno = upf->u.external[0].disk_secno;
+       dno = le32_to_cpu(upf->u.external[0].disk_secno);
        brelse(bh);
        go_down:
        downd = 0;
@@ -1049,7 +1043,7 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno,
                return NULL;
        }
        next_de:
-       if (de->fnode == fno) {
+       if (le32_to_cpu(de->fnode) == fno) {
                kfree(name2);
                return de;
        }
@@ -1065,7 +1059,7 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno,
                goto go_down;
        }
        f:
-       if (de->fnode == fno) {
+       if (le32_to_cpu(de->fnode) == fno) {
                kfree(name2);
                return de;
        }
@@ -1074,7 +1068,7 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno,
        if ((de = de_next_de(de)) < de_end) goto next_de;
        if (d->root_dnode) goto not_found;
        downd = dno;
-       dno = d->up;
+       dno = le32_to_cpu(d->up);
        hpfs_brelse4(qbh);
        if (hpfs_sb(s)->sb_chk)
                if (hpfs_stop_cycles(s, downd, &d1, &d2, "map_fnode_dirent #2")) {
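
Throughout the hunks above, plain loads, stores and arithmetic on on-disk fields are rewritten as explicit little-endian conversions. A minimal sketch of the idiom with an illustrative structure (the patch itself declares these members as plain u16/u32; __le16/__le32 are used here only to make the byte order explicit):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative on-disk record: values live little-endian in memory,
 * so every read-modify-write goes through the byte-order helpers. */
struct demo_dirent {
	__le16 length;		/* offset to next entry, as stored on disk */
	__le32 first_free;	/* offset of first free byte, as stored on disk */
};

static void demo_shrink_by_four(struct demo_dirent *de)
{
	/* was: de->length -= 4; de->first_free -= 4; */
	de->length = cpu_to_le16(le16_to_cpu(de->length) - 4);
	de->first_free = cpu_to_le32(le32_to_cpu(de->first_free) - 4);
}

On little-endian machines the helpers compile to nothing, so the generated code is unchanged; on big-endian machines they insert the byte swaps that the old direct arithmetic silently omitted.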
index 45e53d972b42ed27e931c5fa01ae5fb194b0c5f2..d8b84d113c891bbcfd8416d3f35153983b0549a7 100644 (file)
@@ -24,7 +24,7 @@ void hpfs_ea_ext_remove(struct super_block *s, secno a, int ano, unsigned len)
                }
                if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return;
                if (ea->indirect) {
-                       if (ea->valuelen != 8) {
+                       if (ea_valuelen(ea) != 8) {
                                hpfs_error(s, "ea->indirect set while ea->valuelen!=8, %s %08x, pos %08x",
                                        ano ? "anode" : "sectors", a, pos);
                                return;
@@ -33,7 +33,7 @@ void hpfs_ea_ext_remove(struct super_block *s, secno a, int ano, unsigned len)
                                return;
                        hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea));
                }
-               pos += ea->namelen + ea->valuelen + 5;
+               pos += ea->namelen + ea_valuelen(ea) + 5;
        }
        if (!ano) hpfs_free_sectors(s, a, (len+511) >> 9);
        else {
@@ -76,24 +76,24 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key,
        unsigned pos;
        int ano, len;
        secno a;
+       char ex[4 + 255 + 1 + 8];
        struct extended_attribute *ea;
        struct extended_attribute *ea_end = fnode_end_ea(fnode);
        for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
                if (!strcmp(ea->name, key)) {
                        if (ea->indirect)
                                goto indirect;
-                       if (ea->valuelen >= size)
+                       if (ea_valuelen(ea) >= size)
                                return -EINVAL;
-                       memcpy(buf, ea_data(ea), ea->valuelen);
-                       buf[ea->valuelen] = 0;
+                       memcpy(buf, ea_data(ea), ea_valuelen(ea));
+                       buf[ea_valuelen(ea)] = 0;
                        return 0;
                }
-       a = fnode->ea_secno;
-       len = fnode->ea_size_l;
+       a = le32_to_cpu(fnode->ea_secno);
+       len = le32_to_cpu(fnode->ea_size_l);
        ano = fnode->ea_anode;
        pos = 0;
        while (pos < len) {
-               char ex[4 + 255 + 1 + 8];
                ea = (struct extended_attribute *)ex;
                if (pos + 4 > len) {
                        hpfs_error(s, "EAs don't end correctly, %s %08x, len %08x",
@@ -106,14 +106,14 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key,
                if (!strcmp(ea->name, key)) {
                        if (ea->indirect)
                                goto indirect;
-                       if (ea->valuelen >= size)
+                       if (ea_valuelen(ea) >= size)
                                return -EINVAL;
-                       if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea->valuelen, buf))
+                       if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea_valuelen(ea), buf))
                                return -EIO;
-                       buf[ea->valuelen] = 0;
+                       buf[ea_valuelen(ea)] = 0;
                        return 0;
                }
-               pos += ea->namelen + ea->valuelen + 5;
+               pos += ea->namelen + ea_valuelen(ea) + 5;
        }
        return -ENOENT;
 indirect:
@@ -138,16 +138,16 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si
                if (!strcmp(ea->name, key)) {
                        if (ea->indirect)
                                return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea));
-                       if (!(ret = kmalloc((*size = ea->valuelen) + 1, GFP_NOFS))) {
+                       if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) {
                                printk("HPFS: out of memory for EA\n");
                                return NULL;
                        }
-                       memcpy(ret, ea_data(ea), ea->valuelen);
-                       ret[ea->valuelen] = 0;
+                       memcpy(ret, ea_data(ea), ea_valuelen(ea));
+                       ret[ea_valuelen(ea)] = 0;
                        return ret;
                }
-       a = fnode->ea_secno;
-       len = fnode->ea_size_l;
+       a = le32_to_cpu(fnode->ea_secno);
+       len = le32_to_cpu(fnode->ea_size_l);
        ano = fnode->ea_anode;
        pos = 0;
        while (pos < len) {
@@ -164,18 +164,18 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si
                if (!strcmp(ea->name, key)) {
                        if (ea->indirect)
                                return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea));
-                       if (!(ret = kmalloc((*size = ea->valuelen) + 1, GFP_NOFS))) {
+                       if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) {
                                printk("HPFS: out of memory for EA\n");
                                return NULL;
                        }
-                       if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea->valuelen, ret)) {
+                       if (hpfs_ea_read(s, a, ano, pos + 4 + ea->namelen + 1, ea_valuelen(ea), ret)) {
                                kfree(ret);
                                return NULL;
                        }
-                       ret[ea->valuelen] = 0;
+                       ret[ea_valuelen(ea)] = 0;
                        return ret;
                }
-               pos += ea->namelen + ea->valuelen + 5;
+               pos += ea->namelen + ea_valuelen(ea) + 5;
        }
        return NULL;
 }
@@ -202,13 +202,13 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
                        if (ea->indirect) {
                                if (ea_len(ea) == size)
                                        set_indirect_ea(s, ea->anode, ea_sec(ea), data, size);
-                       } else if (ea->valuelen == size) {
+                       } else if (ea_valuelen(ea) == size) {
                                memcpy(ea_data(ea), data, size);
                        }
                        return;
                }
-       a = fnode->ea_secno;
-       len = fnode->ea_size_l;
+       a = le32_to_cpu(fnode->ea_secno);
+       len = le32_to_cpu(fnode->ea_size_l);
        ano = fnode->ea_anode;
        pos = 0;
        while (pos < len) {
@@ -228,68 +228,70 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
                                        set_indirect_ea(s, ea->anode, ea_sec(ea), data, size);
                        }
                        else {
-                               if (ea->valuelen == size)
+                               if (ea_valuelen(ea) == size)
                                        hpfs_ea_write(s, a, ano, pos + 4 + ea->namelen + 1, size, data);
                        }
                        return;
                }
-               pos += ea->namelen + ea->valuelen + 5;
+               pos += ea->namelen + ea_valuelen(ea) + 5;
        }
-       if (!fnode->ea_offs) {
-               /*if (fnode->ea_size_s) {
+       if (!le16_to_cpu(fnode->ea_offs)) {
+               /*if (le16_to_cpu(fnode->ea_size_s)) {
                        hpfs_error(s, "fnode %08x: ea_size_s == %03x, ea_offs == 0",
-                               inode->i_ino, fnode->ea_size_s);
+                               inode->i_ino, le16_to_cpu(fnode->ea_size_s));
                        return;
                }*/
-               fnode->ea_offs = 0xc4;
+               fnode->ea_offs = cpu_to_le16(0xc4);
        }
-       if (fnode->ea_offs < 0xc4 || fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200) {
+       if (le16_to_cpu(fnode->ea_offs) < 0xc4 || le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200) {
                hpfs_error(s, "fnode %08lx: ea_offs == %03x, ea_size_s == %03x",
                        (unsigned long)inode->i_ino,
-                       fnode->ea_offs, fnode->ea_size_s);
+                       le16_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s));
                return;
        }
-       if ((fnode->ea_size_s || !fnode->ea_size_l) &&
-            fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s + strlen(key) + size + 5 <= 0x200) {
+       if ((le16_to_cpu(fnode->ea_size_s) || !le32_to_cpu(fnode->ea_size_l)) &&
+            le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) + strlen(key) + size + 5 <= 0x200) {
                ea = fnode_end_ea(fnode);
                *(char *)ea = 0;
                ea->namelen = strlen(key);
-               ea->valuelen = size;
+               ea->valuelen_lo = size;
+               ea->valuelen_hi = size >> 8;
                strcpy(ea->name, key);
                memcpy(ea_data(ea), data, size);
-               fnode->ea_size_s += strlen(key) + size + 5;
+               fnode->ea_size_s = cpu_to_le16(le16_to_cpu(fnode->ea_size_s) + strlen(key) + size + 5);
                goto ret;
        }
        /* Most the code here is 99.9993422% unused. I hope there are no bugs.
           But what .. HPFS.IFS has also bugs in ea management. */
-       if (fnode->ea_size_s && !fnode->ea_size_l) {
+       if (le16_to_cpu(fnode->ea_size_s) && !le32_to_cpu(fnode->ea_size_l)) {
                secno n;
                struct buffer_head *bh;
                char *data;
-               if (!(n = hpfs_alloc_sector(s, fno, 1, 0, 1))) return;
+               if (!(n = hpfs_alloc_sector(s, fno, 1, 0))) return;
                if (!(data = hpfs_get_sector(s, n, &bh))) {
                        hpfs_free_sectors(s, n, 1);
                        return;
                }
-               memcpy(data, fnode_ea(fnode), fnode->ea_size_s);
-               fnode->ea_size_l = fnode->ea_size_s;
-               fnode->ea_size_s = 0;
-               fnode->ea_secno = n;
-               fnode->ea_anode = 0;
+               memcpy(data, fnode_ea(fnode), le16_to_cpu(fnode->ea_size_s));
+               fnode->ea_size_l = cpu_to_le32(le16_to_cpu(fnode->ea_size_s));
+               fnode->ea_size_s = cpu_to_le16(0);
+               fnode->ea_secno = cpu_to_le32(n);
+               fnode->ea_anode = cpu_to_le32(0);
                mark_buffer_dirty(bh);
                brelse(bh);
        }
-       pos = fnode->ea_size_l + 5 + strlen(key) + size;
-       len = (fnode->ea_size_l + 511) >> 9;
+       pos = le32_to_cpu(fnode->ea_size_l) + 5 + strlen(key) + size;
+       len = (le32_to_cpu(fnode->ea_size_l) + 511) >> 9;
        if (pos >= 30000) goto bail;
        while (((pos + 511) >> 9) > len) {
                if (!len) {
-                       if (!(fnode->ea_secno = hpfs_alloc_sector(s, fno, 1, 0, 1)))
-                               goto bail;
+                       secno q = hpfs_alloc_sector(s, fno, 1, 0);
+                       if (!q) goto bail;
+                       fnode->ea_secno = cpu_to_le32(q);
                        fnode->ea_anode = 0;
                        len++;
                } else if (!fnode->ea_anode) {
-                       if (hpfs_alloc_if_possible(s, fnode->ea_secno + len)) {
+                       if (hpfs_alloc_if_possible(s, le32_to_cpu(fnode->ea_secno) + len)) {
                                len++;
                        } else {
                                /* Aargh... don't know how to create ea anodes :-( */
@@ -298,26 +300,26 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
                                anode_secno a_s;
                                if (!(anode = hpfs_alloc_anode(s, fno, &a_s, &bh)))
                                        goto bail;
-                               anode->up = fno;
+                               anode->up = cpu_to_le32(fno);
                                anode->btree.fnode_parent = 1;
                                anode->btree.n_free_nodes--;
                                anode->btree.n_used_nodes++;
-                               anode->btree.first_free += 12;
-                               anode->u.external[0].disk_secno = fnode->ea_secno;
-                               anode->u.external[0].file_secno = 0;
-                               anode->u.external[0].length = len;
+                               anode->btree.first_free = cpu_to_le16(le16_to_cpu(anode->btree.first_free) + 12);
+                               anode->u.external[0].disk_secno = cpu_to_le32(le32_to_cpu(fnode->ea_secno));
+                               anode->u.external[0].file_secno = cpu_to_le32(0);
+                               anode->u.external[0].length = cpu_to_le32(len);
                                mark_buffer_dirty(bh);
                                brelse(bh);
                                fnode->ea_anode = 1;
-                               fnode->ea_secno = a_s;*/
+                               fnode->ea_secno = cpu_to_le32(a_s);*/
                                secno new_sec;
                                int i;
-                               if (!(new_sec = hpfs_alloc_sector(s, fno, 1, 1 - ((pos + 511) >> 9), 1)))
+                               if (!(new_sec = hpfs_alloc_sector(s, fno, 1, 1 - ((pos + 511) >> 9))))
                                        goto bail;
                                for (i = 0; i < len; i++) {
                                        struct buffer_head *bh1, *bh2;
                                        void *b1, *b2;
-                                       if (!(b1 = hpfs_map_sector(s, fnode->ea_secno + i, &bh1, len - i - 1))) {
+                                       if (!(b1 = hpfs_map_sector(s, le32_to_cpu(fnode->ea_secno) + i, &bh1, len - i - 1))) {
                                                hpfs_free_sectors(s, new_sec, (pos + 511) >> 9);
                                                goto bail;
                                        }
@@ -331,13 +333,13 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
                                        mark_buffer_dirty(bh2);
                                        brelse(bh2);
                                }
-                               hpfs_free_sectors(s, fnode->ea_secno, len);
-                               fnode->ea_secno = new_sec;
+                               hpfs_free_sectors(s, le32_to_cpu(fnode->ea_secno), len);
+                               fnode->ea_secno = cpu_to_le32(new_sec);
                                len = (pos + 511) >> 9;
                        }
                }
                if (fnode->ea_anode) {
-                       if (hpfs_add_sector_to_btree(s, fnode->ea_secno,
+                       if (hpfs_add_sector_to_btree(s, le32_to_cpu(fnode->ea_secno),
                                                     0, len) != -1) {
                                len++;
                        } else {
@@ -349,17 +351,17 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
        h[1] = strlen(key);
        h[2] = size & 0xff;
        h[3] = size >> 8;
-       if (hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l, 4, h)) goto bail;
-       if (hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l + 4, h[1] + 1, key)) goto bail;
-       if (hpfs_ea_write(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l + 5 + h[1], size, data)) goto bail;
-       fnode->ea_size_l = pos;
+       if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l), 4, h)) goto bail;
+       if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l) + 4, h[1] + 1, key)) goto bail;
+       if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l) + 5 + h[1], size, data)) goto bail;
+       fnode->ea_size_l = cpu_to_le32(pos);
        ret:
        hpfs_i(inode)->i_ea_size += 5 + strlen(key) + size;
        return;
        bail:
-       if (fnode->ea_secno)
-               if (fnode->ea_anode) hpfs_truncate_btree(s, fnode->ea_secno, 1, (fnode->ea_size_l + 511) >> 9);
-               else hpfs_free_sectors(s, fnode->ea_secno + ((fnode->ea_size_l + 511) >> 9), len - ((fnode->ea_size_l + 511) >> 9));
-       else fnode->ea_secno = fnode->ea_size_l = 0;
+       if (le32_to_cpu(fnode->ea_secno))
+               if (fnode->ea_anode) hpfs_truncate_btree(s, le32_to_cpu(fnode->ea_secno), 1, (le32_to_cpu(fnode->ea_size_l) + 511) >> 9);
+               else hpfs_free_sectors(s, le32_to_cpu(fnode->ea_secno) + ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9), len - ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9));
+       else fnode->ea_secno = fnode->ea_size_l = cpu_to_le32(0);
 }
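
The direct ea->valuelen reads above become ea_valuelen() calls because the value length is now kept as two separate bytes (valuelen_lo/valuelen_hi, as written near the end of hpfs_set_ea). The helper itself is not part of this excerpt; a plausible sketch, assuming those two u8 fields in struct extended_attribute:

/* Sketch of the ea_valuelen() accessor assumed by the hunks above.
 * Splitting the 16-bit length into two bytes keeps the packed on-disk
 * layout free of any unaligned little-endian member. */
static inline unsigned int ea_valuelen(const struct extended_attribute *ea)
{
	return ea->valuelen_lo + 256 * ea->valuelen_hi;
}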
        
index 9b9eb6933e43d8620a38ec744f77d4d8712e3b9a..89c500ee521382c2250c0a818a3c18c773025ac8 100644 (file)
@@ -20,8 +20,8 @@ static int hpfs_file_release(struct inode *inode, struct file *file)
 
 int hpfs_file_fsync(struct file *file, int datasync)
 {
-       /*return file_fsync(file, datasync);*/
-       return 0; /* Don't fsync :-) */
+       struct inode *inode = file->f_mapping->host;
+       return sync_blockdev(inode->i_sb->s_bdev);
 }
 
 /*
@@ -48,38 +48,46 @@ static secno hpfs_bmap(struct inode *inode, unsigned file_secno)
 static void hpfs_truncate(struct inode *i)
 {
        if (IS_IMMUTABLE(i)) return /*-EPERM*/;
-       hpfs_lock(i->i_sb);
+       hpfs_lock_assert(i->i_sb);
+
        hpfs_i(i)->i_n_secs = 0;
        i->i_blocks = 1 + ((i->i_size + 511) >> 9);
        hpfs_i(i)->mmu_private = i->i_size;
        hpfs_truncate_btree(i->i_sb, i->i_ino, 1, ((i->i_size + 511) >> 9));
        hpfs_write_inode(i);
        hpfs_i(i)->i_n_secs = 0;
-       hpfs_unlock(i->i_sb);
 }
 
 static int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create)
 {
+       int r;
        secno s;
+       hpfs_lock(inode->i_sb);
        s = hpfs_bmap(inode, iblock);
        if (s) {
                map_bh(bh_result, inode->i_sb, s);
-               return 0;
+               goto ret_0;
        }
-       if (!create) return 0;
+       if (!create) goto ret_0;
        if (iblock<<9 != hpfs_i(inode)->mmu_private) {
                BUG();
-               return -EIO;
+               r = -EIO;
+               goto ret_r;
        }
        if ((s = hpfs_add_sector_to_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1)) == -1) {
                hpfs_truncate_btree(inode->i_sb, inode->i_ino, 1, inode->i_blocks - 1);
-               return -ENOSPC;
+               r = -ENOSPC;
+               goto ret_r;
        }
        inode->i_blocks++;
        hpfs_i(inode)->mmu_private += 512;
        set_buffer_new(bh_result);
        map_bh(bh_result, inode->i_sb, s);
-       return 0;
+       ret_0:
+       r = 0;
+       ret_r:
+       hpfs_unlock(inode->i_sb);
+       return r;
 }
 
 static int hpfs_writepage(struct page *page, struct writeback_control *wbc)
@@ -130,8 +138,11 @@ static ssize_t hpfs_file_write(struct file *file, const char __user *buf,
        ssize_t retval;
 
        retval = do_sync_write(file, buf, count, ppos);
-       if (retval > 0)
+       if (retval > 0) {
+               hpfs_lock(file->f_path.dentry->d_sb);
                hpfs_i(file->f_path.dentry->d_inode)->i_dirty = 1;
+               hpfs_unlock(file->f_path.dentry->d_sb);
+       }
        return retval;
 }
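
With hpfs_truncate() reduced to hpfs_lock_assert(), the lock is now taken by its callers, and hpfs_get_block() and hpfs_file_write() above acquire it themselves. A hypothetical caller, only to illustrate the expected locking discipline (everything except hpfs_lock()/hpfs_unlock()/hpfs_truncate() is illustrative, using the lock helpers in the form sketched after the dnode hunks):

/* Hypothetical shrink path showing the caller-holds-the-lock rule that
 * the reworked hpfs_truncate() relies on. */
static void demo_shrink(struct inode *inode, loff_t newsize)
{
	hpfs_lock(inode->i_sb);
	i_size_write(inode, newsize);	/* update the in-core size first */
	hpfs_truncate(inode);		/* hpfs_lock_assert() passes: we hold it */
	hpfs_unlock(inode->i_sb);
}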
 
index 0e84c73cd9c4ea88514e6d145bb6a753a3205c03..8b0650aae32812bac9abbb581439b592d64949d2 100644 (file)
    For definitive information on HPFS, ask somebody else -- this is guesswork.
    There are certain to be many mistakes. */
 
+#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
+#error unknown endian
+#endif
+
 /* Notation */
 
-typedef unsigned secno;                        /* sector number, partition relative */
+typedef u32 secno;                     /* sector number, partition relative */
 
 typedef secno dnode_secno;             /* sector number of a dnode */
 typedef secno fnode_secno;             /* sector number of an fnode */
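
Moving the sector-number typedefs from unsigned to u32 pins the on-disk structure sizes regardless of the compiler's int width, and the new #error guard refuses to build when neither endianness macro is defined. A hypothetical compile-time check in the same spirit (not something this patch adds):

#include <linux/types.h>
#include <linux/kernel.h>

typedef u32 demo_secno;		/* mirrors the fixed-width secno typedef */

/* With fixed-width members the layout can be pinned at build time. */
static inline void demo_layout_check(void)
{
	BUILD_BUG_ON(sizeof(demo_secno) != 4);
}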
@@ -38,28 +42,28 @@ typedef u32 time32_t;               /* 32-bit time_t type */
 
 struct hpfs_boot_block
 {
-  unsigned char jmp[3];
-  unsigned char oem_id[8];
-  unsigned char bytes_per_sector[2];   /* 512 */
-  unsigned char sectors_per_cluster;
-  unsigned char n_reserved_sectors[2];
-  unsigned char n_fats;
-  unsigned char n_rootdir_entries[2];
-  unsigned char n_sectors_s[2];
-  unsigned char media_byte;
-  unsigned short sectors_per_fat;
-  unsigned short sectors_per_track;
-  unsigned short heads_per_cyl;
-  unsigned int n_hidden_sectors;
-  unsigned int n_sectors_l;            /* size of partition */
-  unsigned char drive_number;
-  unsigned char mbz;
-  unsigned char sig_28h;               /* 28h */
-  unsigned char vol_serno[4];
-  unsigned char vol_label[11];
-  unsigned char sig_hpfs[8];           /* "HPFS    " */
-  unsigned char pad[448];
-  unsigned short magic;                        /* aa55 */
+  u8 jmp[3];
+  u8 oem_id[8];
+  u8 bytes_per_sector[2];      /* 512 */
+  u8 sectors_per_cluster;
+  u8 n_reserved_sectors[2];
+  u8 n_fats;
+  u8 n_rootdir_entries[2];
+  u8 n_sectors_s[2];
+  u8 media_byte;
+  u16 sectors_per_fat;
+  u16 sectors_per_track;
+  u16 heads_per_cyl;
+  u32 n_hidden_sectors;
+  u32 n_sectors_l;             /* size of partition */
+  u8 drive_number;
+  u8 mbz;
+  u8 sig_28h;                  /* 28h */
+  u8 vol_serno[4];
+  u8 vol_label[11];
+  u8 sig_hpfs[8];              /* "HPFS    " */
+  u8 pad[448];
+  u16 magic;                   /* aa55 */
 };
 
 
@@ -71,31 +75,29 @@ struct hpfs_boot_block
 
 struct hpfs_super_block
 {
-  unsigned magic;                      /* f995 e849 */
-  unsigned magic1;                     /* fa53 e9c5, more magic? */
-  /*unsigned huh202;*/                 /* ?? 202 = N. of B. in 1.00390625 S.*/
-  char version;                                /* version of a filesystem  usually 2 */
-  char funcversion;                    /* functional version - oldest version
+  u32 magic;                           /* f995 e849 */
+  u32 magic1;                          /* fa53 e9c5, more magic? */
+  u8 version;                          /* version of a filesystem  usually 2 */
+  u8 funcversion;                      /* functional version - oldest version
                                           of filesystem that can understand
                                           this disk */
-  unsigned short int zero;             /* 0 */
+  u16 zero;                            /* 0 */
   fnode_secno root;                    /* fnode of root directory */
   secno n_sectors;                     /* size of filesystem */
-  unsigned n_badblocks;                        /* number of bad blocks */
+  u32 n_badblocks;                     /* number of bad blocks */
   secno bitmaps;                       /* pointers to free space bit maps */
-  unsigned zero1;                      /* 0 */
+  u32 zero1;                           /* 0 */
   secno badblocks;                     /* bad block list */
-  unsigned zero3;                      /* 0 */
+  u32 zero3;                           /* 0 */
   time32_t last_chkdsk;                        /* date last checked, 0 if never */
-  /*unsigned zero4;*/                  /* 0 */
-  time32_t last_optimize;                      /* date last optimized, 0 if never */
+  time32_t last_optimize;              /* date last optimized, 0 if never */
   secno n_dir_band;                    /* number of sectors in dir band */
   secno dir_band_start;                        /* first sector in dir band */
   secno dir_band_end;                  /* last sector in dir band */
   secno dir_band_bitmap;               /* free space map, 1 dnode per bit */
-  char volume_name[32];                        /* not used */
+  u8 volume_name[32];                  /* not used */
   secno user_id_table;                 /* 8 preallocated sectors - user id */
-  unsigned zero6[103];                 /* 0 */
+  u32 zero6[103];                      /* 0 */
 };
 
 
@@ -107,44 +109,65 @@ struct hpfs_super_block
 
 struct hpfs_spare_block
 {
-  unsigned magic;                      /* f991 1849 */
-  unsigned magic1;                     /* fa52 29c5, more magic? */
-
-  unsigned dirty: 1;                   /* 0 clean, 1 "improperly stopped" */
-  /*unsigned flag1234: 4;*/            /* unknown flags */
-  unsigned sparedir_used: 1;           /* spare dirblks used */
-  unsigned hotfixes_used: 1;           /* hotfixes used */
-  unsigned bad_sector: 1;              /* bad sector, corrupted disk (???) */
-  unsigned bad_bitmap: 1;              /* bad bitmap */
-  unsigned fast: 1;                    /* partition was fast formatted */
-  unsigned old_wrote: 1;               /* old version wrote to partion */
-  unsigned old_wrote_1: 1;             /* old version wrote to partion (?) */
-  unsigned install_dasd_limits: 1;     /* HPFS386 flags */
-  unsigned resynch_dasd_limits: 1;
-  unsigned dasd_limits_operational: 1;
-  unsigned multimedia_active: 1;
-  unsigned dce_acls_active: 1;
-  unsigned dasd_limits_dirty: 1;
-  unsigned flag67: 2;
-  unsigned char mm_contlgulty;
-  unsigned char unused;
+  u32 magic;                           /* f991 1849 */
+  u32 magic1;                          /* fa52 29c5, more magic? */
+
+#ifdef __LITTLE_ENDIAN
+  u8 dirty: 1;                         /* 0 clean, 1 "improperly stopped" */
+  u8 sparedir_used: 1;                 /* spare dirblks used */
+  u8 hotfixes_used: 1;                 /* hotfixes used */
+  u8 bad_sector: 1;                    /* bad sector, corrupted disk (???) */
+  u8 bad_bitmap: 1;                    /* bad bitmap */
+  u8 fast: 1;                          /* partition was fast formatted */
+  u8 old_wrote: 1;                     /* old version wrote to partion */
+  u8 old_wrote_1: 1;                   /* old version wrote to partion (?) */
+#else
+  u8 old_wrote_1: 1;                   /* old version wrote to partion (?) */
+  u8 old_wrote: 1;                     /* old version wrote to partion */
+  u8 fast: 1;                          /* partition was fast formatted */
+  u8 bad_bitmap: 1;                    /* bad bitmap */
+  u8 bad_sector: 1;                    /* bad sector, corrupted disk (???) */
+  u8 hotfixes_used: 1;                 /* hotfixes used */
+  u8 sparedir_used: 1;                 /* spare dirblks used */
+  u8 dirty: 1;                         /* 0 clean, 1 "improperly stopped" */
+#endif
+
+#ifdef __LITTLE_ENDIAN
+  u8 install_dasd_limits: 1;           /* HPFS386 flags */
+  u8 resynch_dasd_limits: 1;
+  u8 dasd_limits_operational: 1;
+  u8 multimedia_active: 1;
+  u8 dce_acls_active: 1;
+  u8 dasd_limits_dirty: 1;
+  u8 flag67: 2;
+#else
+  u8 flag67: 2;
+  u8 dasd_limits_dirty: 1;
+  u8 dce_acls_active: 1;
+  u8 multimedia_active: 1;
+  u8 dasd_limits_operational: 1;
+  u8 resynch_dasd_limits: 1;
+  u8 install_dasd_limits: 1;           /* HPFS386 flags */
+#endif
+
+  u8 mm_contlgulty;
+  u8 unused;
 
   secno hotfix_map;                    /* info about remapped bad sectors */
-  unsigned n_spares_used;              /* number of hotfixes */
-  unsigned n_spares;                   /* number of spares in hotfix map */
-  unsigned n_dnode_spares_free;                /* spare dnodes unused */
-  unsigned n_dnode_spares;             /* length of spare_dnodes[] list,
+  u32 n_spares_used;                   /* number of hotfixes */
+  u32 n_spares;                                /* number of spares in hotfix map */
+  u32 n_dnode_spares_free;             /* spare dnodes unused */
+  u32 n_dnode_spares;                  /* length of spare_dnodes[] list,
                                           follows in this block*/
   secno code_page_dir;                 /* code page directory block */
-  unsigned n_code_pages;               /* number of code pages */
-  /*unsigned large_numbers[2];*/       /* ?? */
-  unsigned super_crc;                  /* on HPFS386 and LAN Server this is
+  u32 n_code_pages;                    /* number of code pages */
+  u32 super_crc;                       /* on HPFS386 and LAN Server this is
                                           checksum of superblock, on normal
                                           OS/2 unused */
-  unsigned spare_crc;                  /* on HPFS386 checksum of spareblock */
-  unsigned zero1[15];                  /* unused */
+  u32 spare_crc;                       /* on HPFS386 checksum of spareblock */
+  u32 zero1[15];                       /* unused */
   dnode_secno spare_dnodes[100];       /* emergency free dnode list */
-  unsigned zero2[1];                   /* room for more? */
+  u32 zero2[1];                                /* room for more? */
 };
 
 /* The bad block list is 4 sectors long.  The first word must be zero,
@@ -179,18 +202,18 @@ struct hpfs_spare_block
 
 struct code_page_directory
 {
-  unsigned magic;                      /* 4945 21f7 */
-  unsigned n_code_pages;               /* number of pointers following */
-  unsigned zero1[2];
+  u32 magic;                           /* 4945 21f7 */
+  u32 n_code_pages;                    /* number of pointers following */
+  u32 zero1[2];
   struct {
-    unsigned short ix;                 /* index */
-    unsigned short code_page_number;   /* code page number */
-    unsigned bounds;                   /* matches corresponding word
+    u16 ix;                            /* index */
+    u16 code_page_number;              /* code page number */
+    u32 bounds;                                /* matches corresponding word
                                           in data block */
     secno code_page_data;              /* sector number of a code_page_data
                                           containing c.p. array */
-    unsigned short index;              /* index in c.p. array in that sector*/
-    unsigned short unknown;            /* some unknown value; usually 0;
+    u16 index;                         /* index in c.p. array in that sector*/
+    u16 unknown;                       /* some unknown value; usually 0;
                                           2 in Japanese version */
   } array[31];                         /* unknown length */
 };
@@ -201,21 +224,21 @@ struct code_page_directory
 
 struct code_page_data
 {
-  unsigned magic;                      /* 8945 21f7 */
-  unsigned n_used;                     /* # elements used in c_p_data[] */
-  unsigned bounds[3];                  /* looks a bit like
+  u32 magic;                           /* 8945 21f7 */
+  u32 n_used;                          /* # elements used in c_p_data[] */
+  u32 bounds[3];                       /* looks a bit like
                                             (beg1,end1), (beg2,end2)
                                           one byte each */
-  unsigned short offs[3];              /* offsets from start of sector
+  u16 offs[3];                         /* offsets from start of sector
                                           to start of c_p_data[ix] */
   struct {
-    unsigned short ix;                 /* index */
-    unsigned short code_page_number;   /* code page number */
-    unsigned short unknown;            /* the same as in cp directory */
-    unsigned char map[128];            /* upcase table for chars 80..ff */
-    unsigned short zero2;
+    u16 ix;                            /* index */
+    u16 code_page_number;              /* code page number */
+    u16 unknown;                       /* the same as in cp directory */
+    u8 map[128];                       /* upcase table for chars 80..ff */
+    u16 zero2;
   } code_page[3];
-  unsigned char incognita[78];
+  u8 incognita[78];
 };
 
 
@@ -255,50 +278,84 @@ struct code_page_data
 #define DNODE_MAGIC   0x77e40aae
 
 struct dnode {
-  unsigned magic;                      /* 77e4 0aae */
-  unsigned first_free;                 /* offset from start of dnode to
+  u32 magic;                           /* 77e4 0aae */
+  u32 first_free;                      /* offset from start of dnode to
                                           first free dir entry */
-  unsigned root_dnode:1;               /* Is it root dnode? */
-  unsigned increment_me:31;            /* some kind of activity counter?
-                                          Neither HPFS.IFS nor CHKDSK cares
+#ifdef __LITTLE_ENDIAN
+  u8 root_dnode: 1;                    /* Is it root dnode? */
+  u8 increment_me: 7;                  /* some kind of activity counter? */
+                                       /* Neither HPFS.IFS nor CHKDSK cares
+                                          if you change this word */
+#else
+  u8 increment_me: 7;                  /* some kind of activity counter? */
+                                       /* Neither HPFS.IFS nor CHKDSK cares
                                           if you change this word */
+  u8 root_dnode: 1;                    /* Is it root dnode? */
+#endif
+  u8 increment_me2[3];
   secno up;                            /* (root dnode) directory's fnode
                                           (nonroot) parent dnode */
   dnode_secno self;                    /* pointer to this dnode */
-  unsigned char dirent[2028];          /* one or more dirents */
+  u8 dirent[2028];                     /* one or more dirents */
 };
 
 struct hpfs_dirent {
-  unsigned short length;               /* offset to next dirent */
-  unsigned first: 1;                   /* set on phony ^A^A (".") entry */
-  unsigned has_acl: 1;
-  unsigned down: 1;                    /* down pointer present (after name) */
-  unsigned last: 1;                    /* set on phony \377 entry */
-  unsigned has_ea: 1;                  /* entry has EA */
-  unsigned has_xtd_perm: 1;            /* has extended perm list (???) */
-  unsigned has_explicit_acl: 1;
-  unsigned has_needea: 1;              /* ?? some EA has NEEDEA set
+  u16 length;                          /* offset to next dirent */
+
+#ifdef __LITTLE_ENDIAN
+  u8 first: 1;                         /* set on phony ^A^A (".") entry */
+  u8 has_acl: 1;
+  u8 down: 1;                          /* down pointer present (after name) */
+  u8 last: 1;                          /* set on phony \377 entry */
+  u8 has_ea: 1;                                /* entry has EA */
+  u8 has_xtd_perm: 1;                  /* has extended perm list (???) */
+  u8 has_explicit_acl: 1;
+  u8 has_needea: 1;                    /* ?? some EA has NEEDEA set
+                                          I have no idea why this is
+                                          interesting in a dir entry */
+#else
+  u8 has_needea: 1;                    /* ?? some EA has NEEDEA set
                                           I have no idea why this is
                                           interesting in a dir entry */
-  unsigned read_only: 1;               /* dos attrib */
-  unsigned hidden: 1;                  /* dos attrib */
-  unsigned system: 1;                  /* dos attrib */
-  unsigned flag11: 1;                  /* would be volume label dos attrib */
-  unsigned directory: 1;               /* dos attrib */
-  unsigned archive: 1;                 /* dos attrib */
-  unsigned not_8x3: 1;                 /* name is not 8.3 */
-  unsigned flag15: 1;
+  u8 has_explicit_acl: 1;
+  u8 has_xtd_perm: 1;                  /* has extended perm list (???) */
+  u8 has_ea: 1;                                /* entry has EA */
+  u8 last: 1;                          /* set on phony \377 entry */
+  u8 down: 1;                          /* down pointer present (after name) */
+  u8 has_acl: 1;
+  u8 first: 1;                         /* set on phony ^A^A (".") entry */
+#endif
+
+#ifdef __LITTLE_ENDIAN
+  u8 read_only: 1;                     /* dos attrib */
+  u8 hidden: 1;                                /* dos attrib */
+  u8 system: 1;                                /* dos attrib */
+  u8 flag11: 1;                                /* would be volume label dos attrib */
+  u8 directory: 1;                     /* dos attrib */
+  u8 archive: 1;                       /* dos attrib */
+  u8 not_8x3: 1;                       /* name is not 8.3 */
+  u8 flag15: 1;
+#else
+  u8 flag15: 1;
+  u8 not_8x3: 1;                       /* name is not 8.3 */
+  u8 archive: 1;                       /* dos attrib */
+  u8 directory: 1;                     /* dos attrib */
+  u8 flag11: 1;                                /* would be volume label dos attrib */
+  u8 system: 1;                                /* dos attrib */
+  u8 hidden: 1;                                /* dos attrib */
+  u8 read_only: 1;                     /* dos attrib */
+#endif
+
   fnode_secno fnode;                   /* fnode giving allocation info */
   time32_t write_date;                 /* mtime */
-  unsigned file_size;                  /* file length, bytes */
+  u32 file_size;                       /* file length, bytes */
   time32_t read_date;                  /* atime */
   time32_t creation_date;                      /* ctime */
-  unsigned ea_size;                    /* total EA length, bytes */
-  unsigned char no_of_acls : 3;                /* number of ACL's */
-  unsigned char reserver : 5;
-  unsigned char ix;                    /* code page index (of filename), see
+  u32 ea_size;                         /* total EA length, bytes */
+  u8 no_of_acls;                       /* number of ACL's (low 3 bits) */
+  u8 ix;                               /* code page index (of filename), see
                                           struct code_page_data */
-  unsigned char namelen, name[1];      /* file name */
+  u8 namelen, name[1];                 /* file name */
   /* dnode_secno down;   btree down pointer, if present,
                          follows name on next word boundary, or maybe it
                          precedes next dirent, which is on a word boundary. */
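
The #ifdef __LITTLE_ENDIAN / __BIG_ENDIAN mirroring above is needed because C bit-field allocation order follows the target ABI while the on-disk flag bytes are fixed. An endian-neutral alternative would be explicit masks over a plain u8; a sketch with hypothetical macro names, shown only for contrast with what the patch actually does:

#include <linux/types.h>

/* Bit values of the first hpfs_dirent flag byte as laid out on disk,
 * matching the little-endian bit-field declaration order above. */
#define DEMO_DE_FIRST	0x01	/* phony ^A^A (".") entry */
#define DEMO_DE_HAS_ACL	0x02
#define DEMO_DE_DOWN	0x04	/* down pointer present after the name */
#define DEMO_DE_LAST	0x08	/* phony \377 entry */
#define DEMO_DE_HAS_EA	0x10	/* entry has extended attributes */

static inline int demo_de_has_down(u8 flags)
{
	return (flags & DEMO_DE_DOWN) != 0;	/* byte access, no #ifdef needed */
}

Keeping the bit-fields (with mirrored order per endianness) preserves the existing field names used all over the directory code, which is presumably why the patch takes that route instead.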
@@ -318,38 +375,50 @@ struct hpfs_dirent {
 
 struct bplus_leaf_node
 {
-  unsigned file_secno;                 /* first file sector in extent */
-  unsigned length;                     /* length, sectors */
+  u32 file_secno;                      /* first file sector in extent */
+  u32 length;                          /* length, sectors */
   secno disk_secno;                    /* first corresponding disk sector */
 };
 
 struct bplus_internal_node
 {
-  unsigned file_secno;                 /* subtree maps sectors < this  */
+  u32 file_secno;                      /* subtree maps sectors < this  */
   anode_secno down;                    /* pointer to subtree */
 };
 
 struct bplus_header
 {
-  unsigned hbff: 1;    /* high bit of first free entry offset */
-  unsigned flag1: 1;
-  unsigned flag2: 1;
-  unsigned flag3: 1;
-  unsigned flag4: 1;
-  unsigned fnode_parent: 1;            /* ? we're pointed to by an fnode,
+#ifdef __LITTLE_ENDIAN
+  u8 hbff: 1;                  /* high bit of first free entry offset */
+  u8 flag1234: 4;
+  u8 fnode_parent: 1;                  /* ? we're pointed to by an fnode,
                                           the data btree or some ea or the
                                           main ea bootage pointer ea_secno */
                                        /* also can get set in fnodes, which
                                           may be a chkdsk glitch or may mean
                                           this bit is irrelevant in fnodes,
                                           or this interpretation is all wet */
-  unsigned binary_search: 1;           /* suggest binary search (unused) */
-  unsigned internal: 1;                        /* 1 -> (internal) tree of anodes
+  u8 binary_search: 1;                 /* suggest binary search (unused) */
+  u8 internal: 1;                      /* 1 -> (internal) tree of anodes
+                                          0 -> (leaf) list of extents */
+#else
+  u8 internal: 1;                      /* 1 -> (internal) tree of anodes
                                           0 -> (leaf) list of extents */
-  unsigned char fill[3];
-  unsigned char n_free_nodes;          /* free nodes in following array */
-  unsigned char n_used_nodes;          /* used nodes in following array */
-  unsigned short first_free;           /* offset from start of header to
+  u8 binary_search: 1;                 /* suggest binary search (unused) */
+  u8 fnode_parent: 1;                  /* ? we're pointed to by an fnode,
+                                          the data btree or some ea or the
+                                          main ea bootage pointer ea_secno */
+                                       /* also can get set in fnodes, which
+                                          may be a chkdsk glitch or may mean
+                                          this bit is irrelevant in fnodes,
+                                          or this interpretation is all wet */
+  u8 flag1234: 4;
+  u8 hbff: 1;                  /* high bit of first free entry offset */
+#endif
+  u8 fill[3];
+  u8 n_free_nodes;                     /* free nodes in following array */
+  u8 n_used_nodes;                     /* used nodes in following array */
+  u16 first_free;                      /* offset from start of header to
                                           first free node in array */
   union {
     struct bplus_internal_node internal[0]; /* (internal) 2-word entries giving
@@ -369,37 +438,38 @@ struct bplus_header
 
 struct fnode
 {
-  unsigned magic;                      /* f7e4 0aae */
-  unsigned zero1[2];                   /* read history */
-  unsigned char len, name[15];         /* true length, truncated name */
+  u32 magic;                           /* f7e4 0aae */
+  u32 zero1[2];                                /* read history */
+  u8 len, name[15];                    /* true length, truncated name */
   fnode_secno up;                      /* pointer to file's directory fnode */
-  /*unsigned zero2[3];*/
   secno acl_size_l;
   secno acl_secno;
-  unsigned short acl_size_s;
-  char acl_anode;
-  char zero2;                          /* history bit count */
-  unsigned ea_size_l;                  /* length of disk-resident ea's */
+  u16 acl_size_s;
+  u8 acl_anode;
+  u8 zero2;                            /* history bit count */
+  u32 ea_size_l;                       /* length of disk-resident ea's */
   secno ea_secno;                      /* first sector of disk-resident ea's*/
-  unsigned short ea_size_s;            /* length of fnode-resident ea's */
-
-  unsigned flag0: 1;
-  unsigned ea_anode: 1;                        /* 1 -> ea_secno is an anode */
-  unsigned flag2: 1;
-  unsigned flag3: 1;
-  unsigned flag4: 1;
-  unsigned flag5: 1;
-  unsigned flag6: 1;
-  unsigned flag7: 1;
-  unsigned dirflag: 1;                 /* 1 -> directory.  first & only extent
+  u16 ea_size_s;                       /* length of fnode-resident ea's */
+
+#ifdef __LITTLE_ENDIAN
+  u8 flag0: 1;
+  u8 ea_anode: 1;                      /* 1 -> ea_secno is an anode */
+  u8 flag234567: 6;
+#else
+  u8 flag234567: 6;
+  u8 ea_anode: 1;                      /* 1 -> ea_secno is an anode */
+  u8 flag0: 1;
+#endif
+
+#ifdef __LITTLE_ENDIAN
+  u8 dirflag: 1;                       /* 1 -> directory.  first & only extent
                                           points to dnode. */
-  unsigned flag9: 1;
-  unsigned flag10: 1;
-  unsigned flag11: 1;
-  unsigned flag12: 1;
-  unsigned flag13: 1;
-  unsigned flag14: 1;
-  unsigned flag15: 1;
+  u8 flag9012345: 7;
+#else
+  u8 flag9012345: 7;
+  u8 dirflag: 1;                       /* 1 -> directory.  first & only extent
+                                          points to dnode. */
+#endif
 
   struct bplus_header btree;           /* b+ tree, 8 extents or 12 subtrees */
   union {
@@ -407,17 +477,16 @@ struct fnode
     struct bplus_internal_node internal[12];
   } u;
 
-  unsigned file_size;                  /* file length, bytes */
-  unsigned n_needea;                   /* number of EA's with NEEDEA set */
-  char user_id[16];                    /* unused */
-  unsigned short ea_offs;              /* offset from start of fnode
+  u32 file_size;                       /* file length, bytes */
+  u32 n_needea;                                /* number of EA's with NEEDEA set */
+  u8 user_id[16];                      /* unused */
+  u16 ea_offs;                         /* offset from start of fnode
                                           to first fnode-resident ea */
-  char dasd_limit_treshhold;
-  char dasd_limit_delta;
-  unsigned dasd_limit;
-  unsigned dasd_usage;
-  /*unsigned zero5[2];*/
-  unsigned char ea[316];               /* zero or more EA's, packed together
+  u8 dasd_limit_treshhold;
+  u8 dasd_limit_delta;
+  u32 dasd_limit;
+  u32 dasd_usage;
+  u8 ea[316];                          /* zero or more EA's, packed together
                                           with no alignment padding.
                                           (Do not use this name, get here
                                           via fnode + ea_offs. I think.) */
@@ -430,7 +499,7 @@ struct fnode
 
 struct anode
 {
-  unsigned magic;                      /* 37e4 0aae */
+  u32 magic;                           /* 37e4 0aae */
   anode_secno self;                    /* pointer to this anode */
   secno up;                            /* parent anode or fnode */
 
@@ -440,7 +509,7 @@ struct anode
     struct bplus_internal_node internal[60];
   } u;
 
-  unsigned fill[3];                    /* unused */
+  u32 fill[3];                         /* unused */
 };
 
 
@@ -461,25 +530,31 @@ struct anode
 
 struct extended_attribute
 {
-  unsigned indirect: 1;                        /* 1 -> value gives sector number
+#ifdef __LITTLE_ENDIAN
+  u8 indirect: 1;                      /* 1 -> value gives sector number
                                           where real value starts */
-  unsigned anode: 1;                   /* 1 -> sector is an anode
+  u8 anode: 1;                         /* 1 -> sector is an anode
+                                          that points to fragmented value */
+  u8 flag23456: 5;
+  u8 needea: 1;                                /* required ea */
+#else
+  u8 needea: 1;                                /* required ea */
+  u8 flag23456: 5;
+  u8 anode: 1;                         /* 1 -> sector is an anode
                                           that points to fragmented value */
-  unsigned flag2: 1;
-  unsigned flag3: 1;
-  unsigned flag4: 1;
-  unsigned flag5: 1;
-  unsigned flag6: 1;
-  unsigned needea: 1;                  /* required ea */
-  unsigned char namelen;               /* length of name, bytes */
-  unsigned short valuelen;             /* length of value, bytes */
-  unsigned char name[0];
+  u8 indirect: 1;                      /* 1 -> value gives sector number
+                                          where real value starts */
+#endif
+  u8 namelen;                          /* length of name, bytes */
+  u8 valuelen_lo;                      /* length of value, bytes */
+  u8 valuelen_hi;                      /* length of value, bytes */
+  u8 name[0];
   /*
-    unsigned char name[namelen];       ascii attrib name
-    unsigned char nul;                 terminating '\0', not counted
-    unsigned char value[valuelen];     value, arbitrary
+    u8 name[namelen];                  ascii attrib name
+    u8 nul;                            terminating '\0', not counted
+    u8 value[valuelen];                        value, arbitrary
       if this.indirect, valuelen is 8 and the value is
-        unsigned length;               real length of value, bytes
+        u32 length;                    real length of value, bytes
         secno secno;                   sector address where it starts
       if this.anode, the above sector number is the root of an anode tree
         which points to the value.
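
The hunks above convert the on-disk HPFS structures to fixed-width types (u8/u16/u32) and duplicate every bit-field declaration under #ifdef __LITTLE_ENDIAN / #else, because C leaves bit-field ordering to the compiler: little-endian targets typically allocate bit-fields starting from the least significant bit, big-endian targets from the most significant one, so the declaration order has to be reversed to keep the same on-disk byte layout. A minimal sketch of the pattern, with purely illustrative field names that are not taken from the patch:

	/* Sketch only: one hypothetical on-disk flags byte, handled the
	 * same way as the hpfs structures above.  Assumes the kernel's
	 * <linux/types.h> and <asm/byteorder.h> definitions. */
	struct example_disk_flags {
	#ifdef __LITTLE_ENDIAN
		u8 dirty:    1;		/* lowest bit on disk */
		u8 reserved: 6;
		u8 readonly: 1;		/* highest bit on disk */
	#else
		u8 readonly: 1;		/* reversed: big-endian compilers
					   allocate bit-fields from the most
					   significant bit downwards */
		u8 reserved: 6;
		u8 dirty:    1;
	#endif
	};

The 16-bit extended-attribute value length is handled differently: it is split into two explicit bytes (valuelen_lo, valuelen_hi) and reassembled by the ea_valuelen() helper added in the following header hunks, which sidesteps both byte order and alignment for that field.
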
index c15adbca07ff1a433fbf8ec9d2ac03fb34d057c5..dd552f862c8f1c562a34768f970b6ddcb68b2336 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/pagemap.h>
 #include <linux/buffer_head.h>
 #include <linux/slab.h>
+#include <asm/unaligned.h>
 
 #include "hpfs.h"
 
@@ -51,18 +52,16 @@ struct hpfs_inode_info {
        unsigned i_disk_sec;    /* (files) minimalist cache of alloc info */
        unsigned i_n_secs;      /* (files) minimalist cache of alloc info */
        unsigned i_ea_size;     /* size of extended attributes */
-       unsigned i_conv : 2;    /* (files) crlf->newline hackery */
        unsigned i_ea_mode : 1; /* file's permission is stored in ea */
        unsigned i_ea_uid : 1;  /* file's uid is stored in ea */
        unsigned i_ea_gid : 1;  /* file's gid is stored in ea */
        unsigned i_dirty : 1;
-       struct mutex i_mutex;
-       struct mutex i_parent_mutex;
        loff_t **i_rddir_off;
        struct inode vfs_inode;
 };
 
 struct hpfs_sb_info {
+       struct mutex hpfs_mutex;        /* global hpfs lock */
        ino_t sb_root;                  /* inode number of root dir */
        unsigned sb_fs_size;            /* file system size, sectors */
        unsigned sb_bitmaps;            /* sector number of bitmap list */
@@ -74,7 +73,6 @@ struct hpfs_sb_info {
        uid_t sb_uid;                   /* uid from mount options */
        gid_t sb_gid;                   /* gid from mount options */
        umode_t sb_mode;                /* mode from mount options */
-       unsigned sb_conv : 2;           /* crlf->newline hackery */
        unsigned sb_eas : 2;            /* eas: 0-ignore, 1-ro, 2-rw */
        unsigned sb_err : 2;            /* on errs: 0-cont, 1-ro, 2-panic */
        unsigned sb_chk : 2;            /* checks: 0-no, 1-normal, 2-strict */
@@ -87,20 +85,9 @@ struct hpfs_sb_info {
        unsigned *sb_bmp_dir;           /* main bitmap directory */
        unsigned sb_c_bitmap;           /* current bitmap */
        unsigned sb_max_fwd_alloc;      /* max forwad allocation */
-       struct mutex hpfs_creation_de;  /* when creating dirents, nobody else
-                                          can alloc blocks */
-       /*unsigned sb_mounting : 1;*/
        int sb_timeshift;
 };
 
-/*
- * conv= options
- */
-
-#define CONV_BINARY 0                  /* no conversion */
-#define CONV_TEXT 1                    /* crlf->newline */
-#define CONV_AUTO 2                    /* decide based on file contents */
-
 /* Four 512-byte buffers and the 2k block obtained by concatenating them */
 
 struct quad_buffer_head {
@@ -113,7 +100,7 @@ struct quad_buffer_head {
 static inline dnode_secno de_down_pointer (struct hpfs_dirent *de)
 {
   CHKCOND(de->down,("HPFS: de_down_pointer: !de->down\n"));
-  return *(dnode_secno *) ((void *) de + de->length - 4);
+  return le32_to_cpu(*(dnode_secno *) ((void *) de + le16_to_cpu(de->length) - 4));
 }
 
 /* The first dir entry in a dnode */
@@ -127,41 +114,46 @@ static inline struct hpfs_dirent *dnode_first_de (struct dnode *dnode)
 
 static inline struct hpfs_dirent *dnode_end_de (struct dnode *dnode)
 {
-  CHKCOND(dnode->first_free>=0x14 && dnode->first_free<=0xa00,("HPFS: dnode_end_de: dnode->first_free = %d\n",(int)dnode->first_free));
-  return (void *) dnode + dnode->first_free;
+  CHKCOND(le32_to_cpu(dnode->first_free)>=0x14 && le32_to_cpu(dnode->first_free)<=0xa00,("HPFS: dnode_end_de: dnode->first_free = %x\n",(unsigned)le32_to_cpu(dnode->first_free)));
+  return (void *) dnode + le32_to_cpu(dnode->first_free);
 }
 
 /* The dir entry after dir entry de */
 
 static inline struct hpfs_dirent *de_next_de (struct hpfs_dirent *de)
 {
-  CHKCOND(de->length>=0x20 && de->length<0x800,("HPFS: de_next_de: de->length = %d\n",(int)de->length));
-  return (void *) de + de->length;
+  CHKCOND(le16_to_cpu(de->length)>=0x20 && le16_to_cpu(de->length)<0x800,("HPFS: de_next_de: de->length = %x\n",(unsigned)le16_to_cpu(de->length)));
+  return (void *) de + le16_to_cpu(de->length);
 }
 
 static inline struct extended_attribute *fnode_ea(struct fnode *fnode)
 {
-       return (struct extended_attribute *)((char *)fnode + fnode->ea_offs + fnode->acl_size_s);
+       return (struct extended_attribute *)((char *)fnode + le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s));
 }
 
 static inline struct extended_attribute *fnode_end_ea(struct fnode *fnode)
 {
-       return (struct extended_attribute *)((char *)fnode + fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s);
+       return (struct extended_attribute *)((char *)fnode + le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s));
+}
+
+static unsigned ea_valuelen(struct extended_attribute *ea)
+{
+       return ea->valuelen_lo + 256 * ea->valuelen_hi;
 }
 
 static inline struct extended_attribute *next_ea(struct extended_attribute *ea)
 {
-       return (struct extended_attribute *)((char *)ea + 5 + ea->namelen + ea->valuelen);
+       return (struct extended_attribute *)((char *)ea + 5 + ea->namelen + ea_valuelen(ea));
 }
 
 static inline secno ea_sec(struct extended_attribute *ea)
 {
-       return *(secno *)((char *)ea + 9 + ea->namelen);
+       return le32_to_cpu(get_unaligned((secno *)((char *)ea + 9 + ea->namelen)));
 }
 
 static inline secno ea_len(struct extended_attribute *ea)
 {
-       return *(secno *)((char *)ea + 5 + ea->namelen);
+       return le32_to_cpu(get_unaligned((secno *)((char *)ea + 5 + ea->namelen)));
 }
 
 static inline char *ea_data(struct extended_attribute *ea)
@@ -186,13 +178,13 @@ static inline void copy_de(struct hpfs_dirent *dst, struct hpfs_dirent *src)
        dst->not_8x3 = n;
 }
 
-static inline unsigned tstbits(unsigned *bmp, unsigned b, unsigned n)
+static inline unsigned tstbits(u32 *bmp, unsigned b, unsigned n)
 {
        int i;
        if ((b >= 0x4000) || (b + n - 1 >= 0x4000)) return n;
-       if (!((bmp[(b & 0x3fff) >> 5] >> (b & 0x1f)) & 1)) return 1;
+       if (!((le32_to_cpu(bmp[(b & 0x3fff) >> 5]) >> (b & 0x1f)) & 1)) return 1;
        for (i = 1; i < n; i++)
-               if (/*b+i < 0x4000 &&*/ !((bmp[((b+i) & 0x3fff) >> 5] >> ((b+i) & 0x1f)) & 1))
+               if (!((le32_to_cpu(bmp[((b+i) & 0x3fff) >> 5]) >> ((b+i) & 0x1f)) & 1))
                        return i + 1;
        return 0;
 }
@@ -200,12 +192,12 @@ static inline unsigned tstbits(unsigned *bmp, unsigned b, unsigned n)
 /* alloc.c */
 
 int hpfs_chk_sectors(struct super_block *, secno, int, char *);
-secno hpfs_alloc_sector(struct super_block *, secno, unsigned, int, int);
+secno hpfs_alloc_sector(struct super_block *, secno, unsigned, int);
 int hpfs_alloc_if_possible(struct super_block *, secno);
 void hpfs_free_sectors(struct super_block *, secno, unsigned);
 int hpfs_check_free_dnodes(struct super_block *, int);
 void hpfs_free_dnode(struct super_block *, secno);
-struct dnode *hpfs_alloc_dnode(struct super_block *, secno, dnode_secno *, struct quad_buffer_head *, int);
+struct dnode *hpfs_alloc_dnode(struct super_block *, secno, dnode_secno *, struct quad_buffer_head *);
 struct fnode *hpfs_alloc_fnode(struct super_block *, secno, fnode_secno *, struct buffer_head **);
 struct anode *hpfs_alloc_anode(struct super_block *, secno, anode_secno *, struct buffer_head **);
 
@@ -222,8 +214,6 @@ void hpfs_remove_fnode(struct super_block *, fnode_secno fno);
 
 /* buffer.c */
 
-void hpfs_lock_creation(struct super_block *);
-void hpfs_unlock_creation(struct super_block *);
 void *hpfs_map_sector(struct super_block *, unsigned, struct buffer_head **, int);
 void *hpfs_get_sector(struct super_block *, unsigned, struct buffer_head **);
 void *hpfs_map_4sectors(struct super_block *, unsigned, struct quad_buffer_head *, int);
@@ -247,7 +237,7 @@ void hpfs_del_pos(struct inode *, loff_t *);
 struct hpfs_dirent *hpfs_add_de(struct super_block *, struct dnode *,
                                const unsigned char *, unsigned, secno);
 int hpfs_add_dirent(struct inode *, const unsigned char *, unsigned,
-                   struct hpfs_dirent *, int);
+                   struct hpfs_dirent *);
 int hpfs_remove_dirent(struct inode *, dnode_secno, struct hpfs_dirent *, struct quad_buffer_head *, int);
 void hpfs_count_dnodes(struct super_block *, dnode_secno, int *, int *, int *);
 dnode_secno hpfs_de_as_down_as_possible(struct super_block *, dnode_secno dno);
@@ -303,7 +293,6 @@ int hpfs_compare_names(struct super_block *, const unsigned char *, unsigned,
                       const unsigned char *, unsigned, int);
 int hpfs_is_name_long(const unsigned char *, unsigned);
 void hpfs_adjust_length(const unsigned char *, unsigned *);
-void hpfs_decide_conv(struct inode *, const unsigned char *, unsigned);
 
 /* namei.c */
 
@@ -346,21 +335,26 @@ static inline time32_t gmt_to_local(struct super_block *s, time_t t)
 /*
  * Locking:
  *
- * hpfs_lock() is a leftover from the big kernel lock.
- * Right now, these functions are empty and only left
- * for documentation purposes. The file system no longer
- * works on SMP systems, so the lock is not needed
- * any more.
+ * hpfs_lock() locks the whole filesystem. It must be taken
+ * on any method called by the VFS.
  *
- * If someone is interested in making it work again, this
- * would be the place to start by adding a per-superblock
- * mutex and fixing all the bugs and performance issues
- * caused by that.
+ * We don't do any per-file locking anymore, it is hard to
+ * review and HPFS is not performance-sensitive anyway.
  */
 static inline void hpfs_lock(struct super_block *s)
 {
+       struct hpfs_sb_info *sbi = hpfs_sb(s);
+       mutex_lock(&sbi->hpfs_mutex);
 }
 
 static inline void hpfs_unlock(struct super_block *s)
 {
+       struct hpfs_sb_info *sbi = hpfs_sb(s);
+       mutex_unlock(&sbi->hpfs_mutex);
+}
+
+static inline void hpfs_lock_assert(struct super_block *s)
+{
+       struct hpfs_sb_info *sbi = hpfs_sb(s);
+       WARN_ON(!mutex_is_locked(&sbi->hpfs_mutex));
 }
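
The comment block and the three inline functions above replace the old per-inode i_mutex/i_parent_mutex pair with a single per-superblock mutex; every VFS entry point is expected to bracket its work with hpfs_lock()/hpfs_unlock(), and helpers can assert coverage with hpfs_lock_assert(). A minimal sketch of the expected usage, where hpfs_example_op() is a hypothetical operation rather than a function from the patch:

	/* Sketch only: the bracketing mirrors the pattern the patch
	 * applies to the real VFS methods. */
	static int hpfs_example_op(struct inode *inode)
	{
		int err = 0;

		hpfs_lock(inode->i_sb);		/* whole-filesystem mutex */
		/* ... read or modify on-disk structures here ... */
		hpfs_lock_assert(inode->i_sb);	/* helpers may check they are covered */
		hpfs_unlock(inode->i_sb);
		return err;
	}

With a single lock there is no lock ordering left to get wrong, which is why the directory operations further below can simply drop their per-inode mutex calls, at the cost of serialising all HPFS work on the superblock.
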
index 87f1f787e7677efdd7a17a4e9a8ed2f71c397606..338cd8368451cd081a800aa77440ce237f24b592 100644 (file)
@@ -17,7 +17,6 @@ void hpfs_init_inode(struct inode *i)
        i->i_uid = hpfs_sb(sb)->sb_uid;
        i->i_gid = hpfs_sb(sb)->sb_gid;
        i->i_mode = hpfs_sb(sb)->sb_mode;
-       hpfs_inode->i_conv = hpfs_sb(sb)->sb_conv;
        i->i_size = -1;
        i->i_blocks = -1;
        
@@ -116,8 +115,8 @@ void hpfs_read_inode(struct inode *i)
                i->i_mode |= S_IFDIR;
                i->i_op = &hpfs_dir_iops;
                i->i_fop = &hpfs_dir_ops;
-               hpfs_inode->i_parent_dir = fnode->up;
-               hpfs_inode->i_dno = fnode->u.external[0].disk_secno;
+               hpfs_inode->i_parent_dir = le32_to_cpu(fnode->up);
+               hpfs_inode->i_dno = le32_to_cpu(fnode->u.external[0].disk_secno);
                if (hpfs_sb(sb)->sb_chk >= 2) {
                        struct buffer_head *bh0;
                        if (hpfs_map_fnode(sb, hpfs_inode->i_parent_dir, &bh0)) brelse(bh0);
@@ -133,7 +132,7 @@ void hpfs_read_inode(struct inode *i)
                i->i_op = &hpfs_file_iops;
                i->i_fop = &hpfs_file_ops;
                i->i_nlink = 1;
-               i->i_size = fnode->file_size;
+               i->i_size = le32_to_cpu(fnode->file_size);
                i->i_blocks = ((i->i_size + 511) >> 9) + 1;
                i->i_data.a_ops = &hpfs_aops;
                hpfs_i(i)->mmu_private = i->i_size;
@@ -144,7 +143,7 @@ void hpfs_read_inode(struct inode *i)
 static void hpfs_write_inode_ea(struct inode *i, struct fnode *fnode)
 {
        struct hpfs_inode_info *hpfs_inode = hpfs_i(i);
-       /*if (fnode->acl_size_l || fnode->acl_size_s) {
+       /*if (le32_to_cpu(fnode->acl_size_l) || le16_to_cpu(fnode->acl_size_s)) {
                   Some unknown structures like ACL may be in fnode,
                   we'd better not overwrite them
                hpfs_error(i->i_sb, "fnode %08x has some unknown HPFS386 stuctures", i->i_ino);
@@ -187,9 +186,7 @@ void hpfs_write_inode(struct inode *i)
                kfree(hpfs_inode->i_rddir_off);
                hpfs_inode->i_rddir_off = NULL;
        }
-       mutex_lock(&hpfs_inode->i_parent_mutex);
        if (!i->i_nlink) {
-               mutex_unlock(&hpfs_inode->i_parent_mutex);
                return;
        }
        parent = iget_locked(i->i_sb, hpfs_inode->i_parent_dir);
@@ -200,14 +197,9 @@ void hpfs_write_inode(struct inode *i)
                        hpfs_read_inode(parent);
                        unlock_new_inode(parent);
                }
-               mutex_lock(&hpfs_inode->i_mutex);
                hpfs_write_inode_nolock(i);
-               mutex_unlock(&hpfs_inode->i_mutex);
                iput(parent);
-       } else {
-               mark_inode_dirty(i);
        }
-       mutex_unlock(&hpfs_inode->i_parent_mutex);
 }
 
 void hpfs_write_inode_nolock(struct inode *i)
@@ -226,30 +218,30 @@ void hpfs_write_inode_nolock(struct inode *i)
                }
        } else de = NULL;
        if (S_ISREG(i->i_mode)) {
-               fnode->file_size = i->i_size;
-               if (de) de->file_size = i->i_size;
+               fnode->file_size = cpu_to_le32(i->i_size);
+               if (de) de->file_size = cpu_to_le32(i->i_size);
        } else if (S_ISDIR(i->i_mode)) {
-               fnode->file_size = 0;
-               if (de) de->file_size = 0;
+               fnode->file_size = cpu_to_le32(0);
+               if (de) de->file_size = cpu_to_le32(0);
        }
        hpfs_write_inode_ea(i, fnode);
        if (de) {
-               de->write_date = gmt_to_local(i->i_sb, i->i_mtime.tv_sec);
-               de->read_date = gmt_to_local(i->i_sb, i->i_atime.tv_sec);
-               de->creation_date = gmt_to_local(i->i_sb, i->i_ctime.tv_sec);
+               de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec));
+               de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec));
+               de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_ctime.tv_sec));
                de->read_only = !(i->i_mode & 0222);
-               de->ea_size = hpfs_inode->i_ea_size;
+               de->ea_size = cpu_to_le32(hpfs_inode->i_ea_size);
                hpfs_mark_4buffers_dirty(&qbh);
                hpfs_brelse4(&qbh);
        }
        if (S_ISDIR(i->i_mode)) {
                if ((de = map_dirent(i, hpfs_inode->i_dno, "\001\001", 2, NULL, &qbh))) {
-                       de->write_date = gmt_to_local(i->i_sb, i->i_mtime.tv_sec);
-                       de->read_date = gmt_to_local(i->i_sb, i->i_atime.tv_sec);
-                       de->creation_date = gmt_to_local(i->i_sb, i->i_ctime.tv_sec);
+                       de->write_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_mtime.tv_sec));
+                       de->read_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_atime.tv_sec));
+                       de->creation_date = cpu_to_le32(gmt_to_local(i->i_sb, i->i_ctime.tv_sec));
                        de->read_only = !(i->i_mode & 0222);
-                       de->ea_size = /*hpfs_inode->i_ea_size*/0;
-                       de->file_size = 0;
+                       de->ea_size = cpu_to_le32(/*hpfs_inode->i_ea_size*/0);
+                       de->file_size = cpu_to_le32(0);
                        hpfs_mark_4buffers_dirty(&qbh);
                        hpfs_brelse4(&qbh);
                } else
@@ -269,6 +261,10 @@ int hpfs_setattr(struct dentry *dentry, struct iattr *attr)
        hpfs_lock(inode->i_sb);
        if (inode->i_ino == hpfs_sb(inode->i_sb)->sb_root)
                goto out_unlock;
+       if ((attr->ia_valid & ATTR_UID) && attr->ia_uid >= 0x10000)
+               goto out_unlock;
+       if ((attr->ia_valid & ATTR_GID) && attr->ia_gid >= 0x10000)
+               goto out_unlock;
        if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size > inode->i_size)
                goto out_unlock;
 
@@ -284,7 +280,6 @@ int hpfs_setattr(struct dentry *dentry, struct iattr *attr)
        }
 
        setattr_copy(inode, attr);
-       mark_inode_dirty(inode);
 
        hpfs_write_inode(inode);
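
The inode hunks above apply the byte-order conversion in both directions: values loaded from the fnode or dirent go through le32_to_cpu()/le16_to_cpu(), values written back go through cpu_to_le32(), and timestamps additionally pass through the local-time helpers. A small sketch of that round-trip, reusing the field names from above but written as stand-alone helpers for illustration only:

	/* Sketch only: illustrates the timestamp round-trip, not code
	 * lifted from the patch. */
	static void example_store_mtime(struct super_block *sb, struct inode *i,
					struct hpfs_dirent *de)
	{
		/* CPU time (UTC seconds) -> on-disk little-endian local time */
		de->write_date = cpu_to_le32(gmt_to_local(sb, i->i_mtime.tv_sec));
	}

	static void example_load_mtime(struct super_block *sb, struct inode *i,
				       struct hpfs_dirent *de)
	{
		/* on-disk little-endian local time -> CPU time (UTC seconds) */
		i->i_mtime.tv_sec = local_to_gmt(sb, le32_to_cpu(de->write_date));
	}

The new checks in hpfs_setattr() reject uid/gid values of 0x10000 (65536) or more, i.e. anything that does not fit in 16 bits, presumably because the ids are stored in 16-bit extended-attribute fields on disk.
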
 
index 840d033ecee832561f7f9ed91cec78527a62e38f..a790821366a7f045d068fe47df517dc479b0ecce 100644 (file)
@@ -21,7 +21,7 @@ unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
                hpfs_error(s, "hpfs_map_bitmap called with bad parameter: %08x at %s", bmp_block, id);
                return NULL;
        }
-       sec = hpfs_sb(s)->sb_bmp_dir[bmp_block];
+       sec = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block]);
        if (!sec || sec > hpfs_sb(s)->sb_fs_size-4) {
                hpfs_error(s, "invalid bitmap block pointer %08x -> %08x at %s", bmp_block, sec, id);
                return NULL;
@@ -46,18 +46,18 @@ unsigned char *hpfs_load_code_page(struct super_block *s, secno cps)
        struct code_page_data *cpd;
        struct code_page_directory *cp = hpfs_map_sector(s, cps, &bh, 0);
        if (!cp) return NULL;
-       if (cp->magic != CP_DIR_MAGIC) {
-               printk("HPFS: Code page directory magic doesn't match (magic = %08x)\n", cp->magic);
+       if (le32_to_cpu(cp->magic) != CP_DIR_MAGIC) {
+               printk("HPFS: Code page directory magic doesn't match (magic = %08x)\n", le32_to_cpu(cp->magic));
                brelse(bh);
                return NULL;
        }
-       if (!cp->n_code_pages) {
+       if (!le32_to_cpu(cp->n_code_pages)) {
                printk("HPFS: n_code_pages == 0\n");
                brelse(bh);
                return NULL;
        }
-       cpds = cp->array[0].code_page_data;
-       cpi = cp->array[0].index;
+       cpds = le32_to_cpu(cp->array[0].code_page_data);
+       cpi = le16_to_cpu(cp->array[0].index);
        brelse(bh);
 
        if (cpi >= 3) {
@@ -66,12 +66,12 @@ unsigned char *hpfs_load_code_page(struct super_block *s, secno cps)
        }
        
        if (!(cpd = hpfs_map_sector(s, cpds, &bh, 0))) return NULL;
-       if ((unsigned)cpd->offs[cpi] > 0x178) {
+       if (le16_to_cpu(cpd->offs[cpi]) > 0x178) {
                printk("HPFS: Code page index out of sector\n");
                brelse(bh);
                return NULL;
        }
-       ptr = (unsigned char *)cpd + cpd->offs[cpi] + 6;
+       ptr = (unsigned char *)cpd + le16_to_cpu(cpd->offs[cpi]) + 6;
        if (!(cp_table = kmalloc(256, GFP_KERNEL))) {
                printk("HPFS: out of memory for code page table\n");
                brelse(bh);
@@ -125,7 +125,7 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea
                if (hpfs_sb(s)->sb_chk) {
                        struct extended_attribute *ea;
                        struct extended_attribute *ea_end;
-                       if (fnode->magic != FNODE_MAGIC) {
+                       if (le32_to_cpu(fnode->magic) != FNODE_MAGIC) {
                                hpfs_error(s, "bad magic on fnode %08lx",
                                        (unsigned long)ino);
                                goto bail;
@@ -138,7 +138,7 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea
                                            (unsigned long)ino);
                                        goto bail;
                                }
-                               if (fnode->btree.first_free !=
+                               if (le16_to_cpu(fnode->btree.first_free) !=
                                    8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 8 : 12)) {
                                        hpfs_error(s,
                                            "bad first_free pointer in fnode %08lx",
@@ -146,12 +146,12 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea
                                        goto bail;
                                }
                        }
-                       if (fnode->ea_size_s && ((signed int)fnode->ea_offs < 0xc4 ||
-                          (signed int)fnode->ea_offs + fnode->acl_size_s + fnode->ea_size_s > 0x200)) {
+                       if (le16_to_cpu(fnode->ea_size_s) && (le16_to_cpu(fnode->ea_offs) < 0xc4 ||
+                          le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200)) {
                                hpfs_error(s,
                                        "bad EA info in fnode %08lx: ea_offs == %04x ea_size_s == %04x",
                                        (unsigned long)ino,
-                                       fnode->ea_offs, fnode->ea_size_s);
+                                       le16_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s));
                                goto bail;
                        }
                        ea = fnode_ea(fnode);
@@ -178,16 +178,20 @@ struct anode *hpfs_map_anode(struct super_block *s, anode_secno ano, struct buff
        if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, ano, 1, "anode")) return NULL;
        if ((anode = hpfs_map_sector(s, ano, bhp, ANODE_RD_AHEAD)))
                if (hpfs_sb(s)->sb_chk) {
-                       if (anode->magic != ANODE_MAGIC || anode->self != ano) {
+                       if (le32_to_cpu(anode->magic) != ANODE_MAGIC) {
                                hpfs_error(s, "bad magic on anode %08x", ano);
                                goto bail;
                        }
+                       if (le32_to_cpu(anode->self) != ano) {
+                               hpfs_error(s, "self pointer invalid on anode %08x", ano);
+                               goto bail;
+                       }
                        if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes !=
                            (anode->btree.internal ? 60 : 40)) {
                                hpfs_error(s, "bad number of nodes in anode %08x", ano);
                                goto bail;
                        }
-                       if (anode->btree.first_free !=
+                       if (le16_to_cpu(anode->btree.first_free) !=
                            8 + anode->btree.n_used_nodes * (anode->btree.internal ? 8 : 12)) {
                                hpfs_error(s, "bad first_free pointer in anode %08x", ano);
                                goto bail;
@@ -219,26 +223,26 @@ struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno,
                        unsigned p, pp = 0;
                        unsigned char *d = (unsigned char *)dnode;
                        int b = 0;
-                       if (dnode->magic != DNODE_MAGIC) {
+                       if (le32_to_cpu(dnode->magic) != DNODE_MAGIC) {
                                hpfs_error(s, "bad magic on dnode %08x", secno);
                                goto bail;
                        }
-                       if (dnode->self != secno)
-                               hpfs_error(s, "bad self pointer on dnode %08x self = %08x", secno, dnode->self);
+                       if (le32_to_cpu(dnode->self) != secno)
+                               hpfs_error(s, "bad self pointer on dnode %08x self = %08x", secno, le32_to_cpu(dnode->self));
                        /* Check dirents - bad dirents would cause infinite
                           loops or shooting to memory */
-                       if (dnode->first_free > 2048/* || dnode->first_free < 84*/) {
-                               hpfs_error(s, "dnode %08x has first_free == %08x", secno, dnode->first_free);
+                       if (le32_to_cpu(dnode->first_free) > 2048) {
+                               hpfs_error(s, "dnode %08x has first_free == %08x", secno, le32_to_cpu(dnode->first_free));
                                goto bail;
                        }
-                       for (p = 20; p < dnode->first_free; p += d[p] + (d[p+1] << 8)) {
+                       for (p = 20; p < le32_to_cpu(dnode->first_free); p += d[p] + (d[p+1] << 8)) {
                                struct hpfs_dirent *de = (struct hpfs_dirent *)((char *)dnode + p);
-                               if (de->length > 292 || (de->length < 32) || (de->length & 3) || p + de->length > 2048) {
+                               if (le16_to_cpu(de->length) > 292 || (le16_to_cpu(de->length) < 32) || (le16_to_cpu(de->length) & 3) || p + le16_to_cpu(de->length) > 2048) {
                                        hpfs_error(s, "bad dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
                                        goto bail;
                                }
-                               if (((31 + de->namelen + de->down*4 + 3) & ~3) != de->length) {
-                                       if (((31 + de->namelen + de->down*4 + 3) & ~3) < de->length && s->s_flags & MS_RDONLY) goto ok;
+                               if (((31 + de->namelen + de->down*4 + 3) & ~3) != le16_to_cpu(de->length)) {
+                                       if (((31 + de->namelen + de->down*4 + 3) & ~3) < le16_to_cpu(de->length) && s->s_flags & MS_RDONLY) goto ok;
                                        hpfs_error(s, "namelen does not match dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
                                        goto bail;
                                }
@@ -251,7 +255,7 @@ struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno,
                                pp = p;
                                
                        }
-                       if (p != dnode->first_free) {
+                       if (p != le32_to_cpu(dnode->first_free)) {
                                hpfs_error(s, "size on last dirent does not match first_free; dnode %08x", secno);
                                goto bail;
                        }
@@ -277,7 +281,7 @@ dnode_secno hpfs_fnode_dno(struct super_block *s, ino_t ino)
        if (!fnode)
                return 0;
 
-       dno = fnode->u.external[0].disk_secno;
+       dno = le32_to_cpu(fnode->u.external[0].disk_secno);
        brelse(bh);
        return dno;
 }
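
The mapping helpers above validate every fnode, anode and dnode they read, and these hunks make the checks operate on byte-swapped values. The central b+ tree sanity check is the same for fnodes and anodes: the header is 8 bytes, internal nodes carry 8-byte entries and leaf (external) nodes carry 12-byte extents, so first_free must equal 8 + n_used_nodes * (internal ? 8 : 12). A sketch of that check in isolation (example_btree_ok() is illustrative, not a function from the patch):

	static int example_btree_ok(const struct bplus_header *btree)
	{
		unsigned expected = 8 + btree->n_used_nodes *
					(btree->internal ? 8 : 12);

		return le16_to_cpu(btree->first_free) == expected;
	}
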
index f24736d7a439218aa238f75df3338c3e3e64744f..9acdf338def0b4d3dc26c59171b9dc337217ec30 100644 (file)
@@ -8,39 +8,6 @@
 
 #include "hpfs_fn.h"
 
-static const char *text_postfix[]={
-".ASM", ".BAS", ".BAT", ".C", ".CC", ".CFG", ".CMD", ".CON", ".CPP", ".DEF",
-".DOC", ".DPR", ".ERX", ".H", ".HPP", ".HTM", ".HTML", ".JAVA", ".LOG", ".PAS",
-".RC", ".TEX", ".TXT", ".Y", ""};
-
-static const char *text_prefix[]={
-"AUTOEXEC.", "CHANGES", "COPYING", "CONFIG.", "CREDITS", "FAQ", "FILE_ID.DIZ",
-"MAKEFILE", "READ.ME", "README", "TERMCAP", ""};
-
-void hpfs_decide_conv(struct inode *inode, const unsigned char *name, unsigned len)
-{
-       struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
-       int i;
-       if (hpfs_inode->i_conv != CONV_AUTO) return;
-       for (i = 0; *text_postfix[i]; i++) {
-               int l = strlen(text_postfix[i]);
-               if (l <= len)
-                       if (!hpfs_compare_names(inode->i_sb, text_postfix[i], l, name + len - l, l, 0))
-                               goto text;
-       }
-       for (i = 0; *text_prefix[i]; i++) {
-               int l = strlen(text_prefix[i]);
-               if (l <= len)
-                       if (!hpfs_compare_names(inode->i_sb, text_prefix[i], l, name, l, 0))
-                               goto text;
-       }
-       hpfs_inode->i_conv = CONV_BINARY;
-       return;
-       text:
-       hpfs_inode->i_conv = CONV_TEXT;
-       return;
-}
-
 static inline int not_allowed_char(unsigned char c)
 {
        return c<' ' || c=='"' || c=='*' || c=='/' || c==':' || c=='<' ||
index d5f8c8a190233dc8cd066b065868aca822989128..1f05839c27a7fca1f24c99dda71adf07bb2930c5 100644 (file)
@@ -29,7 +29,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        fnode = hpfs_alloc_fnode(dir->i_sb, hpfs_i(dir)->i_dno, &fno, &bh);
        if (!fnode)
                goto bail;
-       dnode = hpfs_alloc_dnode(dir->i_sb, fno, &dno, &qbh0, 1);
+       dnode = hpfs_alloc_dnode(dir->i_sb, fno, &dno, &qbh0);
        if (!dnode)
                goto bail1;
        memset(&dee, 0, sizeof dee);
@@ -37,8 +37,8 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        if (!(mode & 0222)) dee.read_only = 1;
        /*dee.archive = 0;*/
        dee.hidden = name[0] == '.';
-       dee.fnode = fno;
-       dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds());
+       dee.fnode = cpu_to_le32(fno);
+       dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds()));
        result = new_inode(dir->i_sb);
        if (!result)
                goto bail2;
@@ -46,7 +46,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        result->i_ino = fno;
        hpfs_i(result)->i_parent_dir = dir->i_ino;
        hpfs_i(result)->i_dno = dno;
-       result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date);
+       result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date));
        result->i_ctime.tv_nsec = 0; 
        result->i_mtime.tv_nsec = 0; 
        result->i_atime.tv_nsec = 0; 
@@ -60,8 +60,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        if (dee.read_only)
                result->i_mode &= ~0222;
 
-       mutex_lock(&hpfs_i(dir)->i_mutex);
-       r = hpfs_add_dirent(dir, name, len, &dee, 0);
+       r = hpfs_add_dirent(dir, name, len, &dee);
        if (r == 1)
                goto bail3;
        if (r == -1) {
@@ -70,21 +69,21 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
        }
        fnode->len = len;
        memcpy(fnode->name, name, len > 15 ? 15 : len);
-       fnode->up = dir->i_ino;
+       fnode->up = cpu_to_le32(dir->i_ino);
        fnode->dirflag = 1;
        fnode->btree.n_free_nodes = 7;
        fnode->btree.n_used_nodes = 1;
-       fnode->btree.first_free = 0x14;
-       fnode->u.external[0].disk_secno = dno;
-       fnode->u.external[0].file_secno = -1;
+       fnode->btree.first_free = cpu_to_le16(0x14);
+       fnode->u.external[0].disk_secno = cpu_to_le32(dno);
+       fnode->u.external[0].file_secno = cpu_to_le32(-1);
        dnode->root_dnode = 1;
-       dnode->up = fno;
+       dnode->up = cpu_to_le32(fno);
        de = hpfs_add_de(dir->i_sb, dnode, "\001\001", 2, 0);
-       de->creation_date = de->write_date = de->read_date = gmt_to_local(dir->i_sb, get_seconds());
+       de->creation_date = de->write_date = de->read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds()));
        if (!(mode & 0222)) de->read_only = 1;
        de->first = de->directory = 1;
        /*de->hidden = de->system = 0;*/
-       de->fnode = fno;
+       de->fnode = cpu_to_le32(fno);
        mark_buffer_dirty(bh);
        brelse(bh);
        hpfs_mark_4buffers_dirty(&qbh0);
@@ -101,11 +100,9 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
                hpfs_write_inode_nolock(result);
        }
        d_instantiate(dentry, result);
-       mutex_unlock(&hpfs_i(dir)->i_mutex);
        hpfs_unlock(dir->i_sb);
        return 0;
 bail3:
-       mutex_unlock(&hpfs_i(dir)->i_mutex);
        iput(result);
 bail2:
        hpfs_brelse4(&qbh0);
@@ -140,8 +137,8 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
        if (!(mode & 0222)) dee.read_only = 1;
        dee.archive = 1;
        dee.hidden = name[0] == '.';
-       dee.fnode = fno;
-       dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds());
+       dee.fnode = cpu_to_le32(fno);
+       dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds()));
 
        result = new_inode(dir->i_sb);
        if (!result)
@@ -154,9 +151,8 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
        result->i_op = &hpfs_file_iops;
        result->i_fop = &hpfs_file_ops;
        result->i_nlink = 1;
-       hpfs_decide_conv(result, name, len);
        hpfs_i(result)->i_parent_dir = dir->i_ino;
-       result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date);
+       result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date));
        result->i_ctime.tv_nsec = 0;
        result->i_mtime.tv_nsec = 0;
        result->i_atime.tv_nsec = 0;
@@ -168,8 +164,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
        result->i_data.a_ops = &hpfs_aops;
        hpfs_i(result)->mmu_private = 0;
 
-       mutex_lock(&hpfs_i(dir)->i_mutex);
-       r = hpfs_add_dirent(dir, name, len, &dee, 0);
+       r = hpfs_add_dirent(dir, name, len, &dee);
        if (r == 1)
                goto bail2;
        if (r == -1) {
@@ -178,7 +173,7 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
        }
        fnode->len = len;
        memcpy(fnode->name, name, len > 15 ? 15 : len);
-       fnode->up = dir->i_ino;
+       fnode->up = cpu_to_le32(dir->i_ino);
        mark_buffer_dirty(bh);
        brelse(bh);
 
@@ -193,12 +188,10 @@ static int hpfs_create(struct inode *dir, struct dentry *dentry, int mode, struc
                hpfs_write_inode_nolock(result);
        }
        d_instantiate(dentry, result);
-       mutex_unlock(&hpfs_i(dir)->i_mutex);
        hpfs_unlock(dir->i_sb);
        return 0;
 
 bail2:
-       mutex_unlock(&hpfs_i(dir)->i_mutex);
        iput(result);
 bail1:
        brelse(bh);
@@ -232,8 +225,8 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t
        if (!(mode & 0222)) dee.read_only = 1;
        dee.archive = 1;
        dee.hidden = name[0] == '.';
-       dee.fnode = fno;
-       dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds());
+       dee.fnode = cpu_to_le32(fno);
+       dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds()));
 
        result = new_inode(dir->i_sb);
        if (!result)
@@ -242,7 +235,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t
        hpfs_init_inode(result);
        result->i_ino = fno;
        hpfs_i(result)->i_parent_dir = dir->i_ino;
-       result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date);
+       result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date));
        result->i_ctime.tv_nsec = 0;
        result->i_mtime.tv_nsec = 0;
        result->i_atime.tv_nsec = 0;
@@ -254,8 +247,7 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t
        result->i_blocks = 1;
        init_special_inode(result, mode, rdev);
 
-       mutex_lock(&hpfs_i(dir)->i_mutex);
-       r = hpfs_add_dirent(dir, name, len, &dee, 0);
+       r = hpfs_add_dirent(dir, name, len, &dee);
        if (r == 1)
                goto bail2;
        if (r == -1) {
@@ -264,19 +256,17 @@ static int hpfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t
        }
        fnode->len = len;
        memcpy(fnode->name, name, len > 15 ? 15 : len);
-       fnode->up = dir->i_ino;
+       fnode->up = cpu_to_le32(dir->i_ino);
        mark_buffer_dirty(bh);
 
        insert_inode_hash(result);
 
        hpfs_write_inode_nolock(result);
        d_instantiate(dentry, result);
-       mutex_unlock(&hpfs_i(dir)->i_mutex);
        brelse(bh);
        hpfs_unlock(dir->i_sb);
        return 0;
 bail2:
-       mutex_unlock(&hpfs_i(dir)->i_mutex);
        iput(result);
 bail1:
        brelse(bh);
@@ -310,8 +300,8 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
        memset(&dee, 0, sizeof dee);
        dee.archive = 1;
        dee.hidden = name[0] == '.';
-       dee.fnode = fno;
-       dee.creation_date = dee.write_date = dee.read_date = gmt_to_local(dir->i_sb, get_seconds());
+       dee.fnode = cpu_to_le32(fno);
+       dee.creation_date = dee.write_date = dee.read_date = cpu_to_le32(gmt_to_local(dir->i_sb, get_seconds()));
 
        result = new_inode(dir->i_sb);
        if (!result)
@@ -319,7 +309,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
        result->i_ino = fno;
        hpfs_init_inode(result);
        hpfs_i(result)->i_parent_dir = dir->i_ino;
-       result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, dee.creation_date);
+       result->i_ctime.tv_sec = result->i_mtime.tv_sec = result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(dee.creation_date));
        result->i_ctime.tv_nsec = 0;
        result->i_mtime.tv_nsec = 0;
        result->i_atime.tv_nsec = 0;
@@ -333,8 +323,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
        result->i_op = &page_symlink_inode_operations;
        result->i_data.a_ops = &hpfs_symlink_aops;
 
-       mutex_lock(&hpfs_i(dir)->i_mutex);
-       r = hpfs_add_dirent(dir, name, len, &dee, 0);
+       r = hpfs_add_dirent(dir, name, len, &dee);
        if (r == 1)
                goto bail2;
        if (r == -1) {
@@ -343,7 +332,7 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
        }
        fnode->len = len;
        memcpy(fnode->name, name, len > 15 ? 15 : len);
-       fnode->up = dir->i_ino;
+       fnode->up = cpu_to_le32(dir->i_ino);
        hpfs_set_ea(result, fnode, "SYMLINK", symlink, strlen(symlink));
        mark_buffer_dirty(bh);
        brelse(bh);
@@ -352,11 +341,9 @@ static int hpfs_symlink(struct inode *dir, struct dentry *dentry, const char *sy
 
        hpfs_write_inode_nolock(result);
        d_instantiate(dentry, result);
-       mutex_unlock(&hpfs_i(dir)->i_mutex);
        hpfs_unlock(dir->i_sb);
        return 0;
 bail2:
-       mutex_unlock(&hpfs_i(dir)->i_mutex);
        iput(result);
 bail1:
        brelse(bh);
@@ -374,7 +361,6 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
        struct hpfs_dirent *de;
        struct inode *inode = dentry->d_inode;
        dnode_secno dno;
-       fnode_secno fno;
        int r;
        int rep = 0;
        int err;
@@ -382,8 +368,6 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
        hpfs_lock(dir->i_sb);
        hpfs_adjust_length(name, &len);
 again:
-       mutex_lock(&hpfs_i(inode)->i_parent_mutex);
-       mutex_lock(&hpfs_i(dir)->i_mutex);
        err = -ENOENT;
        de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
        if (!de)
@@ -397,7 +381,6 @@ again:
        if (de->directory)
                goto out1;
 
-       fno = de->fnode;
        r = hpfs_remove_dirent(dir, dno, de, &qbh, 1);
        switch (r) {
        case 1:
@@ -410,8 +393,6 @@ again:
                if (rep++)
                        break;
 
-               mutex_unlock(&hpfs_i(dir)->i_mutex);
-               mutex_unlock(&hpfs_i(inode)->i_parent_mutex);
                dentry_unhash(dentry);
                if (!d_unhashed(dentry)) {
                        dput(dentry);
@@ -445,8 +426,6 @@ again:
 out1:
        hpfs_brelse4(&qbh);
 out:
-       mutex_unlock(&hpfs_i(dir)->i_mutex);
-       mutex_unlock(&hpfs_i(inode)->i_parent_mutex);
        hpfs_unlock(dir->i_sb);
        return err;
 }
@@ -459,15 +438,12 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
        struct hpfs_dirent *de;
        struct inode *inode = dentry->d_inode;
        dnode_secno dno;
-       fnode_secno fno;
        int n_items = 0;
        int err;
        int r;
 
        hpfs_adjust_length(name, &len);
        hpfs_lock(dir->i_sb);
-       mutex_lock(&hpfs_i(inode)->i_parent_mutex);
-       mutex_lock(&hpfs_i(dir)->i_mutex);
        err = -ENOENT;
        de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
        if (!de)
@@ -486,7 +462,6 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
        if (n_items)
                goto out1;
 
-       fno = de->fnode;
        r = hpfs_remove_dirent(dir, dno, de, &qbh, 1);
        switch (r) {
        case 1:
@@ -505,8 +480,6 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry)
 out1:
        hpfs_brelse4(&qbh);
 out:
-       mutex_unlock(&hpfs_i(dir)->i_mutex);
-       mutex_unlock(&hpfs_i(inode)->i_parent_mutex);
        hpfs_unlock(dir->i_sb);
        return err;
 }
@@ -568,12 +541,6 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        hpfs_lock(i->i_sb);
        /* order doesn't matter, due to VFS exclusion */
-       mutex_lock(&hpfs_i(i)->i_parent_mutex);
-       if (new_inode)
-               mutex_lock(&hpfs_i(new_inode)->i_parent_mutex);
-       mutex_lock(&hpfs_i(old_dir)->i_mutex);
-       if (new_dir != old_dir)
-               mutex_lock(&hpfs_i(new_dir)->i_mutex);
        
        /* Erm? Moving over the empty non-busy directory is perfectly legal */
        if (new_inode && S_ISDIR(new_inode->i_mode)) {
@@ -610,9 +577,7 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        if (new_dir == old_dir) hpfs_brelse4(&qbh);
 
-       hpfs_lock_creation(i->i_sb);
-       if ((r = hpfs_add_dirent(new_dir, new_name, new_len, &de, 1))) {
-               hpfs_unlock_creation(i->i_sb);
+       if ((r = hpfs_add_dirent(new_dir, new_name, new_len, &de))) {
                if (r == -1) hpfs_error(new_dir->i_sb, "hpfs_rename: dirent already exists!");
                err = r == 1 ? -ENOSPC : -EFSERROR;
                if (new_dir != old_dir) hpfs_brelse4(&qbh);
@@ -621,20 +586,17 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        
        if (new_dir == old_dir)
                if (!(dep = map_dirent(old_dir, hpfs_i(old_dir)->i_dno, old_name, old_len, &dno, &qbh))) {
-                       hpfs_unlock_creation(i->i_sb);
                        hpfs_error(i->i_sb, "lookup succeeded but map dirent failed at #2");
                        err = -ENOENT;
                        goto end1;
                }
 
        if ((r = hpfs_remove_dirent(old_dir, dno, dep, &qbh, 0))) {
-               hpfs_unlock_creation(i->i_sb);
                hpfs_error(i->i_sb, "hpfs_rename: could not remove dirent");
                err = r == 2 ? -ENOSPC : -EFSERROR;
                goto end1;
        }
-       hpfs_unlock_creation(i->i_sb);
-       
+
        end:
        hpfs_i(i)->i_parent_dir = new_dir->i_ino;
        if (S_ISDIR(i->i_mode)) {
@@ -642,22 +604,14 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                drop_nlink(old_dir);
        }
        if ((fnode = hpfs_map_fnode(i->i_sb, i->i_ino, &bh))) {
-               fnode->up = new_dir->i_ino;
+               fnode->up = cpu_to_le32(new_dir->i_ino);
                fnode->len = new_len;
                memcpy(fnode->name, new_name, new_len>15?15:new_len);
                if (new_len < 15) memset(&fnode->name[new_len], 0, 15 - new_len);
                mark_buffer_dirty(bh);
                brelse(bh);
        }
-       hpfs_i(i)->i_conv = hpfs_sb(i->i_sb)->sb_conv;
-       hpfs_decide_conv(i, new_name, new_len);
 end1:
-       if (old_dir != new_dir)
-               mutex_unlock(&hpfs_i(new_dir)->i_mutex);
-       mutex_unlock(&hpfs_i(old_dir)->i_mutex);
-       mutex_unlock(&hpfs_i(i)->i_parent_mutex);
-       if (new_inode)
-               mutex_unlock(&hpfs_i(new_inode)->i_parent_mutex);
        hpfs_unlock(i->i_sb);
        return err;
 }
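
In the hpfs_mkdir() hunk above, the freshly allocated directory fnode gets first_free = 0x14: the b+ tree header is 8 bytes and the single 12-byte leaf extent pointing at the root dnode follows it, so 8 + 1 * 12 = 20 = 0x14, which matches the first_free consistency check in the mapping helpers earlier. A condensed sketch of that initialisation (the helper name is illustrative; the field values mirror hpfs_mkdir() above):

	static void example_init_dir_fnode(struct fnode *fnode, dnode_secno dno)
	{
		fnode->dirflag = 1;			/* first extent points to a dnode */
		fnode->btree.n_free_nodes = 7;		/* 8 extent slots, 1 already used */
		fnode->btree.n_used_nodes = 1;
		fnode->btree.first_free = cpu_to_le16(0x14);	/* 8 + 1 * 12 */
		fnode->u.external[0].disk_secno = cpu_to_le32(dno);
		fnode->u.external[0].file_secno = cpu_to_le32(-1);
	}

Note also that the per-directory mutex_lock() calls and the separate hpfs_lock_creation()/hpfs_unlock_creation() pairs are gone from all of these directory paths; with the single hpfs_mutex held for the whole operation there is nothing left to order.
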
index c89b40808587925c616c4c20cc5e6ee5b6932aa0..98580a3b5005feea514e961476d9a0686c912979 100644 (file)
 
 /* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */
 
-static void mark_dirty(struct super_block *s)
+static void mark_dirty(struct super_block *s, int remount)
 {
-       if (hpfs_sb(s)->sb_chkdsk && !(s->s_flags & MS_RDONLY)) {
+       if (hpfs_sb(s)->sb_chkdsk && (remount || !(s->s_flags & MS_RDONLY))) {
                struct buffer_head *bh;
                struct hpfs_spare_block *sb;
                if ((sb = hpfs_map_sector(s, 17, &bh, 0))) {
                        sb->dirty = 1;
                        sb->old_wrote = 0;
                        mark_buffer_dirty(bh);
+                       sync_dirty_buffer(bh);
                        brelse(bh);
                }
        }
@@ -40,10 +41,12 @@ static void unmark_dirty(struct super_block *s)
        struct buffer_head *bh;
        struct hpfs_spare_block *sb;
        if (s->s_flags & MS_RDONLY) return;
+       sync_blockdev(s->s_bdev);
        if ((sb = hpfs_map_sector(s, 17, &bh, 0))) {
                sb->dirty = hpfs_sb(s)->sb_chkdsk > 1 - hpfs_sb(s)->sb_was_error;
                sb->old_wrote = hpfs_sb(s)->sb_chkdsk >= 2 && !hpfs_sb(s)->sb_was_error;
                mark_buffer_dirty(bh);
+               sync_dirty_buffer(bh);
                brelse(bh);
        }
 }
@@ -63,13 +66,13 @@ void hpfs_error(struct super_block *s, const char *fmt, ...)
        if (!hpfs_sb(s)->sb_was_error) {
                if (hpfs_sb(s)->sb_err == 2) {
                        printk("; crashing the system because you wanted it\n");
-                       mark_dirty(s);
+                       mark_dirty(s, 0);
                        panic("HPFS panic");
                } else if (hpfs_sb(s)->sb_err == 1) {
                        if (s->s_flags & MS_RDONLY) printk("; already mounted read-only\n");
                        else {
                                printk("; remounting read-only\n");
-                               mark_dirty(s);
+                               mark_dirty(s, 0);
                                s->s_flags |= MS_RDONLY;
                        }
                } else if (s->s_flags & MS_RDONLY) printk("; going on - but anything won't be destroyed because it's read-only\n");
@@ -102,9 +105,12 @@ static void hpfs_put_super(struct super_block *s)
 {
        struct hpfs_sb_info *sbi = hpfs_sb(s);
 
+       hpfs_lock(s);
+       unmark_dirty(s);
+       hpfs_unlock(s);
+
        kfree(sbi->sb_cp_table);
        kfree(sbi->sb_bmp_dir);
-       unmark_dirty(s);
        s->s_fs_info = NULL;
        kfree(sbi);
 }
@@ -129,7 +135,7 @@ static unsigned count_bitmaps(struct super_block *s)
        n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14;
        count = 0;
        for (n = 0; n < n_bands; n++)
-               count += hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_bmp_dir[n]);
+               count += hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n]));
        return count;
 }
 
@@ -188,8 +194,6 @@ static void init_once(void *foo)
 {
        struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo;
 
-       mutex_init(&ei->i_mutex);
-       mutex_init(&ei->i_parent_mutex);
        inode_init_once(&ei->vfs_inode);
 }
 
@@ -218,7 +222,6 @@ static void destroy_inodecache(void)
 
 enum {
        Opt_help, Opt_uid, Opt_gid, Opt_umask, Opt_case_lower, Opt_case_asis,
-       Opt_conv_binary, Opt_conv_text, Opt_conv_auto,
        Opt_check_none, Opt_check_normal, Opt_check_strict,
        Opt_err_cont, Opt_err_ro, Opt_err_panic,
        Opt_eas_no, Opt_eas_ro, Opt_eas_rw,
@@ -233,9 +236,6 @@ static const match_table_t tokens = {
        {Opt_umask, "umask=%o"},
        {Opt_case_lower, "case=lower"},
        {Opt_case_asis, "case=asis"},
-       {Opt_conv_binary, "conv=binary"},
-       {Opt_conv_text, "conv=text"},
-       {Opt_conv_auto, "conv=auto"},
        {Opt_check_none, "check=none"},
        {Opt_check_normal, "check=normal"},
        {Opt_check_strict, "check=strict"},
@@ -253,7 +253,7 @@ static const match_table_t tokens = {
 };
 
 static int parse_opts(char *opts, uid_t *uid, gid_t *gid, umode_t *umask,
-                     int *lowercase, int *conv, int *eas, int *chk, int *errs,
+                     int *lowercase, int *eas, int *chk, int *errs,
                      int *chkdsk, int *timeshift)
 {
        char *p;
@@ -295,15 +295,6 @@ static int parse_opts(char *opts, uid_t *uid, gid_t *gid, umode_t *umask,
                case Opt_case_asis:
                        *lowercase = 0;
                        break;
-               case Opt_conv_binary:
-                       *conv = CONV_BINARY;
-                       break;
-               case Opt_conv_text:
-                       *conv = CONV_TEXT;
-                       break;
-               case Opt_conv_auto:
-                       *conv = CONV_AUTO;
-                       break;
                case Opt_check_none:
                        *chk = 0;
                        break;
@@ -370,9 +361,6 @@ HPFS filesystem options:\n\
       umask=xxx         set mode of files that don't have mode specified in eas\n\
       case=lower        lowercase all files\n\
       case=asis         do not lowercase files (default)\n\
-      conv=binary       do not convert CR/LF -> LF (default)\n\
-      conv=auto         convert only files with known text extensions\n\
-      conv=text         convert all files\n\
       check=none        no fs checks - kernel may crash on corrupted filesystem\n\
       check=normal      do some checks - it should not crash (default)\n\
       check=strict      do extra time-consuming checks, used for debugging\n\
@@ -394,7 +382,7 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
        uid_t uid;
        gid_t gid;
        umode_t umask;
-       int lowercase, conv, eas, chk, errs, chkdsk, timeshift;
+       int lowercase, eas, chk, errs, chkdsk, timeshift;
        int o;
        struct hpfs_sb_info *sbi = hpfs_sb(s);
        char *new_opts = kstrdup(data, GFP_KERNEL);
@@ -405,11 +393,11 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
        lock_super(s);
        uid = sbi->sb_uid; gid = sbi->sb_gid;
        umask = 0777 & ~sbi->sb_mode;
-       lowercase = sbi->sb_lowercase; conv = sbi->sb_conv;
+       lowercase = sbi->sb_lowercase;
        eas = sbi->sb_eas; chk = sbi->sb_chk; chkdsk = sbi->sb_chkdsk;
        errs = sbi->sb_err; timeshift = sbi->sb_timeshift;
 
-       if (!(o = parse_opts(data, &uid, &gid, &umask, &lowercase, &conv,
+       if (!(o = parse_opts(data, &uid, &gid, &umask, &lowercase,
            &eas, &chk, &errs, &chkdsk, &timeshift))) {
                printk("HPFS: bad mount options.\n");
                goto out_err;
@@ -427,11 +415,11 @@ static int hpfs_remount_fs(struct super_block *s, int *flags, char *data)
 
        sbi->sb_uid = uid; sbi->sb_gid = gid;
        sbi->sb_mode = 0777 & ~umask;
-       sbi->sb_lowercase = lowercase; sbi->sb_conv = conv;
+       sbi->sb_lowercase = lowercase;
        sbi->sb_eas = eas; sbi->sb_chk = chk; sbi->sb_chkdsk = chkdsk;
        sbi->sb_err = errs; sbi->sb_timeshift = timeshift;
 
-       if (!(*flags & MS_RDONLY)) mark_dirty(s);
+       if (!(*flags & MS_RDONLY)) mark_dirty(s, 1);
 
        replace_mount_options(s, new_opts);
 
@@ -471,7 +459,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
        uid_t uid;
        gid_t gid;
        umode_t umask;
-       int lowercase, conv, eas, chk, errs, chkdsk, timeshift;
+       int lowercase, eas, chk, errs, chkdsk, timeshift;
 
        dnode_secno root_dno;
        struct hpfs_dirent *de = NULL;
@@ -479,11 +467,6 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
 
        int o;
 
-       if (num_possible_cpus() > 1) {
-               printk(KERN_ERR "HPFS is not SMP safe\n");
-               return -EINVAL;
-       }
-
        save_mount_options(s, options);
 
        sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
@@ -495,20 +478,20 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
        sbi->sb_bmp_dir = NULL;
        sbi->sb_cp_table = NULL;
 
-       mutex_init(&sbi->hpfs_creation_de);
+       mutex_init(&sbi->hpfs_mutex);
+       hpfs_lock(s);
 
        uid = current_uid();
        gid = current_gid();
        umask = current_umask();
        lowercase = 0;
-       conv = CONV_BINARY;
        eas = 2;
        chk = 1;
        errs = 1;
        chkdsk = 1;
        timeshift = 0;
 
-       if (!(o = parse_opts(options, &uid, &gid, &umask, &lowercase, &conv,
+       if (!(o = parse_opts(options, &uid, &gid, &umask, &lowercase,
            &eas, &chk, &errs, &chkdsk, &timeshift))) {
                printk("HPFS: bad mount options.\n");
                goto bail0;
@@ -526,9 +509,9 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
        if (!(spareblock = hpfs_map_sector(s, 17, &bh2, 0))) goto bail3;
 
        /* Check magics */
-       if (/*bootblock->magic != BB_MAGIC
-           ||*/ superblock->magic != SB_MAGIC
-           || spareblock->magic != SP_MAGIC) {
+       if (/*le16_to_cpu(bootblock->magic) != BB_MAGIC
+           ||*/ le32_to_cpu(superblock->magic) != SB_MAGIC
+           || le32_to_cpu(spareblock->magic) != SP_MAGIC) {
                if (!silent) printk("HPFS: Bad magic ... probably not HPFS\n");
                goto bail4;
        }
@@ -549,19 +532,18 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
        s->s_op = &hpfs_sops;
        s->s_d_op = &hpfs_dentry_operations;
 
-       sbi->sb_root = superblock->root;
-       sbi->sb_fs_size = superblock->n_sectors;
-       sbi->sb_bitmaps = superblock->bitmaps;
-       sbi->sb_dirband_start = superblock->dir_band_start;
-       sbi->sb_dirband_size = superblock->n_dir_band;
-       sbi->sb_dmap = superblock->dir_band_bitmap;
+       sbi->sb_root = le32_to_cpu(superblock->root);
+       sbi->sb_fs_size = le32_to_cpu(superblock->n_sectors);
+       sbi->sb_bitmaps = le32_to_cpu(superblock->bitmaps);
+       sbi->sb_dirband_start = le32_to_cpu(superblock->dir_band_start);
+       sbi->sb_dirband_size = le32_to_cpu(superblock->n_dir_band);
+       sbi->sb_dmap = le32_to_cpu(superblock->dir_band_bitmap);
        sbi->sb_uid = uid;
        sbi->sb_gid = gid;
        sbi->sb_mode = 0777 & ~umask;
        sbi->sb_n_free = -1;
        sbi->sb_n_free_dnodes = -1;
        sbi->sb_lowercase = lowercase;
-       sbi->sb_conv = conv;
        sbi->sb_eas = eas;
        sbi->sb_chk = chk;
        sbi->sb_chkdsk = chkdsk;
@@ -573,7 +555,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
        sbi->sb_max_fwd_alloc = 0xffffff;
        
        /* Load bitmap directory */
-       if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, superblock->bitmaps)))
+       if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, le32_to_cpu(superblock->bitmaps))))
                goto bail4;
        
        /* Check for general fs errors*/
@@ -591,20 +573,20 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
                mark_buffer_dirty(bh2);
        }
 
-       if (spareblock->hotfixes_used || spareblock->n_spares_used) {
+       if (le32_to_cpu(spareblock->hotfixes_used) || le32_to_cpu(spareblock->n_spares_used)) {
                if (errs >= 2) {
                        printk("HPFS: Hotfixes not supported here, try chkdsk\n");
-                       mark_dirty(s);
+                       mark_dirty(s, 0);
                        goto bail4;
                }
                hpfs_error(s, "hotfixes not supported here, try chkdsk");
                if (errs == 0) printk("HPFS: Proceeding, but your filesystem will be probably corrupted by this driver...\n");
                else printk("HPFS: This driver may read bad files or crash when operating on disk with hotfixes.\n");
        }
-       if (spareblock->n_dnode_spares != spareblock->n_dnode_spares_free) {
+       if (le32_to_cpu(spareblock->n_dnode_spares) != le32_to_cpu(spareblock->n_dnode_spares_free)) {
                if (errs >= 2) {
                        printk("HPFS: Spare dnodes used, try chkdsk\n");
-                       mark_dirty(s);
+                       mark_dirty(s, 0);
                        goto bail4;
                }
                hpfs_error(s, "warning: spare dnodes used, try chkdsk");
@@ -612,26 +594,26 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
        }
        if (chk) {
                unsigned a;
-               if (superblock->dir_band_end - superblock->dir_band_start + 1 != superblock->n_dir_band ||
-                   superblock->dir_band_end < superblock->dir_band_start || superblock->n_dir_band > 0x4000) {
+               if (le32_to_cpu(superblock->dir_band_end) - le32_to_cpu(superblock->dir_band_start) + 1 != le32_to_cpu(superblock->n_dir_band) ||
+                   le32_to_cpu(superblock->dir_band_end) < le32_to_cpu(superblock->dir_band_start) || le32_to_cpu(superblock->n_dir_band) > 0x4000) {
                        hpfs_error(s, "dir band size mismatch: dir_band_start==%08x, dir_band_end==%08x, n_dir_band==%08x",
-                               superblock->dir_band_start, superblock->dir_band_end, superblock->n_dir_band);
+                               le32_to_cpu(superblock->dir_band_start), le32_to_cpu(superblock->dir_band_end), le32_to_cpu(superblock->n_dir_band));
                        goto bail4;
                }
                a = sbi->sb_dirband_size;
                sbi->sb_dirband_size = 0;
-               if (hpfs_chk_sectors(s, superblock->dir_band_start, superblock->n_dir_band, "dir_band") ||
-                   hpfs_chk_sectors(s, superblock->dir_band_bitmap, 4, "dir_band_bitmap") ||
-                   hpfs_chk_sectors(s, superblock->bitmaps, 4, "bitmaps")) {
-                       mark_dirty(s);
+               if (hpfs_chk_sectors(s, le32_to_cpu(superblock->dir_band_start), le32_to_cpu(superblock->n_dir_band), "dir_band") ||
+                   hpfs_chk_sectors(s, le32_to_cpu(superblock->dir_band_bitmap), 4, "dir_band_bitmap") ||
+                   hpfs_chk_sectors(s, le32_to_cpu(superblock->bitmaps), 4, "bitmaps")) {
+                       mark_dirty(s, 0);
                        goto bail4;
                }
                sbi->sb_dirband_size = a;
        } else printk("HPFS: You really don't want any checks? You are crazy...\n");
 
        /* Load code page table */
-       if (spareblock->n_code_pages)
-               if (!(sbi->sb_cp_table = hpfs_load_code_page(s, spareblock->code_page_dir)))
+       if (le32_to_cpu(spareblock->n_code_pages))
+               if (!(sbi->sb_cp_table = hpfs_load_code_page(s, le32_to_cpu(spareblock->code_page_dir))))
                        printk("HPFS: Warning: code page support is disabled\n");
 
        brelse(bh2);
@@ -660,13 +642,13 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
        if (!de)
                hpfs_error(s, "unable to find root dir");
        else {
-               root->i_atime.tv_sec = local_to_gmt(s, de->read_date);
+               root->i_atime.tv_sec = local_to_gmt(s, le32_to_cpu(de->read_date));
                root->i_atime.tv_nsec = 0;
-               root->i_mtime.tv_sec = local_to_gmt(s, de->write_date);
+               root->i_mtime.tv_sec = local_to_gmt(s, le32_to_cpu(de->write_date));
                root->i_mtime.tv_nsec = 0;
-               root->i_ctime.tv_sec = local_to_gmt(s, de->creation_date);
+               root->i_ctime.tv_sec = local_to_gmt(s, le32_to_cpu(de->creation_date));
                root->i_ctime.tv_nsec = 0;
-               hpfs_i(root)->i_ea_size = de->ea_size;
+               hpfs_i(root)->i_ea_size = le16_to_cpu(de->ea_size);
                hpfs_i(root)->i_parent_dir = root->i_ino;
                if (root->i_size == -1)
                        root->i_size = 2048;
@@ -674,6 +656,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
                        root->i_blocks = 5;
                hpfs_brelse4(&qbh);
        }
+       hpfs_unlock(s);
        return 0;
 
 bail4: brelse(bh2);
@@ -681,6 +664,7 @@ bail3:      brelse(bh1);
 bail2: brelse(bh0);
 bail1:
 bail0:
+       hpfs_unlock(s);
        kfree(sbi->sb_bmp_dir);
        kfree(sbi->sb_cp_table);
        s->s_fs_info = NULL;
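
The HPFS hunks above wrap every access to an on-disk superblock, spareblock or dirent field in le16_to_cpu()/le32_to_cpu(), since HPFS stores its metadata little-endian while the in-memory sbi fields are plain CPU-endian integers. A minimal sketch of that pattern, with hypothetical structure and field names rather than the real HPFS layout:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* On-disk layout: fields are stored little-endian. */
    struct demo_ondisk_super {
            __le32 magic;
            __le32 n_sectors;
    };

    /* In-memory state: plain CPU-endian values. */
    struct demo_super_info {
            u32 magic;
            u32 n_sectors;
    };

    static void demo_read_super(const struct demo_ondisk_super *disk,
                                struct demo_super_info *info)
    {
            /* Convert once, at the boundary, so the rest of the code
             * never handles __le32 values directly. */
            info->magic     = le32_to_cpu(disk->magic);
            info->n_sectors = le32_to_cpu(disk->n_sectors);
    }

The same super.c changes also drop the conv= mount options and take the new per-superblock hpfs_mutex via hpfs_lock()/hpfs_unlock() around hpfs_fill_super(), which is what allows the old "HPFS is not SMP safe" bail-out to be removed.
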
index 33435e4b14d24aaad89bfab57da526870bbfa797..ce03a182c771c42e39e90c0f7b3737b897d02215 100644 (file)
@@ -480,10 +480,6 @@ static int logfs_read_sb(struct super_block *sb, int read_only)
                        !read_only)
                return -EIO;
 
-       mutex_init(&super->s_dirop_mutex);
-       mutex_init(&super->s_object_alias_mutex);
-       INIT_LIST_HEAD(&super->s_freeing_list);
-
        ret = logfs_init_rw(sb);
        if (ret)
                return ret;
@@ -601,6 +597,10 @@ static struct dentry *logfs_mount(struct file_system_type *type, int flags,
        if (!super)
                return ERR_PTR(-ENOMEM);
 
+       mutex_init(&super->s_dirop_mutex);
+       mutex_init(&super->s_object_alias_mutex);
+       INIT_LIST_HEAD(&super->s_freeing_list);
+
        if (!devname)
                err = logfs_get_sb_bdev(super, type, devname);
        else if (strncmp(devname, "mtd", 3))
index 54fc993e3027d4dff98dcf4f14ebbb4d817cdb0f..e3c4f112ebf754525de7d5634c298e9d4ba4fba8 100644 (file)
@@ -179,7 +179,7 @@ EXPORT_SYMBOL(putname);
 static int acl_permission_check(struct inode *inode, int mask, unsigned int flags,
                int (*check_acl)(struct inode *inode, int mask, unsigned int flags))
 {
-       umode_t                 mode = inode->i_mode;
+       unsigned int mode = inode->i_mode;
 
        mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
 
index 6f8192f4cfc759b653c186c5122565bacdd96631..be79dc9f386da265328c1975127e8f577c85ed25 100644 (file)
@@ -117,6 +117,8 @@ static int filelayout_async_handle_error(struct rpc_task *task,
        case -EKEYEXPIRED:
                rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX);
                break;
+       case -NFS4ERR_RETRY_UNCACHED_REP:
+               break;
        default:
                dprintk("%s DS error. Retry through MDS %d\n", __func__,
                        task->tk_status);
@@ -416,7 +418,8 @@ static int
 filelayout_check_layout(struct pnfs_layout_hdr *lo,
                        struct nfs4_filelayout_segment *fl,
                        struct nfs4_layoutget_res *lgr,
-                       struct nfs4_deviceid *id)
+                       struct nfs4_deviceid *id,
+                       gfp_t gfp_flags)
 {
        struct nfs4_file_layout_dsaddr *dsaddr;
        int status = -EINVAL;
@@ -439,7 +442,7 @@ filelayout_check_layout(struct pnfs_layout_hdr *lo,
        /* find and reference the deviceid */
        dsaddr = nfs4_fl_find_get_deviceid(id);
        if (dsaddr == NULL) {
-               dsaddr = get_device_info(lo->plh_inode, id);
+               dsaddr = get_device_info(lo->plh_inode, id, gfp_flags);
                if (dsaddr == NULL)
                        goto out;
        }
@@ -500,7 +503,8 @@ static int
 filelayout_decode_layout(struct pnfs_layout_hdr *flo,
                         struct nfs4_filelayout_segment *fl,
                         struct nfs4_layoutget_res *lgr,
-                        struct nfs4_deviceid *id)
+                        struct nfs4_deviceid *id,
+                        gfp_t gfp_flags)
 {
        struct xdr_stream stream;
        struct xdr_buf buf = {
@@ -516,7 +520,7 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
 
        dprintk("%s: set_layout_map Begin\n", __func__);
 
-       scratch = alloc_page(GFP_KERNEL);
+       scratch = alloc_page(gfp_flags);
        if (!scratch)
                return -ENOMEM;
 
@@ -554,13 +558,13 @@ filelayout_decode_layout(struct pnfs_layout_hdr *flo,
                goto out_err;
 
        fl->fh_array = kzalloc(fl->num_fh * sizeof(struct nfs_fh *),
-                              GFP_KERNEL);
+                              gfp_flags);
        if (!fl->fh_array)
                goto out_err;
 
        for (i = 0; i < fl->num_fh; i++) {
                /* Do we want to use a mempool here? */
-               fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), GFP_KERNEL);
+               fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags);
                if (!fl->fh_array[i])
                        goto out_err_free;
 
@@ -605,19 +609,20 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg)
 
 static struct pnfs_layout_segment *
 filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
-                     struct nfs4_layoutget_res *lgr)
+                     struct nfs4_layoutget_res *lgr,
+                     gfp_t gfp_flags)
 {
        struct nfs4_filelayout_segment *fl;
        int rc;
        struct nfs4_deviceid id;
 
        dprintk("--> %s\n", __func__);
-       fl = kzalloc(sizeof(*fl), GFP_KERNEL);
+       fl = kzalloc(sizeof(*fl), gfp_flags);
        if (!fl)
                return NULL;
 
-       rc = filelayout_decode_layout(layoutid, fl, lgr, &id);
-       if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id)) {
+       rc = filelayout_decode_layout(layoutid, fl, lgr, &id, gfp_flags);
+       if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, &id, gfp_flags)) {
                _filelayout_free_lseg(fl);
                return NULL;
        }
@@ -633,7 +638,7 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
                int size = (fl->stripe_type == STRIPE_SPARSE) ?
                        fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
 
-               fl->commit_buckets = kcalloc(size, sizeof(struct list_head), GFP_KERNEL);
+               fl->commit_buckets = kcalloc(size, sizeof(struct list_head), gfp_flags);
                if (!fl->commit_buckets) {
                        filelayout_free_lseg(&fl->generic_hdr);
                        return NULL;
index 7c44579f583250a34c97d622023ec335a70a0cca..2b461d77b43a9efa1c8235a955c18941ee8f56b5 100644 (file)
@@ -104,6 +104,6 @@ extern struct nfs4_file_layout_dsaddr *
 nfs4_fl_find_get_deviceid(struct nfs4_deviceid *dev_id);
 extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
 struct nfs4_file_layout_dsaddr *
-get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id);
+get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags);
 
 #endif /* FS_NFS_NFS4FILELAYOUT_H */
index de5350f2b2492139bf9b9d913a5bdf9e99a50ec3..db07c7af139519a3b938e44e0042a8d91102434c 100644 (file)
@@ -225,11 +225,11 @@ nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr)
 }
 
 static struct nfs4_pnfs_ds *
-nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port)
+nfs4_pnfs_ds_add(struct inode *inode, u32 ip_addr, u32 port, gfp_t gfp_flags)
 {
        struct nfs4_pnfs_ds *tmp_ds, *ds;
 
-       ds = kzalloc(sizeof(*tmp_ds), GFP_KERNEL);
+       ds = kzalloc(sizeof(*tmp_ds), gfp_flags);
        if (!ds)
                goto out;
 
@@ -261,7 +261,7 @@ out:
  * Currently only support ipv4, and one multi-path address.
  */
 static struct nfs4_pnfs_ds *
-decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode)
+decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode, gfp_t gfp_flags)
 {
        struct nfs4_pnfs_ds *ds = NULL;
        char *buf;
@@ -303,7 +303,7 @@ decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode)
                        rlen);
                goto out_err;
        }
-       buf = kmalloc(rlen + 1, GFP_KERNEL);
+       buf = kmalloc(rlen + 1, gfp_flags);
        if (!buf) {
                dprintk("%s: Not enough memory\n", __func__);
                goto out_err;
@@ -333,7 +333,7 @@ decode_and_add_ds(struct xdr_stream *streamp, struct inode *inode)
        sscanf(pstr, "-%d-%d", &tmp[0], &tmp[1]);
        port = htons((tmp[0] << 8) | (tmp[1]));
 
-       ds = nfs4_pnfs_ds_add(inode, ip_addr, port);
+       ds = nfs4_pnfs_ds_add(inode, ip_addr, port, gfp_flags);
        dprintk("%s: Decoded address and port %s\n", __func__, buf);
 out_free:
        kfree(buf);
@@ -343,7 +343,7 @@ out_err:
 
 /* Decode opaque device data and return the result */
 static struct nfs4_file_layout_dsaddr*
-decode_device(struct inode *ino, struct pnfs_device *pdev)
+decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags)
 {
        int i;
        u32 cnt, num;
@@ -362,7 +362,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev)
        struct page *scratch;
 
        /* set up xdr stream */
-       scratch = alloc_page(GFP_KERNEL);
+       scratch = alloc_page(gfp_flags);
        if (!scratch)
                goto out_err;
 
@@ -384,7 +384,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev)
        }
 
        /* read stripe indices */
-       stripe_indices = kcalloc(cnt, sizeof(u8), GFP_KERNEL);
+       stripe_indices = kcalloc(cnt, sizeof(u8), gfp_flags);
        if (!stripe_indices)
                goto out_err_free_scratch;
 
@@ -423,7 +423,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev)
 
        dsaddr = kzalloc(sizeof(*dsaddr) +
                        (sizeof(struct nfs4_pnfs_ds *) * (num - 1)),
-                       GFP_KERNEL);
+                       gfp_flags);
        if (!dsaddr)
                goto out_err_free_stripe_indices;
 
@@ -452,7 +452,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev)
                for (j = 0; j < mp_count; j++) {
                        if (j == 0) {
                                dsaddr->ds_list[i] = decode_and_add_ds(&stream,
-                                       ino);
+                                       ino, gfp_flags);
                                if (dsaddr->ds_list[i] == NULL)
                                        goto out_err_free_deviceid;
                        } else {
@@ -503,12 +503,12 @@ out_err:
  * available devices.
  */
 static struct nfs4_file_layout_dsaddr *
-decode_and_add_device(struct inode *inode, struct pnfs_device *dev)
+decode_and_add_device(struct inode *inode, struct pnfs_device *dev, gfp_t gfp_flags)
 {
        struct nfs4_file_layout_dsaddr *d, *new;
        long hash;
 
-       new = decode_device(inode, dev);
+       new = decode_device(inode, dev, gfp_flags);
        if (!new) {
                printk(KERN_WARNING "%s: Could not decode or add device\n",
                        __func__);
@@ -537,7 +537,7 @@ decode_and_add_device(struct inode *inode, struct pnfs_device *dev)
  * of available devices, and return it.
  */
 struct nfs4_file_layout_dsaddr *
-get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id)
+get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags)
 {
        struct pnfs_device *pdev = NULL;
        u32 max_resp_sz;
@@ -556,17 +556,17 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id)
        dprintk("%s inode %p max_resp_sz %u max_pages %d\n",
                __func__, inode, max_resp_sz, max_pages);
 
-       pdev = kzalloc(sizeof(struct pnfs_device), GFP_KERNEL);
+       pdev = kzalloc(sizeof(struct pnfs_device), gfp_flags);
        if (pdev == NULL)
                return NULL;
 
-       pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL);
+       pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
        if (pages == NULL) {
                kfree(pdev);
                return NULL;
        }
        for (i = 0; i < max_pages; i++) {
-               pages[i] = alloc_page(GFP_KERNEL);
+               pages[i] = alloc_page(gfp_flags);
                if (!pages[i])
                        goto out_free;
        }
@@ -587,7 +587,7 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id)
         * Found new device, need to decode it and then add it to the
         * list of known devices for this mountpoint.
         */
-       dsaddr = decode_and_add_device(inode, pdev);
+       dsaddr = decode_and_add_device(inode, pdev, gfp_flags);
 out_free:
        for (i = 0; i < max_pages; i++)
                __free_page(pages[i]);
index 69c0f3c5ee7a9c1bfa6374ea3aad7ed7421cf2fa..cf1b339c3937f7d959c6e5cbcf4a056ac16fbb2e 100644 (file)
@@ -300,6 +300,7 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
                        ret = nfs4_delay(server->client, &exception->timeout);
                        if (ret != 0)
                                break;
+               case -NFS4ERR_RETRY_UNCACHED_REP:
                case -NFS4ERR_OLD_STATEID:
                        exception->retry = 1;
                        break;
@@ -3695,6 +3696,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
                        rpc_delay(task, NFS4_POLL_RETRY_MAX);
                        task->tk_status = 0;
                        return -EAGAIN;
+               case -NFS4ERR_RETRY_UNCACHED_REP:
                case -NFS4ERR_OLD_STATEID:
                        task->tk_status = 0;
                        return -EAGAIN;
@@ -4844,6 +4846,8 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
                dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
                rpc_delay(task, NFS4_POLL_RETRY_MIN);
                task->tk_status = 0;
+               /* fall through */
+       case -NFS4ERR_RETRY_UNCACHED_REP:
                nfs_restart_rpc(task, data->clp);
                return;
        }
@@ -5479,6 +5483,8 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf
                break;
        case -NFS4ERR_DELAY:
                rpc_delay(task, NFS4_POLL_RETRY_MAX);
+               /* fall through */
+       case -NFS4ERR_RETRY_UNCACHED_REP:
                return -EAGAIN;
        default:
                nfs4_schedule_lease_recovery(clp);
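
In the nfs4proc.c hunks above, NFS4ERR_RETRY_UNCACHED_REP is added as an explicit case label so that this error is simply retried rather than falling into the default branch (which schedules lease recovery or reports a failure); the /* fall through */ comments mark where a preceding case is meant to share that retry path. A self-contained sketch of the idiom, with hypothetical names and error values:

    #include <linux/errno.h>

    /* Hypothetical error codes, chosen only for the example. */
    #define DEMO_ERR_DELAY                  1
    #define DEMO_ERR_RETRY_UNCACHED_REP     2

    static void demo_delay(void)             { /* back off before retrying */ }
    static void demo_schedule_recovery(void) { /* kick state recovery */ }

    static int demo_handle_error(int err)
    {
            switch (err) {
            case -DEMO_ERR_DELAY:
                    demo_delay();
                    /* fall through */
            case -DEMO_ERR_RETRY_UNCACHED_REP:
                    /* Both errors end up retrying the request. */
                    return -EAGAIN;
            default:
                    demo_schedule_recovery();
                    return err;
            }
    }
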
index ff681ab65d31f54654521e560b9cd5780db8318f..f57f5281a52090ac8445a3ebb362515708225661 100644 (file)
@@ -383,6 +383,7 @@ pnfs_destroy_all_layouts(struct nfs_client *clp)
                                plh_layouts);
                dprintk("%s freeing layout for inode %lu\n", __func__,
                        lo->plh_inode->i_ino);
+               list_del_init(&lo->plh_layouts);
                pnfs_destroy_layout(NFS_I(lo->plh_inode));
        }
 }
@@ -466,7 +467,8 @@ pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
 static struct pnfs_layout_segment *
 send_layoutget(struct pnfs_layout_hdr *lo,
           struct nfs_open_context *ctx,
-          u32 iomode)
+          u32 iomode,
+          gfp_t gfp_flags)
 {
        struct inode *ino = lo->plh_inode;
        struct nfs_server *server = NFS_SERVER(ino);
@@ -479,7 +481,7 @@ send_layoutget(struct pnfs_layout_hdr *lo,
        dprintk("--> %s\n", __func__);
 
        BUG_ON(ctx == NULL);
-       lgp = kzalloc(sizeof(*lgp), GFP_KERNEL);
+       lgp = kzalloc(sizeof(*lgp), gfp_flags);
        if (lgp == NULL)
                return NULL;
 
@@ -487,12 +489,12 @@ send_layoutget(struct pnfs_layout_hdr *lo,
        max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
        max_pages = max_resp_sz >> PAGE_SHIFT;
 
-       pages = kzalloc(max_pages * sizeof(struct page *), GFP_KERNEL);
+       pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
        if (!pages)
                goto out_err_free;
 
        for (i = 0; i < max_pages; i++) {
-               pages[i] = alloc_page(GFP_KERNEL);
+               pages[i] = alloc_page(gfp_flags);
                if (!pages[i])
                        goto out_err_free;
        }
@@ -508,6 +510,7 @@ send_layoutget(struct pnfs_layout_hdr *lo,
        lgp->args.layout.pages = pages;
        lgp->args.layout.pglen = max_pages * PAGE_SIZE;
        lgp->lsegpp = &lseg;
+       lgp->gfp_flags = gfp_flags;
 
        /* Synchronously retrieve layout information from server and
         * store in lseg.
@@ -665,11 +668,11 @@ pnfs_insert_layout(struct pnfs_layout_hdr *lo,
 }
 
 static struct pnfs_layout_hdr *
-alloc_init_layout_hdr(struct inode *ino)
+alloc_init_layout_hdr(struct inode *ino, gfp_t gfp_flags)
 {
        struct pnfs_layout_hdr *lo;
 
-       lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL);
+       lo = kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
        if (!lo)
                return NULL;
        atomic_set(&lo->plh_refcount, 1);
@@ -681,7 +684,7 @@ alloc_init_layout_hdr(struct inode *ino)
 }
 
 static struct pnfs_layout_hdr *
-pnfs_find_alloc_layout(struct inode *ino)
+pnfs_find_alloc_layout(struct inode *ino, gfp_t gfp_flags)
 {
        struct nfs_inode *nfsi = NFS_I(ino);
        struct pnfs_layout_hdr *new = NULL;
@@ -696,7 +699,7 @@ pnfs_find_alloc_layout(struct inode *ino)
                        return nfsi->layout;
        }
        spin_unlock(&ino->i_lock);
-       new = alloc_init_layout_hdr(ino);
+       new = alloc_init_layout_hdr(ino, gfp_flags);
        spin_lock(&ino->i_lock);
 
        if (likely(nfsi->layout == NULL))       /* Won the race? */
@@ -756,7 +759,8 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode)
 struct pnfs_layout_segment *
 pnfs_update_layout(struct inode *ino,
                   struct nfs_open_context *ctx,
-                  enum pnfs_iomode iomode)
+                  enum pnfs_iomode iomode,
+                  gfp_t gfp_flags)
 {
        struct nfs_inode *nfsi = NFS_I(ino);
        struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
@@ -767,7 +771,7 @@ pnfs_update_layout(struct inode *ino,
        if (!pnfs_enabled_sb(NFS_SERVER(ino)))
                return NULL;
        spin_lock(&ino->i_lock);
-       lo = pnfs_find_alloc_layout(ino);
+       lo = pnfs_find_alloc_layout(ino, gfp_flags);
        if (lo == NULL) {
                dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
                goto out_unlock;
@@ -807,7 +811,7 @@ pnfs_update_layout(struct inode *ino,
                spin_unlock(&clp->cl_lock);
        }
 
-       lseg = send_layoutget(lo, ctx, iomode);
+       lseg = send_layoutget(lo, ctx, iomode, gfp_flags);
        if (!lseg && first) {
                spin_lock(&clp->cl_lock);
                list_del_init(&lo->plh_layouts);
@@ -846,7 +850,7 @@ pnfs_layout_process(struct nfs4_layoutget *lgp)
                goto out;
        }
        /* Inject layout blob into I/O device driver */
-       lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res);
+       lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
        if (!lseg || IS_ERR(lseg)) {
                if (!lseg)
                        status = -ENOMEM;
@@ -899,7 +903,8 @@ static int pnfs_read_pg_test(struct nfs_pageio_descriptor *pgio,
                /* This is first coelesce call for a series of nfs_pages */
                pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                                   prev->wb_context,
-                                                  IOMODE_READ);
+                                                  IOMODE_READ,
+                                                  GFP_KERNEL);
        }
        return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
 }
@@ -921,7 +926,8 @@ static int pnfs_write_pg_test(struct nfs_pageio_descriptor *pgio,
                /* This is first coelesce call for a series of nfs_pages */
                pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                                   prev->wb_context,
-                                                  IOMODE_RW);
+                                                  IOMODE_RW,
+                                                  GFP_NOFS);
        }
        return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
 }
index bc4827202e7a2c862ae628c1e27b0bae48a9a93d..0c015bad9e7aa4c780297746a988581c8c69acda 100644 (file)
@@ -70,7 +70,7 @@ struct pnfs_layoutdriver_type {
        const u32 id;
        const char *name;
        struct module *owner;
-       struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr);
+       struct pnfs_layout_segment * (*alloc_lseg) (struct pnfs_layout_hdr *layoutid, struct nfs4_layoutget_res *lgr, gfp_t gfp_flags);
        void (*free_lseg) (struct pnfs_layout_segment *lseg);
 
        /* test for nfs page cache coalescing */
@@ -126,7 +126,7 @@ void get_layout_hdr(struct pnfs_layout_hdr *lo);
 void put_lseg(struct pnfs_layout_segment *lseg);
 struct pnfs_layout_segment *
 pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
-                  enum pnfs_iomode access_type);
+                  enum pnfs_iomode access_type, gfp_t gfp_flags);
 void set_pnfs_layoutdriver(struct nfs_server *, u32 id);
 void unset_pnfs_layoutdriver(struct nfs_server *);
 enum pnfs_try_status pnfs_try_to_write_data(struct nfs_write_data *,
@@ -245,7 +245,7 @@ static inline void put_lseg(struct pnfs_layout_segment *lseg)
 
 static inline struct pnfs_layout_segment *
 pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx,
-                  enum pnfs_iomode access_type)
+                  enum pnfs_iomode access_type, gfp_t gfp_flags)
 {
        return NULL;
 }
index 7cded2b12a05eb0016421304afc87b68458fd03b..2bcf0dc306a1000e2211ef117670ac47f6ef4407 100644 (file)
@@ -288,7 +288,7 @@ static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc)
        atomic_set(&req->wb_complete, requests);
 
        BUG_ON(desc->pg_lseg != NULL);
-       lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ);
+       lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL);
        ClearPageError(page);
        offset = 0;
        nbytes = desc->pg_count;
@@ -351,7 +351,7 @@ static int nfs_pagein_one(struct nfs_pageio_descriptor *desc)
        }
        req = nfs_list_entry(data->pages.next);
        if ((!lseg) && list_is_singular(&data->pages))
-               lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ);
+               lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_READ, GFP_KERNEL);
 
        ret = nfs_read_rpcsetup(req, data, &nfs_read_full_ops, desc->pg_count,
                                0, lseg);
index 3bd5d7e80f6cc60fbd7f46a68c8abd60c126eb54..49c715b4ac927b9bbd283e0f991118d34bf9af10 100644 (file)
@@ -939,7 +939,7 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc)
        atomic_set(&req->wb_complete, requests);
 
        BUG_ON(desc->pg_lseg);
-       lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW);
+       lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS);
        ClearPageError(page);
        offset = 0;
        nbytes = desc->pg_count;
@@ -1013,7 +1013,7 @@ static int nfs_flush_one(struct nfs_pageio_descriptor *desc)
        }
        req = nfs_list_entry(data->pages.next);
        if ((!lseg) && list_is_singular(&data->pages))
-               lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW);
+               lseg = pnfs_update_layout(desc->pg_inode, req->wb_context, IOMODE_RW, GFP_NOFS);
 
        if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
            (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
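
The pNFS hunks above thread a gfp_t argument from the page I/O paths down through pnfs_update_layout(), send_layoutget(), the layout driver's alloc_lseg() hook and the file-layout device-ID code, so read-side callers keep using GFP_KERNEL while the writeback path passes GFP_NOFS and cannot recurse back into the filesystem during memory reclaim. A minimal sketch of the idiom, with hypothetical names:

    #include <linux/gfp.h>
    #include <linux/slab.h>

    /* The allocation context is decided once by the caller and handed
     * down through every helper that may allocate memory. */
    static void *demo_alloc_layout_blob(size_t len, gfp_t gfp_flags)
    {
            return kzalloc(len, gfp_flags);
    }

    static void *demo_read_path(size_t len)
    {
            /* Ordinary context: blocking reclaim is fine. */
            return demo_alloc_layout_blob(len, GFP_KERNEL);
    }

    static void *demo_writeback_path(size_t len)
    {
            /* Called while flushing dirty pages: must not re-enter
             * filesystem code to reclaim memory, so use GFP_NOFS. */
            return demo_alloc_layout_blob(len, GFP_NOFS);
    }
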
index 0a0a66d98cce85f327b32080c8a97045e8babdf7..f7684483785e848c1e59657ab141801100796012 100644 (file)
@@ -646,7 +646,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
        unsigned long group, group_offset;
        int i, j, n, ret;
 
-       for (i = 0; i < nitems; i += n) {
+       for (i = 0; i < nitems; i = j) {
                group = nilfs_palloc_group(inode, entry_nrs[i], &group_offset);
                ret = nilfs_palloc_get_desc_block(inode, group, 0, &desc_bh);
                if (ret < 0)
index 643720209a9899506b9a1c477b458b91b5070d4c..9a3e6bbff27bd4839b487c2c14282fd9ef1a4675 100644 (file)
@@ -539,25 +539,41 @@ static int o2hb_verify_crc(struct o2hb_region *reg,
 
 /* We want to make sure that nobody is heartbeating on top of us --
  * this will help detect an invalid configuration. */
-static int o2hb_check_last_timestamp(struct o2hb_region *reg)
+static void o2hb_check_last_timestamp(struct o2hb_region *reg)
 {
-       int node_num, ret;
        struct o2hb_disk_slot *slot;
        struct o2hb_disk_heartbeat_block *hb_block;
+       char *errstr;
 
-       node_num = o2nm_this_node();
-
-       ret = 1;
-       slot = &reg->hr_slots[node_num];
+       slot = &reg->hr_slots[o2nm_this_node()];
        /* Don't check on our 1st timestamp */
-       if (slot->ds_last_time) {
-               hb_block = slot->ds_raw_block;
+       if (!slot->ds_last_time)
+               return;
 
-               if (le64_to_cpu(hb_block->hb_seq) != slot->ds_last_time)
-                       ret = 0;
-       }
+       hb_block = slot->ds_raw_block;
+       if (le64_to_cpu(hb_block->hb_seq) == slot->ds_last_time &&
+           le64_to_cpu(hb_block->hb_generation) == slot->ds_last_generation &&
+           hb_block->hb_node == slot->ds_node_num)
+               return;
 
-       return ret;
+#define ERRSTR1                "Another node is heartbeating on device"
+#define ERRSTR2                "Heartbeat generation mismatch on device"
+#define ERRSTR3                "Heartbeat sequence mismatch on device"
+
+       if (hb_block->hb_node != slot->ds_node_num)
+               errstr = ERRSTR1;
+       else if (le64_to_cpu(hb_block->hb_generation) !=
+                slot->ds_last_generation)
+               errstr = ERRSTR2;
+       else
+               errstr = ERRSTR3;
+
+       mlog(ML_ERROR, "%s (%s): expected(%u:0x%llx, 0x%llx), "
+            "ondisk(%u:0x%llx, 0x%llx)\n", errstr, reg->hr_dev_name,
+            slot->ds_node_num, (unsigned long long)slot->ds_last_generation,
+            (unsigned long long)slot->ds_last_time, hb_block->hb_node,
+            (unsigned long long)le64_to_cpu(hb_block->hb_generation),
+            (unsigned long long)le64_to_cpu(hb_block->hb_seq));
 }
 
 static inline void o2hb_prepare_block(struct o2hb_region *reg,
@@ -983,9 +999,7 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
        /* With an up to date view of the slots, we can check that no
         * other node has been improperly configured to heartbeat in
         * our slot. */
-       if (!o2hb_check_last_timestamp(reg))
-               mlog(ML_ERROR, "Device \"%s\": another node is heartbeating "
-                    "in our slot!\n", reg->hr_dev_name);
+       o2hb_check_last_timestamp(reg);
 
        /* fill in the proper info for our next heartbeat */
        o2hb_prepare_block(reg, reg->hr_generation);
@@ -999,8 +1013,8 @@ static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
        }
 
        i = -1;
-       while((i = find_next_bit(configured_nodes, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
-
+       while((i = find_next_bit(configured_nodes,
+                                O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) {
                change |= o2hb_check_slot(reg, &reg->hr_slots[i]);
        }
 
@@ -1690,6 +1704,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
        struct file *filp = NULL;
        struct inode *inode = NULL;
        ssize_t ret = -EINVAL;
+       int live_threshold;
 
        if (reg->hr_bdev)
                goto out;
@@ -1766,8 +1781,18 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
         * A node is considered live after it has beat LIVE_THRESHOLD
         * times.  We're not steady until we've given them a chance
         * _after_ our first read.
+        * The default threshold is bare minimum so as to limit the delay
+        * during mounts. For global heartbeat, the threshold doubled for the
+        * first region.
         */
-       atomic_set(&reg->hr_steady_iterations, O2HB_LIVE_THRESHOLD + 1);
+       live_threshold = O2HB_LIVE_THRESHOLD;
+       if (o2hb_global_heartbeat_active()) {
+               spin_lock(&o2hb_live_lock);
+               if (o2hb_pop_count(&o2hb_region_bitmap, O2NM_MAX_REGIONS) == 1)
+                       live_threshold <<= 1;
+               spin_unlock(&o2hb_live_lock);
+       }
+       atomic_set(&reg->hr_steady_iterations, live_threshold + 1);
 
        hb_task = kthread_run(o2hb_thread, reg, "o2hb-%s",
                              reg->hr_item.ci_name);
index 9fe5b8fd658f0a3db09f1919512a0479a241ecca..8582e3f4f120647d05df81556639e627882b5811 100644 (file)
@@ -2868,7 +2868,7 @@ static int ocfs2_expand_inline_dir(struct inode *dir, struct buffer_head *di_bh,
                bytes = blocks_wanted << sb->s_blocksize_bits;
        struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
        struct ocfs2_inode_info *oi = OCFS2_I(dir);
-       struct ocfs2_alloc_context *data_ac;
+       struct ocfs2_alloc_context *data_ac = NULL;
        struct ocfs2_alloc_context *meta_ac = NULL;
        struct buffer_head *dirdata_bh = NULL;
        struct buffer_head *dx_root_bh = NULL;
index 7540a492eaba0188d624a08f73688eee04838288..3b179d6cbde09be9017eb21fbefcc5df8b217a53 100644 (file)
@@ -1614,7 +1614,8 @@ static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
        spin_unlock(&dlm->spinlock);
 
        /* Support for global heartbeat and node info was added in 1.1 */
-       if (dlm_protocol.pv_major > 1 || dlm_protocol.pv_minor > 0) {
+       if (dlm->dlm_locking_proto.pv_major > 1 ||
+           dlm->dlm_locking_proto.pv_minor > 0) {
                status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map);
                if (status) {
                        mlog_errno(status);
index fede57ed005ff417b80b5c3bc9c1d25fe3aaa526..84d166328cf7448f36cfcb0a54f08eefda50e18d 100644 (file)
@@ -2574,6 +2574,9 @@ fail:
                res->state &= ~DLM_LOCK_RES_MIGRATING;
                wake = 1;
                spin_unlock(&res->spinlock);
+               if (dlm_is_host_down(ret))
+                       dlm_wait_for_node_death(dlm, target,
+                                               DLM_NODE_DEATH_WAIT_MAX);
                goto leave;
        }
 
index 41565ae528566b3a5bfca14092aad21bba721812..89659d6dc2067276f2cebcea63a9bcc34a0a2486 100644 (file)
@@ -1607,6 +1607,9 @@ static void ocfs2_calc_trunc_pos(struct inode *inode,
        range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
 
        if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
+               /*
+                * remove an entire extent record.
+                */
                *trunc_cpos = le32_to_cpu(rec->e_cpos);
                /*
                 * Skip holes if any.
@@ -1617,7 +1620,16 @@ static void ocfs2_calc_trunc_pos(struct inode *inode,
                *blkno = le64_to_cpu(rec->e_blkno);
                *trunc_end = le32_to_cpu(rec->e_cpos);
        } else if (range > trunc_start) {
+               /*
+                * remove a partial extent record, which means we're
+                * removing the last extent record.
+                */
                *trunc_cpos = trunc_start;
+               /*
+                * skip hole if any.
+                */
+               if (range < *trunc_end)
+                       *trunc_end = range;
                *trunc_len = *trunc_end - trunc_start;
                coff = trunc_start - le32_to_cpu(rec->e_cpos);
                *blkno = le64_to_cpu(rec->e_blkno) +
index b141a44605ca076132cd704e22226ac8ce263fa7..295d56454e8b23b6e9d97a6bd258e6170311c8f5 100644 (file)
@@ -1260,6 +1260,9 @@ void ocfs2_complete_mount_recovery(struct ocfs2_super *osb)
 {
        struct ocfs2_journal *journal = osb->journal;
 
+       if (ocfs2_is_hard_readonly(osb))
+               return;
+
        /* No need to queue up our truncate_log as regular cleanup will catch
         * that */
        ocfs2_queue_recovery_completion(journal, osb->slot_num,
index ac0ccb5026a2d107eb5e5e38c84e4bdfe51682ed..19d6750d1d6ce3b5571e463ee2fb2d768985d644 100644 (file)
@@ -348,6 +348,12 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
                goto fail;
        }
 
+       /* Check that sizeof_partition_entry has the correct value */
+       if (le32_to_cpu((*gpt)->sizeof_partition_entry) != sizeof(gpt_entry)) {
+               pr_debug("GUID Partitition Entry Size check failed.\n");
+               goto fail;
+       }
+
        if (!(*ptes = alloc_read_gpt_entries(state, *gpt)))
                goto fail;
 
index 2e7addfd9803559ca03a2527aaab161006e3b482..318d8654989bf70479321fb8599b0dcf9ac29365 100644 (file)
@@ -214,7 +214,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
        int flags = vma->vm_flags;
        unsigned long ino = 0;
        unsigned long long pgoff = 0;
-       unsigned long start;
+       unsigned long start, end;
        dev_t dev = 0;
        int len;
 
@@ -227,13 +227,15 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
 
        /* We don't show the stack guard page in /proc/maps */
        start = vma->vm_start;
-       if (vma->vm_flags & VM_GROWSDOWN)
-               if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
-                       start += PAGE_SIZE;
+       if (stack_guard_page_start(vma, start))
+               start += PAGE_SIZE;
+       end = vma->vm_end;
+       if (stack_guard_page_end(vma, end))
+               end -= PAGE_SIZE;
 
        seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
                        start,
-                       vma->vm_end,
+                       end,
                        flags & VM_READ ? 'r' : '-',
                        flags & VM_WRITE ? 'w' : '-',
                        flags & VM_EXEC ? 'x' : '-',
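
The show_map_vma() change above switches from the old vma_stack_continue() test to the new stack_guard_page_start()/stack_guard_page_end() helpers and trims a page from either end of the reported range, so the implicit stack guard page no longer shows up in /proc/<pid>/maps for grows-down or grows-up stacks. A sketch of the assumed idea behind those helpers (hypothetical names; the real mm.h definitions also exclude guard pages that abut a neighbouring VMA):

    #include <linux/mm.h>

    /* Assumed semantics: a grows-down stack keeps its guard page at the
     * lowest address of the VMA, a grows-up stack at the highest. */
    static int demo_guard_page_at_start(struct vm_area_struct *vma,
                                        unsigned long addr)
    {
            return (vma->vm_flags & VM_GROWSDOWN) && vma->vm_start == addr;
    }

    static int demo_guard_page_at_end(struct vm_area_struct *vma,
                                      unsigned long addr)
    {
            return (vma->vm_flags & VM_GROWSUP) && vma->vm_end == addr;
    }
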
index 4d0cb1241460fbc2eacd709188293d5782b1dd3b..40fa780ebea7d6280cc83f30ca4db9b0a0052b98 100644 (file)
@@ -174,26 +174,6 @@ void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
        spin_unlock(&c->buds_lock);
 }
 
-/**
- * ubifs_create_buds_lists - create journal head buds lists for remount rw.
- * @c: UBIFS file-system description object
- */
-void ubifs_create_buds_lists(struct ubifs_info *c)
-{
-       struct rb_node *p;
-
-       spin_lock(&c->buds_lock);
-       p = rb_first(&c->buds);
-       while (p) {
-               struct ubifs_bud *bud = rb_entry(p, struct ubifs_bud, rb);
-               struct ubifs_jhead *jhead = &c->jheads[bud->jhead];
-
-               list_add_tail(&bud->list, &jhead->buds_list);
-               p = rb_next(p);
-       }
-       spin_unlock(&c->buds_lock);
-}
-
 /**
  * ubifs_add_bud_to_log - add a new bud to the log.
  * @c: UBIFS file-system description object
index eed0fcff8d731710dc7723d25b1adb10ecbe6805..d3d6d365bfc11345d49e3a6bbdc33dd97c476ea9 100644 (file)
@@ -59,6 +59,7 @@ enum {
  * @new_size: truncation new size
  * @free: amount of free space in a bud
  * @dirty: amount of dirty space in a bud from padding and deletion nodes
+ * @jhead: journal head number of the bud
  *
  * UBIFS journal replay must compare node sequence numbers, which means it must
  * build a tree of node information to insert into the TNC.
@@ -80,6 +81,7 @@ struct replay_entry {
                struct {
                        int free;
                        int dirty;
+                       int jhead;
                };
        };
 };
@@ -159,6 +161,11 @@ static int set_bud_lprops(struct ubifs_info *c, struct replay_entry *r)
                err = PTR_ERR(lp);
                goto out;
        }
+
+       /* Make sure the journal head points to the latest bud */
+       err = ubifs_wbuf_seek_nolock(&c->jheads[r->jhead].wbuf, r->lnum,
+                                    c->leb_size - r->free, UBI_SHORTTERM);
+
 out:
        ubifs_release_lprops(c);
        return err;
@@ -627,10 +634,6 @@ static int replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead,
        ubifs_assert(sleb->endpt - offs >= used);
        ubifs_assert(sleb->endpt % c->min_io_size == 0);
 
-       if (sleb->endpt + c->min_io_size <= c->leb_size && !c->ro_mount)
-               err = ubifs_wbuf_seek_nolock(&c->jheads[jhead].wbuf, lnum,
-                                            sleb->endpt, UBI_SHORTTERM);
-
        *dirty = sleb->endpt - offs - used;
        *free = c->leb_size - sleb->endpt;
 
@@ -653,12 +656,14 @@ out_dump:
  * @sqnum: sequence number
  * @free: amount of free space in bud
  * @dirty: amount of dirty space from padding and deletion nodes
+ * @jhead: journal head number for the bud
  *
  * This function inserts a reference node to the replay tree and returns zero
  * in case of success or a negative error code in case of failure.
  */
 static int insert_ref_node(struct ubifs_info *c, int lnum, int offs,
-                          unsigned long long sqnum, int free, int dirty)
+                          unsigned long long sqnum, int free, int dirty,
+                          int jhead)
 {
        struct rb_node **p = &c->replay_tree.rb_node, *parent = NULL;
        struct replay_entry *r;
@@ -688,6 +693,7 @@ static int insert_ref_node(struct ubifs_info *c, int lnum, int offs,
        r->flags = REPLAY_REF;
        r->free = free;
        r->dirty = dirty;
+       r->jhead = jhead;
 
        rb_link_node(&r->rb, parent, p);
        rb_insert_color(&r->rb, &c->replay_tree);
@@ -712,7 +718,7 @@ static int replay_buds(struct ubifs_info *c)
                if (err)
                        return err;
                err = insert_ref_node(c, b->bud->lnum, b->bud->start, b->sqnum,
-                                     free, dirty);
+                                     free, dirty, b->bud->jhead);
                if (err)
                        return err;
        }
index be6c7b008f38233650cc119ff76117d61bb6c4e6..04ad07f4fcc3da11b05e868442ed6f3bc7ad7729 100644 (file)
@@ -1257,12 +1257,12 @@ static int mount_ubifs(struct ubifs_info *c)
                goto out_free;
        }
 
+       err = alloc_wbufs(c);
+       if (err)
+               goto out_cbuf;
+
        sprintf(c->bgt_name, BGT_NAME_PATTERN, c->vi.ubi_num, c->vi.vol_id);
        if (!c->ro_mount) {
-               err = alloc_wbufs(c);
-               if (err)
-                       goto out_cbuf;
-
                /* Create background thread */
                c->bgt = kthread_create(ubifs_bg_thread, c, "%s", c->bgt_name);
                if (IS_ERR(c->bgt)) {
@@ -1631,12 +1631,6 @@ static int ubifs_remount_rw(struct ubifs_info *c)
        if (err)
                goto out;
 
-       err = alloc_wbufs(c);
-       if (err)
-               goto out;
-
-       ubifs_create_buds_lists(c);
-
        /* Create background thread */
        c->bgt = kthread_create(ubifs_bg_thread, c, "%s", c->bgt_name);
        if (IS_ERR(c->bgt)) {
@@ -1744,7 +1738,6 @@ static void ubifs_remount_ro(struct ubifs_info *c)
        if (err)
                ubifs_ro_mode(c, err);
 
-       free_wbufs(c);
        vfree(c->orph_buf);
        c->orph_buf = NULL;
        kfree(c->write_reserve_buf);
index e4f9c1b0836c9ef8ef9feb3515d2f29a97565004..3e898a48122d3921f56683c313b071968cc4a9df 100644 (file)
@@ -926,6 +926,7 @@ restart:
                                        XFS_LOOKUP_BATCH,
                                        XFS_ICI_RECLAIM_TAG);
                        if (!nr_found) {
+                               done = 1;
                                rcu_read_unlock();
                                break;
                        }
index acdb92f14d518fe43ac9c0be62d67ae49831ca1d..5fc2380092c8b88eae987cdfcf2bbd280ee92cdb 100644 (file)
@@ -346,20 +346,23 @@ xfs_ail_delete(
  */
 STATIC void
 xfs_ail_worker(
-       struct work_struct *work)
+       struct work_struct      *work)
 {
-       struct xfs_ail  *ailp = container_of(to_delayed_work(work),
+       struct xfs_ail          *ailp = container_of(to_delayed_work(work),
                                        struct xfs_ail, xa_work);
-       long            tout;
-       xfs_lsn_t       target =  ailp->xa_target;
-       xfs_lsn_t       lsn;
-       xfs_log_item_t  *lip;
-       int             flush_log, count, stuck;
-       xfs_mount_t     *mp = ailp->xa_mount;
+       xfs_mount_t             *mp = ailp->xa_mount;
        struct xfs_ail_cursor   *cur = &ailp->xa_cursors;
-       int             push_xfsbufd = 0;
+       xfs_log_item_t          *lip;
+       xfs_lsn_t               lsn;
+       xfs_lsn_t               target;
+       long                    tout = 10;
+       int                     flush_log = 0;
+       int                     stuck = 0;
+       int                     count = 0;
+       int                     push_xfsbufd = 0;
 
        spin_lock(&ailp->xa_lock);
+       target = ailp->xa_target;
        xfs_trans_ail_cursor_init(ailp, cur);
        lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn);
        if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
@@ -368,8 +371,7 @@ xfs_ail_worker(
                 */
                xfs_trans_ail_cursor_done(ailp, cur);
                spin_unlock(&ailp->xa_lock);
-               ailp->xa_last_pushed_lsn = 0;
-               return;
+               goto out_done;
        }
 
        XFS_STATS_INC(xs_push_ail);
@@ -386,8 +388,7 @@ xfs_ail_worker(
         * lots of contention on the AIL lists.
         */
        lsn = lip->li_lsn;
-       flush_log = stuck = count = 0;
-       while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) {
+       while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
                int     lock_result;
                /*
                 * If we can lock the item without sleeping, unlock the AIL
@@ -480,21 +481,25 @@ xfs_ail_worker(
        }
 
        /* assume we have more work to do in a short while */
-       tout = 10;
+out_done:
        if (!count) {
                /* We're past our target or empty, so idle */
                ailp->xa_last_pushed_lsn = 0;
 
                /*
-                * Check for an updated push target before clearing the
-                * XFS_AIL_PUSHING_BIT. If the target changed, we've got more
-                * work to do. Wait a bit longer before starting that work.
+                * We clear the XFS_AIL_PUSHING_BIT first before checking
+                * whether the target has changed. If the target has changed,
+                * this pushes the requeue race directly onto the result of the
+                * atomic test/set bit, so we are guaranteed that either the
+                * the pusher that changed the target or ourselves will requeue
+                * the work (but not both).
                 */
+               clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
                smp_rmb();
-               if (ailp->xa_target == target) {
-                       clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
+               if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
+                   test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
                        return;
-               }
+
                tout = 50;
        } else if (XFS_LSN_CMP(lsn, target) >= 0) {
                /*
@@ -553,7 +558,7 @@ xfs_ail_push(
         * the XFS_AIL_PUSHING_BIT.
         */
        smp_wmb();
-       ailp->xa_target = threshold_lsn;
+       xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
        if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
                queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
 }
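
The xfs_ail_worker() rework above reverses the old ordering: the worker now clears XFS_AIL_PUSHING_BIT first and only then re-checks the push target, so that the atomic test_and_set_bit() decides whether the worker or the task that moved the target requeues the work, and exactly one of them does. Stripped of the XFS details, the idiom looks roughly like this (hypothetical names, a sketch rather than the real code):

    #include <linux/bitops.h>
    #include <linux/workqueue.h>

    #define DEMO_PUSHING_BIT 0

    struct demo_ail {
            unsigned long           flags;
            unsigned long           target;
            struct delayed_work     work;
    };

    /* Worker side, about to go idle after pushing up to 'seen'. */
    static void demo_worker_idle(struct demo_ail *a, unsigned long seen)
    {
            clear_bit(DEMO_PUSHING_BIT, &a->flags);
            smp_rmb();
            if (a->target == seen ||
                test_and_set_bit(DEMO_PUSHING_BIT, &a->flags))
                    return; /* nothing new, or the waker already re-armed us */
            schedule_delayed_work(&a->work, 0);
    }

    /* Waker side: publish a new target, then arm the worker if it is idle. */
    static void demo_push(struct demo_ail *a, unsigned long new_target)
    {
            smp_wmb();
            a->target = new_target;
            if (!test_and_set_bit(DEMO_PUSHING_BIT, &a->flags))
                    schedule_delayed_work(&a->work, 0);
    }
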
index ade09d7b427100da6057cb04c7e43d484c84ac25..c99c3d3e78113d8a8596d0696464034c5e20fe8b 100644 (file)
@@ -127,7 +127,7 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
 
 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
 
-bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
+int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
 bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
 int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
 int drm_fb_helper_debug_enter(struct fb_info *info);
index c2f93a8ae2e14b414bc6976c811f92406f7c4557..564b14aa7e165e70e45d741f1f2b88dd26992a65 100644 (file)
@@ -86,7 +86,7 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
 }
 #define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
                                                &(mm)->head_node.node_list, \
-                                               node_list);
+                                               node_list)
 #define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
        for (entry = (mm)->prev_scanned_node, \
                next = entry ? list_entry(entry->node_list.next, \
index 816e30cbd9689ac59fbea5d92ca06c16d1558384..f04b2a3b0f49b7117cb74163af0997a7a6fd4742 100644 (file)
        {0x1002, 0x6719, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x671c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x671d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x671f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6721, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6722, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6729, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6739, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x673e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6740, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6741, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x688D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6898, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6899, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x689b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x689c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x689d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x689e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68b9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x68ba, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68be, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x68bf, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x68c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
index 7aa5dddb2098eb1cb6affa231cff2350c81e97c9..787f7b6fd62237683eb5dad3ecbd77958128f3f1 100644 (file)
@@ -910,6 +910,7 @@ struct drm_radeon_cs {
 #define RADEON_INFO_CLOCK_CRYSTAL_FREQ 0x09 /* clock crystal frequency */
 #define RADEON_INFO_NUM_BACKENDS       0x0a /* DB/backends for r600+ - need for OQ */
 #define RADEON_INFO_NUM_TILE_PIPES     0x0b /* tile pipes for r600+ */
+#define RADEON_INFO_FUSION_GART_WORKING        0x0c /* fusion writes to GTT were broken before this */
 
 struct drm_radeon_info {
        uint32_t                request;
index b8613e806aa9c0b410b119519bc7044e44889570..01eca1794e148efb906d3aa9fe0327abb49cbd8a 100644 (file)
@@ -111,6 +111,8 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
        __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_node(pgdat, x) \
        __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_node_nopanic(pgdat, x) \
+       __alloc_bootmem_node_nopanic(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_node(pgdat, x) \
        __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_node_nopanic(pgdat, x) \
index 16ee8b49a20030cc190c36d4689e43e0c01348a8..d4675af963fae88fcb4388a3865dbd19a6fcf740 100644 (file)
@@ -546,18 +546,7 @@ extern bool has_capability_noaudit(struct task_struct *t, int cap);
 extern bool capable(int cap);
 extern bool ns_capable(struct user_namespace *ns, int cap);
 extern bool task_ns_capable(struct task_struct *t, int cap);
-
-/**
- * nsown_capable - Check superior capability to one's own user_ns
- * @cap: The capability in question
- *
- * Return true if the current task has the given superior capability
- * targeted at its own user namespace.
- */
-static inline bool nsown_capable(int cap)
-{
-       return ns_capable(current_user_ns(), cap);
-}
+extern bool nsown_capable(int cap);
 
 /* audit system wants to get cap info from files as well */
 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
index 9aeeb0ba200363909bfd52a5a59f11cee7c54f5c..be16b61283ccb9855eabe82a97e4fd2345409f90 100644 (file)
@@ -146,6 +146,7 @@ struct cred {
        void            *security;      /* subjective LSM security */
 #endif
        struct user_struct *user;       /* real user ID subscription */
+       struct user_namespace *user_ns; /* cached user->user_ns */
        struct group_info *group_info;  /* supplementary groups for euid/fsgid */
        struct rcu_head rcu;            /* RCU deletion hook */
 };
@@ -354,10 +355,15 @@ static inline void put_cred(const struct cred *_cred)
 #define current_fsgid()        (current_cred_xxx(fsgid))
 #define current_cap()          (current_cred_xxx(cap_effective))
 #define current_user()         (current_cred_xxx(user))
-#define _current_user_ns()     (current_cred_xxx(user)->user_ns)
 #define current_security()     (current_cred_xxx(security))
 
-extern struct user_namespace *current_user_ns(void);
+#ifdef CONFIG_USER_NS
+#define current_user_ns() (current_cred_xxx(user_ns))
+#else
+extern struct user_namespace init_user_ns;
+#define current_user_ns() (&init_user_ns)
+#endif
+
 
 #define current_uid_gid(_uid, _gid)            \
 do {                                           \
index ab8dfc095709674c9afca67152af5c651f1e2d03..d08399db6e2c6076f8e496a155ca667bbfa70af9 100644 (file)
@@ -442,7 +442,6 @@ struct device {
        struct dev_archdata     archdata;
 
        struct device_node      *of_node; /* associated device tree node */
-       const struct of_device_id *of_match; /* matching of_device_id from driver */
 
        dev_t                   devt;   /* dev_t, creates the sysfs "dev" */
 
index df728c1c29ed39c0b25a8ead2fcc0f89696c911f..6a8274877171aa7959cb95e5370ccc49a626d665 100644 (file)
@@ -832,6 +832,7 @@ struct fb_tile_ops {
 #define FBINFO_CAN_FORCE_OUTPUT     0x200000
 
 struct fb_info {
+       atomic_t count;
        int node;
        int flags;
        struct mutex lock;              /* Lock for open/release/ioctl funcs */
index 70e4efabe0fb6983ed9524f4ade4e5d91e2ce2d0..ebeb2f3ad068db1ec6d8cee92a1bcfb6cd097630 100644 (file)
@@ -61,7 +61,7 @@ struct flex_array {
 struct flex_array *flex_array_alloc(int element_size, unsigned int total,
                gfp_t flags);
 int flex_array_prealloc(struct flex_array *fa, unsigned int start,
-               unsigned int end, gfp_t flags);
+               unsigned int nr_elements, gfp_t flags);
 void flex_array_free(struct flex_array *fa);
 void flex_array_free_parts(struct flex_array *fa);
 int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
index dbd860af08041bbaa5e333104aaa3ab0849cb676..cdf9495df204aa917605613b220c852349528b54 100644 (file)
@@ -358,7 +358,6 @@ struct inodes_stat_t {
 #define FS_EXTENT_FL                   0x00080000 /* Extents */
 #define FS_DIRECTIO_FL                 0x00100000 /* Use direct i/o */
 #define FS_NOCOW_FL                    0x00800000 /* Do not cow file */
-#define FS_COW_FL                      0x02000000 /* Cow file */
 #define FS_RESERVED_FL                 0x80000000 /* reserved for ext2 lib */
 
 #define FS_FL_USER_VISIBLE             0x0003DFFF /* User visible flags */
index 22b32af1b5ec3d1aa81ddebbc2de1ace29c17cc6..b5a550a39a70ddd0a119479ce4a528892c6a7aa9 100644 (file)
@@ -37,6 +37,7 @@ struct trace_entry {
        unsigned char           flags;
        unsigned char           preempt_count;
        int                     pid;
+       int                     padding;
 };
 
 #define FTRACE_MAX_EVENT                                               \
index bfb8f934521e02b313cc1a497ec08850ac3493c1..56d8fc87fbbc70e8f1d5de07aa0bd7aaccd6887c 100644 (file)
@@ -353,6 +353,8 @@ extern unsigned long get_zeroed_page(gfp_t gfp_mask);
 
 void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
 void free_pages_exact(void *virt, size_t size);
+/* This is different from alloc_pages_exact_node !!! */
+void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask);
 
 #define __get_free_page(gfp_mask) \
                __get_free_pages((gfp_mask), 0)
index afe4db49402d142036761855cd9c9b6714bfec8d..632d1567a1b676f4c74dfeec798951081fed108c 100644 (file)
@@ -81,7 +81,9 @@ struct wm831x_touch_pdata {
        int rpu;               /** Pen down sensitivity resistor divider */
        int pressure;          /** Report pressure (boolean) */
        unsigned int data_irq; /** Touch data ready IRQ */
+       int data_irqf;         /** IRQ flags for data ready IRQ */
        unsigned int pd_irq;   /** Touch pendown detect IRQ */
+       int pd_irqf;           /** IRQ flags for pen down IRQ */
 };
 
 enum wm831x_watchdog_action {
index 2348db26bc3d969d58b18253b975ef8042d8d278..6507dde38b16602a1b819492088f6ce5ea8fba40 100644 (file)
@@ -1011,11 +1011,33 @@ int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
 /* Is the vma a continuation of the stack vma above it? */
-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
 {
        return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
 }
 
+static inline int stack_guard_page_start(struct vm_area_struct *vma,
+                                            unsigned long addr)
+{
+       return (vma->vm_flags & VM_GROWSDOWN) &&
+               (vma->vm_start == addr) &&
+               !vma_growsdown(vma->vm_prev, addr);
+}
+
+/* Is the vma a continuation of the stack vma below it? */
+static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
+{
+       return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
+}
+
+static inline int stack_guard_page_end(struct vm_area_struct *vma,
+                                          unsigned long addr)
+{
+       return (vma->vm_flags & VM_GROWSUP) &&
+               (vma->vm_end == addr) &&
+               !vma_growsup(vma->vm_next, addr);
+}
+
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len);
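
The mm.h hunk above generalizes the old vma_stack_continue() check into paired guard-page predicates for downward- and upward-growing stacks. Below is a minimal standalone sketch of the VM_GROWSDOWN side only; the struct layout and flag value are simplified stand-ins, not the kernel definitions.

/*
 * Standalone sketch (not kernel code): mirrors the VM_GROWSDOWN guard-page
 * predicate added above, using simplified stand-in types.  The flag value
 * and struct layout are illustrative assumptions, not kernel definitions.
 */
#include <stdio.h>

#define VM_GROWSDOWN 0x1UL

struct vma {
	unsigned long vm_start, vm_end, vm_flags;
	struct vma *vm_prev, *vm_next;
};

/* is the vma a continuation of the stack vma above it? */
static int vma_growsdown(struct vma *vma, unsigned long addr)
{
	return vma && vma->vm_end == addr && (vma->vm_flags & VM_GROWSDOWN);
}

static int stack_guard_page_start(struct vma *vma, unsigned long addr)
{
	return (vma->vm_flags & VM_GROWSDOWN) &&
	       vma->vm_start == addr &&
	       !vma_growsdown(vma->vm_prev, addr);
}

int main(void)
{
	struct vma stack = {
		.vm_start = 0x7f0000000000UL,
		.vm_end   = 0x7f0000100000UL,
		.vm_flags = VM_GROWSDOWN,
	};

	/* first page of an isolated, downward-growing stack is the guard page */
	printf("guard at start? %d\n",
	       stack_guard_page_start(&stack, stack.vm_start));
	return 0;
}
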
index 890dce2426392d632ae439e15764169276ab3611..7e371f7df9c463270a15b23a1b3fce52b26fc02d 100644 (file)
@@ -233,6 +233,7 @@ struct nfs4_layoutget {
        struct nfs4_layoutget_args args;
        struct nfs4_layoutget_res res;
        struct pnfs_layout_segment **lsegpp;
+       gfp_t gfp_flags;
 };
 
 struct nfs4_getdeviceinfo_args {
index 8bfe6c1d43654069b8fa2e1bbc61ab4d34dd4d22..ae5638480ef22141f0699854d4b59fbba57aeffe 100644 (file)
@@ -21,8 +21,7 @@ extern void of_device_make_bus_id(struct device *dev);
 static inline int of_driver_match_device(struct device *dev,
                                         const struct device_driver *drv)
 {
-       dev->of_match = of_match_device(drv->of_match_table, dev);
-       return dev->of_match != NULL;
+       return of_match_device(drv->of_match_table, dev) != NULL;
 }
 
 extern struct platform_device *of_dev_get(struct platform_device *dev);
@@ -58,6 +57,11 @@ static inline int of_device_uevent(struct device *dev,
 
 static inline void of_device_node_put(struct device *dev) { }
 
+static inline const struct of_device_id *of_match_device(
+               const struct of_device_id *matches, const struct device *dev)
+{
+       return NULL;
+}
 #endif /* CONFIG_OF_DEVICE */
 
 #endif /* _LINUX_OF_DEVICE_H */
index 4e2c9150a78525859455ffb1e06551d4b3781b33..8abe8d78c4bfad95d409b8ef74e19d9e6fad11d0 100644 (file)
 #define PCI_DEVICE_ID_INTEL_82840_HB   0x1a21
 #define PCI_DEVICE_ID_INTEL_82845_HB   0x1a30
 #define PCI_DEVICE_ID_INTEL_IOAT       0x1a38
-#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS  0x1c22
 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN        0x1c41
 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX        0x1c5f
-#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS     0x1d22
 #define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0     0x1d40
 #define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1     0x1d41
 #define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN   0x2310
 #define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX   0x231f
-#define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS     0x2330
 #define PCI_DEVICE_ID_INTEL_82801AA_0  0x2410
 #define PCI_DEVICE_ID_INTEL_82801AA_1  0x2411
 #define PCI_DEVICE_ID_INTEL_82801AA_3  0x2413
 #define PCI_DEVICE_ID_INTEL_ICH10_5    0x3a60
 #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN      0x3b00
 #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX      0x3b1f
-#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS        0x3b30
 #define PCI_DEVICE_ID_INTEL_IOAT_SNB   0x402f
 #define PCI_DEVICE_ID_INTEL_5100_16    0x65f0
 #define PCI_DEVICE_ID_INTEL_5100_21    0x65f5
index 3a5c4449fd36050f667ee13553924bdf44d9a3f8..8b97308e65df3ccf094d79af23266e2da5fc73af 100644 (file)
@@ -948,7 +948,7 @@ do {                                                                        \
        irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
 # endif
 # define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)    \
-       __pcpu_double_call_return_int(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+       __pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
 #endif
 
 #endif /* __LINUX_PERCPU_H */
index 838c1149251a1e8edff75dd2db6ef58092542386..eaf4350c0f905bfb5e2dfa6380118fe805b1b785 100644 (file)
@@ -208,6 +208,8 @@ static inline struct proc_dir_entry *proc_symlink(const char *name,
                struct proc_dir_entry *parent,const char *dest) {return NULL;}
 static inline struct proc_dir_entry *proc_mkdir(const char *name,
        struct proc_dir_entry *parent) {return NULL;}
+static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
+       mode_t mode, struct proc_dir_entry *parent) { return NULL; }
 
 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
        mode_t mode, struct proc_dir_entry *base, 
index a1147e5dd245e8d7d262598b3c920ceb796efd00..9178d5cc0b0143a0e81336e6dea09e1cbedab86e 100644 (file)
@@ -189,6 +189,10 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
                child->ptrace = current->ptrace;
                __ptrace_link(child, current->parent);
        }
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+       atomic_set(&child->ptrace_bp_refcnt, 1);
+#endif
 }
 
 /**
@@ -350,6 +354,13 @@ extern int task_current_syscall(struct task_struct *target, long *callno,
                                unsigned long args[6], unsigned int maxargs,
                                unsigned long *sp, unsigned long *pc);
 
-#endif
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+extern int ptrace_get_breakpoints(struct task_struct *tsk);
+extern void ptrace_put_breakpoints(struct task_struct *tsk);
+#else
+static inline void ptrace_put_breakpoints(struct task_struct *tsk) { }
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
+#endif /* __KERNEL */
 
 #endif
index 18d63cea28481c478906f791e384b520ad1f483b..781abd13767302cce2ba08db01a211e3178de0d9 100644 (file)
@@ -1537,6 +1537,9 @@ struct task_struct {
                unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
        } memcg_batch;
 #endif
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+       atomic_t ptrace_bp_refcnt;
+#endif
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
index 0e1855079fbbbea3eab3f1f224a7fd2430cd5874..605b0aa8d852f63d4d0c7a3cf18add1f518e2acd 100644 (file)
@@ -68,6 +68,7 @@ struct usbnet {
 #              define EVENT_RX_PAUSED  5
 #              define EVENT_DEV_WAKING 6
 #              define EVENT_DEV_ASLEEP 7
+#              define EVENT_DEV_OPEN   8
 };
 
 static inline struct usb_driver *driver_of(struct usb_interface *intf)
index 88bdd010d65d3791eb634cae72709e01fc2c2090..2fa8d1341a0afa291e1fd5aa74bf0f7c4c91806f 100644 (file)
@@ -38,9 +38,19 @@ static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner)
        return outer;
 }
 
-#define        INET_ECN_xmit(sk) do { inet_sk(sk)->tos |= INET_ECN_ECT_0; } while (0)
-#define        INET_ECN_dontxmit(sk) \
-       do { inet_sk(sk)->tos &= ~INET_ECN_MASK; } while (0)
+static inline void INET_ECN_xmit(struct sock *sk)
+{
+       inet_sk(sk)->tos |= INET_ECN_ECT_0;
+       if (inet6_sk(sk) != NULL)
+               inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
+}
+
+static inline void INET_ECN_dontxmit(struct sock *sk)
+{
+       inet_sk(sk)->tos &= ~INET_ECN_MASK;
+       if (inet6_sk(sk) != NULL)
+               inet6_sk(sk)->tclass &= ~INET_ECN_MASK;
+}
 
 #define IP6_ECN_flow_init(label) do {          \
       (label) &= ~htonl(INET_ECN_MASK << 20);  \
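
The inet_ecn.h hunk above turns INET_ECN_xmit()/INET_ECN_dontxmit() into inline functions so the IPv6 tclass receives the same ECN treatment as the IPv4 tos. A standalone sketch of just the bit arithmetic follows, applied to a plain byte and assuming the usual RFC 3168 codepoints (ECT(0) = 2, ECN mask = 3).

/*
 * Standalone sketch (not kernel code): the ECN bit manipulation performed
 * by the new INET_ECN_xmit()/INET_ECN_dontxmit() inlines, on a plain
 * tos/tclass byte.
 */
#include <stdio.h>

#define INET_ECN_ECT_0 2	/* ECN-Capable Transport, codepoint 0 */
#define INET_ECN_MASK  3	/* low two bits of tos/tclass */

static unsigned char ecn_xmit(unsigned char tos)
{
	return tos | INET_ECN_ECT_0;
}

static unsigned char ecn_dontxmit(unsigned char tos)
{
	return tos & ~INET_ECN_MASK;
}

int main(void)
{
	unsigned char tclass = 0x28;	/* some DSCP value, ECN bits clear */

	tclass = ecn_xmit(tclass);	/* mark ECT(0) for transmit */
	printf("with ECT(0): 0x%02x\n", tclass);
	tclass = ecn_dontxmit(tclass);	/* strip ECN again */
	printf("ECN cleared: 0x%02x\n", tclass);
	return 0;
}
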
index d516f00c8e0fc39c9aa0dc8a75aa2728df635388..86aefed6140b347b1f51e80096df1f209f48afce 100644 (file)
@@ -791,6 +791,7 @@ struct ip_vs_app {
 /* IPVS in network namespace */
 struct netns_ipvs {
        int                     gen;            /* Generation */
+       int                     enable;         /* enable like nf_hooks do */
        /*
         *      Hash table: for real service lookups
         */
@@ -1089,6 +1090,22 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
        atomic_inc(&ctl_cp->n_control);
 }
 
+/*
+ * IPVS netns init & cleanup functions
+ */
+extern int __ip_vs_estimator_init(struct net *net);
+extern int __ip_vs_control_init(struct net *net);
+extern int __ip_vs_protocol_init(struct net *net);
+extern int __ip_vs_app_init(struct net *net);
+extern int __ip_vs_conn_init(struct net *net);
+extern int __ip_vs_sync_init(struct net *net);
+extern void __ip_vs_conn_cleanup(struct net *net);
+extern void __ip_vs_app_cleanup(struct net *net);
+extern void __ip_vs_protocol_cleanup(struct net *net);
+extern void __ip_vs_control_cleanup(struct net *net);
+extern void __ip_vs_estimator_cleanup(struct net *net);
+extern void __ip_vs_sync_cleanup(struct net *net);
+extern void __ip_vs_service_cleanup(struct net *net);
 
 /*
  *      IPVS application functions
index 75b8e2968c9b1ef5e5cafadae67ab07934964043..f57e7d46a453db3fc52bc53cbb9dfc6cbdf26119 100644 (file)
@@ -199,7 +199,7 @@ struct llc_pdu_sn {
        u8 ssap;
        u8 ctrl_1;
        u8 ctrl_2;
-};
+} __packed;
 
 static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb)
 {
@@ -211,7 +211,7 @@ struct llc_pdu_un {
        u8 dsap;
        u8 ssap;
        u8 ctrl_1;
-};
+} __packed;
 
 static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
 {
@@ -359,7 +359,7 @@ struct llc_xid_info {
        u8 fmt_id;      /* always 0x81 for LLC */
        u8 type;        /* different if NULL/non-NULL LSAP */
        u8 rw;          /* sender receive window */
-};
+} __packed;
 
 /**
  *     llc_pdu_init_as_xid_cmd - sets bytes 3, 4 & 5 of LLC header as XID
@@ -415,7 +415,7 @@ struct llc_frmr_info {
        u8  curr_ssv;           /* current send state variable val */
        u8  curr_rsv;           /* current receive state variable */
        u8  ind_bits;           /* indicator bits set with macro */
-};
+} __packed;
 
 extern void llc_pdu_set_cmd_rsp(struct sk_buff *skb, u8 type);
 extern void llc_pdu_set_pf_bit(struct sk_buff *skb, u8 bit_value);
index 6ae4bc5ce8a712796774e32637f875f4e98b173e..20afeaa39395ecbbed9dacf934c1df72184f68bb 100644 (file)
@@ -324,6 +324,7 @@ struct xfrm_state_afinfo {
        int                     (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
        int                     (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
        int                     (*output)(struct sk_buff *skb);
+       int                     (*output_finish)(struct sk_buff *skb);
        int                     (*extract_input)(struct xfrm_state *x,
                                                 struct sk_buff *skb);
        int                     (*extract_output)(struct xfrm_state *x,
@@ -1454,6 +1455,7 @@ static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
 extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
 extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
 extern int xfrm4_output(struct sk_buff *skb);
+extern int xfrm4_output_finish(struct sk_buff *skb);
 extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
 extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
 extern int xfrm6_extract_header(struct sk_buff *skb);
@@ -1470,6 +1472,7 @@ extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr);
 extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
 extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
 extern int xfrm6_output(struct sk_buff *skb);
+extern int xfrm6_output_finish(struct sk_buff *skb);
 extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
                                 u8 **prevhdr);
 
index 2d3ec509468557521a65ac7fb016eff3728dcd59..dd82e02ddde3d353d657b6fd8e7e08feff7fa0ad 100644 (file)
@@ -169,6 +169,7 @@ struct scsi_device {
                                sdev_dev;
 
        struct execute_work     ew; /* used to get process context on put */
+       struct work_struct      requeue_work;
 
        struct scsi_dh_data     *scsi_dh_data;
        enum scsi_device_state sdev_state;
index e3615c09374170669afa041d8eb9027a55037b8e..9fe3a36646e9ce47886bd7049684ce68d05808e4 100644 (file)
@@ -10,6 +10,7 @@
  */
 #define show_gfp_flags(flags)                                          \
        (flags) ? __print_flags(flags, "|",                             \
+       {(unsigned long)GFP_TRANSHUGE,          "GFP_TRANSHUGE"},       \
        {(unsigned long)GFP_HIGHUSER_MOVABLE,   "GFP_HIGHUSER_MOVABLE"}, \
        {(unsigned long)GFP_HIGHUSER,           "GFP_HIGHUSER"},        \
        {(unsigned long)GFP_USER,               "GFP_USER"},            \
@@ -32,6 +33,9 @@
        {(unsigned long)__GFP_HARDWALL,         "GFP_HARDWALL"},        \
        {(unsigned long)__GFP_THISNODE,         "GFP_THISNODE"},        \
        {(unsigned long)__GFP_RECLAIMABLE,      "GFP_RECLAIMABLE"},     \
-       {(unsigned long)__GFP_MOVABLE,          "GFP_MOVABLE"}          \
+       {(unsigned long)__GFP_MOVABLE,          "GFP_MOVABLE"},         \
+       {(unsigned long)__GFP_NOTRACK,          "GFP_NOTRACK"},         \
+       {(unsigned long)__GFP_NO_KSWAPD,        "GFP_NO_KSWAPD"},       \
+       {(unsigned long)__GFP_OTHER_NODE,       "GFP_OTHER_NODE"}       \
        ) : "GFP_NOWAIT"
 
index d886b1e9278e32e2fcffd5fb4005ee16a7061a33..7a71e0a9992a13cd5e05c52c1d1a4ca4fa4ea6c4 100644 (file)
@@ -1226,7 +1226,6 @@ config SLAB
          per cpu and per node queues.
 
 config SLUB
-       depends on BROKEN || NUMA || !DISCONTIGMEM
        bool "SLUB (Unqueued Allocator)"
        help
           SLUB is a slab allocator that minimizes cache line usage
index bf0c734d0c123f6b5733629f2f1bff4614197a3f..32a80e08ff4b8ebf725ebc8d3a526c2dee3f00f4 100644 (file)
@@ -399,3 +399,15 @@ bool task_ns_capable(struct task_struct *t, int cap)
        return ns_capable(task_cred_xxx(t, user)->user_ns, cap);
 }
 EXPORT_SYMBOL(task_ns_capable);
+
+/**
+ * nsown_capable - Check superior capability to one's own user_ns
+ * @cap: The capability in question
+ *
+ * Return true if the current task has the given superior capability
+ * targeted at its own user namespace.
+ */
+bool nsown_capable(int cap)
+{
+       return ns_capable(current_user_ns(), cap);
+}
index 5557b55048df1a35fff09933a274c9a5e32b7084..8093c16b84b13d2e9e0714af279b29b0549a0652 100644 (file)
@@ -54,6 +54,7 @@ struct cred init_cred = {
        .cap_effective          = CAP_INIT_EFF_SET,
        .cap_bset               = CAP_INIT_BSET,
        .user                   = INIT_USER,
+       .user_ns                = &init_user_ns,
        .group_info             = &init_groups,
 #ifdef CONFIG_KEYS
        .tgcred                 = &init_tgcred,
@@ -410,6 +411,11 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
                        goto error_put;
        }
 
+       /* cache user_ns in cred.  Doesn't need a refcount because it will
+        * stay pinned by cred->user
+        */
+       new->user_ns = new->user->user_ns;
+
 #ifdef CONFIG_KEYS
        /* new threads get their own thread keyrings if their parent already
         * had one */
@@ -741,12 +747,6 @@ int set_create_files_as(struct cred *new, struct inode *inode)
 }
 EXPORT_SYMBOL(set_create_files_as);
 
-struct user_namespace *current_user_ns(void)
-{
-       return _current_user_ns();
-}
-EXPORT_SYMBOL(current_user_ns);
-
 #ifdef CONFIG_DEBUG_CREDENTIALS
 
 bool creds_are_invalid(const struct cred *cred)
index f5d2f63bae0b70b2818edd92e06040906f115216..8dd87418154205940341f98641191fe1c1d6ea70 100644 (file)
@@ -1016,7 +1016,7 @@ NORET_TYPE void do_exit(long code)
        /*
         * FIXME: do that only when needed, using sched_exit tracepoint
         */
-       flush_ptrace_hw_breakpoint(tsk);
+       ptrace_put_breakpoints(tsk);
 
        exit_notify(tsk, group_dead);
 #ifdef CONFIG_NUMA
index dd201bd35103e221fc14debc1d09419529bf6f3d..834899f2500fc550d3da66306d31386eb80baf91 100644 (file)
@@ -419,7 +419,7 @@ int show_interrupts(struct seq_file *p, void *v)
        } else {
                seq_printf(p, " %8s", "None");
        }
-#ifdef CONFIG_GENIRC_IRQ_SHOW_LEVEL
+#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
        seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
 #endif
        if (desc->name)
index 8935369d503a309a036e75f975a903982fa6af80..6275970b2189b3a970fd46b2794a23c87fde6240 100644 (file)
@@ -216,7 +216,6 @@ int suspend_devices_and_enter(suspend_state_t state)
                        goto Close;
        }
        suspend_console();
-       pm_restrict_gfp_mask();
        suspend_test_start();
        error = dpm_suspend_start(PMSG_SUSPEND);
        if (error) {
@@ -233,7 +232,6 @@ int suspend_devices_and_enter(suspend_state_t state)
        suspend_test_start();
        dpm_resume_end(PMSG_RESUME);
        suspend_test_finish("resume devices");
-       pm_restore_gfp_mask();
        resume_console();
  Close:
        if (suspend_ops->end)
@@ -294,7 +292,9 @@ int enter_state(suspend_state_t state)
                goto Finish;
 
        pr_debug("PM: Entering %s sleep\n", pm_states[state]);
+       pm_restrict_gfp_mask();
        error = suspend_devices_and_enter(state);
+       pm_restore_gfp_mask();
 
  Finish:
        pr_debug("PM: Finishing wakeup.\n");
index c36c3b9e8a84f59ad5894a1e2d04c7252b656ebe..7d02d33be699f97d4956c76ea71b939bb0832d06 100644 (file)
@@ -135,8 +135,10 @@ static int snapshot_release(struct inode *inode, struct file *filp)
        free_basic_memory_bitmaps();
        data = filp->private_data;
        free_all_swap_pages(data->swap);
-       if (data->frozen)
+       if (data->frozen) {
+               pm_restore_gfp_mask();
                thaw_processes();
+       }
        pm_notifier_call_chain(data->mode == O_RDONLY ?
                        PM_POST_HIBERNATION : PM_POST_RESTORE);
        atomic_inc(&snapshot_device_available);
@@ -379,6 +381,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                 * PM_HIBERNATION_PREPARE
                 */
                error = suspend_devices_and_enter(PM_SUSPEND_MEM);
+               data->ready = 0;
                break;
 
        case SNAPSHOT_PLATFORM_SUPPORT:
index 0fc1eed28d2783e0b2779fe50d1ff86fbf4ca858..dc7ab65f3b36cb0b71c8365e13d0e73a9601f6ee 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
 #include <linux/regset.h>
+#include <linux/hw_breakpoint.h>
 
 
 /*
@@ -879,3 +880,19 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
        return ret;
 }
 #endif /* CONFIG_COMPAT */
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+int ptrace_get_breakpoints(struct task_struct *tsk)
+{
+       if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt))
+               return 0;
+
+       return -1;
+}
+
+void ptrace_put_breakpoints(struct task_struct *tsk)
+{
+       if (atomic_dec_and_test(&tsk->ptrace_bp_refcnt))
+               flush_ptrace_hw_breakpoint(tsk);
+}
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
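
The kernel/ptrace.c hunk above pins a task's breakpoint state with a get/put reference count: ptrace_get_breakpoints() only succeeds while at least one reference is still live, and the final ptrace_put_breakpoints() flushes the hardware breakpoints. A standalone sketch of that pattern follows, with C11 atomics and a printf standing in for the kernel primitives.

/*
 * Standalone sketch (not kernel code): the get/put refcount pattern used
 * above, with C11 atomics standing in for the kernel's atomic_t and a
 * printf standing in for flush_ptrace_hw_breakpoint().
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int bp_refcnt = 1;	/* set to 1 in ptrace_init_task() */

static int get_breakpoints(void)
{
	int old = atomic_load(&bp_refcnt);

	/* mimic atomic_inc_not_zero(): only take a ref if one still exists */
	while (old != 0) {
		if (atomic_compare_exchange_weak(&bp_refcnt, &old, old + 1))
			return 0;
	}
	return -1;
}

static void put_breakpoints(void)
{
	/* mimic atomic_dec_and_test(): the last reference releases the state */
	if (atomic_fetch_sub(&bp_refcnt, 1) == 1)
		printf("flush_ptrace_hw_breakpoint()\n");
}

int main(void)
{
	if (get_breakpoints() == 0) {	/* e.g. around a breakpoint request */
		/* ... touch hardware breakpoint state here ... */
		put_breakpoints();
	}
	put_breakpoints();		/* the do_exit() reference */
	return 0;
}
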
index 6519cf62d9cd99706bb59ce0e5776733396b22ab..0e17c10f8a9daae8fdd8c665d69788670549b1c0 100644 (file)
@@ -685,8 +685,8 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
        /* Add clocksource to the clcoksource list */
        mutex_lock(&clocksource_mutex);
        clocksource_enqueue(cs);
-       clocksource_select();
        clocksource_enqueue_watchdog(cs);
+       clocksource_select();
        mutex_unlock(&clocksource_mutex);
        return 0;
 }
@@ -706,8 +706,8 @@ int clocksource_register(struct clocksource *cs)
 
        mutex_lock(&clocksource_mutex);
        clocksource_enqueue(cs);
-       clocksource_select();
        clocksource_enqueue_watchdog(cs);
+       clocksource_select();
        mutex_unlock(&clocksource_mutex);
        return 0;
 }
index da800ffa810c2f248ebcb6e8d1aa2395343bbd0a..723c7637e55a2376766776b869d8217fb367b044 100644 (file)
@@ -522,10 +522,11 @@ static void tick_broadcast_init_next_event(struct cpumask *mask,
  */
 void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
+       int cpu = smp_processor_id();
+
        /* Set it up only once ! */
        if (bc->event_handler != tick_handle_oneshot_broadcast) {
                int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
-               int cpu = smp_processor_id();
 
                bc->event_handler = tick_handle_oneshot_broadcast;
                clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
@@ -551,6 +552,15 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
                        tick_broadcast_set_event(tick_next_period, 1);
                } else
                        bc->next_event.tv64 = KTIME_MAX;
+       } else {
+               /*
+                * The first cpu which switches to oneshot mode sets
+                * the bit for all other cpus which are in the general
+                * (periodic) broadcast mask. So the bit is set and
+                * would prevent the first broadcast enter after this
+                * to program the bc device.
+                */
+               tick_broadcast_clear_oneshot(cpu);
        }
 }
 
index d38c16a06a6fb25d4f31559af0c1544f6cae987f..1cb49be7c7fb5b30fdab269d044b130899b93340 100644 (file)
@@ -1110,6 +1110,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
 
        entry->preempt_count            = pc & 0xff;
        entry->pid                      = (tsk) ? tsk->pid : 0;
+       entry->padding                  = 0;
        entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
                (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
index e88f74fe1d4ce02fedf3e6052feaf2cbdd2017da..2fe110341359539b0498d49472bbdee19e344dc2 100644 (file)
@@ -116,6 +116,7 @@ static int trace_define_common_fields(void)
        __common_field(unsigned char, flags);
        __common_field(unsigned char, preempt_count);
        __common_field(int, pid);
+       __common_field(int, padding);
 
        return ret;
 }
index c0ea40ba20828b67c10e29d790ca13846572b8a0..854b57bd7d9d346276fe83599f5cbd306c6e0fe4 100644 (file)
@@ -232,10 +232,10 @@ EXPORT_SYMBOL(flex_array_clear);
 
 /**
  * flex_array_prealloc - guarantee that array space exists
- * @fa:                the flex array for which to preallocate parts
- * @start:     index of first array element for which space is allocated
- * @end:       index of last (inclusive) element for which space is allocated
- * @flags:     page allocation flags
+ * @fa:                        the flex array for which to preallocate parts
+ * @start:             index of first array element for which space is allocated
+ * @nr_elements:       number of elements for which space is allocated
+ * @flags:             page allocation flags
  *
  * This will guarantee that no future calls to flex_array_put()
  * will allocate memory.  It can be used if you are expecting to
@@ -245,14 +245,24 @@ EXPORT_SYMBOL(flex_array_clear);
  * Locking must be provided by the caller.
  */
 int flex_array_prealloc(struct flex_array *fa, unsigned int start,
-                       unsigned int end, gfp_t flags)
+                       unsigned int nr_elements, gfp_t flags)
 {
        int start_part;
        int end_part;
        int part_nr;
+       unsigned int end;
        struct flex_array_part *part;
 
-       if (start >= fa->total_nr_elements || end >= fa->total_nr_elements)
+       if (!start && !nr_elements)
+               return 0;
+       if (start >= fa->total_nr_elements)
+               return -ENOSPC;
+       if (!nr_elements)
+               return 0;
+
+       end = start + nr_elements - 1;
+
+       if (end >= fa->total_nr_elements)
                return -ENOSPC;
        if (elements_fit_in_base(fa))
                return 0;
@@ -343,6 +353,8 @@ int flex_array_shrink(struct flex_array *fa)
        int part_nr;
        int ret = 0;
 
+       if (!fa->total_nr_elements)
+               return 0;
        if (elements_fit_in_base(fa))
                return ret;
        for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) {
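
The flex_array_prealloc() rework above changes the third argument from an inclusive end index to an element count, so existing callers must pass one more than before (a range covering elements 0..9 becomes nr_elements = 10). A standalone sketch of just the new bounds logic, with -1 standing in for -ENOSPC:

/*
 * Standalone sketch (not kernel code): the bounds logic of the reworked
 * flex_array_prealloc(), which now takes a count instead of an inclusive
 * end index.
 */
#include <stdio.h>

static int prealloc_check(unsigned int total, unsigned int start,
			  unsigned int nr_elements)
{
	unsigned int end;

	if (!start && !nr_elements)
		return 0;
	if (start >= total)
		return -1;		/* -ENOSPC in the kernel */
	if (!nr_elements)
		return 0;

	end = start + nr_elements - 1;	/* last element actually touched */
	if (end >= total)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", prealloc_check(10, 0, 10));	/* 0: fits exactly   */
	printf("%d\n", prealloc_check(10, 5, 6));	/* -1: one too many  */
	printf("%d\n", prealloc_check(10, 0, 0));	/* 0: nothing to do  */
	return 0;
}
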
index bc0ac6b333dc95464da43485d40a5966ef60eeb6..dfd60192bc2ede37835fa8ed894bc024b0f1908a 100644 (file)
@@ -797,7 +797,7 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
        return string(buf, end, uuid, spec);
 }
 
-int kptr_restrict = 1;
+int kptr_restrict __read_mostly;
 
 /*
  * Show a '%p' thing.  A kernel extension is that the '%p' is followed
index ea5fa4fe9d67879aa47be92ad9220a4d8d1beb31..a6cdc969ea42a7b5df5e74e8cb4d712f6f7bbd3b 100644 (file)
@@ -969,6 +969,9 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
                         */
                        tmp = b->in[b->in_pos++];
 
+                       if (tmp == 0x00)
+                               return XZ_STREAM_END;
+
                        if (tmp >= 0xE0 || tmp == 0x01) {
                                s->lzma2.need_props = true;
                                s->lzma2.need_dict_reset = false;
@@ -1001,9 +1004,6 @@ XZ_EXTERN enum xz_ret xz_dec_lzma2_run(struct xz_dec_lzma2 *s,
                                                lzma_reset(s);
                                }
                        } else {
-                               if (tmp == 0x00)
-                                       return XZ_STREAM_END;
-
                                if (tmp > 0x02)
                                        return XZ_DATA_ERROR;
 
index 607098d47e745b37d375c0aa49d63e5507a9f4b0..61e66f026563b4b473fc73922d58a984c490c827 100644 (file)
@@ -1359,7 +1359,7 @@ split_fallthrough:
                 */
                mark_page_accessed(page);
        }
-       if (flags & FOLL_MLOCK) {
+       if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
                /*
                 * The preliminary mapping check is mainly to avoid the
                 * pointless overhead of lock_page on the ZERO_PAGE
@@ -1412,9 +1412,8 @@ no_page_table:
 
 static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
 {
-       return (vma->vm_flags & VM_GROWSDOWN) &&
-               (vma->vm_start == addr) &&
-               !vma_stack_continue(vma->vm_prev, addr);
+       return stack_guard_page_start(vma, addr) ||
+              stack_guard_page_end(vma, addr+PAGE_SIZE);
 }
 
 /**
@@ -1551,13 +1550,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        continue;
                }
 
-               /*
-                * If we don't actually want the page itself,
-                * and it's the stack guard page, just skip it.
-                */
-               if (!pages && stack_guard_page(vma, start))
-                       goto next_page;
-
                do {
                        struct page *page;
                        unsigned int foll_flags = gup_flags;
@@ -1574,6 +1566,11 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                int ret;
                                unsigned int fault_flags = 0;
 
+                               /* For mlock, just skip the stack guard page. */
+                               if (foll_flags & FOLL_MLOCK) {
+                                       if (stack_guard_page(vma, start))
+                                               goto next_page;
+                               }
                                if (foll_flags & FOLL_WRITE)
                                        fault_flags |= FAULT_FLAG_WRITE;
                                if (nonblocking)
index 6b55e3efe0df4ebd9aafd71e9317bc664956006f..516b2c2ddd5a55b244a89af33eeb3b6f3b88d410 100644 (file)
@@ -162,7 +162,7 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
        VM_BUG_ON(end   > vma->vm_end);
        VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-       gup_flags = FOLL_TOUCH;
+       gup_flags = FOLL_TOUCH | FOLL_MLOCK;
        /*
         * We want to touch writable mappings with a write fault in order
         * to break COW, except for shared mappings because these don't COW
@@ -178,9 +178,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
        if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
                gup_flags |= FOLL_FORCE;
 
-       if (vma->vm_flags & VM_LOCKED)
-               gup_flags |= FOLL_MLOCK;
-
        return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
                                NULL, NULL, nonblocking);
 }
index e27e0cf0de03ccb81b252953932fc356c3499c10..772140c53ab185ebc76d1f185d6feb9fb8935c15 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1767,10 +1767,13 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                size = address - vma->vm_start;
                grow = (address - vma->vm_end) >> PAGE_SHIFT;
 
-               error = acct_stack_growth(vma, size, grow);
-               if (!error) {
-                       vma->vm_end = address;
-                       perf_event_mmap(vma);
+               error = -ENOMEM;
+               if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
+                       error = acct_stack_growth(vma, size, grow);
+                       if (!error) {
+                               vma->vm_end = address;
+                               perf_event_mmap(vma);
+                       }
                }
        }
        vma_unlock_anon_vma(vma);
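
The expand_upwards() change above adds an unsigned-overflow guard before growing the stack: for unsigned values, a + b wraps exactly when the sum comes out smaller than a. A standalone sketch of that check:

/*
 * Standalone sketch (not kernel code): the unsigned-overflow guard used
 * above -- (a + b) >= a holds for unsigned a, b iff the addition did not wrap.
 */
#include <limits.h>
#include <stdio.h>

static int grows_without_wrap(unsigned long pgoff, unsigned long nr_pages)
{
	return pgoff + nr_pages >= pgoff;
}

int main(void)
{
	printf("%d\n", grows_without_wrap(100, 25));            /* 1: fine  */
	printf("%d\n", grows_without_wrap(ULONG_MAX - 10, 25)); /* 0: wraps */
	return 0;
}
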
index 9f8a97b9a350d17ec070d5e741c04f8d9998e7a8..3f8bce264df66f712e9a44092f871d9f90eafe22 100644 (file)
@@ -2317,6 +2317,21 @@ void free_pages(unsigned long addr, unsigned int order)
 
 EXPORT_SYMBOL(free_pages);
 
+static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
+{
+       if (addr) {
+               unsigned long alloc_end = addr + (PAGE_SIZE << order);
+               unsigned long used = addr + PAGE_ALIGN(size);
+
+               split_page(virt_to_page((void *)addr), order);
+               while (used < alloc_end) {
+                       free_page(used);
+                       used += PAGE_SIZE;
+               }
+       }
+       return (void *)addr;
+}
+
 /**
  * alloc_pages_exact - allocate an exact number physically-contiguous pages.
  * @size: the number of bytes to allocate
@@ -2336,21 +2351,32 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
        unsigned long addr;
 
        addr = __get_free_pages(gfp_mask, order);
-       if (addr) {
-               unsigned long alloc_end = addr + (PAGE_SIZE << order);
-               unsigned long used = addr + PAGE_ALIGN(size);
-
-               split_page(virt_to_page((void *)addr), order);
-               while (used < alloc_end) {
-                       free_page(used);
-                       used += PAGE_SIZE;
-               }
-       }
-
-       return (void *)addr;
+       return make_alloc_exact(addr, order, size);
 }
 EXPORT_SYMBOL(alloc_pages_exact);
 
+/**
+ * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
+ *                        pages on a node.
+ * @nid: the preferred node ID where memory should be allocated
+ * @size: the number of bytes to allocate
+ * @gfp_mask: GFP flags for the allocation
+ *
+ * Like alloc_pages_exact(), but try to allocate on node nid first before falling
+ * back.
+ * Note this is not alloc_pages_exact_node() which allocates on a specific node,
+ * but is not exact.
+ */
+void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
+{
+       unsigned order = get_order(size);
+       struct page *p = alloc_pages_node(nid, gfp_mask, order);
+       if (!p)
+               return NULL;
+       return make_alloc_exact((unsigned long)page_address(p), order, size);
+}
+EXPORT_SYMBOL(alloc_pages_exact_nid);
+
 /**
  * free_pages_exact - release memory allocated via alloc_pages_exact()
  * @virt: the value returned by alloc_pages_exact.
@@ -3564,7 +3590,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 
        if (!slab_is_available()) {
                zone->wait_table = (wait_queue_head_t *)
-                       alloc_bootmem_node(pgdat, alloc_size);
+                       alloc_bootmem_node_nopanic(pgdat, alloc_size);
        } else {
                /*
                 * This case means that a zone whose size was 0 gets new memory
@@ -4141,7 +4167,8 @@ static void __init setup_usemap(struct pglist_data *pgdat,
        unsigned long usemapsize = usemap_size(zonesize);
        zone->pageblock_flags = NULL;
        if (usemapsize)
-               zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
+               zone->pageblock_flags = alloc_bootmem_node_nopanic(pgdat,
+                                                                  usemapsize);
 }
 #else
 static inline void setup_usemap(struct pglist_data *pgdat,
@@ -4307,7 +4334,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
                size =  (end - start) * sizeof(struct page);
                map = alloc_remap(pgdat->node_id, size);
                if (!map)
-                       map = alloc_bootmem_node(pgdat, size);
+                       map = alloc_bootmem_node_nopanic(pgdat, size);
                pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
        }
 #ifndef CONFIG_NEED_MULTIPLE_NODES
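
alloc_pages_exact_nid(), added above, prefers node nid but still returns an exact-size allocation, unlike alloc_pages_exact_node() which is exact about the node but rounds the size up to a power-of-two order; make_alloc_exact() then splits the rounded allocation and frees the tail pages. A standalone sketch of that size arithmetic, assuming 4 KiB pages:

/*
 * Standalone sketch (not kernel code): the size arithmetic behind
 * make_alloc_exact() -- allocate a power-of-two block, keep the pages that
 * cover PAGE_ALIGN(size), and free the tail page by page.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 5 * PAGE_SIZE + 123;	/* needs 6 pages      */
	unsigned int order = get_order(size);		/* rounds up to 8     */
	unsigned long alloc = PAGE_SIZE << order;
	unsigned long used = PAGE_ALIGN(size);
	unsigned long freed = (alloc - used) >> PAGE_SHIFT;

	printf("order %u: %lu pages allocated, %lu kept, %lu freed back\n",
	       order, alloc >> PAGE_SHIFT, used >> PAGE_SHIFT, freed);
	return 0;
}
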
index 99055010cecef74586af9490be326fbd8bd6b1e5..2daadc322ba61678cfc9120e3b65c551b5b96d2d 100644 (file)
@@ -134,7 +134,7 @@ static void *__init_refok alloc_page_cgroup(size_t size, int nid)
 {
        void *addr = NULL;
 
-       addr = alloc_pages_exact(size, GFP_KERNEL | __GFP_NOWARN);
+       addr = alloc_pages_exact_nid(nid, size, GFP_KERNEL | __GFP_NOWARN);
        if (addr)
                return addr;
 
index 8fa27e4e582a236886eba7d875e6c612130468fc..dfc7069102ee638215d7f494a0893c26e7687325 100644 (file)
@@ -852,7 +852,7 @@ static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_
 
 static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
 {
-       struct inode *inode;
+       struct address_space *mapping;
        unsigned long idx;
        unsigned long size;
        unsigned long limit;
@@ -875,8 +875,10 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
        if (size > SHMEM_NR_DIRECT)
                size = SHMEM_NR_DIRECT;
        offset = shmem_find_swp(entry, ptr, ptr+size);
-       if (offset >= 0)
+       if (offset >= 0) {
+               shmem_swp_balance_unmap();
                goto found;
+       }
        if (!info->i_indirect)
                goto lost2;
 
@@ -914,11 +916,11 @@ static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, s
                        if (size > ENTRIES_PER_PAGE)
                                size = ENTRIES_PER_PAGE;
                        offset = shmem_find_swp(entry, ptr, ptr+size);
-                       shmem_swp_unmap(ptr);
                        if (offset >= 0) {
                                shmem_dir_unmap(dir);
                                goto found;
                        }
+                       shmem_swp_unmap(ptr);
                }
        }
 lost1:
@@ -928,8 +930,7 @@ lost2:
        return 0;
 found:
        idx += offset;
-       inode = igrab(&info->vfs_inode);
-       spin_unlock(&info->lock);
+       ptr += offset;
 
        /*
         * Move _head_ to start search for next from here.
@@ -940,37 +941,18 @@ found:
         */
        if (shmem_swaplist.next != &info->swaplist)
                list_move_tail(&shmem_swaplist, &info->swaplist);
-       mutex_unlock(&shmem_swaplist_mutex);
 
-       error = 1;
-       if (!inode)
-               goto out;
        /*
-        * Charge page using GFP_KERNEL while we can wait.
-        * Charged back to the user(not to caller) when swap account is used.
-        * add_to_page_cache() will be called with GFP_NOWAIT.
+        * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
+        * but also to hold up shmem_evict_inode(): so inode cannot be freed
+        * beneath us (pagelock doesn't help until the page is in pagecache).
         */
-       error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
-       if (error)
-               goto out;
-       error = radix_tree_preload(GFP_KERNEL);
-       if (error) {
-               mem_cgroup_uncharge_cache_page(page);
-               goto out;
-       }
-       error = 1;
-
-       spin_lock(&info->lock);
-       ptr = shmem_swp_entry(info, idx, NULL);
-       if (ptr && ptr->val == entry.val) {
-               error = add_to_page_cache_locked(page, inode->i_mapping,
-                                               idx, GFP_NOWAIT);
-               /* does mem_cgroup_uncharge_cache_page on error */
-       } else  /* we must compensate for our precharge above */
-               mem_cgroup_uncharge_cache_page(page);
+       mapping = info->vfs_inode.i_mapping;
+       error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT);
+       /* which does mem_cgroup_uncharge_cache_page on error */
 
        if (error == -EEXIST) {
-               struct page *filepage = find_get_page(inode->i_mapping, idx);
+               struct page *filepage = find_get_page(mapping, idx);
                error = 1;
                if (filepage) {
                        /*
@@ -990,14 +972,8 @@ found:
                swap_free(entry);
                error = 1;      /* not an error, but entry was found */
        }
-       if (ptr)
-               shmem_swp_unmap(ptr);
+       shmem_swp_unmap(ptr);
        spin_unlock(&info->lock);
-       radix_tree_preload_end();
-out:
-       unlock_page(page);
-       page_cache_release(page);
-       iput(inode);            /* allows for NULL */
        return error;
 }
 
@@ -1009,6 +985,26 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
        struct list_head *p, *next;
        struct shmem_inode_info *info;
        int found = 0;
+       int error;
+
+       /*
+        * Charge page using GFP_KERNEL while we can wait, before taking
+        * the shmem_swaplist_mutex which might hold up shmem_writepage().
+        * Charged back to the user (not to caller) when swap account is used.
+        * add_to_page_cache() will be called with GFP_NOWAIT.
+        */
+       error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
+       if (error)
+               goto out;
+       /*
+        * Try to preload while we can wait, to not make a habit of
+        * draining atomic reserves; but don't latch on to this cpu,
+        * it's okay if sometimes we get rescheduled after this.
+        */
+       error = radix_tree_preload(GFP_KERNEL);
+       if (error)
+               goto uncharge;
+       radix_tree_preload_end();
 
        mutex_lock(&shmem_swaplist_mutex);
        list_for_each_safe(p, next, &shmem_swaplist) {
@@ -1016,17 +1012,19 @@ int shmem_unuse(swp_entry_t entry, struct page *page)
                found = shmem_unuse_inode(info, entry, page);
                cond_resched();
                if (found)
-                       goto out;
+                       break;
        }
        mutex_unlock(&shmem_swaplist_mutex);
-       /*
-        * Can some race bring us here?  We've been holding page lock,
-        * so I think not; but would rather try again later than BUG()
-        */
+
+uncharge:
+       if (!found)
+               mem_cgroup_uncharge_cache_page(page);
+       if (found < 0)
+               error = found;
+out:
        unlock_page(page);
        page_cache_release(page);
-out:
-       return (found < 0) ? found : 0;
+       return error;
 }
 
 /*
@@ -1064,7 +1062,25 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
        else
                swap.val = 0;
 
+       /*
+        * Add inode to shmem_unuse()'s list of swapped-out inodes,
+        * if it's not already there.  Do it now because we cannot take
+        * mutex while holding spinlock, and must do so before the page
+        * is moved to swap cache, when its pagelock no longer protects
+        * the inode from eviction.  But don't unlock the mutex until
+        * we've taken the spinlock, because shmem_unuse_inode() will
+        * prune a !swapped inode from the swaplist under both locks.
+        */
+       if (swap.val) {
+               mutex_lock(&shmem_swaplist_mutex);
+               if (list_empty(&info->swaplist))
+                       list_add_tail(&info->swaplist, &shmem_swaplist);
+       }
+
        spin_lock(&info->lock);
+       if (swap.val)
+               mutex_unlock(&shmem_swaplist_mutex);
+
        if (index >= info->next_index) {
                BUG_ON(!(info->flags & SHMEM_TRUNCATE));
                goto unlock;
@@ -1084,21 +1100,10 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
                delete_from_page_cache(page);
                shmem_swp_set(info, entry, swap.val);
                shmem_swp_unmap(entry);
-               if (list_empty(&info->swaplist))
-                       inode = igrab(inode);
-               else
-                       inode = NULL;
                spin_unlock(&info->lock);
                swap_shmem_alloc(swap);
                BUG_ON(page_mapped(page));
                swap_writepage(page, wbc);
-               if (inode) {
-                       mutex_lock(&shmem_swaplist_mutex);
-                       /* move instead of add in case we're racing */
-                       list_move_tail(&info->swaplist, &shmem_swaplist);
-                       mutex_unlock(&shmem_swaplist_mutex);
-                       iput(inode);
-               }
                return 0;
        }
 
@@ -1400,20 +1405,14 @@ repeat:
                if (sbinfo->max_blocks) {
                        if (percpu_counter_compare(&sbinfo->used_blocks,
                                                sbinfo->max_blocks) >= 0 ||
-                           shmem_acct_block(info->flags)) {
-                               spin_unlock(&info->lock);
-                               error = -ENOSPC;
-                               goto failed;
-                       }
+                           shmem_acct_block(info->flags))
+                               goto nospace;
                        percpu_counter_inc(&sbinfo->used_blocks);
                        spin_lock(&inode->i_lock);
                        inode->i_blocks += BLOCKS_PER_PAGE;
                        spin_unlock(&inode->i_lock);
-               } else if (shmem_acct_block(info->flags)) {
-                       spin_unlock(&info->lock);
-                       error = -ENOSPC;
-                       goto failed;
-               }
+               } else if (shmem_acct_block(info->flags))
+                       goto nospace;
 
                if (!filepage) {
                        int ret;
@@ -1493,6 +1492,24 @@ done:
        error = 0;
        goto out;
 
+nospace:
+       /*
+        * Perhaps the page was brought in from swap between find_lock_page
+        * and taking info->lock?  We allow for that at add_to_page_cache_lru,
+        * but must also avoid reporting a spurious ENOSPC while working on a
+        * full tmpfs.  (When filepage has been passed in to shmem_getpage, it
+        * is already in page cache, which prevents this race from occurring.)
+        */
+       if (!filepage) {
+               struct page *page = find_get_page(mapping, idx);
+               if (page) {
+                       spin_unlock(&info->lock);
+                       page_cache_release(page);
+                       goto repeat;
+               }
+       }
+       spin_unlock(&info->lock);
+       error = -ENOSPC;
 failed:
        if (*pagep != filepage) {
                unlock_page(filepage);
index 94d2a33a866e153830bfd1dbb285e26006262f6f..9d2e5e46bf09b85f6038232b6b3f32f928862ea4 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1940,7 +1940,7 @@ redo:
                 * Since this is without lock semantics the protection is only against
                 * code executing on this cpu *not* from access by other cpus.
                 */
-               if (unlikely(!this_cpu_cmpxchg_double(
+               if (unlikely(!irqsafe_cpu_cmpxchg_double(
                                s->cpu_slab->freelist, s->cpu_slab->tid,
                                object, tid,
                                get_freepointer(s, object), next_tid(tid)))) {
@@ -2145,7 +2145,7 @@ redo:
                set_freepointer(s, object, c->freelist);
 
 #ifdef CONFIG_CMPXCHG_LOCAL
-               if (unlikely(!this_cpu_cmpxchg_double(
+               if (unlikely(!irqsafe_cpu_cmpxchg_double(
                                s->cpu_slab->freelist, s->cpu_slab->tid,
                                c->freelist, tid,
                                object, next_tid(tid)))) {
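
The slub.c hunk above switches the lockless fast paths to irqsafe_cpu_cmpxchg_double(), which only commits a new freelist head if both the head and the transaction id are still the values sampled earlier. A standalone sketch of that double-word compare-and-swap idea follows, with the pair packed into one 64-bit word so a plain C11 compare-exchange can stand in; the field widths and tid values are illustrative only.

/*
 * Standalone sketch (not kernel code): freelist head and transaction id
 * updated together, so a stale snapshot of either forces a retry.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t slot;	/* high 32 bits: freelist head, low 32: tid */

static uint64_t pack(uint32_t head, uint32_t tid)
{
	return ((uint64_t)head << 32) | tid;
}

int main(void)
{
	atomic_store(&slot, pack(7, 100));

	uint64_t expected = pack(7, 100);
	/* succeeds only if neither the head nor the tid changed under us */
	if (atomic_compare_exchange_strong(&slot, &expected, pack(6, 102)))
		printf("fast path: took object 7, new head 6, tid 102\n");

	expected = pack(7, 100);	/* stale snapshot by now */
	if (!atomic_compare_exchange_strong(&slot, &expected, pack(5, 104)))
		printf("retry: snapshot was stale (head %u, tid %u)\n",
		       (uint32_t)(expected >> 32), (uint32_t)expected);
	return 0;
}
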
index a448db377cb046d0871506f156b430dceed818fe..5602f1a1b1e724ba547bdee3ef21d773fd24ee04 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -396,6 +396,9 @@ static void lru_deactivate_fn(struct page *page, void *arg)
        if (!PageLRU(page))
                return;
 
+       if (PageUnevictable(page))
+               return;
+
        /* Some processes are using the page */
        if (page_mapped(page))
                return;
index f6b435c80079337489b17122f0d0ab1b829e12c3..8bfd45050a61494f919e90dfb1538ea8310f4f20 100644 (file)
@@ -937,7 +937,7 @@ keep_lumpy:
         * back off and wait for congestion to clear because further reclaim
         * will encounter the same problem
         */
-       if (nr_dirty == nr_congested && nr_dirty != 0)
+       if (nr_dirty && nr_dirty == nr_congested && scanning_global_lru(sc))
                zone_set_flag(zone, ZONE_CONGESTED);
 
        free_page_list(&free_pages);
index 7850412f52b7e23f817969fea24a560bcbdda4e8..0eb1a886b370bb6f5db3935dae51d46fe0159cdc 100644 (file)
@@ -124,6 +124,9 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
 
        grp->nr_vlans--;
 
+       if (vlan->flags & VLAN_FLAG_GVRP)
+               vlan_gvrp_request_leave(dev);
+
        vlan_group_set_device(grp, vlan_id, NULL);
        if (!grp->killall)
                synchronize_net();
index e34ea9e5e28bcefbe502fb81a4f23432294b65da..b2ff6c8d3603daffbdaaa1573e07f18e6e1cb1dc 100644 (file)
@@ -487,9 +487,6 @@ static int vlan_dev_stop(struct net_device *dev)
        struct vlan_dev_info *vlan = vlan_dev_info(dev);
        struct net_device *real_dev = vlan->real_dev;
 
-       if (vlan->flags & VLAN_FLAG_GVRP)
-               vlan_gvrp_request_leave(dev);
-
        dev_mc_unsync(real_dev, dev);
        dev_uc_unsync(real_dev, dev);
        if (dev->flags & IFF_ALLMULTI)
index 77367745be9bd4d03c3b7100c8108d71c8536d98..a9aa2dd66482d31add8af88598902429c1a56c34 100644 (file)
@@ -614,7 +614,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
 
        err = c->trans_mod->request(c, req);
        if (err < 0) {
-               if (err != -ERESTARTSYS)
+               if (err != -ERESTARTSYS && err != -EFAULT)
                        c->status = Disconnected;
                goto reterr;
        }
index b58a501cf3d124a06939d266002bf48a92863ee1..a873277cb996810162ee875db9485952144541bb 100644 (file)
@@ -674,6 +674,7 @@ int p9dirent_read(char *buf, int len, struct p9_dirent *dirent,
        }
 
        strcpy(dirent->d_name, nameptr);
+       kfree(nameptr);
 
 out:
        return fake_pdu.offset;
index e883172f9aa2c1096d363483f252a603bba7f108..9a70ebdec56e11357ba1f78c0938385881c7d4b3 100644 (file)
@@ -63,7 +63,7 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len,
                int nr_pages, u8 rw)
 {
        uint32_t first_page_bytes = 0;
-       uint32_t pdata_mapped_pages;
+       int32_t pdata_mapped_pages;
        struct trans_rpage_info  *rpinfo;
 
        *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1);
@@ -75,14 +75,9 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len,
        rpinfo = req->tc->private;
        pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf,
                        nr_pages, rw, &rpinfo->rp_data[0]);
+       if (pdata_mapped_pages <= 0)
+               return pdata_mapped_pages;
 
-       if (pdata_mapped_pages < 0) {
-               printk(KERN_ERR "get_user_pages_fast failed:%d udata:%p"
-                               "nr_pages:%d\n", pdata_mapped_pages,
-                               req->tc->pubuf, nr_pages);
-               pdata_mapped_pages = 0;
-               return -EIO;
-       }
        rpinfo->rp_nr_pages = pdata_mapped_pages;
        if (*pdata_off) {
                *pdata_len = first_page_bytes;
index c83f618282f709e424bc30af470139e77586dd66..b5a8afc2be331f5ac5b26649e4609e21039f8b85 100644 (file)
@@ -587,10 +587,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);
 
-       /* Stop timer, it might be running */
-       del_timer_sync(&hdev->cmd_timer);
-
        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
+               del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }
@@ -629,6 +627,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 
        /* Drop last sent command */
        if (hdev->sent_cmd) {
+               del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }
index cebe7588469fe8a82150d7dd26bea5a981cdfbb1..b2570159a04418bf213a512f6f1e1589faf7ff8a 100644 (file)
@@ -2387,8 +2387,6 @@ static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *s
        if (!conn)
                goto unlock;
 
-       hci_conn_hold(conn);
-
        conn->remote_cap = ev->capability;
        conn->remote_oob = ev->oob_data;
        conn->remote_auth = ev->authentication;
index ca27f3a41536f35d4d608f9f0929b50105658f4d..2c8dd4494c63966ffd0e5df4f66546139dea2f81 100644 (file)
@@ -1051,6 +1051,7 @@ static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
        tx_skb = skb_clone(skb, GFP_ATOMIC);
        bt_cb(skb)->retries++;
        control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
+       control &= L2CAP_CTRL_SAR;
 
        if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
                control |= L2CAP_CTRL_FINAL;
index e2160792e1bce49869fb5137a73deeabed8e1135..0c7badad62af07ab3e983c0871311a9212c481a3 100644 (file)
@@ -164,7 +164,7 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
                        goto drop;
 
                /* If STP is turned off, then forward */
-               if (p->br->stp_enabled == BR_NO_STP)
+               if (p->br->stp_enabled == BR_NO_STP && dest[5] == 0)
                        goto forward;
 
                if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
index f3bc322c589128d14d6367368597f6da66958bcb..74ef4d4846a4f93bb11b048a20940d75acf56f2f 100644 (file)
@@ -737,7 +737,7 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
                nf_bridge->mask |= BRNF_PKT_TYPE;
        }
 
-       if (br_parse_ip_options(skb))
+       if (pf == PF_INET && br_parse_ip_options(skb))
                return NF_DROP;
 
        /* The physdev module checks on this */
index 893669caa8de6c56960f804da8d2125f07823fb3..1a92b369c8202c8706c42452950594233882ca06 100644 (file)
@@ -1766,7 +1766,7 @@ static int compat_table_info(const struct ebt_table_info *info,
 
        newinfo->entries_size = size;
 
-       xt_compat_init_offsets(AF_INET, info->nentries);
+       xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
        return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
                                                        entries, newinfo);
 }
@@ -1882,7 +1882,7 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
        struct xt_match *match;
        struct xt_target *wt;
        void *dst = NULL;
-       int off, pad = 0, ret = 0;
+       int off, pad = 0;
        unsigned int size_kern, entry_offset, match_size = mwt->match_size;
 
        strlcpy(name, mwt->u.name, sizeof(name));
@@ -1935,13 +1935,6 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
                break;
        }
 
-       if (!dst) {
-               ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
-                                       off + ebt_compat_entry_padsize());
-               if (ret < 0)
-                       return ret;
-       }
-
        state->buf_kern_offset += match_size + off;
        state->buf_user_offset += match_size;
        pad = XT_ALIGN(size_kern) - size_kern;
@@ -2016,50 +2009,6 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
        return growth;
 }
 
-#define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...)          \
-({                                                          \
-       unsigned int __i;                                   \
-       int __ret = 0;                                      \
-       struct compat_ebt_entry_mwt *__watcher;             \
-                                                           \
-       for (__i = e->watchers_offset;                      \
-            __i < (e)->target_offset;                      \
-            __i += __watcher->watcher_size +               \
-            sizeof(struct compat_ebt_entry_mwt)) {         \
-               __watcher = (void *)(e) + __i;              \
-               __ret = fn(__watcher , ## args);            \
-               if (__ret != 0)                             \
-                       break;                              \
-       }                                                   \
-       if (__ret == 0) {                                   \
-               if (__i != (e)->target_offset)              \
-                       __ret = -EINVAL;                    \
-       }                                                   \
-       __ret;                                              \
-})
-
-#define EBT_COMPAT_MATCH_ITERATE(e, fn, args...)            \
-({                                                          \
-       unsigned int __i;                                   \
-       int __ret = 0;                                      \
-       struct compat_ebt_entry_mwt *__match;               \
-                                                           \
-       for (__i = sizeof(struct ebt_entry);                \
-            __i < (e)->watchers_offset;                    \
-            __i += __match->match_size +                   \
-            sizeof(struct compat_ebt_entry_mwt)) {         \
-               __match = (void *)(e) + __i;                \
-               __ret = fn(__match , ## args);              \
-               if (__ret != 0)                             \
-                       break;                              \
-       }                                                   \
-       if (__ret == 0) {                                   \
-               if (__i != (e)->watchers_offset)            \
-                       __ret = -EINVAL;                    \
-       }                                                   \
-       __ret;                                              \
-})
-
 /* called for all ebt_entry structures. */
 static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
                          unsigned int *total,
@@ -2132,6 +2081,14 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
                }
        }
 
+       if (state->buf_kern_start == NULL) {
+               unsigned int offset = buf_start - (char *) base;
+
+               ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
+               if (ret < 0)
+                       return ret;
+       }
+
        startoff = state->buf_user_offset - startoff;
 
        BUG_ON(*total < startoff);
@@ -2240,6 +2197,7 @@ static int compat_do_replace(struct net *net, void __user *user,
 
        xt_compat_lock(NFPROTO_BRIDGE);
 
+       xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
        ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
        if (ret < 0)
                goto out_unlock;
index 57b1aed79014cdd6022fa04175a1344139f57334..8a6a05e7c3c8de434caf5ffbbb42316086c847b2 100644 (file)
@@ -1427,9 +1427,14 @@ static int bcm_init(struct sock *sk)
 static int bcm_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
-       struct bcm_sock *bo = bcm_sk(sk);
+       struct bcm_sock *bo;
        struct bcm_op *op, *next;
 
+       if (sk == NULL)
+               return 0;
+
+       bo = bcm_sk(sk);
+
        /* remove bcm_ops, timer, rx_unregister(), etc. */
 
        unregister_netdevice_notifier(&bo->notifier);
index 649acfa7c70a98ceb5fd3f131544f02e3cf838c8..0eb39a7fdf64afff51c7e59bd0985433302c60ee 100644 (file)
@@ -305,7 +305,12 @@ static int raw_init(struct sock *sk)
 static int raw_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
-       struct raw_sock *ro = raw_sk(sk);
+       struct raw_sock *ro;
+
+       if (!sk)
+               return 0;
+
+       ro = raw_sk(sk);
 
        unregister_netdevice_notifier(&ro->notifier);
 
index 05f357828a2fb8de1bb2be9f4103c90058484259..e15a82ccc05f69789e162c5caa51d4bb2cc082a5 100644 (file)
@@ -2267,6 +2267,19 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
        m->more_to_follow = false;
        m->pool = NULL;
 
+       /* middle */
+       m->middle = NULL;
+
+       /* data */
+       m->nr_pages = 0;
+       m->page_alignment = 0;
+       m->pages = NULL;
+       m->pagelist = NULL;
+       m->bio = NULL;
+       m->bio_iter = NULL;
+       m->bio_seg = 0;
+       m->trail = NULL;
+
        /* front */
        if (front_len) {
                if (front_len > PAGE_CACHE_SIZE) {
@@ -2286,19 +2299,6 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
        }
        m->front.iov_len = front_len;
 
-       /* middle */
-       m->middle = NULL;
-
-       /* data */
-       m->nr_pages = 0;
-       m->page_alignment = 0;
-       m->pages = NULL;
-       m->pagelist = NULL;
-       m->bio = NULL;
-       m->bio_iter = NULL;
-       m->bio_seg = 0;
-       m->trail = NULL;
-
        dout("ceph_msg_new %p front %d\n", m, front_len);
        return m;
 
index 5a80f41c0cbaf207cf71310161856b5a31e65b4a..6b5dda1cb5df1a7adf6b93a49201b34d81eb8dcb 100644 (file)
@@ -470,8 +470,8 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                                         snapc, ops,
                                         use_mempool,
                                         GFP_NOFS, NULL, NULL);
-       if (IS_ERR(req))
-               return req;
+       if (!req)
+               return NULL;
 
        /* calculate max write size */
        calc_layout(osdc, vino, layout, off, plen, req, ops);
index c2ac599fa0f642f368025ababaa6c95476874c24..b624fe4d9bd7ce408ab93d324ad7cc8413d8f4b8 100644 (file)
@@ -1284,11 +1284,13 @@ static int dev_close_many(struct list_head *head)
  */
 int dev_close(struct net_device *dev)
 {
-       LIST_HEAD(single);
+       if (dev->flags & IFF_UP) {
+               LIST_HEAD(single);
 
-       list_add(&dev->unreg_list, &single);
-       dev_close_many(&single);
-       list_del(&single);
+               list_add(&dev->unreg_list, &single);
+               dev_close_many(&single);
+               list_del(&single);
+       }
        return 0;
 }
 EXPORT_SYMBOL(dev_close);
@@ -4773,7 +4775,7 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm
                 * is never reached
                 */
                WARN_ON(1);
-               err = -EINVAL;
+               err = -ENOTTY;
                break;
 
        }
@@ -5041,7 +5043,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
                /* Set the per device memory buffer space.
                 * Not applicable in our case */
        case SIOCSIFLINK:
-               return -EINVAL;
+               return -ENOTTY;
 
        /*
         *      Unknown or private ioctl.
@@ -5062,7 +5064,7 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
                /* Take care of Wireless Extensions */
                if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
                        return wext_handle_ioctl(net, &ifr, cmd, arg);
-               return -EINVAL;
+               return -ENOTTY;
        }
 }
 
@@ -5184,27 +5186,27 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
        /* Fix illegal checksum combinations */
        if ((features & NETIF_F_HW_CSUM) &&
            (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-               netdev_info(dev, "mixed HW and IP checksum settings.\n");
+               netdev_warn(dev, "mixed HW and IP checksum settings.\n");
                features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
        }
 
        if ((features & NETIF_F_NO_CSUM) &&
            (features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-               netdev_info(dev, "mixed no checksumming and other settings.\n");
+               netdev_warn(dev, "mixed no checksumming and other settings.\n");
                features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
        }
 
        /* Fix illegal SG+CSUM combinations. */
        if ((features & NETIF_F_SG) &&
            !(features & NETIF_F_ALL_CSUM)) {
-               netdev_info(dev,
-                           "Dropping NETIF_F_SG since no checksum feature.\n");
+               netdev_dbg(dev,
+                       "Dropping NETIF_F_SG since no checksum feature.\n");
                features &= ~NETIF_F_SG;
        }
 
        /* TSO requires that SG is present as well. */
        if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
-               netdev_info(dev, "Dropping TSO features since no SG feature.\n");
+               netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
                features &= ~NETIF_F_ALL_TSO;
        }
 
@@ -5214,7 +5216,7 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
 
        /* Software GSO depends on SG. */
        if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
-               netdev_info(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
+               netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
                features &= ~NETIF_F_GSO;
        }
 
@@ -5224,13 +5226,13 @@ u32 netdev_fix_features(struct net_device *dev, u32 features)
                if (!((features & NETIF_F_GEN_CSUM) ||
                    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
                            == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
-                       netdev_info(dev,
+                       netdev_dbg(dev,
                                "Dropping NETIF_F_UFO since no checksum offload features.\n");
                        features &= ~NETIF_F_UFO;
                }
 
                if (!(features & NETIF_F_SG)) {
-                       netdev_info(dev,
+                       netdev_dbg(dev,
                                "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
                        features &= ~NETIF_F_UFO;
                }
@@ -5412,12 +5414,6 @@ int register_netdevice(struct net_device *dev)
        dev->features |= NETIF_F_SOFT_FEATURES;
        dev->wanted_features = dev->features & dev->hw_features;
 
-       /* Avoid warning from netdev_fix_features() for GSO without SG */
-       if (!(dev->wanted_features & NETIF_F_SG)) {
-               dev->wanted_features &= ~NETIF_F_GSO;
-               dev->features &= ~NETIF_F_GSO;
-       }
-
        /* Enable GRO and NETIF_F_HIGHDMA for vlans by default,
         * vlan_dev_init() will do the dev->features check, so these features
         * are enabled only if supported by underlying device.
index f06ffcfc8d712421040c71a56513b90dcfca96c7..4b2ab657ac8e616f415312d96a2b9c0189489a18 100644 (file)
@@ -123,6 +123,8 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
                case DCCPO_CHANGE_L ... DCCPO_CONFIRM_R:
                        if (pkt_type == DCCP_PKT_DATA)      /* RFC 4340, 6 */
                                break;
+                       if (len == 0)
+                               goto out_invalid_option;
                        rc = dccp_feat_parse_options(sk, dreq, mandatory, opt,
                                                    *value, value + 1, len - 1);
                        if (rc)
index 87bb5f4de0e84601817a0f63733a725ebb478b03..c53ded2a98dfbcd26113a2265fc14f22fa352971 100644 (file)
@@ -41,12 +41,12 @@ config NET_DSA_MV88E6XXX_NEED_PPU
        default n
 
 config NET_DSA_MV88E6131
-       bool "Marvell 88E6095/6095F/6131 ethernet switch chip support"
+       bool "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
        select NET_DSA_MV88E6XXX
        select NET_DSA_MV88E6XXX_NEED_PPU
        select NET_DSA_TAG_DSA
        ---help---
-         This enables support for the Marvell 88E6095/6095F/6131
+         This enables support for the Marvell 88E6085/6095/6095F/6131
          ethernet switch chips.
 
 config NET_DSA_MV88E6123_61_65
index 3da418894efcc715ceca776061028c7e7ce4d551..45f7411e90baf02fbed1642676f6448b74d024cb 100644 (file)
@@ -207,8 +207,15 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
         * mode, but do not enable forwarding of unknown unicasts.
         */
        val = 0x0433;
-       if (p == dsa_upstream_port(ds))
+       if (p == dsa_upstream_port(ds)) {
                val |= 0x0104;
+               /*
+                * On 6085, unknown multicast forward is controlled
+                * here rather than in Port Control 2 register.
+                */
+               if (ps->id == ID_6085)
+                       val |= 0x0008;
+       }
        if (ds->dsa_port_mask & (1 << p))
                val |= 0x0100;
        REG_WRITE(addr, 0x04, val);
@@ -251,10 +258,19 @@ static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
         * If this is the upstream port for this switch, enable
         * forwarding of unknown multicast addresses.
         */
-       val = 0x0080 | dsa_upstream_port(ds);
-       if (p == dsa_upstream_port(ds))
-               val |= 0x0040;
-       REG_WRITE(addr, 0x08, val);
+       if (ps->id == ID_6085)
+               /*
+                * on 6085, bits 3:0 are reserved, bit 6 control ARP
+                * mirroring, and multicast forward is handled in
+                * Port Control register.
+                */
+               REG_WRITE(addr, 0x08, 0x0080);
+       else {
+               val = 0x0080 | dsa_upstream_port(ds);
+               if (p == dsa_upstream_port(ds))
+                       val |= 0x0040;
+               REG_WRITE(addr, 0x08, val);
+       }
 
        /*
         * Rate Control: disable ingress rate limiting.
index 5345b0bee6df816858c1424d0ccb50988d133aa2..cd9ca0811cfaf30a179db870bfe628d4d0261a2e 100644 (file)
@@ -1680,7 +1680,7 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
                return;
 
        cnf->sysctl = NULL;
-       unregister_sysctl_table(t->sysctl_header);
+       unregister_net_sysctl_table(t->sysctl_header);
        kfree(t->dev_name);
        kfree(t);
 }
index e9013d6c1f513adfbde31e16041a5c82235b6af4..5fe9b8b41df34fe8cdf7af72ce10138a3f92191a 100644 (file)
@@ -1978,9 +1978,6 @@ struct fib_table *fib_trie_table(u32 id)
        t = (struct trie *) tb->tb_data;
        memset(t, 0, sizeof(*t));
 
-       if (id == RT_TABLE_LOCAL)
-               pr_info("IPv4 FIB: Using LC-trie version %s\n", VERSION);
-
        return tb;
 }
 
index a1151b8adf3c6d65409ea8944973432187e49fe0..b1d282f11be7e23ae4099901ac4be00c5fd8cd12 100644 (file)
@@ -223,31 +223,30 @@ static void ip_expire(unsigned long arg)
 
        if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
                struct sk_buff *head = qp->q.fragments;
+               const struct iphdr *iph;
+               int err;
 
                rcu_read_lock();
                head->dev = dev_get_by_index_rcu(net, qp->iif);
                if (!head->dev)
                        goto out_rcu_unlock;
 
+               /* skb dst is stale, drop it, and perform route lookup again */
+               skb_dst_drop(head);
+               iph = ip_hdr(head);
+               err = ip_route_input_noref(head, iph->daddr, iph->saddr,
+                                          iph->tos, head->dev);
+               if (err)
+                       goto out_rcu_unlock;
+
                /*
-                * Only search router table for the head fragment,
-                * when defraging timeout at PRE_ROUTING HOOK.
+                * Only an end host needs to send an ICMP
+                * "Fragment Reassembly Timeout" message, per RFC792.
                 */
-               if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) {
-                       const struct iphdr *iph = ip_hdr(head);
-                       int err = ip_route_input(head, iph->daddr, iph->saddr,
-                                                iph->tos, head->dev);
-                       if (unlikely(err))
-                               goto out_rcu_unlock;
-
-                       /*
-                        * Only an end host needs to send an ICMP
-                        * "Fragment Reassembly Timeout" message, per RFC792.
-                        */
-                       if (skb_rtable(head)->rt_type != RTN_LOCAL)
-                               goto out_rcu_unlock;
+               if (qp->user == IP_DEFRAG_CONNTRACK_IN &&
+                   skb_rtable(head)->rt_type != RTN_LOCAL)
+                       goto out_rcu_unlock;
 
-               }
 
                /* Send an ICMP "Fragment Reassembly Timeout" message. */
                icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
index c1acf69858fd722e6a755c8bf029a56039605b67..99e6e4bb1c72d044b97ba93ad6c0bd944d170edd 100644 (file)
@@ -2690,6 +2690,12 @@ static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
 }
 
+static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
+                                         unsigned long old)
+{
+       return NULL;
+}
+
 static struct dst_ops ipv4_dst_blackhole_ops = {
        .family                 =       AF_INET,
        .protocol               =       cpu_to_be16(ETH_P_IP),
@@ -2698,6 +2704,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
        .default_mtu            =       ipv4_blackhole_default_mtu,
        .default_advmss         =       ipv4_default_advmss,
        .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
+       .cow_metrics            =       ipv4_rt_blackhole_cow_metrics,
 };
 
 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
index 34340c9c95fab4f3c9aa108fbed4dd7f69028cd9..f376b05cca818fd9496fe0cb8540a051e3e7e50a 100644 (file)
@@ -93,6 +93,7 @@ struct bictcp {
        u32     ack_cnt;        /* number of acks */
        u32     tcp_cwnd;       /* estimated tcp cwnd */
 #define ACK_RATIO_SHIFT        4
+#define ACK_RATIO_LIMIT (32u << ACK_RATIO_SHIFT)
        u16     delayed_ack;    /* estimate the ratio of Packets/ACKs << 4 */
        u8      sample_cnt;     /* number of samples to decide curr_rtt */
        u8      found;          /* the exit point is found? */
@@ -398,8 +399,12 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
        u32 delay;
 
        if (icsk->icsk_ca_state == TCP_CA_Open) {
-               cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT;
-               ca->delayed_ack += cnt;
+               u32 ratio = ca->delayed_ack;
+
+               ratio -= ca->delayed_ack >> ACK_RATIO_SHIFT;
+               ratio += cnt;
+
+               ca->delayed_ack = min(ratio, ACK_RATIO_LIMIT);
        }
 
        /* Some calls are for duplicates without timetamps */
index 571aa96a175c956fb81728030441309286b938c9..2d51840e53a114be661abd42200a632b9ebf791e 100644 (file)
@@ -69,7 +69,7 @@ int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(xfrm4_prepare_output);
 
-static int xfrm4_output_finish(struct sk_buff *skb)
+int xfrm4_output_finish(struct sk_buff *skb)
 {
 #ifdef CONFIG_NETFILTER
        if (!skb_dst(skb)->xfrm) {
@@ -86,7 +86,11 @@ static int xfrm4_output_finish(struct sk_buff *skb)
 
 int xfrm4_output(struct sk_buff *skb)
 {
+       struct dst_entry *dst = skb_dst(skb);
+       struct xfrm_state *x = dst->xfrm;
+
        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb,
-                           NULL, skb_dst(skb)->dev, xfrm4_output_finish,
+                           NULL, dst->dev,
+                           x->outer_mode->afinfo->output_finish,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
 }
index 1717c64628d1c5c4625ca7b25a2b02422b52c61b..805d63ef4340fae1176ee8c2c0be0e24d1b60ef6 100644 (file)
@@ -78,6 +78,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
        .init_tempsel           = __xfrm4_init_tempsel,
        .init_temprop           = xfrm4_init_temprop,
        .output                 = xfrm4_output,
+       .output_finish          = xfrm4_output_finish,
        .extract_input          = xfrm4_extract_input,
        .extract_output         = xfrm4_extract_output,
        .transport_finish       = xfrm4_transport_finish,
index 1493534116df489b8a41411face52d2b06279928..a7bda0757053278e58218766df54cb49fac100d6 100644 (file)
@@ -4537,7 +4537,7 @@ static void __addrconf_sysctl_unregister(struct ipv6_devconf *p)
 
        t = p->sysctl;
        p->sysctl = NULL;
-       unregister_sysctl_table(t->sysctl_header);
+       unregister_net_sysctl_table(t->sysctl_header);
        kfree(t->dev_name);
        kfree(t);
 }
index 5aa8ec88f1946296ce4f776f8927f764cf4edbc1..59dccfbb5b11332f7f3957d2fb80a0f94a2784da 100644 (file)
@@ -371,7 +371,7 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
        iv = esp_tmp_iv(aead, tmp, seqhilen);
        req = esp_tmp_req(aead, iv);
        asg = esp_req_sg(aead, req);
-       sg = asg + 1;
+       sg = asg + sglists;
 
        skb->ip_summed = CHECKSUM_NONE;
 
index 28e74488a329770597426c98a3d3b06b58267ef3..a5a4c5dd53961cf1b2432c8317592d1099eafda2 100644 (file)
@@ -45,6 +45,8 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
        int tcphoff, needs_ack;
        const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
        struct ipv6hdr *ip6h;
+#define DEFAULT_TOS_VALUE      0x0U
+       const __u8 tclass = DEFAULT_TOS_VALUE;
        struct dst_entry *dst = NULL;
        u8 proto;
        struct flowi6 fl6;
@@ -124,7 +126,7 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
        skb_put(nskb, sizeof(struct ipv6hdr));
        skb_reset_network_header(nskb);
        ip6h = ipv6_hdr(nskb);
-       ip6h->version = 6;
+       *(__be32 *)ip6h =  htonl(0x60000000 | (tclass << 20));
        ip6h->hop_limit = ip6_dst_hoplimit(dst);
        ip6h->nexthdr = IPPROTO_TCP;
        ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr);
index 843406f14d7b2b37ac33b039f52fe80feb132cba..fd0eec6f88c6d71d9eba6f1d104001d23bcb2721 100644 (file)
@@ -153,6 +153,12 @@ static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
 }
 
+static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,
+                                        unsigned long old)
+{
+       return NULL;
+}
+
 static struct dst_ops ip6_dst_blackhole_ops = {
        .family                 =       AF_INET6,
        .protocol               =       cpu_to_be16(ETH_P_IPV6),
@@ -161,6 +167,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
        .default_mtu            =       ip6_blackhole_default_mtu,
        .default_advmss         =       ip6_default_advmss,
        .update_pmtu            =       ip6_rt_blackhole_update_pmtu,
+       .cow_metrics            =       ip6_rt_blackhole_cow_metrics,
 };
 
 static const u32 ip6_template_metrics[RTAX_MAX] = {
@@ -2012,7 +2019,6 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
        rt->dst.output = ip6_output;
        rt->rt6i_dev = net->loopback_dev;
        rt->rt6i_idev = idev;
-       dst_metric_set(&rt->dst, RTAX_HOPLIMIT, -1);
        rt->dst.obsolete = -1;
 
        rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
index 15c37746845ed1ad678866756b4057421b7f3f0a..9e305d74b3d41a99a065da67fe54bd3a4a9ebaa6 100644 (file)
@@ -1335,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
        skb->ip_summed = CHECKSUM_NONE;
 
        /* Check if there is enough headroom to insert fragment header. */
-       if ((skb_headroom(skb) < frag_hdr_sz) &&
+       if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&
            pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
                goto out;
 
index 8e688b3de9abc62ec03c7dd90b33c951791c5587..49a91c5f5623b80298001b11927eb763883c1553 100644 (file)
@@ -79,7 +79,7 @@ int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(xfrm6_prepare_output);
 
-static int xfrm6_output_finish(struct sk_buff *skb)
+int xfrm6_output_finish(struct sk_buff *skb)
 {
 #ifdef CONFIG_NETFILTER
        IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
@@ -97,9 +97,9 @@ static int __xfrm6_output(struct sk_buff *skb)
        if ((x && x->props.mode == XFRM_MODE_TUNNEL) &&
            ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
                dst_allfrag(skb_dst(skb)))) {
-                       return ip6_fragment(skb, xfrm6_output_finish);
+                       return ip6_fragment(skb, x->outer_mode->afinfo->output_finish);
        }
-       return xfrm6_output_finish(skb);
+       return x->outer_mode->afinfo->output_finish(skb);
 }
 
 int xfrm6_output(struct sk_buff *skb)
index afe941e9415cdd895c08059f9ca76770187f9c2b..248f0b2a7ee93448b30e55a884f15ec7f897a240 100644 (file)
@@ -178,6 +178,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
        .tmpl_sort              = __xfrm6_tmpl_sort,
        .state_sort             = __xfrm6_state_sort,
        .output                 = xfrm6_output,
+       .output_finish          = xfrm6_output_finish,
        .extract_input          = xfrm6_extract_input,
        .extract_output         = xfrm6_extract_output,
        .transport_finish       = xfrm6_transport_finish,
index 334213571ad0ef9257c73a7d6ab81febb3ef9c36..44049733c4eaf6ab450e9ea48e676102669531ba 100644 (file)
@@ -1504,6 +1504,8 @@ int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
        enum ieee80211_smps_mode old_req;
        int err;
 
+       lockdep_assert_held(&sdata->u.mgd.mtx);
+
        old_req = sdata->u.mgd.req_smps;
        sdata->u.mgd.req_smps = smps_mode;
 
index dacace6b139301ad5625a738465190e2827af160..9ea7c0d0103f8d9f15fc5b7d04109316ee675ea2 100644 (file)
@@ -177,9 +177,9 @@ static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
        if (sdata->vif.type != NL80211_IFTYPE_STATION)
                return -EOPNOTSUPP;
 
-       mutex_lock(&local->iflist_mtx);
+       mutex_lock(&sdata->u.mgd.mtx);
        err = __ieee80211_request_smps(sdata, smps_mode);
-       mutex_unlock(&local->iflist_mtx);
+       mutex_unlock(&sdata->u.mgd.mtx);
 
        return err;
 }
index ce4596ed1268ca9039298f7a05c280a9e3fca190..bd1224fd216aeb034278d6cc34f3c38287ac754c 100644 (file)
@@ -237,6 +237,10 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
                                     &local->dynamic_ps_disable_work);
        }
 
+       /* Don't restart the timer if we're not disassociated */
+       if (!ifmgd->associated)
+               return TX_CONTINUE;
+
        mod_timer(&local->dynamic_ps_timer, jiffies +
                  msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
 
index 2dc6de13ac1890d586d1e9a3d70817b329afbec8..059af3120be741ecce63e07ebaa9c9e428281343 100644 (file)
@@ -572,11 +572,11 @@ static const struct file_operations ip_vs_app_fops = {
        .open    = ip_vs_app_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
-       .release = seq_release,
+       .release = seq_release_net,
 };
 #endif
 
-static int __net_init __ip_vs_app_init(struct net *net)
+int __net_init __ip_vs_app_init(struct net *net)
 {
        struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -585,26 +585,17 @@ static int __net_init __ip_vs_app_init(struct net *net)
        return 0;
 }
 
-static void __net_exit __ip_vs_app_cleanup(struct net *net)
+void __net_exit __ip_vs_app_cleanup(struct net *net)
 {
        proc_net_remove(net, "ip_vs_app");
 }
 
-static struct pernet_operations ip_vs_app_ops = {
-       .init = __ip_vs_app_init,
-       .exit = __ip_vs_app_cleanup,
-};
-
 int __init ip_vs_app_init(void)
 {
-       int rv;
-
-       rv = register_pernet_subsys(&ip_vs_app_ops);
-       return rv;
+       return 0;
 }
 
 
 void ip_vs_app_cleanup(void)
 {
-       unregister_pernet_subsys(&ip_vs_app_ops);
 }
index c97bd45975bed636c99dd55e263869f8be11a98b..bf28ac2fc99b7cf164d82417e6248049787ff923 100644 (file)
@@ -1046,7 +1046,7 @@ static const struct file_operations ip_vs_conn_fops = {
        .open    = ip_vs_conn_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
-       .release = seq_release,
+       .release = seq_release_net,
 };
 
 static const char *ip_vs_origin_name(unsigned flags)
@@ -1114,7 +1114,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
        .open    = ip_vs_conn_sync_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
-       .release = seq_release,
+       .release = seq_release_net,
 };
 
 #endif
@@ -1258,22 +1258,17 @@ int __net_init __ip_vs_conn_init(struct net *net)
        return 0;
 }
 
-static void __net_exit __ip_vs_conn_cleanup(struct net *net)
+void __net_exit __ip_vs_conn_cleanup(struct net *net)
 {
        /* flush all the connection entries first */
        ip_vs_conn_flush(net);
        proc_net_remove(net, "ip_vs_conn");
        proc_net_remove(net, "ip_vs_conn_sync");
 }
-static struct pernet_operations ipvs_conn_ops = {
-       .init = __ip_vs_conn_init,
-       .exit = __ip_vs_conn_cleanup,
-};
 
 int __init ip_vs_conn_init(void)
 {
        int idx;
-       int retc;
 
        /* Compute size and mask */
        ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
@@ -1309,17 +1304,14 @@ int __init ip_vs_conn_init(void)
                rwlock_init(&__ip_vs_conntbl_lock_array[idx].l);
        }
 
-       retc = register_pernet_subsys(&ipvs_conn_ops);
-
        /* calculate the random value for connection hash */
        get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd));
 
-       return retc;
+       return 0;
 }
 
 void ip_vs_conn_cleanup(void)
 {
-       unregister_pernet_subsys(&ipvs_conn_ops);
        /* Release the empty cache */
        kmem_cache_destroy(ip_vs_conn_cachep);
        vfree(ip_vs_conn_tab);
index 07accf6b2401a05aa89b4a543e89741781547ff6..a74dae6c5dbce9f184e52e29298662d1ebc76c51 100644 (file)
@@ -1113,6 +1113,9 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
                return NF_ACCEPT;
 
        net = skb_net(skb);
+       if (!net_ipvs(net)->enable)
+               return NF_ACCEPT;
+
        ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
 #ifdef CONFIG_IP_VS_IPV6
        if (af == AF_INET6) {
@@ -1343,6 +1346,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
                return NF_ACCEPT; /* The packet looks wrong, ignore */
 
        net = skb_net(skb);
+
        pd = ip_vs_proto_data_get(net, cih->protocol);
        if (!pd)
                return NF_ACCEPT;
@@ -1529,6 +1533,11 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
                              IP_VS_DBG_ADDR(af, &iph.daddr), hooknum);
                return NF_ACCEPT;
        }
+       /* ipvs enabled in this netns ? */
+       net = skb_net(skb);
+       if (!net_ipvs(net)->enable)
+               return NF_ACCEPT;
+
        ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
 
        /* Bad... Do not break raw sockets */
@@ -1562,7 +1571,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
                        ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
                }
 
-       net = skb_net(skb);
        /* Protocol supported? */
        pd = ip_vs_proto_data_get(net, iph.protocol);
        if (unlikely(!pd))
@@ -1588,7 +1596,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
        }
 
        IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet");
-       net = skb_net(skb);
        ipvs = net_ipvs(net);
        /* Check the server status */
        if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
@@ -1743,10 +1750,16 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,
                   int (*okfn)(struct sk_buff *))
 {
        int r;
+       struct net *net;
 
        if (ip_hdr(skb)->protocol != IPPROTO_ICMP)
                return NF_ACCEPT;
 
+       /* ipvs enabled in this netns ? */
+       net = skb_net(skb);
+       if (!net_ipvs(net)->enable)
+               return NF_ACCEPT;
+
        return ip_vs_in_icmp(skb, &r, hooknum);
 }
 
@@ -1757,10 +1770,16 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,
                      int (*okfn)(struct sk_buff *))
 {
        int r;
+       struct net *net;
 
        if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
                return NF_ACCEPT;
 
+       /* ipvs enabled in this netns ? */
+       net = skb_net(skb);
+       if (!net_ipvs(net)->enable)
+               return NF_ACCEPT;
+
        return ip_vs_in_icmp_v6(skb, &r, hooknum);
 }
 #endif
@@ -1884,19 +1903,70 @@ static int __net_init __ip_vs_init(struct net *net)
                pr_err("%s(): no memory.\n", __func__);
                return -ENOMEM;
        }
+       /* Hold the beast until a service is registerd */
+       ipvs->enable = 0;
        ipvs->net = net;
        /* Counters used for creating unique names */
        ipvs->gen = atomic_read(&ipvs_netns_cnt);
        atomic_inc(&ipvs_netns_cnt);
        net->ipvs = ipvs;
+
+       if (__ip_vs_estimator_init(net) < 0)
+               goto estimator_fail;
+
+       if (__ip_vs_control_init(net) < 0)
+               goto control_fail;
+
+       if (__ip_vs_protocol_init(net) < 0)
+               goto protocol_fail;
+
+       if (__ip_vs_app_init(net) < 0)
+               goto app_fail;
+
+       if (__ip_vs_conn_init(net) < 0)
+               goto conn_fail;
+
+       if (__ip_vs_sync_init(net) < 0)
+               goto sync_fail;
+
        printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
                         sizeof(struct netns_ipvs), ipvs->gen);
        return 0;
+/*
+ * Error handling
+ */
+
+sync_fail:
+       __ip_vs_conn_cleanup(net);
+conn_fail:
+       __ip_vs_app_cleanup(net);
+app_fail:
+       __ip_vs_protocol_cleanup(net);
+protocol_fail:
+       __ip_vs_control_cleanup(net);
+control_fail:
+       __ip_vs_estimator_cleanup(net);
+estimator_fail:
+       return -ENOMEM;
 }
 
 static void __net_exit __ip_vs_cleanup(struct net *net)
 {
-       IP_VS_DBG(10, "ipvs netns %d released\n", net_ipvs(net)->gen);
+       __ip_vs_service_cleanup(net);   /* ip_vs_flush() with locks */
+       __ip_vs_conn_cleanup(net);
+       __ip_vs_app_cleanup(net);
+       __ip_vs_protocol_cleanup(net);
+       __ip_vs_control_cleanup(net);
+       __ip_vs_estimator_cleanup(net);
+       IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen);
+}
+
+static void __net_exit __ip_vs_dev_cleanup(struct net *net)
+{
+       EnterFunction(2);
+       net_ipvs(net)->enable = 0;      /* Disable packet reception */
+       __ip_vs_sync_cleanup(net);
+       LeaveFunction(2);
 }
 
 static struct pernet_operations ipvs_core_ops = {
@@ -1906,6 +1976,10 @@ static struct pernet_operations ipvs_core_ops = {
        .size = sizeof(struct netns_ipvs),
 };
 
+static struct pernet_operations ipvs_core_dev_ops = {
+       .exit = __ip_vs_dev_cleanup,
+};
+
 /*
  *     Initialize IP Virtual Server
  */
@@ -1913,10 +1987,6 @@ static int __init ip_vs_init(void)
 {
        int ret;
 
-       ret = register_pernet_subsys(&ipvs_core_ops);   /* Alloc ip_vs struct */
-       if (ret < 0)
-               return ret;
-
        ip_vs_estimator_init();
        ret = ip_vs_control_init();
        if (ret < 0) {
@@ -1944,15 +2014,28 @@ static int __init ip_vs_init(void)
                goto cleanup_conn;
        }
 
+       ret = register_pernet_subsys(&ipvs_core_ops);   /* Alloc ip_vs struct */
+       if (ret < 0)
+               goto cleanup_sync;
+
+       ret = register_pernet_device(&ipvs_core_dev_ops);
+       if (ret < 0)
+               goto cleanup_sub;
+
        ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
        if (ret < 0) {
                pr_err("can't register hooks.\n");
-               goto cleanup_sync;
+               goto cleanup_dev;
        }
 
        pr_info("ipvs loaded.\n");
+
        return ret;
 
+cleanup_dev:
+       unregister_pernet_device(&ipvs_core_dev_ops);
+cleanup_sub:
+       unregister_pernet_subsys(&ipvs_core_ops);
 cleanup_sync:
        ip_vs_sync_cleanup();
   cleanup_conn:
@@ -1964,20 +2047,20 @@ cleanup_sync:
        ip_vs_control_cleanup();
   cleanup_estimator:
        ip_vs_estimator_cleanup();
-       unregister_pernet_subsys(&ipvs_core_ops);       /* free ip_vs struct */
        return ret;
 }
 
 static void __exit ip_vs_cleanup(void)
 {
        nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
+       unregister_pernet_device(&ipvs_core_dev_ops);
+       unregister_pernet_subsys(&ipvs_core_ops);       /* free ip_vs struct */
        ip_vs_sync_cleanup();
        ip_vs_conn_cleanup();
        ip_vs_app_cleanup();
        ip_vs_protocol_cleanup();
        ip_vs_control_cleanup();
        ip_vs_estimator_cleanup();
-       unregister_pernet_subsys(&ipvs_core_ops);       /* free ip_vs struct */
        pr_info("ipvs unloaded.\n");
 }
 
index ae47090bf45fe1fdfbf99192950aacb951cb1af1..37890f228b19852272e022621a707740d9fe0f71 100644 (file)
@@ -69,6 +69,11 @@ int ip_vs_get_debug_level(void)
 }
 #endif
 
+
+/*  Protos */
+static void __ip_vs_del_service(struct ip_vs_service *svc);
+
+
 #ifdef CONFIG_IP_VS_IPV6
 /* Taken from rt6_fill_node() in net/ipv6/route.c, is there a better way? */
 static int __ip_vs_addr_is_local_v6(struct net *net,
@@ -1214,6 +1219,8 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
        write_unlock_bh(&__ip_vs_svc_lock);
 
        *svc_p = svc;
+       /* Now there is a service - full throttle */
+       ipvs->enable = 1;
        return 0;
 
 
@@ -1472,6 +1479,84 @@ static int ip_vs_flush(struct net *net)
        return 0;
 }
 
+/*
+ *     Delete service by {netns} in the service table.
+ *     Called by __ip_vs_cleanup()
+ */
+void __ip_vs_service_cleanup(struct net *net)
+{
+       EnterFunction(2);
+       /* Check for "full" addressed entries */
+       mutex_lock(&__ip_vs_mutex);
+       ip_vs_flush(net);
+       mutex_unlock(&__ip_vs_mutex);
+       LeaveFunction(2);
+}
+/*
+ * Release dst hold by dst_cache
+ */
+static inline void
+__ip_vs_dev_reset(struct ip_vs_dest *dest, struct net_device *dev)
+{
+       spin_lock_bh(&dest->dst_lock);
+       if (dest->dst_cache && dest->dst_cache->dev == dev) {
+               IP_VS_DBG_BUF(3, "Reset dev:%s dest %s:%u ,dest->refcnt=%d\n",
+                             dev->name,
+                             IP_VS_DBG_ADDR(dest->af, &dest->addr),
+                             ntohs(dest->port),
+                             atomic_read(&dest->refcnt));
+               ip_vs_dst_reset(dest);
+       }
+       spin_unlock_bh(&dest->dst_lock);
+
+}
+/*
+ * Netdev event receiver
+ * Currently only NETDEV_UNREGISTER is handled, i.e. if we hold a reference to
+ * a device that is "unregister" it must be released.
+ */
+static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
+                           void *ptr)
+{
+       struct net_device *dev = ptr;
+       struct net *net = dev_net(dev);
+       struct ip_vs_service *svc;
+       struct ip_vs_dest *dest;
+       unsigned int idx;
+
+       if (event != NETDEV_UNREGISTER)
+               return NOTIFY_DONE;
+       IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
+       EnterFunction(2);
+       mutex_lock(&__ip_vs_mutex);
+       for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
+               list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
+                       if (net_eq(svc->net, net)) {
+                               list_for_each_entry(dest, &svc->destinations,
+                                                   n_list) {
+                                       __ip_vs_dev_reset(dest, dev);
+                               }
+                       }
+               }
+
+               list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
+                       if (net_eq(svc->net, net)) {
+                               list_for_each_entry(dest, &svc->destinations,
+                                                   n_list) {
+                                       __ip_vs_dev_reset(dest, dev);
+                               }
+                       }
+
+               }
+       }
+
+       list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) {
+               __ip_vs_dev_reset(dest, dev);
+       }
+       mutex_unlock(&__ip_vs_mutex);
+       LeaveFunction(2);
+       return NOTIFY_DONE;
+}
 
 /*
  *     Zero counters in a service or all services
@@ -1981,7 +2066,7 @@ static const struct file_operations ip_vs_info_fops = {
        .open    = ip_vs_info_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
-       .release = seq_release_private,
+       .release = seq_release_net,
 };
 
 #endif
@@ -2024,7 +2109,7 @@ static const struct file_operations ip_vs_stats_fops = {
        .open = ip_vs_stats_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
-       .release = single_release,
+       .release = single_release_net,
 };
 
 static int ip_vs_stats_percpu_show(struct seq_file *seq, void *v)
@@ -2093,7 +2178,7 @@ static const struct file_operations ip_vs_stats_percpu_fops = {
        .open = ip_vs_stats_percpu_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
-       .release = single_release,
+       .release = single_release_net,
 };
 #endif
 
@@ -3588,6 +3673,10 @@ void __net_init __ip_vs_control_cleanup_sysctl(struct net *net) { }
 
 #endif
 
+static struct notifier_block ip_vs_dst_notifier = {
+       .notifier_call = ip_vs_dst_event,
+};
+
 int __net_init __ip_vs_control_init(struct net *net)
 {
        int idx;
@@ -3626,7 +3715,7 @@ err:
        return -ENOMEM;
 }
 
-static void __net_exit __ip_vs_control_cleanup(struct net *net)
+void __net_exit __ip_vs_control_cleanup(struct net *net)
 {
        struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -3639,11 +3728,6 @@ static void __net_exit __ip_vs_control_cleanup(struct net *net)
        free_percpu(ipvs->tot_stats.cpustats);
 }
 
-static struct pernet_operations ipvs_control_ops = {
-       .init = __ip_vs_control_init,
-       .exit = __ip_vs_control_cleanup,
-};
-
 int __init ip_vs_control_init(void)
 {
        int idx;
@@ -3657,33 +3741,32 @@ int __init ip_vs_control_init(void)
                INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
        }
 
-       ret = register_pernet_subsys(&ipvs_control_ops);
-       if (ret) {
-               pr_err("cannot register namespace.\n");
-               goto err;
-       }
-
        smp_wmb();      /* Do we really need it now ? */
 
        ret = nf_register_sockopt(&ip_vs_sockopts);
        if (ret) {
                pr_err("cannot register sockopt.\n");
-               goto err_net;
+               goto err_sock;
        }
 
        ret = ip_vs_genl_register();
        if (ret) {
                pr_err("cannot register Generic Netlink interface.\n");
-               nf_unregister_sockopt(&ip_vs_sockopts);
-               goto err_net;
+               goto err_genl;
        }
 
+       ret = register_netdevice_notifier(&ip_vs_dst_notifier);
+       if (ret < 0)
+               goto err_notf;
+
        LeaveFunction(2);
        return 0;
 
-err_net:
-       unregister_pernet_subsys(&ipvs_control_ops);
-err:
+err_notf:
+       ip_vs_genl_unregister();
+err_genl:
+       nf_unregister_sockopt(&ip_vs_sockopts);
+err_sock:
        return ret;
 }
 
@@ -3691,7 +3774,6 @@ err:
 void ip_vs_control_cleanup(void)
 {
        EnterFunction(2);
-       unregister_pernet_subsys(&ipvs_control_ops);
        ip_vs_genl_unregister();
        nf_unregister_sockopt(&ip_vs_sockopts);
        LeaveFunction(2);
index 8c8766ca56ada43698df6e51bcaad8338e4ef40a..508cce98777c743558eecaa66d479889db6d8c50 100644 (file)
@@ -192,7 +192,7 @@ void ip_vs_read_estimator(struct ip_vs_stats_user *dst,
        dst->outbps = (e->outbps + 0xF) >> 5;
 }
 
-static int __net_init __ip_vs_estimator_init(struct net *net)
+int __net_init __ip_vs_estimator_init(struct net *net)
 {
        struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -203,24 +203,16 @@ static int __net_init __ip_vs_estimator_init(struct net *net)
        return 0;
 }
 
-static void __net_exit __ip_vs_estimator_exit(struct net *net)
+void __net_exit __ip_vs_estimator_cleanup(struct net *net)
 {
        del_timer_sync(&net_ipvs(net)->est_timer);
 }
-static struct pernet_operations ip_vs_app_ops = {
-       .init = __ip_vs_estimator_init,
-       .exit = __ip_vs_estimator_exit,
-};
 
 int __init ip_vs_estimator_init(void)
 {
-       int rv;
-
-       rv = register_pernet_subsys(&ip_vs_app_ops);
-       return rv;
+       return 0;
 }
 
 void ip_vs_estimator_cleanup(void)
 {
-       unregister_pernet_subsys(&ip_vs_app_ops);
 }
index 17484a4416ef91158d03d868ee3c698e3179cf6e..eb86028536fc94debd108fdea43ab8f36aa00fcd 100644 (file)
@@ -316,7 +316,7 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
 /*
  * per network name-space init
  */
-static int __net_init __ip_vs_protocol_init(struct net *net)
+int __net_init __ip_vs_protocol_init(struct net *net)
 {
 #ifdef CONFIG_IP_VS_PROTO_TCP
        register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp);
@@ -336,7 +336,7 @@ static int __net_init __ip_vs_protocol_init(struct net *net)
        return 0;
 }
 
-static void __net_exit __ip_vs_protocol_cleanup(struct net *net)
+void __net_exit __ip_vs_protocol_cleanup(struct net *net)
 {
        struct netns_ipvs *ipvs = net_ipvs(net);
        struct ip_vs_proto_data *pd;
@@ -349,11 +349,6 @@ static void __net_exit __ip_vs_protocol_cleanup(struct net *net)
        }
 }
 
-static struct pernet_operations ipvs_proto_ops = {
-       .init = __ip_vs_protocol_init,
-       .exit = __ip_vs_protocol_cleanup,
-};
-
 int __init ip_vs_protocol_init(void)
 {
        char protocols[64];
@@ -382,7 +377,6 @@ int __init ip_vs_protocol_init(void)
        REGISTER_PROTOCOL(&ip_vs_protocol_esp);
 #endif
        pr_info("Registered protocols (%s)\n", &protocols[2]);
-       return register_pernet_subsys(&ipvs_proto_ops);
 
        return 0;
 }
@@ -393,7 +387,6 @@ void ip_vs_protocol_cleanup(void)
        struct ip_vs_protocol *pp;
        int i;
 
-       unregister_pernet_subsys(&ipvs_proto_ops);
        /* unregister all the ipvs protocols */
        for (i = 0; i < IP_VS_PROTO_TAB_SIZE; i++) {
                while ((pp = ip_vs_proto_table[i]) != NULL)
index 3e7961e85e9caaf63d92615a3ffab970775583c4..e292e5bddc7000d3fbbb9f1b130dd3039dcee296 100644 (file)
@@ -1303,13 +1303,18 @@ static struct socket *make_send_sock(struct net *net)
        struct socket *sock;
        int result;
 
-       /* First create a socket */
-       result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
+       /* First create a socket move it to right name space later */
+       result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
        if (result < 0) {
                pr_err("Error during creation of socket; terminating\n");
                return ERR_PTR(result);
        }
-
+       /*
+        * Kernel sockets that are a part of a namespace, should not
+        * hold a reference to a namespace in order to allow to stop it.
+        * After sk_change_net should be released using sk_release_kernel.
+        */
+       sk_change_net(sock->sk, net);
        result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn);
        if (result < 0) {
                pr_err("Error setting outbound mcast interface\n");
@@ -1334,8 +1339,8 @@ static struct socket *make_send_sock(struct net *net)
 
        return sock;
 
-  error:
-       sock_release(sock);
+error:
+       sk_release_kernel(sock->sk);
        return ERR_PTR(result);
 }
 
@@ -1350,12 +1355,17 @@ static struct socket *make_receive_sock(struct net *net)
        int result;
 
        /* First create a socket */
-       result = __sock_create(net, PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock, 1);
+       result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
        if (result < 0) {
                pr_err("Error during creation of socket; terminating\n");
                return ERR_PTR(result);
        }
-
+       /*
+        * Kernel sockets that are a part of a namespace, should not
+        * hold a reference to a namespace in order to allow to stop it.
+        * After sk_change_net should be released using sk_release_kernel.
+        */
+       sk_change_net(sock->sk, net);
        /* it is equivalent to the REUSEADDR option in user-space */
        sock->sk->sk_reuse = 1;
 
@@ -1377,8 +1387,8 @@ static struct socket *make_receive_sock(struct net *net)
 
        return sock;
 
-  error:
-       sock_release(sock);
+error:
+       sk_release_kernel(sock->sk);
        return ERR_PTR(result);
 }
 
@@ -1473,7 +1483,7 @@ static int sync_thread_master(void *data)
                ip_vs_sync_buff_release(sb);
 
        /* release the sending multicast socket */
-       sock_release(tinfo->sock);
+       sk_release_kernel(tinfo->sock->sk);
        kfree(tinfo);
 
        return 0;
@@ -1513,7 +1523,7 @@ static int sync_thread_backup(void *data)
        }
 
        /* release the sending multicast socket */
-       sock_release(tinfo->sock);
+       sk_release_kernel(tinfo->sock->sk);
        kfree(tinfo->buf);
        kfree(tinfo);
 
@@ -1601,7 +1611,7 @@ outtinfo:
 outbuf:
        kfree(buf);
 outsocket:
-       sock_release(sock);
+       sk_release_kernel(sock->sk);
 out:
        return result;
 }
@@ -1610,6 +1620,7 @@ out:
 int stop_sync_thread(struct net *net, int state)
 {
        struct netns_ipvs *ipvs = net_ipvs(net);
+       int retc = -EINVAL;
 
        IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
 
@@ -1629,7 +1640,7 @@ int stop_sync_thread(struct net *net, int state)
                spin_lock_bh(&ipvs->sync_lock);
                ipvs->sync_state &= ~IP_VS_STATE_MASTER;
                spin_unlock_bh(&ipvs->sync_lock);
-               kthread_stop(ipvs->master_thread);
+               retc = kthread_stop(ipvs->master_thread);
                ipvs->master_thread = NULL;
        } else if (state == IP_VS_STATE_BACKUP) {
                if (!ipvs->backup_thread)
@@ -1639,22 +1650,20 @@ int stop_sync_thread(struct net *net, int state)
                        task_pid_nr(ipvs->backup_thread));
 
                ipvs->sync_state &= ~IP_VS_STATE_BACKUP;
-               kthread_stop(ipvs->backup_thread);
+               retc = kthread_stop(ipvs->backup_thread);
                ipvs->backup_thread = NULL;
-       } else {
-               return -EINVAL;
        }
 
        /* decrease the module use count */
        ip_vs_use_count_dec();
 
-       return 0;
+       return retc;
 }
 
 /*
  * Initialize data struct for each netns
  */
-static int __net_init __ip_vs_sync_init(struct net *net)
+int __net_init __ip_vs_sync_init(struct net *net)
 {
        struct netns_ipvs *ipvs = net_ipvs(net);
 
@@ -1668,24 +1677,24 @@ static int __net_init __ip_vs_sync_init(struct net *net)
        return 0;
 }
 
-static void __ip_vs_sync_cleanup(struct net *net)
+void __ip_vs_sync_cleanup(struct net *net)
 {
-       stop_sync_thread(net, IP_VS_STATE_MASTER);
-       stop_sync_thread(net, IP_VS_STATE_BACKUP);
-}
+       int retc;
 
-static struct pernet_operations ipvs_sync_ops = {
-       .init = __ip_vs_sync_init,
-       .exit = __ip_vs_sync_cleanup,
-};
+       retc = stop_sync_thread(net, IP_VS_STATE_MASTER);
+       if (retc && retc != -ESRCH)
+               pr_err("Failed to stop Master Daemon\n");
 
+       retc = stop_sync_thread(net, IP_VS_STATE_BACKUP);
+       if (retc && retc != -ESRCH)
+               pr_err("Failed to stop Backup Daemon\n");
+}
 
 int __init ip_vs_sync_init(void)
 {
-       return register_pernet_subsys(&ipvs_sync_ops);
+       return 0;
 }
 
 void ip_vs_sync_cleanup(void)
 {
-       unregister_pernet_subsys(&ipvs_sync_ops);
 }
index 30bf8a167fc8800ff26b4ab72cafe3e219f38139..482e90c6185087f2827b9f27448c74ac87eb0ef6 100644 (file)
@@ -1334,6 +1334,7 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
        struct nf_conn *ct;
        int err = -EINVAL;
        struct nf_conntrack_helper *helper;
+       struct nf_conn_tstamp *tstamp;
 
        ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
        if (IS_ERR(ct))
@@ -1451,6 +1452,9 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
                __set_bit(IPS_EXPECTED_BIT, &ct->status);
                ct->master = master_ct;
        }
+       tstamp = nf_conn_tstamp_find(ct);
+       if (tstamp)
+               tstamp->start = ktime_to_ns(ktime_get_real());
 
        add_timer(&ct->timeout);
        nf_conntrack_hash_insert(ct);
index a9adf4c6b299ee35a72c4617e0d3e565031de940..8a025a585d2f7c6d45dd07bcdfc9680826c88edd 100644 (file)
@@ -455,6 +455,7 @@ void xt_compat_flush_offsets(u_int8_t af)
                vfree(xt[af].compat_tab);
                xt[af].compat_tab = NULL;
                xt[af].number = 0;
+               xt[af].cur = 0;
        }
 }
 EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);
@@ -473,8 +474,7 @@ int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
                else
                        return mid ? tmp[mid - 1].delta : 0;
        }
-       WARN_ON_ONCE(1);
-       return 0;
+       return left ? tmp[left - 1].delta : 0;
 }
 EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
 
index 0a229191e55b2f8e15e00e459b2ceb20fe226893..ae8271652efa9cf5a6f939fcedc3238ca6c865c7 100644 (file)
@@ -99,7 +99,7 @@ tos_tg6(struct sk_buff *skb, const struct xt_action_param *par)
        u_int8_t orig, nv;
 
        orig = ipv6_get_dsfield(iph);
-       nv   = (orig & info->tos_mask) ^ info->tos_value;
+       nv   = (orig & ~info->tos_mask) ^ info->tos_value;
 
        if (orig != nv) {
                if (!skb_make_writable(skb, sizeof(struct iphdr)))
index 481a86fdc40973bb43f62cfd2a36b0d4746f8a63..61805d7b38aa62e880c1e8f4e32f79d1bf5ffdc2 100644 (file)
@@ -272,11 +272,6 @@ static int conntrack_mt_check(const struct xt_mtchk_param *par)
 {
        int ret;
 
-       if (strcmp(par->table, "raw") == 0) {
-               pr_info("state is undetermined at the time of raw table\n");
-               return -EINVAL;
-       }
-
        ret = nf_ct_l3proto_try_module_get(par->family);
        if (ret < 0)
                pr_info("cannot load conntrack support for proto=%u\n",
index 3a43a830476884464abbaba9eae091b166ecabe8..b1d75beb7e20e8e3155a84be37161871023396ac 100644 (file)
@@ -524,6 +524,8 @@ static int unix_dgram_connect(struct socket *, struct sockaddr *,
                              int, int);
 static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
                                  struct msghdr *, size_t);
+static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
+                                 struct msghdr *, size_t, int);
 
 static const struct proto_ops unix_stream_ops = {
        .family =       PF_UNIX,
@@ -583,7 +585,7 @@ static const struct proto_ops unix_seqpacket_ops = {
        .setsockopt =   sock_no_setsockopt,
        .getsockopt =   sock_no_getsockopt,
        .sendmsg =      unix_seqpacket_sendmsg,
-       .recvmsg =      unix_dgram_recvmsg,
+       .recvmsg =      unix_seqpacket_recvmsg,
        .mmap =         sock_no_mmap,
        .sendpage =     sock_no_sendpage,
 };
@@ -1699,6 +1701,18 @@ static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
        return unix_dgram_sendmsg(kiocb, sock, msg, len);
 }
 
+static int unix_seqpacket_recvmsg(struct kiocb *iocb, struct socket *sock,
+                             struct msghdr *msg, size_t size,
+                             int flags)
+{
+       struct sock *sk = sock->sk;
+
+       if (sk->sk_state != TCP_ESTABLISHED)
+               return -ENOTCONN;
+
+       return unix_dgram_recvmsg(iocb, sock, msg, size, flags);
+}
+
 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
 {
        struct unix_sock *u = unix_sk(sk);
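
With the dedicated recvmsg hook added above, an AF_UNIX SOCK_SEQPACKET socket
that is not connected now reports ENOTCONN instead of being treated like an
unconnected datagram socket. A hypothetical userspace check (illustration
only, not from the patch):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[64];
            int fd = socket(AF_UNIX, SOCK_SEQPACKET, 0);

            if (fd < 0)
                    return 1;
            /* no connect(): with this change the expected error is ENOTCONN */
            if (recv(fd, buf, sizeof(buf), MSG_DONTWAIT) < 0)
                    printf("recv: %s\n", strerror(errno));
            close(fd);
            return 0;
    }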
index 15792d8b62721f2ce3b33ccb24556942063a966b..b4d745ea8ee14acb200b0e4dc4f5685d012d9e30 100644 (file)
@@ -1406,6 +1406,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
        struct net *net = xp_net(policy);
        unsigned long now = jiffies;
        struct net_device *dev;
+       struct xfrm_mode *inner_mode;
        struct dst_entry *dst_prev = NULL;
        struct dst_entry *dst0 = NULL;
        int i = 0;
@@ -1436,6 +1437,17 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
                        goto put_states;
                }
 
+               if (xfrm[i]->sel.family == AF_UNSPEC) {
+                       inner_mode = xfrm_ip2inner_mode(xfrm[i],
+                                                       xfrm_af2proto(family));
+                       if (!inner_mode) {
+                               err = -EAFNOSUPPORT;
+                               dst_release(dst);
+                               goto put_states;
+                       }
+               } else
+                       inner_mode = xfrm[i]->inner_mode;
+
                if (!dst_prev)
                        dst0 = dst1;
                else {
@@ -1464,7 +1476,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
                dst1->lastuse = now;
 
                dst1->input = dst_discard;
-               dst1->output = xfrm[i]->outer_mode->afinfo->output;
+               dst1->output = inner_mode->afinfo->output;
 
                dst1->next = dst_prev;
                dst_prev = dst1;
index f218385950ca06a3a8b6830c51c29ac370ca2ea0..47f1b8638df9987dca1cf7ad2bbce5c674044881 100644 (file)
@@ -532,9 +532,12 @@ int xfrm_init_replay(struct xfrm_state *x)
 
        if (replay_esn) {
                if (replay_esn->replay_window >
-                   replay_esn->bmp_len * sizeof(__u32))
+                   replay_esn->bmp_len * sizeof(__u32) * 8)
                        return -EINVAL;
 
+       if ((x->props.flags & XFRM_STATE_ESN) && replay_esn->replay_window == 0)
+               return -EINVAL;
+
        if ((x->props.flags & XFRM_STATE_ESN) && x->replay_esn)
                x->repl = &xfrm_replay_esn;
        else
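
Two separate checks are added above: the window bound is converted from bytes
to bits (bmp_len counts 32-bit words, so the bitmap covers bmp_len * 32
sequence numbers), and an ESN state with a zero replay window is rejected. A
small standalone illustration of the corrected bound (values are arbitrary):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t bmp_len = 4;   /* 4 x 32-bit words = 128 bits of bitmap */
            uint32_t max_window = bmp_len * sizeof(uint32_t) * 8;

            assert(max_window == 128);
            /* the pre-fix bound, bmp_len * sizeof(__u32) == 16, would have
             * rejected a perfectly valid 64-packet window */
            return 0;
    }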
index 5d1d60d3ca832c419bd41bf7b70d84bacb222494..c658cb3bc7c3ccf213fffee288f8b211a2c93293 100644 (file)
@@ -124,6 +124,9 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
 {
        struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
 
+       if ((p->flags & XFRM_STATE_ESN) && !rt)
+               return -EINVAL;
+
        if (!rt)
                return 0;
 
index f7cf0ea6faeaf2abee48ea3db68c0befddd00502..8fb248843009de5e6b71a01cae7b884cd597ba36 100644 (file)
@@ -1578,7 +1578,8 @@ static int may_create(struct inode *dir,
                return rc;
 
        if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
-               rc = security_transition_sid(sid, dsec->sid, tclass, NULL, &newsid);
+               rc = security_transition_sid(sid, dsec->sid, tclass,
+                                            &dentry->d_name, &newsid);
                if (rc)
                        return rc;
        }
index e7b850ad57ee1547018ff37c4c3bb496d6bccb21..7102457661d645cd499d770819ad92e3970f6831 100644 (file)
@@ -502,7 +502,7 @@ static int policydb_index(struct policydb *p)
                goto out;
 
        rc = flex_array_prealloc(p->type_val_to_struct_array, 0,
-                                p->p_types.nprim - 1, GFP_KERNEL | __GFP_ZERO);
+                                p->p_types.nprim, GFP_KERNEL | __GFP_ZERO);
        if (rc)
                goto out;
 
@@ -519,7 +519,7 @@ static int policydb_index(struct policydb *p)
                        goto out;
 
                rc = flex_array_prealloc(p->sym_val_to_name[i],
-                                        0, p->symtab[i].nprim - 1,
+                                        0, p->symtab[i].nprim,
                                         GFP_KERNEL | __GFP_ZERO);
                if (rc)
                        goto out;
@@ -1819,8 +1819,6 @@ static int filename_trans_read(struct policydb *p, void *fp)
                goto out;
        nel = le32_to_cpu(buf[0]);
 
-       printk(KERN_ERR "%s: nel=%d\n", __func__, nel);
-
        last = p->filename_trans;
        while (last && last->next)
                last = last->next;
@@ -1857,8 +1855,6 @@ static int filename_trans_read(struct policydb *p, void *fp)
                        goto out;
                name[len] = 0;
 
-               printk(KERN_ERR "%s: ft=%p ft->name=%p ft->name=%s\n", __func__, ft, ft->name, ft->name);
-
                rc = next_entry(buf, fp, sizeof(u32) * 4);
                if (rc)
                        goto out;
@@ -2375,7 +2371,7 @@ int policydb_read(struct policydb *p, void *fp)
                goto bad;
 
        /* preallocate so we don't have to worry about the put ever failing */
-       rc = flex_array_prealloc(p->type_attr_map_array, 0, p->p_types.nprim - 1,
+       rc = flex_array_prealloc(p->type_attr_map_array, 0, p->p_types.nprim,
                                 GFP_KERNEL | __GFP_ZERO);
        if (rc)
                goto bad;
index 33f0ba5559a77201ab8bf7752f7e242cf0bcc1ac..62e959120c44cc82542ccca98f0df7791e99b022 100644 (file)
@@ -44,10 +44,10 @@ static struct snd_pcm_hardware snd_vortex_playback_hw_adb = {
        .channels_min = 1,
        .channels_max = 2,
        .buffer_bytes_max = 0x10000,
-       .period_bytes_min = 0x1,
+       .period_bytes_min = 0x20,
        .period_bytes_max = 0x1000,
        .periods_min = 2,
-       .periods_max = 32,
+       .periods_max = 1024,
 };
 
 #ifndef CHIP_AU8820
@@ -140,6 +140,9 @@ static int snd_vortex_pcm_open(struct snd_pcm_substream *substream)
                                        SNDRV_PCM_HW_PARAM_PERIOD_BYTES)) < 0)
                return err;
 
+       snd_pcm_hw_constraint_step(runtime, 0,
+                                       SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 64);
+
        if (VORTEX_PCM_TYPE(substream->pcm) != VORTEX_PCM_WT) {
 #ifndef CHIP_AU8820
                if (VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_A3D) {
index d3bd2c10180f4964134778d0e3bf574ba5c0d2ab..c82979a8cd0930026e84eced9fa684d4c4ccac84 100644 (file)
@@ -1704,11 +1704,11 @@ static void alc_apply_fixup(struct hda_codec *codec, int action)
                                   codec->chip_name, fix->type);
                        break;
                }
-               if (!fix[id].chained)
+               if (!fix->chained)
                        break;
                if (++depth > 10)
                        break;
-               id = fix[id].chain_id;
+               id = fix->chain_id;
        }
 }
 
@@ -5645,6 +5645,7 @@ static void fillup_priv_adc_nids(struct hda_codec *codec, hda_nid_t *nids,
 static struct snd_pci_quirk beep_white_list[] = {
        SND_PCI_QUIRK(0x1043, 0x829f, "ASUS", 1),
        SND_PCI_QUIRK(0x1043, 0x83ce, "EeePC", 1),
+       SND_PCI_QUIRK(0x1043, 0x831a, "EeePC", 1),
        SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1),
        {}
 };
@@ -9863,6 +9864,7 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC883_LAPTOP_EAPD),
        SND_PCI_QUIRK(0x10f1, 0x2350, "TYAN-S2350", ALC888_6ST_DELL),
        SND_PCI_QUIRK(0x108e, 0x534d, NULL, ALC883_3ST_6ch),
+       SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte P35 DS3R", ALC882_6ST_DIG),
 
        SND_PCI_QUIRK(0x1462, 0x0349, "MSI", ALC883_TARGA_2ch_DIG),
        SND_PCI_QUIRK(0x1462, 0x040d, "MSI", ALC883_TARGA_2ch_DIG),
@@ -10699,7 +10701,6 @@ enum {
        PINFIX_LENOVO_Y530,
        PINFIX_PB_M5210,
        PINFIX_ACER_ASPIRE_7736,
-       PINFIX_GIGABYTE_880GM,
 };
 
 static const struct alc_fixup alc882_fixups[] = {
@@ -10731,13 +10732,6 @@ static const struct alc_fixup alc882_fixups[] = {
                .type = ALC_FIXUP_SKU,
                .v.sku = ALC_FIXUP_SKU_IGNORE,
        },
-       [PINFIX_GIGABYTE_880GM] = {
-               .type = ALC_FIXUP_PINS,
-               .v.pins = (const struct alc_pincfg[]) {
-                       { 0x14, 0x1114410 }, /* set as speaker */
-                       { }
-               }
-       },
 };
 
 static struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -10745,7 +10739,6 @@ static struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", PINFIX_LENOVO_Y530),
        SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", PINFIX_ABIT_AW9D_MAX),
        SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", PINFIX_ACER_ASPIRE_7736),
-       SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte", PINFIX_GIGABYTE_880GM),
        {}
 };
 
@@ -18805,6 +18798,8 @@ static struct snd_pci_quirk alc662_cfg_tbl[] = {
                      ALC662_3ST_6ch_DIG),
        SND_PCI_QUIRK(0x1179, 0xff6e, "Toshiba NB20x", ALC662_AUTO),
        SND_PCI_QUIRK(0x144d, 0xca00, "Samsung NC10", ALC272_SAMSUNG_NC10),
+       SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte 945GCM-S2L",
+                     ALC662_3ST_6ch_DIG),
        SND_PCI_QUIRK(0x152d, 0x2304, "Quanta WH1", ALC663_ASUS_H13),
        SND_PCI_QUIRK(0x1565, 0x820f, "Biostar TA780G M2+", ALC662_3ST_6ch_DIG),
        SND_PCI_QUIRK(0x1631, 0xc10c, "PB RS65", ALC663_ASUS_M51VA),
@@ -19478,7 +19473,7 @@ enum {
        ALC662_FIXUP_IDEAPAD,
        ALC272_FIXUP_MARIO,
        ALC662_FIXUP_CZC_P10T,
-       ALC662_FIXUP_GIGABYTE,
+       ALC662_FIXUP_SKU_IGNORE,
 };
 
 static const struct alc_fixup alc662_fixups[] = {
@@ -19507,20 +19502,17 @@ static const struct alc_fixup alc662_fixups[] = {
                        {}
                }
        },
-       [ALC662_FIXUP_GIGABYTE] = {
-               .type = ALC_FIXUP_PINS,
-               .v.pins = (const struct alc_pincfg[]) {
-                       { 0x14, 0x1114410 }, /* set as speaker */
-                       { }
-               }
+       [ALC662_FIXUP_SKU_IGNORE] = {
+               .type = ALC_FIXUP_SKU,
+               .v.sku = ALC_FIXUP_SKU_IGNORE,
        },
 };
 
 static struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
+       SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
        SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
-       SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte", ALC662_FIXUP_GIGABYTE),
        SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
index 1371b57c11e823209336a3a2f86b6274e550a0b9..0997031c48d228e3e0a2af8d9b1fde55ab54ed8d 100644 (file)
@@ -1292,14 +1292,18 @@ static void notify_aa_path_ctls(struct hda_codec *codec)
 {
        int i;
        struct snd_ctl_elem_id id;
-       const char *labels[] = {"Mic", "Front Mic", "Line"};
+       const char *labels[] = {"Mic", "Front Mic", "Line", "Rear Mic"};
+       struct snd_kcontrol *ctl;
 
        memset(&id, 0, sizeof(id));
        id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
        for (i = 0; i < ARRAY_SIZE(labels); i++) {
                sprintf(id.name, "%s Playback Volume", labels[i]);
-               snd_ctl_notify(codec->bus->card, SNDRV_CTL_EVENT_MASK_VALUE,
-                              &id);
+               ctl = snd_hda_find_mixer_ctl(codec, id.name);
+               if (ctl)
+                       snd_ctl_notify(codec->bus->card,
+                                       SNDRV_CTL_EVENT_MASK_VALUE,
+                                       &ctl->id);
        }
 }
 
index 2727befd158ecfd253ce25072439bf6d01e43ff0..b04d28039c1665d42473bf79df2936938e20acfc 100644 (file)
@@ -139,7 +139,7 @@ SOC_DOUBLE_R("Capture Volume", SSM2602_LINVOL, SSM2602_RINVOL, 0, 31, 0),
 SOC_DOUBLE_R("Capture Switch", SSM2602_LINVOL, SSM2602_RINVOL, 7, 1, 1),
 
 SOC_SINGLE("Mic Boost (+20dB)", SSM2602_APANA, 0, 1, 0),
-SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 7, 1, 0),
+SOC_SINGLE("Mic Boost2 (+20dB)", SSM2602_APANA, 8, 1, 0),
 SOC_SINGLE("Mic Switch", SSM2602_APANA, 1, 1, 1),
 
 SOC_SINGLE("Sidetone Playback Volume", SSM2602_APANA, 6, 3, 1),
@@ -602,7 +602,7 @@ static struct snd_soc_codec_driver soc_codec_dev_ssm2602 = {
        .read = ssm2602_read_reg_cache,
        .write = ssm2602_write,
        .set_bias_level = ssm2602_set_bias_level,
-       .reg_cache_size = sizeof(ssm2602_reg),
+       .reg_cache_size = ARRAY_SIZE(ssm2602_reg),
        .reg_word_size = sizeof(u16),
        .reg_cache_default = ssm2602_reg,
 };
@@ -614,7 +614,7 @@ static struct snd_soc_codec_driver soc_codec_dev_ssm2602 = {
  *    low  = 0x1a
  *    high = 0x1b
  */
-static int ssm2602_i2c_probe(struct i2c_client *i2c,
+static int __devinit ssm2602_i2c_probe(struct i2c_client *i2c,
                             const struct i2c_device_id *id)
 {
        struct ssm2602_priv *ssm2602;
@@ -635,7 +635,7 @@ static int ssm2602_i2c_probe(struct i2c_client *i2c,
        return ret;
 }
 
-static int ssm2602_i2c_remove(struct i2c_client *client)
+static int __devexit ssm2602_i2c_remove(struct i2c_client *client)
 {
        snd_soc_unregister_codec(&client->dev);
        kfree(i2c_get_clientdata(client));
@@ -655,7 +655,7 @@ static struct i2c_driver ssm2602_i2c_driver = {
                .owner = THIS_MODULE,
        },
        .probe = ssm2602_i2c_probe,
-       .remove = ssm2602_i2c_remove,
+       .remove = __devexit_p(ssm2602_i2c_remove),
        .id_table = ssm2602_i2c_id,
 };
 #endif
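
The reg_cache_size change earlier in this file is the usual sizeof() versus
ARRAY_SIZE() slip: the register cache is an array of u16, so sizeof() counts
bytes and reports twice as many registers as actually exist. A standalone
illustration (the values below are made up, not the real ssm2602_reg
defaults):

    #include <assert.h>
    #include <stdint.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
            static const uint16_t reg_cache[] = {
                    0x0097, 0x0097, 0x0079, 0x0079, 0x000a, 0x0008,
            };

            assert(sizeof(reg_cache) == 12);     /* bytes, not registers */
            assert(ARRAY_SIZE(reg_cache) == 6);  /* what reg_cache_size expects */
            return 0;
    }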
index 48ffd406a71d18e2170a7b8ca69214a9de4ee8a6..a7b8f301bad39b3ef7a691273d4d3ba4bf4b9097 100644 (file)
@@ -601,9 +601,7 @@ static struct snd_soc_codec_driver soc_codec_dev_uda134x = {
        .reg_cache_step = 1,
        .read = uda134x_read_reg_cache,
        .write = uda134x_write,
-#ifdef POWER_OFF_ON_STANDBY
        .set_bias_level = uda134x_set_bias_level,
-#endif
 };
 
 static int __devinit uda134x_codec_probe(struct platform_device *pdev)
index f52b623bb692c3a0b3954e12e7e5027446a6d136..824d1c8c8a3536b8d2d8b5f36a699c5f808aa2f4 100644 (file)
@@ -692,7 +692,7 @@ SOC_ENUM("DRC Smoothing Threshold", drc_smoothing),
 SOC_SINGLE_TLV("DRC Startup Volume", WM8903_DRC_0, 6, 18, 0, drc_tlv_startup),
 
 SOC_DOUBLE_R_TLV("Digital Capture Volume", WM8903_ADC_DIGITAL_VOLUME_LEFT,
-                WM8903_ADC_DIGITAL_VOLUME_RIGHT, 1, 96, 0, digital_tlv),
+                WM8903_ADC_DIGITAL_VOLUME_RIGHT, 1, 120, 0, digital_tlv),
 SOC_ENUM("ADC Companding Mode", adc_companding),
 SOC_SINGLE("ADC Companding Switch", WM8903_AUDIO_INTERFACE_0, 3, 1, 0),
 
index a5af834c8ef52088c9a735b85cceebe095b95300..4ddc6d3b66786a14738b99049f17cfb140a6d598 100644 (file)
@@ -434,17 +434,21 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
                mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
                mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
 
-               mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, (0x7 << 26));
+               mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
+                               ACLKX | AHCLKX | AFSX);
                break;
        case SND_SOC_DAIFMT_CBM_CFS:
                /* codec is clock master and frame slave */
-               mcasp_set_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
+               mcasp_clr_bits(base + DAVINCI_MCASP_ACLKXCTL_REG, ACLKXE);
                mcasp_set_bits(base + DAVINCI_MCASP_TXFMCTL_REG, AFSXE);
 
-               mcasp_set_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
+               mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
                mcasp_set_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
 
-               mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG, (0x2d << 26));
+               mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG,
+                               ACLKX | ACLKR);
+               mcasp_set_bits(base + DAVINCI_MCASP_PDIR_REG,
+                               AFSX | AFSR);
                break;
        case SND_SOC_DAIFMT_CBM_CFM:
                /* codec is clock and frame master */
@@ -454,7 +458,8 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
                mcasp_clr_bits(base + DAVINCI_MCASP_ACLKRCTL_REG, ACLKRE);
                mcasp_clr_bits(base + DAVINCI_MCASP_RXFMCTL_REG, AFSRE);
 
-               mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG, (0x3f << 26));
+               mcasp_clr_bits(base + DAVINCI_MCASP_PDIR_REG,
+                               ACLKX | AHCLKX | AFSX | ACLKR | AHCLKR | AFSR);
                break;
 
        default:
@@ -644,7 +649,7 @@ static void davinci_hw_param(struct davinci_audio_dev *dev, int stream)
                mcasp_set_reg(dev->base + DAVINCI_MCASP_TXTDM_REG, mask);
                mcasp_set_bits(dev->base + DAVINCI_MCASP_TXFMT_REG, TXORD);
 
-               if ((dev->tdm_slots >= 2) || (dev->tdm_slots <= 32))
+               if ((dev->tdm_slots >= 2) && (dev->tdm_slots <= 32))
                        mcasp_mod_bits(dev->base + DAVINCI_MCASP_TXFMCTL_REG,
                                        FSXMOD(dev->tdm_slots), FSXMOD(0x1FF));
                else
@@ -660,7 +665,7 @@ static void davinci_hw_param(struct davinci_audio_dev *dev, int stream)
                                AHCLKRE);
                mcasp_set_reg(dev->base + DAVINCI_MCASP_RXTDM_REG, mask);
 
-               if ((dev->tdm_slots >= 2) || (dev->tdm_slots <= 32))
+               if ((dev->tdm_slots >= 2) && (dev->tdm_slots <= 32))
                        mcasp_mod_bits(dev->base + DAVINCI_MCASP_RXFMCTL_REG,
                                        FSRMOD(dev->tdm_slots), FSRMOD(0x1FF));
                else
index 419bf4f5534a6c082f8e5b5aa7a1a9ce3e43bcf7..cd22a54b2f14b3cd408d64acbbc519786320f573 100644 (file)
@@ -133,7 +133,7 @@ static void jz4740_i2s_shutdown(struct snd_pcm_substream *substream,
        struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
        uint32_t conf;
 
-       if (!dai->active)
+       if (dai->active)
                return;
 
        conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
index d567c322a2fb471788d7b8f47345a6b9fd6d5aed..6b1f9d3bf34e2ba37056216fa9635d592435e843 100644 (file)
@@ -376,6 +376,11 @@ static int sst_platform_pcm_hw_params(struct snd_pcm_substream *substream,
        return 0;
 }
 
+static int sst_platform_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+       return snd_pcm_lib_free_pages(substream);
+}
+
 static struct snd_pcm_ops sst_platform_ops = {
        .open = sst_platform_open,
        .close = sst_platform_close,
@@ -384,6 +389,7 @@ static struct snd_pcm_ops sst_platform_ops = {
        .trigger = sst_platform_pcm_trigger,
        .pointer = sst_platform_pcm_pointer,
        .hw_params = sst_platform_pcm_hw_params,
+       .hw_free = sst_platform_pcm_hw_free,
 };
 
 static void sst_pcm_free(struct snd_pcm *pcm)
index f6b3a3ce59196078e11c670c94a417edc1517b5b..0e80daee8b6f5e01c669bbd45fc59a168e51e873 100644 (file)
@@ -236,18 +236,18 @@ static struct snd_soc_dai_link goni_dai[] = {
        .name = "WM8994",
        .stream_name = "WM8994 HiFi",
        .cpu_dai_name = "samsung-i2s.0",
-       .codec_dai_name = "wm8994-hifi",
+       .codec_dai_name = "wm8994-aif1",
        .platform_name = "samsung-audio",
-       .codec_name = "wm8994-codec.0-0x1a",
+       .codec_name = "wm8994-codec.0-001a",
        .init = goni_wm8994_init,
        .ops = &goni_hifi_ops,
 }, {
        .name = "WM8994 Voice",
        .stream_name = "Voice",
        .cpu_dai_name = "goni-voice-dai",
-       .codec_dai_name = "wm8994-voice",
+       .codec_dai_name = "wm8994-aif2",
        .platform_name = "samsung-audio",
-       .codec_name = "wm8994-codec.0-0x1a",
+       .codec_name = "wm8994-codec.0-001a",
        .ops = &goni_voice_ops,
 },
 };
index d8562ce4de7ab9ea5796e0521261fe520d8a3330..dd55d1069468d6a41dd60ae6263f26e3f4efca45 100644 (file)
@@ -3291,6 +3291,8 @@ int snd_soc_register_card(struct snd_soc_card *card)
        if (!card->name || !card->dev)
                return -EINVAL;
 
+       dev_set_drvdata(card->dev, card);
+
        snd_soc_initialize_card_lists(card);
 
        soc_init_card_debugfs(card);
index 5b792d2c8061aa0292595186d8cf60fadf8456dc..f079b5e2ab287cd0d726216bd6b656fd15c2e6d6 100644 (file)
@@ -176,9 +176,11 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof
                        if (!rate)
                                continue;
                        /* C-Media CM6501 mislabels its 96 kHz altsetting */
+                       /* Terratec Aureon 7.1 USB C-Media 6206, too */
                        if (rate == 48000 && nr_rates == 1 &&
                            (chip->usb_id == USB_ID(0x0d8c, 0x0201) ||
-                            chip->usb_id == USB_ID(0x0d8c, 0x0102)) &&
+                            chip->usb_id == USB_ID(0x0d8c, 0x0102) ||
+                            chip->usb_id == USB_ID(0x0ccd, 0x00b1)) &&
                            fp->altsetting == 5 && fp->maxpacksize == 392)
                                rate = 96000;
                        /* Creative VF0470 Live Cam reports 16 kHz instead of 8kHz */
index ec07e62e53f317e5751b1dcd7a1427ed3288b6a6..1b94ec3a3368bd01c622f686edc74957cee4d36d 100644 (file)
@@ -533,6 +533,7 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev,
 
        case USB_ID(0x0d8c, 0x0102):
                /* C-Media CM6206 / CM106-Like Sound Device */
+       case USB_ID(0x0ccd, 0x00b1): /* Terratec Aureon 7.1 USB */
                return snd_usb_cm6206_boot_quirk(dev);
 
        case USB_ID(0x133e, 0x0815):
index 207dee5c5b163fd51ceafbb6b171df9c0708893f..0c542563ea6c2206aecc883c0be8a70dfad74919 100644 (file)
@@ -35,15 +35,21 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
                                  -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
                                  -e s/sh[234].*/sh/ )
 
+CC = $(CROSS_COMPILE)gcc
+AR = $(CROSS_COMPILE)ar
+
 # Additional ARCH settings for x86
 ifeq ($(ARCH),i386)
         ARCH := x86
 endif
 ifeq ($(ARCH),x86_64)
-       RAW_ARCH := x86_64
-        ARCH := x86
-       ARCH_CFLAGS := -DARCH_X86_64
-       ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S
+       ARCH := x86
+       IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1)
+       ifeq (${IS_X86_64}, 1)
+               RAW_ARCH := x86_64
+               ARCH_CFLAGS := -DARCH_X86_64
+               ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S
+       endif
 endif
 
 #
@@ -119,8 +125,6 @@ lib = lib
 
 export prefix bindir sharedir sysconfdir
 
-CC = $(CROSS_COMPILE)gcc
-AR = $(CROSS_COMPILE)ar
 RM = rm -f
 MKDIR = mkdir
 FIND = find
index 416538248a4bb04eaf2e28a1a2f58f062e6372fc..0974f957b8fa2634409ff46e8de915b1380e87a8 100644 (file)
@@ -427,7 +427,7 @@ static void mmap_read_all(void)
 {
        int i;
 
-       for (i = 0; i < evsel_list->cpus->nr; i++) {
+       for (i = 0; i < evsel_list->nr_mmaps; i++) {
                if (evsel_list->mmap[i].base)
                        mmap_read(&evsel_list->mmap[i]);
        }
index 11e3c84583622bac20ed799cfab29e7066c6d6eb..2f9a337b182fe98e66a70128a6661c2be3e0342c 100644 (file)
@@ -549,7 +549,7 @@ static int test__basic_mmap(void)
                        ++foo;
                }
 
-       while ((event = perf_evlist__read_on_cpu(evlist, 0)) != NULL) {
+       while ((event = perf_evlist__mmap_read(evlist, 0)) != NULL) {
                struct perf_sample sample;
 
                if (event->header.type != PERF_RECORD_SAMPLE) {
index 7e3d6e310bf839f09e5d6afed39c8c1530a13807..ebfc7cf5f63bd94256165ed1d5b2a5f2e1c25ee0 100644 (file)
@@ -801,12 +801,12 @@ static void perf_event__process_sample(const union perf_event *event,
        }
 }
 
-static void perf_session__mmap_read_cpu(struct perf_session *self, int cpu)
+static void perf_session__mmap_read_idx(struct perf_session *self, int idx)
 {
        struct perf_sample sample;
        union perf_event *event;
 
-       while ((event = perf_evlist__read_on_cpu(top.evlist, cpu)) != NULL) {
+       while ((event = perf_evlist__mmap_read(top.evlist, idx)) != NULL) {
                perf_session__parse_sample(self, event, &sample);
 
                if (event->header.type == PERF_RECORD_SAMPLE)
@@ -820,8 +820,8 @@ static void perf_session__mmap_read(struct perf_session *self)
 {
        int i;
 
-       for (i = 0; i < top.evlist->cpus->nr; i++)
-               perf_session__mmap_read_cpu(self, i);
+       for (i = 0; i < top.evlist->nr_mmaps; i++)
+               perf_session__mmap_read_idx(self, i);
 }
 
 static void start_counters(struct perf_evlist *evlist)
index 45da8d186b492d4f82512862aef867acd4649f09..23eb22b05d271bf2d317bffde819ceecd790a318 100644 (file)
@@ -166,11 +166,11 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
        return NULL;
 }
 
-union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
+union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 {
        /* XXX Move this to perf.c, making it generally available */
        unsigned int page_size = sysconf(_SC_PAGE_SIZE);
-       struct perf_mmap *md = &evlist->mmap[cpu];
+       struct perf_mmap *md = &evlist->mmap[idx];
        unsigned int head = perf_mmap__read_head(md);
        unsigned int old = md->prev;
        unsigned char *data = md->base + page_size;
@@ -235,31 +235,37 @@ union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
 
 void perf_evlist__munmap(struct perf_evlist *evlist)
 {
-       int cpu;
+       int i;
 
-       for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
-               if (evlist->mmap[cpu].base != NULL) {
-                       munmap(evlist->mmap[cpu].base, evlist->mmap_len);
-                       evlist->mmap[cpu].base = NULL;
+       for (i = 0; i < evlist->nr_mmaps; i++) {
+               if (evlist->mmap[i].base != NULL) {
+                       munmap(evlist->mmap[i].base, evlist->mmap_len);
+                       evlist->mmap[i].base = NULL;
                }
        }
+
+       free(evlist->mmap);
+       evlist->mmap = NULL;
 }
 
 int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 {
-       evlist->mmap = zalloc(evlist->cpus->nr * sizeof(struct perf_mmap));
+       evlist->nr_mmaps = evlist->cpus->nr;
+       if (evlist->cpus->map[0] == -1)
+               evlist->nr_mmaps = evlist->threads->nr;
+       evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
        return evlist->mmap != NULL ? 0 : -ENOMEM;
 }
 
 static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel,
-                              int cpu, int prot, int mask, int fd)
+                              int idx, int prot, int mask, int fd)
 {
-       evlist->mmap[cpu].prev = 0;
-       evlist->mmap[cpu].mask = mask;
-       evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
+       evlist->mmap[idx].prev = 0;
+       evlist->mmap[idx].mask = mask;
+       evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
                                      MAP_SHARED, fd, 0);
-       if (evlist->mmap[cpu].base == MAP_FAILED) {
-               if (evlist->cpus->map[cpu] == -1 && evsel->attr.inherit)
+       if (evlist->mmap[idx].base == MAP_FAILED) {
+               if (evlist->cpus->map[idx] == -1 && evsel->attr.inherit)
                        ui__warning("Inherit is not allowed on per-task "
                                    "events using mmap.\n");
                return -1;
@@ -269,6 +275,86 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *ev
        return 0;
 }
 
+static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
+{
+       struct perf_evsel *evsel;
+       int cpu, thread;
+
+       for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+               int output = -1;
+
+               for (thread = 0; thread < evlist->threads->nr; thread++) {
+                       list_for_each_entry(evsel, &evlist->entries, node) {
+                               int fd = FD(evsel, cpu, thread);
+
+                               if (output == -1) {
+                                       output = fd;
+                                       if (__perf_evlist__mmap(evlist, evsel, cpu,
+                                                               prot, mask, output) < 0)
+                                               goto out_unmap;
+                               } else {
+                                       if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
+                                               goto out_unmap;
+                               }
+
+                               if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+                                   perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
+                                       goto out_unmap;
+                       }
+               }
+       }
+
+       return 0;
+
+out_unmap:
+       for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+               if (evlist->mmap[cpu].base != NULL) {
+                       munmap(evlist->mmap[cpu].base, evlist->mmap_len);
+                       evlist->mmap[cpu].base = NULL;
+               }
+       }
+       return -1;
+}
+
+static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
+{
+       struct perf_evsel *evsel;
+       int thread;
+
+       for (thread = 0; thread < evlist->threads->nr; thread++) {
+               int output = -1;
+
+               list_for_each_entry(evsel, &evlist->entries, node) {
+                       int fd = FD(evsel, 0, thread);
+
+                       if (output == -1) {
+                               output = fd;
+                               if (__perf_evlist__mmap(evlist, evsel, thread,
+                                                       prot, mask, output) < 0)
+                                       goto out_unmap;
+                       } else {
+                               if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
+                                       goto out_unmap;
+                       }
+
+                       if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+                           perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
+                               goto out_unmap;
+               }
+       }
+
+       return 0;
+
+out_unmap:
+       for (thread = 0; thread < evlist->threads->nr; thread++) {
+               if (evlist->mmap[thread].base != NULL) {
+                       munmap(evlist->mmap[thread].base, evlist->mmap_len);
+                       evlist->mmap[thread].base = NULL;
+               }
+       }
+       return -1;
+}
+
 /** perf_evlist__mmap - Create per cpu maps to receive events
  *
  * @evlist - list of events
@@ -287,11 +373,11 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *ev
 int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
 {
        unsigned int page_size = sysconf(_SC_PAGE_SIZE);
-       int mask = pages * page_size - 1, cpu;
-       struct perf_evsel *first_evsel, *evsel;
+       int mask = pages * page_size - 1;
+       struct perf_evsel *evsel;
        const struct cpu_map *cpus = evlist->cpus;
        const struct thread_map *threads = evlist->threads;
-       int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
+       int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
 
        if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
                return -ENOMEM;
@@ -301,43 +387,18 @@ int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
 
        evlist->overwrite = overwrite;
        evlist->mmap_len = (pages + 1) * page_size;
-       first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
 
        list_for_each_entry(evsel, &evlist->entries, node) {
                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    evsel->sample_id == NULL &&
                    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
                        return -ENOMEM;
-
-               for (cpu = 0; cpu < cpus->nr; cpu++) {
-                       for (thread = 0; thread < threads->nr; thread++) {
-                               int fd = FD(evsel, cpu, thread);
-
-                               if (evsel->idx || thread) {
-                                       if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
-                                                 FD(first_evsel, cpu, 0)) != 0)
-                                               goto out_unmap;
-                               } else if (__perf_evlist__mmap(evlist, evsel, cpu,
-                                                              prot, mask, fd) < 0)
-                                       goto out_unmap;
-
-                               if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
-                                   perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
-                                       goto out_unmap;
-                       }
-               }
        }
 
-       return 0;
+       if (evlist->cpus->map[0] == -1)
+               return perf_evlist__mmap_per_thread(evlist, prot, mask);
 
-out_unmap:
-       for (cpu = 0; cpu < cpus->nr; cpu++) {
-               if (evlist->mmap[cpu].base != NULL) {
-                       munmap(evlist->mmap[cpu].base, evlist->mmap_len);
-                       evlist->mmap[cpu].base = NULL;
-               }
-       }
-       return -1;
+       return perf_evlist__mmap_per_cpu(evlist, prot, mask);
 }
 
 int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
@@ -348,7 +409,7 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
        if (evlist->threads == NULL)
                return -1;
 
-       if (target_tid != -1)
+       if (cpu_list == NULL && target_tid != -1)
                evlist->cpus = cpu_map__dummy_new();
        else
                evlist->cpus = cpu_map__new(cpu_list);
index 8b1cb7a4c5f1edbe2d5b3beafce04d2b0606049e..7109d7add14e8ae782e63096b343efd167dfeec4 100644 (file)
@@ -17,6 +17,7 @@ struct perf_evlist {
        struct hlist_head heads[PERF_EVLIST__HLIST_SIZE];
        int              nr_entries;
        int              nr_fds;
+       int              nr_mmaps;
        int              mmap_len;
        bool             overwrite;
        union perf_event event_copy;
@@ -46,7 +47,7 @@ void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
 
 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
 
-union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *self, int cpu);
+union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
 
 int perf_evlist__alloc_mmap(struct perf_evlist *evlist);
 int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
index f5e38451fdc505e329ac77415e9e8aba5fbe7ebb..99c722672f842650f8e0ee5d7d4225435b7a4d0d 100644 (file)
@@ -680,7 +680,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
                                         &cpu, &sample_id_all))
                return NULL;
 
-       event = perf_evlist__read_on_cpu(evlist, cpu);
+       event = perf_evlist__mmap_read(evlist, cpu);
        if (event != NULL) {
                struct perf_evsel *first;
                PyObject *pyevent = pyrf_event__new(event);