git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
Merge tag 'powerpc-4.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc...
author: Linus Torvalds <torvalds@linux-foundation.org>
Fri, 15 Jan 2016 21:18:47 +0000 (13:18 -0800)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Fri, 15 Jan 2016 21:18:47 +0000 (13:18 -0800)
Pull powerpc updates from Michael Ellerman:
 "Core:
   - Ground work for the new Power9 MMU from Aneesh Kumar K.V
   - Optimise FP/VMX/VSX context switching from Anton Blanchard

  Misc:
   - Various cleanups from Krzysztof Kozlowski, John Ogness, Rashmica
     Gupta, Russell Currey, Gavin Shan, Daniel Axtens, Michael Neuling,
     Andrew Donnellan
   - Allow wrapper to work on non-english system from Laurent Vivier
   - Add rN aliases to the pt_regs_offset table from Rashmica Gupta
   - Fix module autoload for rackmeter & axonram drivers from Luis de
     Bethencourt
   - Include KVM guest test in all interrupt vectors from Paul Mackerras
   - Fix DSCR inheritance over fork() from Anton Blanchard
   - Make value-returning atomics & {cmp}xchg* & their atomic_ versions
     fully ordered from Boqun Feng
   - Print MSR TM bits in oops messages from Michael Neuling
   - Add TM signal return & invalid stack selftests from Michael Neuling
   - Limit EPOW reset event warnings from Vipin K Parashar
   - Remove the Cell QPACE code from Rashmica Gupta
   - Append linux_banner to exception information in xmon from Rashmica
     Gupta
   - Add selftest to check if VSRs are corrupted from Rashmica Gupta
   - Remove broken GregorianDay() from Daniel Axtens
   - Import Anton's context_switch2 benchmark into selftests from
     Michael Ellerman
   - Add selftest script to test HMI functionality from Daniel Axtens
   - Remove obsolete OPAL v2 support from Stewart Smith
   - Make enter_rtas() private from Michael Ellerman
   - PPR exception cleanups from Michael Ellerman
   - Add page soft dirty tracking from Laurent Dufour
   - Add support for Nvlink NPUs from Alistair Popple
   - Add support for kexec on 476fpe from Alistair Popple
   - Enable kernel CPU dlpar from sysfs from Nathan Fontenot
   - Copy only required pieces of the mm_context_t to the paca from
     Michael Neuling
   - Add a kmsg_dumper that flushes OPAL console output on panic from
     Russell Currey
   - Implement save_stack_trace_regs() to enable kprobe stack tracing
     from Steven Rostedt
   - Add HWCAP bits for Power9 from Michael Ellerman
   - Fix _PAGE_PTE breaking swapoff from Aneesh Kumar K.V
   - Fix _PAGE_SWP_SOFT_DIRTY breaking swapoff from Hugh Dickins
   - scripts/recordmcount.pl: support data in text section on powerpc
     from Ulrich Weigand
   - Handle R_PPC64_ENTRY relocations in modules from Ulrich Weigand

  cxl:
   - cxl: Fix possible idr warning when contexts are released from
     Vaibhav Jain
   - cxl: use correct operator when writing pcie config space values
     from Andrew Donnellan
   - cxl: Fix DSI misses when the context owning task exits from Vaibhav
     Jain
   - cxl: fix build for GCC 4.6.x from Brian Norris
   - cxl: use -Werror only with CONFIG_PPC_WERROR from Brian Norris
   - cxl: Enable PCI device ID for future IBM CXL adapter from Uma
     Krishnan

  Freescale:
   - Freescale updates from Scott: Highlights include moving QE code out
     of arch/powerpc (to be shared with arm), device tree updates, and
     minor fixes"

* tag 'powerpc-4.5-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (149 commits)
  powerpc/module: Handle R_PPC64_ENTRY relocations
  scripts/recordmcount.pl: support data in text section on powerpc
  powerpc/powernv: Fix OPAL_CONSOLE_FLUSH prototype and usages
  powerpc/mm: fix _PAGE_SWP_SOFT_DIRTY breaking swapoff
  powerpc/mm: Fix _PAGE_PTE breaking swapoff
  cxl: Enable PCI device ID for future IBM CXL adapter
  cxl: use -Werror only with CONFIG_PPC_WERROR
  cxl: fix build for GCC 4.6.x
  powerpc: Add HWCAP bits for Power9
  powerpc/powernv: Reserve PE#0 on NPU
  powerpc/powernv: Change NPU PE# assignment
  powerpc/powernv: Fix update of NVLink DMA mask
  powerpc/powernv: Remove misleading comment in pci.c
  powerpc: Implement save_stack_trace_regs() to enable kprobe stack tracing
  powerpc: Fix build break due to paca mm_context_t changes
  cxl: Fix DSI misses when the context owning task exits
  MAINTAINERS: Update Scott Wood's e-mail address
  powerpc/powernv: Fix minor off-by-one error in opal_mce_check_early_recovery()
  powerpc: Fix style of self-test config prompts
  powerpc/powernv: Only delay opal_rtc_read() retry when necessary
  ...

251 files changed:
Documentation/devicetree/bindings/serial/8250.txt
Documentation/devicetree/bindings/thermal/qoriq-thermal.txt [new file with mode: 0644]
Documentation/kernel-parameters.txt
MAINTAINERS
arch/powerpc/Kconfig
arch/powerpc/Kconfig.debug
arch/powerpc/boot/Makefile
arch/powerpc/boot/dts/fsl/b4si-post.dtsi
arch/powerpc/boot/dts/fsl/bsc9132qds.dts
arch/powerpc/boot/dts/fsl/bsc9132si-post.dtsi
arch/powerpc/boot/dts/fsl/bsc9132si-pre.dtsi
arch/powerpc/boot/dts/fsl/p1010rdb.dtsi
arch/powerpc/boot/dts/fsl/t1023rdb.dts
arch/powerpc/boot/dts/fsl/t1023si-post.dtsi
arch/powerpc/boot/dts/fsl/t1024qds.dts
arch/powerpc/boot/dts/fsl/t1024rdb.dts
arch/powerpc/boot/dts/fsl/t1024si-post.dtsi
arch/powerpc/boot/dts/fsl/t102xsi-pre.dtsi
arch/powerpc/boot/dts/fsl/t1040d4rdb.dts
arch/powerpc/boot/dts/fsl/t1040qds.dts
arch/powerpc/boot/dts/fsl/t1040rdb.dts
arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
arch/powerpc/boot/dts/fsl/t1042d4rdb.dts
arch/powerpc/boot/dts/fsl/t1042qds.dts
arch/powerpc/boot/dts/fsl/t1042rdb.dts
arch/powerpc/boot/dts/fsl/t1042rdb_pi.dts
arch/powerpc/boot/dts/fsl/t1042si-post.dtsi
arch/powerpc/boot/dts/fsl/t104xsi-pre.dtsi
arch/powerpc/boot/wrapper
arch/powerpc/configs/mpc85xx_basic_defconfig
arch/powerpc/configs/ppc64_defconfig
arch/powerpc/crypto/aes-spe-glue.c
arch/powerpc/crypto/sha1-spe-glue.c
arch/powerpc/crypto/sha256-spe-glue.c
arch/powerpc/include/asm/book3s/32/hash.h [moved from arch/powerpc/include/asm/pte-hash32.h with 93% similarity]
arch/powerpc/include/asm/book3s/32/pgtable.h [new file with mode: 0644]
arch/powerpc/include/asm/book3s/64/hash-4k.h [new file with mode: 0644]
arch/powerpc/include/asm/book3s/64/hash-64k.h [new file with mode: 0644]
arch/powerpc/include/asm/book3s/64/hash.h [new file with mode: 0644]
arch/powerpc/include/asm/book3s/64/pgtable.h [new file with mode: 0644]
arch/powerpc/include/asm/book3s/pgtable.h [new file with mode: 0644]
arch/powerpc/include/asm/cmpxchg.h
arch/powerpc/include/asm/cpm.h
arch/powerpc/include/asm/exception-64s.h
arch/powerpc/include/asm/firmware.h
arch/powerpc/include/asm/io.h
arch/powerpc/include/asm/mmu-hash64.h
arch/powerpc/include/asm/nohash/32/pgtable.h [moved from arch/powerpc/include/asm/pgtable-ppc32.h with 96% similarity]
arch/powerpc/include/asm/nohash/32/pte-40x.h [moved from arch/powerpc/include/asm/pte-40x.h with 95% similarity]
arch/powerpc/include/asm/nohash/32/pte-44x.h [moved from arch/powerpc/include/asm/pte-44x.h with 96% similarity]
arch/powerpc/include/asm/nohash/32/pte-8xx.h [moved from arch/powerpc/include/asm/pte-8xx.h with 95% similarity]
arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h [moved from arch/powerpc/include/asm/pte-fsl-booke.h with 88% similarity]
arch/powerpc/include/asm/nohash/64/pgtable-4k.h [moved from arch/powerpc/include/asm/pgtable-ppc64-4k.h with 92% similarity]
arch/powerpc/include/asm/nohash/64/pgtable-64k.h [moved from arch/powerpc/include/asm/pgtable-ppc64-64k.h with 64% similarity]
arch/powerpc/include/asm/nohash/64/pgtable.h [moved from arch/powerpc/include/asm/pgtable-ppc64.h with 51% similarity]
arch/powerpc/include/asm/nohash/pgtable.h [new file with mode: 0644]
arch/powerpc/include/asm/nohash/pte-book3e.h [moved from arch/powerpc/include/asm/pte-book3e.h with 95% similarity]
arch/powerpc/include/asm/opal-api.h
arch/powerpc/include/asm/opal.h
arch/powerpc/include/asm/paca.h
arch/powerpc/include/asm/page.h
arch/powerpc/include/asm/pci-bridge.h
arch/powerpc/include/asm/pci.h
arch/powerpc/include/asm/pgalloc-32.h
arch/powerpc/include/asm/pgalloc-64.h
arch/powerpc/include/asm/pgtable.h
arch/powerpc/include/asm/plpar_wrappers.h
arch/powerpc/include/asm/ppc_asm.h
arch/powerpc/include/asm/processor.h
arch/powerpc/include/asm/pte-common.h
arch/powerpc/include/asm/pte-hash64-4k.h [deleted file]
arch/powerpc/include/asm/pte-hash64-64k.h [deleted file]
arch/powerpc/include/asm/pte-hash64.h [deleted file]
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/rtas.h
arch/powerpc/include/asm/switch_to.h
arch/powerpc/include/asm/synch.h
arch/powerpc/include/asm/time.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/asm/vdso_datapage.h
arch/powerpc/include/uapi/asm/cputable.h
arch/powerpc/include/uapi/asm/elf.h
arch/powerpc/kernel/align.c
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/fpu.S
arch/powerpc/kernel/head_fsl_booke.S
arch/powerpc/kernel/idle_power7.S
arch/powerpc/kernel/misc_32.S
arch/powerpc/kernel/module_64.c
arch/powerpc/kernel/ppc_ksyms.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/kernel/rtas.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/stacktrace.c
arch/powerpc/kernel/swsusp.c
arch/powerpc/kernel/systbl_chk.c
arch/powerpc/kernel/systbl_chk.sh
arch/powerpc/kernel/time.c
arch/powerpc/kernel/traps.c
arch/powerpc/kernel/vdso.c
arch/powerpc/kernel/vdso32/datapage.S
arch/powerpc/kernel/vdso64/datapage.S
arch/powerpc/kernel/vector.S
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_paired_singles.c
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/booke.c
arch/powerpc/lib/vmx-helper.c
arch/powerpc/lib/xor_vmx.c
arch/powerpc/mm/40x_mmu.c
arch/powerpc/mm/Makefile
arch/powerpc/mm/hash64_4k.c [new file with mode: 0644]
arch/powerpc/mm/hash64_64k.c [new file with mode: 0644]
arch/powerpc/mm/hash_low_64.S [deleted file]
arch/powerpc/mm/hash_native_64.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/hugepage-hash64.c
arch/powerpc/mm/hugetlbpage-book3e.c
arch/powerpc/mm/hugetlbpage-hash64.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/init_64.c
arch/powerpc/mm/pgtable.c
arch/powerpc/mm/pgtable_64.c
arch/powerpc/mm/slb.c
arch/powerpc/mm/slice.c
arch/powerpc/platforms/83xx/km83xx.c
arch/powerpc/platforms/83xx/misc.c
arch/powerpc/platforms/83xx/mpc832x_mds.c
arch/powerpc/platforms/83xx/mpc832x_rdb.c
arch/powerpc/platforms/83xx/mpc836x_mds.c
arch/powerpc/platforms/83xx/mpc836x_rdk.c
arch/powerpc/platforms/85xx/bsc913x_qds.c
arch/powerpc/platforms/85xx/common.c
arch/powerpc/platforms/85xx/corenet_generic.c
arch/powerpc/platforms/85xx/mpc85xx_ads.c
arch/powerpc/platforms/85xx/mpc85xx_mds.c
arch/powerpc/platforms/85xx/mpc85xx_rdb.c
arch/powerpc/platforms/85xx/twr_p102x.c
arch/powerpc/platforms/Kconfig
arch/powerpc/platforms/cell/Kconfig
arch/powerpc/platforms/cell/Makefile
arch/powerpc/platforms/cell/qpace_setup.c [deleted file]
arch/powerpc/platforms/cell/spufs/run.c
arch/powerpc/platforms/maple/time.c
arch/powerpc/platforms/powermac/bootx_init.c
arch/powerpc/platforms/powermac/pic.c
arch/powerpc/platforms/powernv/Makefile
arch/powerpc/platforms/powernv/eeh-powernv.c
arch/powerpc/platforms/powernv/idle.c
arch/powerpc/platforms/powernv/npu-dma.c [new file with mode: 0644]
arch/powerpc/platforms/powernv/opal-kmsg.c [new file with mode: 0644]
arch/powerpc/platforms/powernv/opal-prd.c
arch/powerpc/platforms/powernv/opal-rtc.c
arch/powerpc/platforms/powernv/opal-wrappers.S
arch/powerpc/platforms/powernv/opal-xscom.c
arch/powerpc/platforms/powernv/opal.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/pci.c
arch/powerpc/platforms/powernv/pci.h
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/platforms/powernv/smp.c
arch/powerpc/platforms/pseries/dlpar.c
arch/powerpc/platforms/pseries/hotplug-cpu.c
arch/powerpc/platforms/pseries/lpar.c
arch/powerpc/platforms/pseries/pseries.h
arch/powerpc/platforms/pseries/ras.c
arch/powerpc/sysdev/Makefile
arch/powerpc/sysdev/axonram.c
arch/powerpc/sysdev/cpm_common.c
arch/powerpc/sysdev/fsl_lbc.c
arch/powerpc/sysdev/fsl_pci.c
arch/powerpc/xmon/xmon.c
drivers/cpufreq/powernv-cpufreq.c
drivers/cpuidle/cpuidle-powernv.c
drivers/crypto/vmx/aes.c
drivers/crypto/vmx/aes_cbc.c
drivers/crypto/vmx/aes_ctr.c
drivers/crypto/vmx/ghash.c
drivers/macintosh/rack-meter.c
drivers/macintosh/via-pmu.c
drivers/misc/cxl/Makefile
drivers/misc/cxl/api.c
drivers/misc/cxl/context.c
drivers/misc/cxl/cxl.h
drivers/misc/cxl/fault.c
drivers/misc/cxl/file.c
drivers/misc/cxl/pci.c
drivers/misc/cxl/vphb.c
drivers/net/ethernet/freescale/fsl_pq_mdio.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/freescale/ucc_geth.h
drivers/rtc/rtc-opal.c
drivers/soc/Kconfig
drivers/soc/Makefile
drivers/soc/fsl/Makefile [new file with mode: 0644]
drivers/soc/fsl/qe/Kconfig [moved from arch/powerpc/sysdev/qe_lib/Kconfig with 54% similarity]
drivers/soc/fsl/qe/Makefile [moved from arch/powerpc/sysdev/qe_lib/Makefile with 69% similarity]
drivers/soc/fsl/qe/gpio.c [moved from arch/powerpc/sysdev/qe_lib/gpio.c with 99% similarity]
drivers/soc/fsl/qe/qe.c [moved from arch/powerpc/sysdev/qe_lib/qe.c with 98% similarity]
drivers/soc/fsl/qe/qe_common.c [new file with mode: 0644]
drivers/soc/fsl/qe/qe_ic.c [moved from arch/powerpc/sysdev/qe_lib/qe_ic.c with 99% similarity]
drivers/soc/fsl/qe/qe_ic.h [moved from arch/powerpc/sysdev/qe_lib/qe_ic.h with 97% similarity]
drivers/soc/fsl/qe/qe_io.c [moved from arch/powerpc/sysdev/qe_lib/qe_io.c with 99% similarity]
drivers/soc/fsl/qe/ucc.c [moved from arch/powerpc/sysdev/qe_lib/ucc.c with 98% similarity]
drivers/soc/fsl/qe/ucc_fast.c [moved from arch/powerpc/sysdev/qe_lib/ucc_fast.c with 98% similarity]
drivers/soc/fsl/qe/ucc_slow.c [moved from arch/powerpc/sysdev/qe_lib/ucc_slow.c with 98% similarity]
drivers/soc/fsl/qe/usb.c [moved from arch/powerpc/sysdev/qe_lib/usb.c with 96% similarity]
drivers/spi/spi-fsl-cpm.c
drivers/tty/serial/ucc_uart.c
drivers/usb/gadget/udc/fsl_qe_udc.c
drivers/usb/host/fhci-hcd.c
drivers/usb/host/fhci-hub.c
drivers/usb/host/fhci-sched.c
drivers/usb/host/fhci.h
include/linux/genalloc.h
include/soc/fsl/qe/immap_qe.h [moved from arch/powerpc/include/asm/immap_qe.h with 100% similarity]
include/soc/fsl/qe/qe.h [moved from arch/powerpc/include/asm/qe.h with 95% similarity]
include/soc/fsl/qe/qe_ic.h [moved from arch/powerpc/include/asm/qe_ic.h with 100% similarity]
include/soc/fsl/qe/ucc.h [moved from arch/powerpc/include/asm/ucc.h with 96% similarity]
include/soc/fsl/qe/ucc_fast.h [moved from arch/powerpc/include/asm/ucc_fast.h with 98% similarity]
include/soc/fsl/qe/ucc_slow.h [moved from arch/powerpc/include/asm/ucc_slow.h with 99% similarity]
lib/genalloc.c
lib/raid6/altivec.uc
scripts/recordmcount.pl
tools/testing/selftests/powerpc/benchmarks/.gitignore
tools/testing/selftests/powerpc/benchmarks/Makefile
tools/testing/selftests/powerpc/benchmarks/context_switch.c [new file with mode: 0644]
tools/testing/selftests/powerpc/dscr/dscr_inherit_exec_test.c
tools/testing/selftests/powerpc/dscr/dscr_inherit_test.c
tools/testing/selftests/powerpc/harness.c
tools/testing/selftests/powerpc/pmu/Makefile
tools/testing/selftests/powerpc/pmu/ebb/Makefile
tools/testing/selftests/powerpc/pmu/ebb/ebb.c
tools/testing/selftests/powerpc/pmu/lib.c
tools/testing/selftests/powerpc/pmu/lib.h
tools/testing/selftests/powerpc/scripts/hmi.sh [new file with mode: 0755]
tools/testing/selftests/powerpc/tm/.gitignore
tools/testing/selftests/powerpc/tm/Makefile
tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
tools/testing/selftests/powerpc/tm/tm-signal-msr-resv.c [new file with mode: 0644]
tools/testing/selftests/powerpc/tm/tm-signal-stack.c [new file with mode: 0644]
tools/testing/selftests/powerpc/tm/tm-syscall.c
tools/testing/selftests/powerpc/tm/tm-vmxcopy.c [new file with mode: 0644]
tools/testing/selftests/powerpc/tm/tm.h [new file with mode: 0644]
tools/testing/selftests/powerpc/utils.c [new file with mode: 0644]
tools/testing/selftests/powerpc/utils.h

index 91d5ab0e60fc4eda6894ea94213bfb1ca8eb4572..936ab5b87324ce7cf022320a777342d2507dfd19 100644 (file)
@@ -14,7 +14,6 @@ Required properties:
          tegra132, or tegra210.
        - "nxp,lpc3220-uart"
        - "ralink,rt2880-uart"
-       - "ibm,qpace-nwp-serial"
        - "altr,16550-FIFO32"
        - "altr,16550-FIFO64"
        - "altr,16550-FIFO128"
diff --git a/Documentation/devicetree/bindings/thermal/qoriq-thermal.txt b/Documentation/devicetree/bindings/thermal/qoriq-thermal.txt
new file mode 100644 (file)
index 0000000..66223d5
--- /dev/null
@@ -0,0 +1,63 @@
+* Thermal Monitoring Unit (TMU) on Freescale QorIQ SoCs
+
+Required properties:
+- compatible : Must include "fsl,qoriq-tmu". The version of the device is
+       determined by the TMU IP Block Revision Register (IPBRR0) at
+       offset 0x0BF8.
+       Table of correspondences between IPBRR0 values and example  chips:
+               Value           Device
+               ----------      -----
+               0x01900102      T1040
+- reg : Address range of TMU registers.
+- interrupts : Contains the interrupt for TMU.
+- fsl,tmu-range : The values to be programmed into TTRnCR, as specified by
+       the SoC reference manual. The first cell is TTR0CR, the second is
+       TTR1CR, etc.
+- fsl,tmu-calibration : A list of cell pairs containing temperature
+       calibration data, as specified by the SoC reference manual.
+       The first cell of each pair is the value to be written to TTCFGR,
+       and the second is the value to be written to TSCFGR.
+
+Example:
+
+tmu@f0000 {
+       compatible = "fsl,qoriq-tmu";
+       reg = <0xf0000 0x1000>;
+       interrupts = <18 2 0 0>;
+       fsl,tmu-range = <0x000a0000 0x00090026 0x0008004a 0x0001006a>;
+       fsl,tmu-calibration = <0x00000000 0x00000025
+                              0x00000001 0x00000028
+                              0x00000002 0x0000002d
+                              0x00000003 0x00000031
+                              0x00000004 0x00000036
+                              0x00000005 0x0000003a
+                              0x00000006 0x00000040
+                              0x00000007 0x00000044
+                              0x00000008 0x0000004a
+                              0x00000009 0x0000004f
+                              0x0000000a 0x00000054
+
+                              0x00010000 0x0000000d
+                              0x00010001 0x00000013
+                              0x00010002 0x00000019
+                              0x00010003 0x0000001f
+                              0x00010004 0x00000025
+                              0x00010005 0x0000002d
+                              0x00010006 0x00000033
+                              0x00010007 0x00000043
+                              0x00010008 0x0000004b
+                              0x00010009 0x00000053
+
+                              0x00020000 0x00000010
+                              0x00020001 0x00000017
+                              0x00020002 0x0000001f
+                              0x00020003 0x00000029
+                              0x00020004 0x00000031
+                              0x00020005 0x0000003c
+                              0x00020006 0x00000042
+                              0x00020007 0x0000004d
+                              0x00020008 0x00000056
+
+                              0x00030000 0x00000012
+                              0x00030001 0x0000001d>;
+};
index 168fd79dc697a06648b28a1125a07265880b58aa..5a6235ed3663dd3eb667bedbee1fa0b9bdcc8ce3 100644 (file)
@@ -2993,6 +2993,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        may be specified.
                        Format: <port>,<port>....
 
+       ppc_strict_facility_enable
+                       [PPC] This option catches any kernel floating point,
+                       Altivec, VSX and SPE outside of regions specifically
+                       allowed (eg kernel_enable_fpu()/kernel_disable_fpu()).
+                       There is some performance impact when enabling this.
+
        print-fatal-signals=
                        [KNL] debug: print fatal signals
 
index d14baa149e2d73cbbf711ba7c8e902229208a4df..04d62b1e8b173de9520dd7aba2a17b76dc447dfa 100644 (file)
@@ -4490,8 +4490,9 @@ F:        include/linux/fs_enet_pd.h
 FREESCALE QUICC ENGINE LIBRARY
 L:     linuxppc-dev@lists.ozlabs.org
 S:     Orphan
-F:     arch/powerpc/sysdev/qe_lib/
-F:     arch/powerpc/include/asm/*qe.h
+F:     drivers/soc/fsl/qe/
+F:     include/soc/fsl/*qe*.h
+F:     include/soc/fsl/*ucc*.h
 
 FREESCALE USB PERIPHERAL DRIVERS
 M:     Li Yang <leoli@freescale.com>
@@ -6444,7 +6445,7 @@ S:        Maintained
 F:     arch/powerpc/platforms/8xx/
 
 LINUX FOR POWERPC EMBEDDED PPC83XX AND PPC85XX
-M:     Scott Wood <scottwood@freescale.com>
+M:     Scott Wood <oss@buserror.net>
 M:     Kumar Gala <galak@kernel.crashing.org>
 W:     http://www.penguinppc.org/
 L:     linuxppc-dev@lists.ozlabs.org
index 85eabc49de61ad8900938def85791ef5d62c1402..7d5a8350f9132b47aee3a7a93bc721b19c135ee4 100644 (file)
@@ -560,6 +560,7 @@ choice
 
 config PPC_4K_PAGES
        bool "4k page size"
+       select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S
 
 config PPC_16K_PAGES
        bool "16k page size"
@@ -568,6 +569,7 @@ config PPC_16K_PAGES
 config PPC_64K_PAGES
        bool "64k page size"
        depends on !PPC_FSL_BOOK3E && (44x || PPC_STD_MMU_64 || PPC_BOOK3E_64)
+       select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S
 
 config PPC_256K_PAGES
        bool "256k page size"
@@ -1075,8 +1077,6 @@ source "drivers/Kconfig"
 
 source "fs/Kconfig"
 
-source "arch/powerpc/sysdev/qe_lib/Kconfig"
-
 source "lib/Kconfig"
 
 source "arch/powerpc/Kconfig.debug"
index a0e44a9c456fcee2a187f3af8a911e8cf63e10d2..638f9ce740f52b9f3a38247dc0b51dd922efac13 100644 (file)
@@ -64,17 +64,17 @@ config PPC_EMULATED_STATS
          emulated.
 
 config CODE_PATCHING_SELFTEST
-       bool "Run self-tests of the code-patching code."
+       bool "Run self-tests of the code-patching code"
        depends on DEBUG_KERNEL
        default n
 
 config FTR_FIXUP_SELFTEST
-       bool "Run self-tests of the feature-fixup code."
+       bool "Run self-tests of the feature-fixup code"
        depends on DEBUG_KERNEL
        default n
 
 config MSI_BITMAP_SELFTEST
-       bool "Run self-tests of the MSI bitmap code."
+       bool "Run self-tests of the MSI bitmap code"
        depends on DEBUG_KERNEL
        default n
 
index 99e4487248ff358eb5e4777b1d9732ffaa7bfa57..61165101342c896445165d829eeac9241dfdf412 100644 (file)
@@ -113,7 +113,6 @@ src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
 src-plat-$(CONFIG_PPC_PSERIES) += pseries-head.S
 src-plat-$(CONFIG_PPC_POWERNV) += pseries-head.S
 src-plat-$(CONFIG_PPC_IBM_CELL_BLADE) += pseries-head.S
-src-plat-$(CONFIG_PPC_CELL_QPACE) += pseries-head.S
 
 src-wlib := $(sort $(src-wlib-y))
 src-plat := $(sort $(src-plat-y))
@@ -217,7 +216,6 @@ image-$(CONFIG_PPC_POWERNV)         += zImage.pseries
 image-$(CONFIG_PPC_MAPLE)              += zImage.maple
 image-$(CONFIG_PPC_IBM_CELL_BLADE)     += zImage.pseries
 image-$(CONFIG_PPC_PS3)                        += dtbImage.ps3
-image-$(CONFIG_PPC_CELL_QPACE)         += zImage.pseries
 image-$(CONFIG_PPC_CHRP)               += zImage.chrp
 image-$(CONFIG_PPC_EFIKA)              += zImage.chrp
 image-$(CONFIG_PPC_PMAC)               += zImage.pmac
index 74866ac52f39baf287e976805a05f84b0598902c..1b33f5157c8afcc85197b93b82f24cba7722ccf5 100644 (file)
        fman@400000 {
                interrupts = <96 2 0 0>, <16 2 1 30>;
 
+               muram@0 {
+                       compatible = "fsl,fman-muram";
+                       reg = <0x0 0x80000>;
+               };
+
                enet0: ethernet@e0000 {
                };
 
index 70882ade606d42d711b0116b92cedcbc8933192b..56e6f1337e96325d71e588d69c1a8133baf6d6f4 100644 (file)
        soc: soc@ff700000 {
                ranges = <0x0 0x0 0xff700000 0x100000>;
        };
+
+       pci0: pcie@ff70a000 {
+               reg = <0 0xff70a000 0 0x1000>;
+               ranges = <0x2000000 0x0 0x90000000 0 0x90000000 0x0 0x20000000
+                         0x1000000 0x0 0x00000000 0 0xc0010000 0x0 0x10000>;
+               pcie@0 {
+                       ranges = <0x2000000 0x0 0x90000000
+                                 0x2000000 0x0 0x90000000
+                                 0x0 0x20000000
+
+                                 0x1000000 0x0 0x0
+                                 0x1000000 0x0 0x0
+                                 0x0 0x100000>;
+               };
+       };
 };
 
 /include/ "bsc9132qds.dtsi"
index c72307198140564034a6348a170fad62878c95d4..b5f071574e831afb112b48ca973ce6b37f11812b 100644 (file)
        interrupts = <16 2 0 0 20 2 0 0>;
 };
 
+/* controller at 0xa000 */
+&pci0 {
+       compatible = "fsl,bsc9132-pcie", "fsl,qoriq-pcie-v2.2";
+       device_type = "pci";
+       #size-cells = <2>;
+       #address-cells = <3>;
+       bus-range = <0 255>;
+       interrupts = <16 2 0 0>;
+
+       pcie@0 {
+               reg = <0 0 0 0 0>;
+               #interrupt-cells = <1>;
+               #size-cells = <2>;
+               #address-cells = <3>;
+               device_type = "pci";
+               interrupts = <16 2 0 0>;
+               interrupt-map-mask = <0xf800 0 0 7>;
+
+               interrupt-map = <
+                       /* IDSEL 0x0 */
+                       0000 0x0 0x0 0x1 &mpic 0x0 0x2 0x0 0x0
+                       0000 0x0 0x0 0x2 &mpic 0x1 0x2 0x0 0x0
+                       0000 0x0 0x0 0x3 &mpic 0x2 0x2 0x0 0x0
+                       0000 0x0 0x0 0x4 &mpic 0x3 0x2 0x0 0x0
+                       >;
+       };
+};
+
 &soc {
        #address-cells = <1>;
        #size-cells = <1>;
index 301a9dba579067370b9d4217860cf1dbc1d0e4c9..90f7949fe31285f365045635ed155a9f4d476154 100644 (file)
@@ -45,6 +45,7 @@
                serial0 = &serial0;
                ethernet0 = &enet0;
                ethernet1 = &enet1;
+               pci0 = &pci0;
        };
 
        cpus {
index 0f0ced69835a066e9358f1ca08a04853b71d33df..14b62950503893dc323913be9870188c12e2115f 100644 (file)
                phy-connection-type = "sgmii";
        };
 };
+
+&pci0 {
+       pcie@0 {
+               interrupt-map = <
+                       /* IDSEL 0x0 */
+                       /*
+                        *irq[4:5] are active-high
+                        *irq[6:7] are active-low
+                        */
+                       0000 0x0 0x0 0x1 &mpic 0x4 0x2 0x0 0x0
+                       0000 0x0 0x0 0x2 &mpic 0x5 0x2 0x0 0x0
+                       0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0
+                       0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0
+                       >;
+       };
+};
index 2b2fff4a12a2f81b4ce51952bbc425b2f6b1d3f0..6bd842beb1dcfbb019a6d033079ccb530048f43c 100644 (file)
        };
 };
 
-/include/ "t1023si-post.dtsi"
+#include "t1023si-post.dtsi"
index 518ddaa8da2de05b891ef40715dff5c03e359daf..99e421df79d4c87016ce26ebe0a473f032ea0038 100644 (file)
@@ -32,6 +32,8 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <dt-bindings/thermal/thermal.h>
+
 &ifc {
        #address-cells = <2>;
        #size-cells = <1>;
                reg = <0xea000 0x4000>;
        };
 
+       tmu: tmu@f0000 {
+               compatible = "fsl,qoriq-tmu";
+               reg = <0xf0000 0x1000>;
+               interrupts = <18 2 0 0>;
+               fsl,tmu-range = <0xb0000 0xa0026 0x80048 0x30061>;
+               fsl,tmu-calibration = <0x00000000 0x0000000f
+                                      0x00000001 0x00000017
+                                      0x00000002 0x0000001e
+                                      0x00000003 0x00000026
+                                      0x00000004 0x0000002e
+                                      0x00000005 0x00000035
+                                      0x00000006 0x0000003d
+                                      0x00000007 0x00000044
+                                      0x00000008 0x0000004c
+                                      0x00000009 0x00000053
+                                      0x0000000a 0x0000005b
+                                      0x0000000b 0x00000064
+
+                                      0x00010000 0x00000011
+                                      0x00010001 0x0000001c
+                                      0x00010002 0x00000024
+                                      0x00010003 0x0000002b
+                                      0x00010004 0x00000034
+                                      0x00010005 0x00000039
+                                      0x00010006 0x00000042
+                                      0x00010007 0x0000004c
+                                      0x00010008 0x00000051
+                                      0x00010009 0x0000005a
+                                      0x0001000a 0x00000063
+
+                                      0x00020000 0x00000013
+                                      0x00020001 0x00000019
+                                      0x00020002 0x00000024
+                                      0x00020003 0x0000002c
+                                      0x00020004 0x00000035
+                                      0x00020005 0x0000003d
+                                      0x00020006 0x00000046
+                                      0x00020007 0x00000050
+                                      0x00020008 0x00000059
+
+                                      0x00030000 0x00000002
+                                      0x00030001 0x0000000d
+                                      0x00030002 0x00000019
+                                      0x00030003 0x00000024>;
+               #thermal-sensor-cells = <0>;
+       };
+
+       thermal-zones {
+               cpu_thermal: cpu-thermal {
+                       polling-delay-passive = <1000>;
+                       polling-delay = <5000>;
+
+                       thermal-sensors = <&tmu>;
+
+                       trips {
+                               cpu_alert: cpu-alert {
+                                       temperature = <85000>;
+                                       hysteresis = <2000>;
+                                       type = "passive";
+                               };
+                               cpu_crit: cpu-crit {
+                                       temperature = <95000>;
+                                       hysteresis = <2000>;
+                                       type = "critical";
+                               };
+                       };
+
+                       cooling-maps {
+                               map0 {
+                                       trip = <&cpu_alert>;
+                                       cooling-device =
+                                               <&cpu0 THERMAL_NO_LIMIT
+                                                       THERMAL_NO_LIMIT>;
+                               };
+                               map1 {
+                                       trip = <&cpu_alert>;
+                                       cooling-device =
+                                               <&cpu1 THERMAL_NO_LIMIT
+                                                       THERMAL_NO_LIMIT>;
+                               };
+                       };
+               };
+       };
+
        scfg: global-utilities@fc000 {
                compatible = "fsl,t1023-scfg";
                reg = <0xfc000 0x1000>;
index 43cd5b50cd0aa083ad1e50edb5c24b0495c8f76a..6a3581b8e1f85e235b995ed831aa08861a5081e1 100644 (file)
        };
 };
 
-/include/ "t1024si-post.dtsi"
+#include "t1024si-post.dtsi"
index 429d8c73650a3609d6bce226f7b72644f574af8f..0ccc7d03335eb7e3ca1ba1e280cf2f523ecddd53 100644 (file)
        };
 };
 
-/include/ "t1024si-post.dtsi"
+#include "t1024si-post.dtsi"
index 95e3af8d768e3c24c160e52a341aea4f11f2935f..bb480346a58d8f41797f47367a20fd0a95a41cdf 100644 (file)
@@ -32,7 +32,7 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/include/ "t1023si-post.dtsi"
+#include "t1023si-post.dtsi"
 
 / {
        aliases {
index 3e1528abf3f47328925c4e01677da3ae20b08aaf..9d08a363bab322490740c6533c284df77c386c2f 100644 (file)
@@ -76,6 +76,7 @@
                        reg = <0>;
                        clocks = <&mux0>;
                        next-level-cache = <&L2_1>;
+                       #cooling-cells = <2>;
                        L2_1: l2-cache {
                                next-level-cache = <&cpc>;
                        };
@@ -85,6 +86,7 @@
                        reg = <1>;
                        clocks = <&mux1>;
                        next-level-cache = <&L2_2>;
+                       #cooling-cells = <2>;
                        L2_2: l2-cache {
                                next-level-cache = <&cpc>;
                        };
index 681746efd31ddc7dbb36f66d1453b9b501917fe2..fb6bc02ebb606072a3a503220f7fdac4617ea41f 100644 (file)
@@ -43,4 +43,4 @@
        interrupt-parent = <&mpic>;
 };
 
-/include/ "t1040si-post.dtsi"
+#include "t1040si-post.dtsi"
index 4d298659468c7dfeedf668a6039a62395188e75c..5f76edc7838c105565b86e71c30ea0d463289ccf 100644 (file)
@@ -43,4 +43,4 @@
        interrupt-parent = <&mpic>;
 };
 
-/include/ "t1040si-post.dtsi"
+#include "t1040si-post.dtsi"
index 8f9e65b47515db0eb3a5ddbdbbb8221f2709a5c4..cf194154bbdce37151403052608a29514dace66c 100644 (file)
@@ -45,4 +45,4 @@
        };
 };
 
-/include/ "t1040si-post.dtsi"
+#include "t1040si-post.dtsi"
index d30b3de1cfc57eec5d9f0817eb6a649c2c371ba1..e0f4da55477405daa5f9eb8f1b065187c0af0574 100644 (file)
@@ -32,6 +32,8 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <dt-bindings/thermal/thermal.h>
+
 &bman_fbpr {
        compatible = "fsl,bman-fbpr";
        alloc-ranges = <0 0 0x10000 0>;
                reg        = <0xea000 0x4000>;
        };
 
+       tmu: tmu@f0000 {
+               compatible = "fsl,qoriq-tmu";
+               reg = <0xf0000 0x1000>;
+               interrupts = <18 2 0 0>;
+               fsl,tmu-range = <0xa0000 0x90026 0x8004a 0x1006a>;
+               fsl,tmu-calibration = <0x00000000 0x00000025
+                                      0x00000001 0x00000028
+                                      0x00000002 0x0000002d
+                                      0x00000003 0x00000031
+                                      0x00000004 0x00000036
+                                      0x00000005 0x0000003a
+                                      0x00000006 0x00000040
+                                      0x00000007 0x00000044
+                                      0x00000008 0x0000004a
+                                      0x00000009 0x0000004f
+                                      0x0000000a 0x00000054
+
+                                      0x00010000 0x0000000d
+                                      0x00010001 0x00000013
+                                      0x00010002 0x00000019
+                                      0x00010003 0x0000001f
+                                      0x00010004 0x00000025
+                                      0x00010005 0x0000002d
+                                      0x00010006 0x00000033
+                                      0x00010007 0x00000043
+                                      0x00010008 0x0000004b
+                                      0x00010009 0x00000053
+
+                                      0x00020000 0x00000010
+                                      0x00020001 0x00000017
+                                      0x00020002 0x0000001f
+                                      0x00020003 0x00000029
+                                      0x00020004 0x00000031
+                                      0x00020005 0x0000003c
+                                      0x00020006 0x00000042
+                                      0x00020007 0x0000004d
+                                      0x00020008 0x00000056
+
+                                      0x00030000 0x00000012
+                                      0x00030001 0x0000001d>;
+               #thermal-sensor-cells = <0>;
+       };
+
+       thermal-zones {
+               cpu_thermal: cpu-thermal {
+                       polling-delay-passive = <1000>;
+                       polling-delay = <5000>;
+
+                       thermal-sensors = <&tmu>;
+
+                       trips {
+                               cpu_alert: cpu-alert {
+                                       temperature = <85000>;
+                                       hysteresis = <2000>;
+                                       type = "passive";
+                               };
+                               cpu_crit: cpu-crit {
+                                       temperature = <95000>;
+                                       hysteresis = <2000>;
+                                       type = "critical";
+                               };
+                       };
+
+                       cooling-maps {
+                               map0 {
+                                       trip = <&cpu_alert>;
+                                       cooling-device =
+                                               <&cpu0 THERMAL_NO_LIMIT
+                                                       THERMAL_NO_LIMIT>;
+                               };
+                               map1 {
+                                       trip = <&cpu_alert>;
+                                       cooling-device =
+                                               <&cpu1 THERMAL_NO_LIMIT
+                                                       THERMAL_NO_LIMIT>;
+                               };
+                               map2 {
+                                       trip = <&cpu_alert>;
+                                       cooling-device =
+                                               <&cpu2 THERMAL_NO_LIMIT
+                                                       THERMAL_NO_LIMIT>;
+                               };
+                               map3 {
+                                       trip = <&cpu_alert>;
+                                       cooling-device =
+                                               <&cpu3 THERMAL_NO_LIMIT
+                                                       THERMAL_NO_LIMIT>;
+                               };
+                       };
+               };
+       };
+
        scfg: global-utilities@fc000 {
                compatible = "fsl,t1040-scfg";
                reg = <0xfc000 0x1000>;
index b245b31b8279e226616027bc0dc8933ce2050e17..2a5a90dd272e290da584639c10dafac0742ab30d 100644 (file)
@@ -50,4 +50,4 @@
        };
 };
 
-/include/ "t1040si-post.dtsi"
+#include "t1042si-post.dtsi"
index 4ab9bbe7c5c5b73196dea1e2fe7460cbcd38c4dc..90a4a73bb905d50712a9d714e8226790c61ea18e 100644 (file)
@@ -43,4 +43,4 @@
        interrupt-parent = <&mpic>;
 };
 
-/include/ "t1042si-post.dtsi"
+#include "t1042si-post.dtsi"
index 67af56bc5ee980a5c7bf6f6d941688e435d1f38c..8d908e795e4db42c71d5b12d067ae3cbb7b062e7 100644 (file)
@@ -45,4 +45,4 @@
        };
 };
 
-/include/ "t1042si-post.dtsi"
+#include "t1042si-post.dtsi"
index 2f67677530a44c7e13d2b8939aba7293b9101fc8..98c001019d6a39c8e06cceaabea68687341c579d 100644 (file)
@@ -54,4 +54,4 @@
        };
 };
 
-/include/ "t1042si-post.dtsi"
+#include "t1042si-post.dtsi"
index 319b74f29724fa733bc2d800b2c2a0e8fa48508c..a5544f93689c79f74e67bc6677b26f8e7ee862eb 100644 (file)
@@ -32,6 +32,6 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/include/ "t1040si-post.dtsi"
+#include "t1040si-post.dtsi"
 
 /* Place holder for ethernet related device tree nodes */
index fcfa38ae5e026e9e44ff876bf3c425f16f7c7a85..6db0ee8b1384f0744ba26e8b6f7a6df771f70eec 100644 (file)
@@ -76,6 +76,7 @@
                        reg = <0>;
                        clocks = <&mux0>;
                        next-level-cache = <&L2_1>;
+                       #cooling-cells = <2>;
                        L2_1: l2-cache {
                                next-level-cache = <&cpc>;
                        };
@@ -85,6 +86,7 @@
                        reg = <1>;
                        clocks = <&mux1>;
                        next-level-cache = <&L2_2>;
+                       #cooling-cells = <2>;
                        L2_2: l2-cache {
                                next-level-cache = <&cpc>;
                        };
@@ -94,6 +96,7 @@
                        reg = <2>;
                        clocks = <&mux2>;
                        next-level-cache = <&L2_3>;
+                       #cooling-cells = <2>;
                        L2_3: l2-cache {
                                next-level-cache = <&cpc>;
                        };
                        reg = <3>;
                        clocks = <&mux3>;
                        next-level-cache = <&L2_4>;
+                       #cooling-cells = <2>;
                        L2_4: l2-cache {
                                next-level-cache = <&cpc>;
                        };
index ceaa75d5a684330aa58246c3991fc2954f4ac353..6a19fcef5596fa7ae5211b27cdc59b4562995d72 100755 (executable)
@@ -154,7 +154,7 @@ if [ -z "$kernel" ]; then
     kernel=vmlinux
 fi
 
-elfformat="`${CROSS}objdump -p "$kernel" | grep 'file format' | awk '{print $4}'`"
+LANG=C elfformat="`${CROSS}objdump -p "$kernel" | grep 'file format' | awk '{print $4}'`"
 case "$elfformat" in
     elf64-powerpcle)   format=elf64lppc        ;;
     elf64-powerpc)     format=elf32ppc ;;
index 850bd195d0e816a7841a0070320039ff9c749c52..b1593fe6f70bc3140833577cf0305b9a416fe942 100644 (file)
@@ -12,6 +12,7 @@ CONFIG_P1010_RDB=y
 CONFIG_P1022_DS=y
 CONFIG_P1022_RDK=y
 CONFIG_P1023_RDB=y
+CONFIG_TWR_P102x=y
 CONFIG_SBC8548=y
 CONFIG_SOCRATES=y
 CONFIG_STX_GP3=y
index 2c041b535a64ed58d3be2aa79916f94308190b92..b041fb60737664ca2e1b4c914c8e8eb4cab66f76 100644 (file)
@@ -36,7 +36,6 @@ CONFIG_PS3_ROM=m
 CONFIG_PS3_FLASH=m
 CONFIG_PS3_LPM=m
 CONFIG_PPC_IBM_CELL_BLADE=y
-CONFIG_PPC_CELL_QPACE=y
 CONFIG_RTAS_FLASH=m
 CONFIG_IBMEBUS=y
 CONFIG_CPU_FREQ_PMAC64=y
index bd5e63f72ad40ad5d9c2c5561bbc2af7c9196980..93ee046d12cde4f15dabcc6b6ae1b657920353c3 100644 (file)
@@ -85,6 +85,7 @@ static void spe_begin(void)
 
 static void spe_end(void)
 {
+       disable_kernel_spe();
        /* reenable preemption */
        preempt_enable();
 }
index 3e1d2221252180ca302cca3e068f113c515b0a2a..f9ebc38d3fe79376956c83bf4bda9af3f11dd792 100644 (file)
@@ -46,6 +46,7 @@ static void spe_begin(void)
 
 static void spe_end(void)
 {
+       disable_kernel_spe();
        /* reenable preemption */
        preempt_enable();
 }
index f4a616fe1a822e9b262d0848d7e235d97e99a8b3..718a079dcdbfb3b92919c58b1e084a4769f0f4db 100644 (file)
@@ -47,6 +47,7 @@ static void spe_begin(void)
 
 static void spe_end(void)
 {
+       disable_kernel_spe();
        /* reenable preemption */
        preempt_enable();
 }
similarity index 93%
rename from arch/powerpc/include/asm/pte-hash32.h
rename to arch/powerpc/include/asm/book3s/32/hash.h
index 62cfb0c663bb9e513bfded47604f007dc3222865..264b754d65b0b60e554a7c22c18b1b87b996ba8b 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _ASM_POWERPC_PTE_HASH32_H
-#define _ASM_POWERPC_PTE_HASH32_H
+#ifndef _ASM_POWERPC_BOOK3S_32_HASH_H
+#define _ASM_POWERPC_BOOK3S_32_HASH_H
 #ifdef __KERNEL__
 
 /*
@@ -43,4 +43,4 @@
 #define PTE_ATOMIC_UPDATES     1
 
 #endif /* __KERNEL__ */
-#endif /*  _ASM_POWERPC_PTE_HASH32_H */
+#endif /* _ASM_POWERPC_BOOK3S_32_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
new file mode 100644 (file)
index 0000000..38b33dc
--- /dev/null
@@ -0,0 +1,482 @@
+#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
+#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+#include <asm/book3s/32/hash.h>
+
+/* And here we include common definitions */
+#include <asm/pte-common.h>
+
+/*
+ * The normal case is that PTEs are 32-bits and we have a 1-page
+ * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
+ *
+ * For any >32-bit physical address platform, we can use the following
+ * two level page table layout where the pgdir is 8KB and the MS 13 bits
+ * are an index to the second level table.  The combined pgdir/pmd first
+ * level has 2048 entries and the second level has 512 64-bit PTE entries.
+ * -Matt
+ */
+/* PGDIR_SHIFT determines what a top-level page table entry can map */
+#define PGDIR_SHIFT    (PAGE_SHIFT + PTE_SHIFT)
+#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK     (~(PGDIR_SIZE-1))
+
+#define PTRS_PER_PTE   (1 << PTE_SHIFT)
+#define PTRS_PER_PMD   1
+#define PTRS_PER_PGD   (1 << (32 - PGDIR_SHIFT))
+
+#define USER_PTRS_PER_PGD      (TASK_SIZE / PGDIR_SIZE)
+/*
+ * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
+ * value (for now) on others, from where we can start layout kernel
+ * virtual space that goes below PKMAP and FIXMAP
+ */
+#ifdef CONFIG_HIGHMEM
+#define KVIRT_TOP      PKMAP_BASE
+#else
+#define KVIRT_TOP      (0xfe000000UL)  /* for now, could be FIXMAP_BASE ? */
+#endif
+
+/*
+ * ioremap_bot starts at that address. Early ioremaps move down from there,
+ * until mem_init() at which point this becomes the top of the vmalloc
+ * and ioremap space
+ */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+#define IOREMAP_TOP    ((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
+#else
+#define IOREMAP_TOP    KVIRT_TOP
+#endif
+
+/*
+ * Just any arbitrary offset to the start of the vmalloc VM area: the
+ * current 16MB value just means that there will be a 64MB "hole" after the
+ * physical memory until the kernel virtual memory starts.  That means that
+ * any out-of-bounds memory accesses will hopefully be caught.
+ * The vmalloc() routines leaves a hole of 4kB between each vmalloced
+ * area for the same reason. ;)
+ *
+ * We no longer map larger than phys RAM with the BATs so we don't have
+ * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
+ * about clashes between our early calls to ioremap() that start growing down
+ * from ioremap_base being run into the VM area allocations (growing upwards
+ * from VMALLOC_START).  For this reason we have ioremap_bot to check when
+ * we actually run into our mappings setup in the early boot with the VM
+ * system.  This really does become a problem for machines with good amounts
+ * of RAM.  -- Cort
+ */
+#define VMALLOC_OFFSET (0x1000000) /* 16M */
+#ifdef PPC_PIN_SIZE
+#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#else
+#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
+#endif
+#define VMALLOC_END    ioremap_bot
+
+#ifndef __ASSEMBLY__
+#include <linux/sched.h>
+#include <linux/threads.h>
+#include <asm/io.h>                    /* For sub-arch specific PPC_PIN_SIZE */
+
+extern unsigned long ioremap_bot;
+
+/*
+ * entries per page directory level: our page-table tree is two-level, so
+ * we don't really have any PMD directory.
+ */
+#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_SHIFT)
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << (32 - PGDIR_SHIFT))
+
+#define pte_ERROR(e) \
+       pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
+               (unsigned long long)pte_val(e))
+#define pgd_ERROR(e) \
+       pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+/*
+ * Bits in a linux-style PTE.  These match the bits in the
+ * (hardware-defined) PowerPC PTE as closely as possible.
+ */
+
+#define pte_clear(mm, addr, ptep) \
+       do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
+
+#define pmd_none(pmd)          (!pmd_val(pmd))
+#define        pmd_bad(pmd)            (pmd_val(pmd) & _PMD_BAD)
+#define        pmd_present(pmd)        (pmd_val(pmd) & _PMD_PRESENT_MASK)
+static inline void pmd_clear(pmd_t *pmdp)
+{
+       *pmdp = __pmd(0);
+}
+
+
+/*
+ * When flushing the tlb entry for a page, we also need to flush the hash
+ * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
+ */
+extern int flush_hash_pages(unsigned context, unsigned long va,
+                           unsigned long pmdval, int count);
+
+/* Add an HPTE to the hash table */
+extern void add_hash_page(unsigned context, unsigned long va,
+                         unsigned long pmdval);
+
+/* Flush an entry from the TLB/hash table */
+extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
+                            unsigned long address);
+
+/*
+ * PTE updates. This function is called whenever an existing
+ * valid PTE is updated. This does -not- include set_pte_at()
+ * which nowadays only sets a new PTE.
+ *
+ * Depending on the type of MMU, we may need to use atomic updates
+ * and the PTE may be either 32 or 64 bit wide. In the later case,
+ * when using atomic updates, only the low part of the PTE is
+ * accessed atomically.
+ *
+ * In addition, on 44x, we also maintain a global flag indicating
+ * that an executable user mapping was modified, which is needed
+ * to properly flush the virtually tagged instruction cache of
+ * those implementations.
+ */
+#ifndef CONFIG_PTE_64BIT
+static inline unsigned long pte_update(pte_t *p,
+                                      unsigned long clr,
+                                      unsigned long set)
+{
+       unsigned long old, tmp;
+
+       __asm__ __volatile__("\
+1:     lwarx   %0,0,%3\n\
+       andc    %1,%0,%4\n\
+       or      %1,%1,%5\n"
+       PPC405_ERR77(0,%3)
+"      stwcx.  %1,0,%3\n\
+       bne-    1b"
+       : "=&r" (old), "=&r" (tmp), "=m" (*p)
+       : "r" (p), "r" (clr), "r" (set), "m" (*p)
+       : "cc" );
+
+       return old;
+}
+#else /* CONFIG_PTE_64BIT */
+static inline unsigned long long pte_update(pte_t *p,
+                                           unsigned long clr,
+                                           unsigned long set)
+{
+       unsigned long long old;
+       unsigned long tmp;
+
+       __asm__ __volatile__("\
+1:     lwarx   %L0,0,%4\n\
+       lwzx    %0,0,%3\n\
+       andc    %1,%L0,%5\n\
+       or      %1,%1,%6\n"
+       PPC405_ERR77(0,%3)
+"      stwcx.  %1,0,%4\n\
+       bne-    1b"
+       : "=&r" (old), "=&r" (tmp), "=m" (*p)
+       : "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
+       : "cc" );
+
+       return old;
+}
+#endif /* CONFIG_PTE_64BIT */
+
+/*
+ * 2.6 calls this without flushing the TLB entry; this is wrong
+ * for our hash-based implementation, we fix that up here.
+ */
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
+{
+       unsigned long old;
+       old = pte_update(ptep, _PAGE_ACCESSED, 0);
+       if (old & _PAGE_HASHPTE) {
+               unsigned long ptephys = __pa(ptep) & PAGE_MASK;
+               flush_hash_pages(context, addr, ptephys, 1);
+       }
+       return (old & _PAGE_ACCESSED) != 0;
+}
+#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
+       __ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+                                      pte_t *ptep)
+{
+       return __pte(pte_update(ptep, ~_PAGE_HASHPTE, 0));
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+                                     pte_t *ptep)
+{
+       pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
+}
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       ptep_set_wrprotect(mm, addr, ptep);
+}
+
+
+static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
+{
+       unsigned long set = pte_val(entry) &
+               (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+       unsigned long clr = ~pte_val(entry) & _PAGE_RO;
+
+       pte_update(ptep, clr, set);
+}
+
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B)  (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
+
+/*
+ * Note that on Book E processors, the pmd contains the kernel virtual
+ * (lowmem) address of the pte page.  The physical address is less useful
+ * because everything runs with translation enabled (even the TLB miss
+ * handler).  On everything else the pmd contains the physical address
+ * of the pte page.  -- paulus
+ */
+#ifndef CONFIG_BOOKE
+#define pmd_page_vaddr(pmd)    \
+       ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd)          \
+       pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+#else
+#define pmd_page_vaddr(pmd)    \
+       ((unsigned long) (pmd_val(pmd) & PAGE_MASK))
+#define pmd_page(pmd)          \
+       pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
+#endif
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+/* to find an entry in a page-table-directory */
+#define pgd_index(address)      ((address) >> PGDIR_SHIFT)
+#define pgd_offset(mm, address)         ((mm)->pgd + pgd_index(address))
+
+/* Find an entry in the third-level page table.. */
+#define pte_index(address)             \
+       (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, addr)   \
+       ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
+#define pte_offset_map(dir, addr)              \
+       ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr))
+#define pte_unmap(pte)         kunmap_atomic(pte)
+
+/*
+ * Encode and decode a swap entry.
+ * Note that the bits we use in a PTE for representing a swap entry
+ * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
+ *   -- paulus
+ */
+#define __swp_type(entry)              ((entry).val & 0x1f)
+#define __swp_offset(entry)            ((entry).val >> 5)
+#define __swp_entry(type, offset)      ((swp_entry_t) { (type) | ((offset) << 5) })
+#define __pte_to_swp_entry(pte)                ((swp_entry_t) { pte_val(pte) >> 3 })
+#define __swp_entry_to_pte(x)          ((pte_t) { (x).val << 3 })
+
+#ifndef CONFIG_PPC_4K_PAGES
+void pgtable_cache_init(void);
+#else
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init()   do { } while (0)
+#endif
+
+extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
+                     pmd_t **pmdp);
+
+/* Generic accessors to PTE bits */
+static inline int pte_write(pte_t pte)         { return !!(pte_val(pte) & _PAGE_RW);}
+static inline int pte_dirty(pte_t pte)         { return !!(pte_val(pte) & _PAGE_DIRTY); }
+static inline int pte_young(pte_t pte)         { return !!(pte_val(pte) & _PAGE_ACCESSED); }
+static inline int pte_special(pte_t pte)       { return !!(pte_val(pte) & _PAGE_SPECIAL); }
+static inline int pte_none(pte_t pte)          { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
+static inline pgprot_t pte_pgprot(pte_t pte)   { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+
+static inline int pte_present(pte_t pte)
+{
+       return pte_val(pte) & _PAGE_PRESENT;
+}
+
+/* Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * Even if PTEs can be unsigned long long, a PFN is always an unsigned
+ * long for now.
+ */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+       return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
+                    pgprot_val(pgprot));
+}
+
+static inline unsigned long pte_pfn(pte_t pte)
+{
+       return pte_val(pte) >> PTE_RPN_SHIFT;
+}
+
+/* Generic modifiers for PTE bits */
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+       return __pte(pte_val(pte) & ~_PAGE_RW);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+       return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+       return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_RW);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_SPECIAL);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+       return pte;
+}
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+       return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+
+
+/* This low level function performs the actual PTE insertion
+ * Setting the PTE depends on the MMU type and other factors. It's
+ * an horrible mess that I'm not going to try to clean up now but
+ * I'm keeping it in one place rather than spread around
+ */
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+                               pte_t *ptep, pte_t pte, int percpu)
+{
+#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
+       /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
+        * helper pte_update() which does an atomic update. We need to do that
+        * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
+        * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
+        * the hash bits instead (ie, same as the non-SMP case)
+        */
+       if (percpu)
+               *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+                             | (pte_val(pte) & ~_PAGE_HASHPTE));
+       else
+               pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
+
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+       /* Second case is 32-bit with 64-bit PTE.  In this case, we
+        * can just store as long as we do the two halves in the right order
+        * with a barrier in between. This is possible because we take care,
+        * in the hash code, to pre-invalidate if the PTE was already hashed,
+        * which synchronizes us with any concurrent invalidation.
+        * In the percpu case, we also fallback to the simple update preserving
+        * the hash bits
+        */
+       if (percpu) {
+               *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+                             | (pte_val(pte) & ~_PAGE_HASHPTE));
+               return;
+       }
+       if (pte_val(*ptep) & _PAGE_HASHPTE)
+               flush_hash_entry(mm, ptep, addr);
+       __asm__ __volatile__("\
+               stw%U0%X0 %2,%0\n\
+               eieio\n\
+               stw%U0%X0 %L2,%1"
+       : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+       : "r" (pte) : "memory");
+
+#elif defined(CONFIG_PPC_STD_MMU_32)
+       /* Third case is 32-bit hash table in UP mode, we need to preserve
+        * the _PAGE_HASHPTE bit since we may not have invalidated the previous
+        * translation in the hash yet (done in a subsequent flush_tlb_xxx())
+        * and see we need to keep track that this PTE needs invalidating
+        */
+       *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+                     | (pte_val(pte) & ~_PAGE_HASHPTE));
+
+#else
+#error "Not supported "
+#endif
+}
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+
+#define _PAGE_CACHE_CTL        (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
+                        _PAGE_WRITETHRU)
+
+#define pgprot_noncached pgprot_noncached
+static inline pgprot_t pgprot_noncached(pgprot_t prot)
+{
+       return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+                       _PAGE_NO_CACHE | _PAGE_GUARDED);
+}
+
+#define pgprot_noncached_wc pgprot_noncached_wc
+static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
+{
+       return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+                       _PAGE_NO_CACHE);
+}
+
+#define pgprot_cached pgprot_cached
+static inline pgprot_t pgprot_cached(pgprot_t prot)
+{
+       return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+                       _PAGE_COHERENT);
+}
+
+#define pgprot_cached_wthru pgprot_cached_wthru
+static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
+{
+       return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+                       _PAGE_COHERENT | _PAGE_WRITETHRU);
+}
+
+#define pgprot_cached_noncoherent pgprot_cached_noncoherent
+static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
+{
+       return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t prot)
+{
+       return pgprot_noncached_wc(prot);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /*  _ASM_POWERPC_BOOK3S_32_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
new file mode 100644 (file)
index 0000000..ea0414d
--- /dev/null
@@ -0,0 +1,132 @@
+#ifndef _ASM_POWERPC_BOOK3S_64_HASH_4K_H
+#define _ASM_POWERPC_BOOK3S_64_HASH_4K_H
+/*
+ * Entries per page directory level.  The PTE level must use a 64b record
+ * for each page table entry.  The PMD and PGD level use a 32b record for
+ * each entry by assuming that each entry is page aligned.
+ */
+#define PTE_INDEX_SIZE  9
+#define PMD_INDEX_SIZE  7
+#define PUD_INDEX_SIZE  9
+#define PGD_INDEX_SIZE  9
+
+#ifndef __ASSEMBLY__
+#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
+#define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE)
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+#endif /* __ASSEMBLY__ */
+
+#define PTRS_PER_PTE   (1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PMD   (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PUD   (1 << PUD_INDEX_SIZE)
+#define PTRS_PER_PGD   (1 << PGD_INDEX_SIZE)
+
+/* PMD_SHIFT determines what a second-level page table entry can map */
+#define PMD_SHIFT      (PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PMD_SIZE       (1UL << PMD_SHIFT)
+#define PMD_MASK       (~(PMD_SIZE-1))
+
+/* With 4k base page size, hugepage PTEs go at the PMD level */
+#define MIN_HUGEPTE_SHIFT      PMD_SHIFT
+
+/* PUD_SHIFT determines what a third-level page table entry can map */
+#define PUD_SHIFT      (PMD_SHIFT + PMD_INDEX_SIZE)
+#define PUD_SIZE       (1UL << PUD_SHIFT)
+#define PUD_MASK       (~(PUD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
+#define PGDIR_SHIFT    (PUD_SHIFT + PUD_INDEX_SIZE)
+#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK     (~(PGDIR_SIZE-1))
+
+/* Bits to mask out from a PMD to get to the PTE page */
+#define PMD_MASKED_BITS                0
+/* Bits to mask out from a PUD to get to the PMD page */
+#define PUD_MASKED_BITS                0
+/* Bits to mask out from a PGD to get to the PUD page */
+#define PGD_MASKED_BITS                0
+
+/* PTE flags to conserve for HPTE identification */
+#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
+                        _PAGE_F_SECOND | _PAGE_F_GIX)
+
+/* shift to put page number into pte */
+#define PTE_RPN_SHIFT  (18)
+
+#define _PAGE_4K_PFN           0
+#ifndef __ASSEMBLY__
+/*
+ * 4-level page tables related bits
+ */
+
+#define pgd_none(pgd)          (!pgd_val(pgd))
+#define pgd_bad(pgd)           (pgd_val(pgd) == 0)
+#define pgd_present(pgd)       (pgd_val(pgd) != 0)
+#define pgd_page_vaddr(pgd)    (pgd_val(pgd) & ~PGD_MASKED_BITS)
+
+static inline void pgd_clear(pgd_t *pgdp)
+{
+       *pgdp = __pgd(0);
+}
+
+static inline pte_t pgd_pte(pgd_t pgd)
+{
+       return __pte(pgd_val(pgd));
+}
+
+static inline pgd_t pte_pgd(pte_t pte)
+{
+       return __pgd(pte_val(pte));
+}
+extern struct page *pgd_page(pgd_t pgd);
+
+#define pud_offset(pgdp, addr) \
+  (((pud_t *) pgd_page_vaddr(*(pgdp))) + \
+    (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
+
+#define pud_ERROR(e) \
+       pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
+
+/*
+ * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range() */
+#define remap_4k_pfn(vma, addr, pfn, prot)     \
+       remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
+
+#ifdef CONFIG_HUGETLB_PAGE
+/*
+ * For 4k page size, we support explicit hugepage via hugepd
+ */
+static inline int pmd_huge(pmd_t pmd)
+{
+       return 0;
+}
+
+static inline int pud_huge(pud_t pud)
+{
+       return 0;
+}
+
+static inline int pgd_huge(pgd_t pgd)
+{
+       return 0;
+}
+#define pgd_huge pgd_huge
+
+static inline int hugepd_ok(hugepd_t hpd)
+{
+       /*
+        * if it is not a pte and have hugepd shift mask
+        * set, then it is a hugepd directory pointer
+        */
+       if (!(hpd.pd & _PAGE_PTE) &&
+           ((hpd.pd & HUGEPD_SHIFT_MASK) != 0))
+               return true;
+       return false;
+}
+#define is_hugepd(hpd)         (hugepd_ok(hpd))
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_BOOK3S_64_HASH_4K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash-64k.h b/arch/powerpc/include/asm/book3s/64/hash-64k.h
new file mode 100644 (file)
index 0000000..9e55e3b
--- /dev/null
@@ -0,0 +1,312 @@
+#ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
+#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H
+
+#include <asm-generic/pgtable-nopud.h>
+
+#define PTE_INDEX_SIZE  8
+#define PMD_INDEX_SIZE  10
+#define PUD_INDEX_SIZE 0
+#define PGD_INDEX_SIZE  12
+
+#define PTRS_PER_PTE   (1 << PTE_INDEX_SIZE)
+#define PTRS_PER_PMD   (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PGD   (1 << PGD_INDEX_SIZE)
+
+/* With 4k base page size, hugepage PTEs go at the PMD level */
+#define MIN_HUGEPTE_SHIFT      PAGE_SHIFT
+
+/* PMD_SHIFT determines what a second-level page table entry can map */
+#define PMD_SHIFT      (PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PMD_SIZE       (1UL << PMD_SHIFT)
+#define PMD_MASK       (~(PMD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+#define PGDIR_SHIFT    (PMD_SHIFT + PMD_INDEX_SIZE)
+#define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
+#define PGDIR_MASK     (~(PGDIR_SIZE-1))
+
+#define _PAGE_COMBO    0x00040000 /* this is a combo 4k page */
+#define _PAGE_4K_PFN   0x00080000 /* PFN is for a single 4k page */
+/*
+ * Used to track subpage group valid if _PAGE_COMBO is set
+ * This overloads _PAGE_F_GIX and _PAGE_F_SECOND
+ */
+#define _PAGE_COMBO_VALID      (_PAGE_F_GIX | _PAGE_F_SECOND)
+
+/* PTE flags to conserve for HPTE identification */
+#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_F_SECOND | \
+                        _PAGE_F_GIX | _PAGE_HASHPTE | _PAGE_COMBO)
+
+/* Shift to put page number into pte.
+ *
+ * That gives us a max RPN of 34 bits, which means a max of 50 bits
+ * of addressable physical space, or 46 bits for the special 4k PFNs.
+ */
+#define PTE_RPN_SHIFT  (30)
+/*
+ * we support 16 fragments per PTE page of 64K size.
+ */
+#define PTE_FRAG_NR    16
+/*
+ * We use a 2K PTE page fragment and another 2K for storing
+ * real_pte_t hash index
+ */
+#define PTE_FRAG_SIZE_SHIFT  12
+#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
+
+/*
+ * Bits to mask out from a PMD to get to the PTE page
+ * PMDs point to PTE table fragments which are PTE_FRAG_SIZE aligned.
+ */
+#define PMD_MASKED_BITS                (PTE_FRAG_SIZE - 1)
+/* Bits to mask out from a PGD/PUD to get to the PMD page */
+#define PUD_MASKED_BITS                0x1ff
+
+#ifndef __ASSEMBLY__
+
+/*
+ * With 64K pages on hash table, we have a special PTE format that
+ * uses a second "half" of the page table to encode sub-page information
+ * in order to deal with 64K made of 4K HW pages. Thus we override the
+ * generic accessors and iterators here
+ */
+#define __real_pte __real_pte
+static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
+{
+       real_pte_t rpte;
+       unsigned long *hidxp;
+
+       rpte.pte = pte;
+       rpte.hidx = 0;
+       if (pte_val(pte) & _PAGE_COMBO) {
+               /*
+                * Make sure we order the hidx load against the _PAGE_COMBO
+                * check. The store side ordering is done in __hash_page_4K
+                */
+               smp_rmb();
+               hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
+               rpte.hidx = *hidxp;
+       }
+       return rpte;
+}
+
+static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
+{
+       if ((pte_val(rpte.pte) & _PAGE_COMBO))
+               return (rpte.hidx >> (index<<2)) & 0xf;
+       return (pte_val(rpte.pte) >> _PAGE_F_GIX_SHIFT) & 0xf;
+}
+
+#define __rpte_to_pte(r)       ((r).pte)
+extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
+/*
+ * Trick: we set __end to va + 64k, which happens to work for
+ * a 16M page as well as we want only one iteration
+ */
+#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)    \
+       do {                                                            \
+               unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT));  \
+               unsigned __split = (psize == MMU_PAGE_4K ||             \
+                                   psize == MMU_PAGE_64K_AP);          \
+               shift = mmu_psize_defs[psize].shift;                    \
+               for (index = 0; vpn < __end; index++,                   \
+                            vpn += (1L << (shift - VPN_SHIFT))) {      \
+                       if (!__split || __rpte_sub_valid(rpte, index))  \
+                               do {
+
+#define pte_iterate_hashed_end() } while(0); } } while(0)
+
+#define pte_pagesize_index(mm, addr, pte)      \
+       (((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
+
+#define remap_4k_pfn(vma, addr, pfn, prot)                             \
+       (WARN_ON(((pfn) >= (1UL << (64 - PTE_RPN_SHIFT)))) ? -EINVAL :  \
+               remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE,        \
+                       __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN)))
+
+#define PTE_TABLE_SIZE PTE_FRAG_SIZE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define PMD_TABLE_SIZE ((sizeof(pmd_t) << PMD_INDEX_SIZE) + (sizeof(unsigned long) << PMD_INDEX_SIZE))
+#else
+#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
+#endif
+#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
+
+#define pgd_pte(pgd)   (pud_pte(((pud_t){ pgd })))
+#define pte_pgd(pte)   ((pgd_t)pte_pud(pte))
+
+#ifdef CONFIG_HUGETLB_PAGE
+/*
+ * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have
+ * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD;
+ *
+ * Defined in such a way that we can optimize away code block at build time
+ * if CONFIG_HUGETLB_PAGE=n.
+ */
+static inline int pmd_huge(pmd_t pmd)
+{
+       /*
+        * leaf pte for huge page
+        */
+       return !!(pmd_val(pmd) & _PAGE_PTE);
+}
+
+static inline int pud_huge(pud_t pud)
+{
+       /*
+        * leaf pte for huge page
+        */
+       return !!(pud_val(pud) & _PAGE_PTE);
+}
+
+static inline int pgd_huge(pgd_t pgd)
+{
+       /*
+        * leaf pte for huge page
+        */
+       return !!(pgd_val(pgd) & _PAGE_PTE);
+}
+#define pgd_huge pgd_huge
+
+#ifdef CONFIG_DEBUG_VM
+extern int hugepd_ok(hugepd_t hpd);
+#define is_hugepd(hpd)               (hugepd_ok(hpd))
+#else
+/*
+ * With 64k page size, we have hugepage ptes in the pgd and pmd entries. We don't
+ * need to setup hugepage directory for them. Our pte and page directory format
+ * enable us to have this enabled.
+ */
+static inline int hugepd_ok(hugepd_t hpd)
+{
+       return 0;
+}
+#define is_hugepd(pdep)                        0
+#endif /* CONFIG_DEBUG_VM */
+
+#endif /* CONFIG_HUGETLB_PAGE */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
+                                        unsigned long addr,
+                                        pmd_t *pmdp,
+                                        unsigned long clr,
+                                        unsigned long set);
+static inline char *get_hpte_slot_array(pmd_t *pmdp)
+{
+       /*
+        * The hpte hindex is stored in the pgtable whose address is in the
+        * second half of the PMD
+        *
+        * Order this load with the test for pmd_trans_huge in the caller
+        */
+       smp_rmb();
+       return *(char **)(pmdp + PTRS_PER_PMD);
+
+
+}
+/*
+ * The linux hugepage PMD now include the pmd entries followed by the address
+ * to the stashed pgtable_t. The stashed pgtable_t contains the hpte bits.
+ * [ 1 bit secondary | 3 bit hidx | 1 bit valid | 000]. We use one byte per
+ * each HPTE entry. With 16MB hugepage and 64K HPTE we need 256 entries and
+ * with 4K HPTE we need 4096 entries. Both will fit in a 4K pgtable_t.
+ *
+ * The last three bits are intentionally left to zero. This memory location
+ * are also used as normal page PTE pointers. So if we have any pointers
+ * left around while we collapse a hugepage, we need to make sure
+ * _PAGE_PRESENT bit of that is zero when we look at them
+ */
+static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
+{
+       return (hpte_slot_array[index] >> 3) & 0x1;
+}
+
+static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
+                                          int index)
+{
+       return hpte_slot_array[index] >> 4;
+}
+
+static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
+                                       unsigned int index, unsigned int hidx)
+{
+       hpte_slot_array[index] = hidx << 4 | 0x1 << 3;
+}
+
+/*
+ *
+ * For core kernel code by design pmd_trans_huge is never run on any hugetlbfs
+ * page. The hugetlbfs page table walking and mangling paths are totally
+ * separated from the core VM paths and they're differentiated by
+ *  VM_HUGETLB being set on vm_flags well before any pmd_trans_huge could run.
+ *
+ * pmd_trans_huge() is defined as false at build time if
+ * CONFIG_TRANSPARENT_HUGEPAGE=n to optimize away code blocks at build
+ * time in such case.
+ *
+ * For ppc64 we need to differentiate explicit hugepages from THP, because
+ * for THP we also track the subpage details at the pmd level. We don't do
+ * that for explicit huge pages.
+ *
+ */
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+       return !!((pmd_val(pmd) & (_PAGE_PTE | _PAGE_THP_HUGE)) ==
+                 (_PAGE_PTE | _PAGE_THP_HUGE));
+}
+
+static inline int pmd_trans_splitting(pmd_t pmd)
+{
+       if (pmd_trans_huge(pmd))
+               return pmd_val(pmd) & _PAGE_SPLITTING;
+       return 0;
+}
+
+static inline int pmd_large(pmd_t pmd)
+{
+       return !!(pmd_val(pmd) & _PAGE_PTE);
+}
+
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+       return __pmd(pmd_val(pmd) & ~_PAGE_PRESENT);
+}
+
+static inline pmd_t pmd_mksplitting(pmd_t pmd)
+{
+       return __pmd(pmd_val(pmd) | _PAGE_SPLITTING);
+}
+
+#define __HAVE_ARCH_PMD_SAME
+static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
+{
+       return (((pmd_val(pmd_a) ^ pmd_val(pmd_b)) & ~_PAGE_HPTEFLAGS) == 0);
+}
+
+static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
+                                             unsigned long addr, pmd_t *pmdp)
+{
+       unsigned long old;
+
+       if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
+               return 0;
+       old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
+       return ((old & _PAGE_ACCESSED) != 0);
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+                                     pmd_t *pmdp)
+{
+
+       if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
+               return;
+
+       pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);
+}
+
+#endif /*  CONFIG_TRANSPARENT_HUGEPAGE */
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
new file mode 100644 (file)
index 0000000..2ff8b3d
--- /dev/null
@@ -0,0 +1,551 @@
+#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
+#define _ASM_POWERPC_BOOK3S_64_HASH_H
+#ifdef __KERNEL__
+
+/*
+ * Common bits between 4K and 64K pages in a linux-style PTE.
+ * These match the bits in the (hardware-defined) PowerPC PTE as closely
+ * as possible. Additional bits may be defined in pgtable-hash64-*.h
+ *
+ * Note: We only support user read/write permissions. Supervisor always
+ * have full read/write to pages above PAGE_OFFSET (pages below that
+ * always use the user access permissions).
+ *
+ * We could create separate kernel read-only if we used the 3 PP bits
+ * combinations that newer processors provide but we currently don't.
+ */
+#define _PAGE_PTE              0x00001
+#define _PAGE_PRESENT          0x00002 /* software: pte contains a translation */
+#define _PAGE_BIT_SWAP_TYPE    2
+#define _PAGE_USER             0x00004 /* matches one of the PP bits */
+#define _PAGE_EXEC             0x00008 /* No execute on POWER4 and newer (we invert) */
+#define _PAGE_GUARDED          0x00010
+/* We can derive Memory coherence from _PAGE_NO_CACHE */
+#define _PAGE_COHERENT         0x0
+#define _PAGE_NO_CACHE         0x00020 /* I: cache inhibit */
+#define _PAGE_WRITETHRU                0x00040 /* W: cache write-through */
+#define _PAGE_DIRTY            0x00080 /* C: page changed */
+#define _PAGE_ACCESSED         0x00100 /* R: page referenced */
+#define _PAGE_RW               0x00200 /* software: user write access allowed */
+#define _PAGE_HASHPTE          0x00400 /* software: pte has an associated HPTE */
+#define _PAGE_BUSY             0x00800 /* software: PTE & hash are busy */
+#define _PAGE_F_GIX            0x07000 /* full page: hidx bits */
+#define _PAGE_F_GIX_SHIFT      12
+#define _PAGE_F_SECOND         0x08000 /* Whether to use secondary hash or not */
+#define _PAGE_SPECIAL          0x10000 /* software: special page */
+
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SOFT_DIRTY       0x20000 /* software: software dirty tracking */
+#else
+#define _PAGE_SOFT_DIRTY       0x00000
+#endif
+
+/*
+ * THP pages can't be special. So use the _PAGE_SPECIAL
+ */
+#define _PAGE_SPLITTING _PAGE_SPECIAL
+
+/*
+ * We need to differentiate between explicit huge page and THP huge
+ * page, since THP huge page also need to track real subpage details
+ */
+#define _PAGE_THP_HUGE  _PAGE_4K_PFN
+
+/*
+ * set of bits not changed in pmd_modify.
+ */
+#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS |              \
+                        _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \
+                        _PAGE_THP_HUGE | _PAGE_PTE | _PAGE_SOFT_DIRTY)
+
+#ifdef CONFIG_PPC_64K_PAGES
+#include <asm/book3s/64/hash-64k.h>
+#else
+#include <asm/book3s/64/hash-4k.h>
+#endif
+
+/*
+ * Size of EA range mapped by our pagetables.
+ */
+#define PGTABLE_EADDR_SIZE     (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
+                                PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
+#define PGTABLE_RANGE          (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define PMD_CACHE_INDEX        (PMD_INDEX_SIZE + 1)
+#else
+#define PMD_CACHE_INDEX        PMD_INDEX_SIZE
+#endif
+/*
+ * Define the address range of the kernel non-linear virtual area
+ */
+#define KERN_VIRT_START ASM_CONST(0xD000000000000000)
+#define KERN_VIRT_SIZE ASM_CONST(0x0000100000000000)
+
+/*
+ * The vmalloc space starts at the beginning of that region, and
+ * occupies half of it on hash CPUs and a quarter of it on Book3E
+ * (we keep a quarter for the virtual memmap)
+ */
+#define VMALLOC_START  KERN_VIRT_START
+#define VMALLOC_SIZE   (KERN_VIRT_SIZE >> 1)
+#define VMALLOC_END    (VMALLOC_START + VMALLOC_SIZE)
+
+/*
+ * Region IDs
+ */
+#define REGION_SHIFT           60UL
+#define REGION_MASK            (0xfUL << REGION_SHIFT)
+#define REGION_ID(ea)          (((unsigned long)(ea)) >> REGION_SHIFT)
+
+#define VMALLOC_REGION_ID      (REGION_ID(VMALLOC_START))
+#define KERNEL_REGION_ID       (REGION_ID(PAGE_OFFSET))
+#define VMEMMAP_REGION_ID      (0xfUL) /* Server only */
+#define USER_REGION_ID         (0UL)
+
+/*
+ * Defines the address of the vmemmap area, in its own region on
+ * hash table CPUs.
+ */
+#define VMEMMAP_BASE           (VMEMMAP_REGION_ID << REGION_SHIFT)
+
+#ifdef CONFIG_PPC_MM_SLICES
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+#endif /* CONFIG_PPC_MM_SLICES */
+
+/* No separate kernel read-only */
+#define _PAGE_KERNEL_RW                (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */
+#define _PAGE_KERNEL_RO                 _PAGE_KERNEL_RW
+#define _PAGE_KERNEL_RWX       (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
+
+/* Strong Access Ordering */
+#define _PAGE_SAO              (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)
+
+/* No page size encoding in the linux PTE */
+#define _PAGE_PSIZE            0
+
+/* PTEIDX nibble */
+#define _PTEIDX_SECONDARY      0x8
+#define _PTEIDX_GROUP_IX       0x7
+
+/* Hash table based platforms need atomic updates of the linux PTE */
+#define PTE_ATOMIC_UPDATES     1
+#define _PTE_NONE_MASK _PAGE_HPTEFLAGS
+/*
+ * The mask covered by the RPN must be a ULL on 32-bit platforms with
+ * 64-bit PTEs
+ */
+#define PTE_RPN_MASK   (~((1UL << PTE_RPN_SHIFT) - 1))
+/*
+ * _PAGE_CHG_MASK masks of bits that are to be preserved across
+ * pgprot changes
+ */
+#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
+                        _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \
+                        _PAGE_SOFT_DIRTY)
+/*
+ * Mask of bits returned by pte_pgprot()
+ */
+#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
+                        _PAGE_WRITETHRU | _PAGE_4K_PFN | \
+                        _PAGE_USER | _PAGE_ACCESSED |  \
+                        _PAGE_RW |  _PAGE_DIRTY | _PAGE_EXEC | \
+                        _PAGE_SOFT_DIRTY)
+/*
+ * We define 2 sets of base prot bits, one for basic pages (ie,
+ * cacheable kernel and user pages) and one for non cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled or
+ * the processor might need it for DMA coherency.
+ */
+#define _PAGE_BASE_NC  (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
+#define _PAGE_BASE     (_PAGE_BASE_NC | _PAGE_COHERENT)
+
+/* Permission masks used to generate the __P and __S table,
+ *
+ * Note:__pgprot is defined in arch/powerpc/include/asm/page.h
+ *
+ * Write permissions imply read permissions for now (we could make write-only
+ * pages on BookE but we don't bother for now). Execute permission control is
+ * possible on platforms that define _PAGE_EXEC
+ *
+ * Note due to the way vm flags are laid out, the bits are XWR
+ */
+#define PAGE_NONE      __pgprot(_PAGE_BASE)
+#define PAGE_SHARED    __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X  __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \
+                                _PAGE_EXEC)
+#define PAGE_COPY      __pgprot(_PAGE_BASE | _PAGE_USER )
+#define PAGE_COPY_X    __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY  __pgprot(_PAGE_BASE | _PAGE_USER )
+#define PAGE_READONLY_X        __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY_X
+#define __P101 PAGE_READONLY_X
+#define __P110 PAGE_COPY_X
+#define __P111 PAGE_COPY_X
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY_X
+#define __S101 PAGE_READONLY_X
+#define __S110 PAGE_SHARED_X
+#define __S111 PAGE_SHARED_X
+
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL    __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+                                _PAGE_NO_CACHE)
+#define PAGE_KERNEL_NCG        __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+                                _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_X  __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX        __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
+
+/* Protection used for kernel text. We want the debuggers to be able to
+ * set breakpoints anywhere, so don't write protect the kernel text
+ * on platforms where such control is possible.
+ */
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
+       defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
+#define PAGE_KERNEL_TEXT       PAGE_KERNEL_X
+#else
+#define PAGE_KERNEL_TEXT       PAGE_KERNEL_ROX
+#endif
+
+/* Make modules code happy. We don't set RO yet */
+#define PAGE_KERNEL_EXEC       PAGE_KERNEL_X
+#define PAGE_AGP               (PAGE_KERNEL_NC)
+
+#define PMD_BAD_BITS           (PTE_TABLE_SIZE-1)
+#define PUD_BAD_BITS           (PMD_TABLE_SIZE-1)
+
+#ifndef __ASSEMBLY__
+#define        pmd_bad(pmd)            (!is_kernel_addr(pmd_val(pmd)) \
+                                || (pmd_val(pmd) & PMD_BAD_BITS))
+#define pmd_page_vaddr(pmd)    (pmd_val(pmd) & ~PMD_MASKED_BITS)
+
+#define        pud_bad(pud)            (!is_kernel_addr(pud_val(pud)) \
+                                || (pud_val(pud) & PUD_BAD_BITS))
+#define pud_page_vaddr(pud)    (pud_val(pud) & ~PUD_MASKED_BITS)
+
+#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1))
+#define pmd_index(address) (((address) >> (PMD_SHIFT)) & (PTRS_PER_PMD - 1))
+#define pte_index(address) (((address) >> (PAGE_SHIFT)) & (PTRS_PER_PTE - 1))
+
+extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
+                           pte_t *ptep, unsigned long pte, int huge);
+extern unsigned long htab_convert_pte_flags(unsigned long pteflags);
+/* Atomic PTE updates */
+static inline unsigned long pte_update(struct mm_struct *mm,
+                                      unsigned long addr,
+                                      pte_t *ptep, unsigned long clr,
+                                      unsigned long set,
+                                      int huge)
+{
+       unsigned long old, tmp;
+
+       __asm__ __volatile__(
+       "1:     ldarx   %0,0,%3         # pte_update\n\
+       andi.   %1,%0,%6\n\
+       bne-    1b \n\
+       andc    %1,%0,%4 \n\
+       or      %1,%1,%7\n\
+       stdcx.  %1,0,%3 \n\
+       bne-    1b"
+       : "=&r" (old), "=&r" (tmp), "=m" (*ptep)
+       : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set)
+       : "cc" );
+       /* huge pages use the old page table lock */
+       if (!huge)
+               assert_pte_locked(mm, addr);
+
+       if (old & _PAGE_HASHPTE)
+               hpte_need_flush(mm, addr, ptep, old, huge);
+
+       return old;
+}
+
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+                                             unsigned long addr, pte_t *ptep)
+{
+       unsigned long old;
+
+       if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
+               return 0;
+       old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
+       return (old & _PAGE_ACCESSED) != 0;
+}
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define ptep_test_and_clear_young(__vma, __addr, __ptep)                  \
+({                                                                        \
+       int __r;                                                           \
+       __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
+       __r;                                                               \
+})
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+                                     pte_t *ptep)
+{
+
+       if ((pte_val(*ptep) & _PAGE_RW) == 0)
+               return;
+
+       pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       if ((pte_val(*ptep) & _PAGE_RW) == 0)
+               return;
+
+       pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
+}
+
+/*
+ * We currently remove entries from the hashtable regardless of whether
+ * the entry was young or dirty. The generic routines only flush if the
+ * entry was young or dirty which is not good enough.
+ *
+ * We should be more intelligent about this but for the moment we override
+ * these functions and force a tlb flush unconditionally
+ */
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(__vma, __address, __ptep)               \
+({                                                                     \
+       int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
+                                                 __ptep);              \
+       __young;                                                        \
+})
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+                                      unsigned long addr, pte_t *ptep)
+{
+       unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
+       return __pte(old);
+}
+
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+                            pte_t * ptep)
+{
+       pte_update(mm, addr, ptep, ~0UL, 0, 0);
+}
+
+
+/* Set the dirty and/or accessed bits atomically in a linux PTE, this
+ * function doesn't need to flush the hash entry
+ */
+static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
+{
+       unsigned long bits = pte_val(entry) &
+               (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC |
+                _PAGE_SOFT_DIRTY);
+
+       unsigned long old, tmp;
+
+       __asm__ __volatile__(
+       "1:     ldarx   %0,0,%4\n\
+               andi.   %1,%0,%6\n\
+               bne-    1b \n\
+               or      %0,%3,%0\n\
+               stdcx.  %0,0,%4\n\
+               bne-    1b"
+       :"=&r" (old), "=&r" (tmp), "=m" (*ptep)
+       :"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
+       :"cc");
+}
+
+#define __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B)  (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
+
+/* Generic accessors to PTE bits */
+static inline int pte_write(pte_t pte)         { return !!(pte_val(pte) & _PAGE_RW);}
+static inline int pte_dirty(pte_t pte)         { return !!(pte_val(pte) & _PAGE_DIRTY); }
+static inline int pte_young(pte_t pte)         { return !!(pte_val(pte) & _PAGE_ACCESSED); }
+static inline int pte_special(pte_t pte)       { return !!(pte_val(pte) & _PAGE_SPECIAL); }
+static inline int pte_none(pte_t pte)          { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
+static inline pgprot_t pte_pgprot(pte_t pte)   { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+static inline bool pte_soft_dirty(pte_t pte)
+{
+       return !!(pte_val(pte) & _PAGE_SOFT_DIRTY);
+}
+static inline pte_t pte_mksoft_dirty(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_clear_soft_dirty(pte_t pte)
+{
+       return __pte(pte_val(pte) & ~_PAGE_SOFT_DIRTY);
+}
+#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * These work without NUMA balancing but the kernel does not care. See the
+ * comment in include/asm-generic/pgtable.h . On powerpc, this will only
+ * work for user pages and always return true for kernel pages.
+ */
+static inline int pte_protnone(pte_t pte)
+{
+       return (pte_val(pte) &
+               (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+static inline int pte_present(pte_t pte)
+{
+       return pte_val(pte) & _PAGE_PRESENT;
+}
+
+/* Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * Even if PTEs can be unsigned long long, a PFN is always an unsigned
+ * long for now.
+ */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+       return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
+                    pgprot_val(pgprot));
+}
+
+static inline unsigned long pte_pfn(pte_t pte)
+{
+       return pte_val(pte) >> PTE_RPN_SHIFT;
+}
+
+/* Generic modifiers for PTE bits */
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+       return __pte(pte_val(pte) & ~_PAGE_RW);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+       return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+       return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_RW);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_SPECIAL);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+       return pte;
+}
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+       return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+/* This low level function performs the actual PTE insertion
+ * Setting the PTE depends on the MMU type and other factors. It's
+ * a horrible mess that I'm not going to try to clean up now but
+ * I'm keeping it in one place rather than spread around
+ */
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+                               pte_t *ptep, pte_t pte, int percpu)
+{
+       /*
+        * Anything else just stores the PTE normally. That covers all 64-bit
+        * cases, and 32-bit non-hash with 32-bit PTEs.
+        */
+       *ptep = pte;
+}
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+
+#define _PAGE_CACHE_CTL        (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
+                        _PAGE_WRITETHRU)
+
+#define pgprot_noncached pgprot_noncached
+static inline pgprot_t pgprot_noncached(pgprot_t prot)
+{
+       return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+                       _PAGE_NO_CACHE | _PAGE_GUARDED);
+}
+
+#define pgprot_noncached_wc pgprot_noncached_wc
+static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
+{
+       return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+                       _PAGE_NO_CACHE);
+}
+
+#define pgprot_cached pgprot_cached
+static inline pgprot_t pgprot_cached(pgprot_t prot)
+{
+       return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+                       _PAGE_COHERENT);
+}
+
+#define pgprot_cached_wthru pgprot_cached_wthru
+static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
+{
+       return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
+                       _PAGE_COHERENT | _PAGE_WRITETHRU);
+}
+
+#define pgprot_cached_noncoherent pgprot_cached_noncoherent
+static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
+{
+       return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t prot)
+{
+       return pgprot_noncached_wc(prot);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
+                                  pmd_t *pmdp, unsigned long old_pmd);
+#else
+static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
+                                         unsigned long addr, pmd_t *pmdp,
+                                         unsigned long old_pmd)
+{
+       WARN(1, "%s called with THP disabled\n", __func__);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
new file mode 100644 (file)
index 0000000..b3a5bad
--- /dev/null
@@ -0,0 +1,300 @@
+#ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
+#define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_
+/*
+ * This file contains the functions and defines necessary to modify and use
+ * the ppc64 hashed page table.
+ */
+
+#include <asm/book3s/64/hash.h>
+#include <asm/barrier.h>
+
+/*
+ * The second half of the kernel virtual space is used for IO mappings,
+ * it's itself carved into the PIO region (ISA and PHB IO space) and
+ * the ioremap space
+ *
+ *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
+ *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
+ * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
+ */
+#define KERN_IO_START  (KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
+#define FULL_IO_SIZE   0x80000000ul
+#define  ISA_IO_BASE   (KERN_IO_START)
+#define  ISA_IO_END    (KERN_IO_START + 0x10000ul)
+#define  PHB_IO_BASE   (ISA_IO_END)
+#define  PHB_IO_END    (KERN_IO_START + FULL_IO_SIZE)
+#define IOREMAP_BASE   (PHB_IO_END)
+#define IOREMAP_END    (KERN_VIRT_START + KERN_VIRT_SIZE)
+
+#define vmemmap                        ((struct page *)VMEMMAP_BASE)
+
+/* Advertise special mapping type for AGP */
+#define HAVE_PAGE_AGP
+
+/* Advertise support for _PAGE_SPECIAL */
+#define __HAVE_ARCH_PTE_SPECIAL
+
+#ifndef __ASSEMBLY__
+
+/*
+ * This is the default implementation of various PTE accessors, it's
+ * used in all cases except Book3S with 64K pages where we have a
+ * concept of sub-pages
+ */
+#ifndef __real_pte
+
+#ifdef CONFIG_STRICT_MM_TYPECHECKS
+#define __real_pte(e,p)                ((real_pte_t){(e)})
+#define __rpte_to_pte(r)       ((r).pte)
+#else
+#define __real_pte(e,p)                (e)
+#define __rpte_to_pte(r)       (__pte(r))
+#endif
+#define __rpte_to_hidx(r,index)        (pte_val(__rpte_to_pte(r)) >>_PAGE_F_GIX_SHIFT)
+
+#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)       \
+       do {                                                             \
+               index = 0;                                               \
+               shift = mmu_psize_defs[psize].shift;                     \
+
+#define pte_iterate_hashed_end() } while(0)
+
+/*
+ * We expect this to be called only for user addresses or kernel virtual
+ * addresses other than the linear mapping.
+ */
+#define pte_pagesize_index(mm, addr, pte)      MMU_PAGE_4K
+
+#endif /* __real_pte */
+
+static inline void pmd_set(pmd_t *pmdp, unsigned long val)
+{
+       *pmdp = __pmd(val);
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+       *pmdp = __pmd(0);
+}
+
+#define pmd_none(pmd)          (!pmd_val(pmd))
+#define        pmd_present(pmd)        (!pmd_none(pmd))
+
+static inline void pud_set(pud_t *pudp, unsigned long val)
+{
+       *pudp = __pud(val);
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+       *pudp = __pud(0);
+}
+
+#define pud_none(pud)          (!pud_val(pud))
+#define pud_present(pud)       (pud_val(pud) != 0)
+
+extern struct page *pud_page(pud_t pud);
+extern struct page *pmd_page(pmd_t pmd);
+static inline pte_t pud_pte(pud_t pud)
+{
+       return __pte(pud_val(pud));
+}
+
+static inline pud_t pte_pud(pte_t pte)
+{
+       return __pud(pte_val(pte));
+}
+#define pud_write(pud)         pte_write(pud_pte(pud))
+#define pgd_write(pgd)         pte_write(pgd_pte(pgd))
+static inline void pgd_set(pgd_t *pgdp, unsigned long val)
+{
+       *pgdp = __pgd(val);
+}
+
+/*
+ * Find an entry in a page-table-directory.  We combine the address region
+ * (the high order N bits) and the pgd portion of the address.
+ */
+
+#define pgd_offset(mm, address)         ((mm)->pgd + pgd_index(address))
+
+#define pmd_offset(pudp,addr) \
+       (((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr))
+
+#define pte_offset_kernel(dir,addr) \
+       (((pte_t *) pmd_page_vaddr(*(dir))) + pte_index(addr))
+
+#define pte_offset_map(dir,addr)       pte_offset_kernel((dir), (addr))
+#define pte_unmap(pte)                 do { } while(0)
+
+/* to find an entry in a kernel page-table-directory */
+/* This now only contains the vmalloc pages */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+#define pte_ERROR(e) \
+       pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pmd_ERROR(e) \
+       pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pgd_ERROR(e) \
+       pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/* Encode and de-code a swap entry */
+#define MAX_SWAPFILES_CHECK() do { \
+       BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
+       /*                                                      \
+        * Don't have overlapping bits with _PAGE_HPTEFLAGS     \
+        * We filter HPTEFLAGS on set_pte.                      \
+        */                                                     \
+       BUILD_BUG_ON(_PAGE_HPTEFLAGS & (0x1f << _PAGE_BIT_SWAP_TYPE)); \
+       BUILD_BUG_ON(_PAGE_HPTEFLAGS & _PAGE_SWP_SOFT_DIRTY);   \
+       } while (0)
+/*
+ * on pte we don't need handle RADIX_TREE_EXCEPTIONAL_SHIFT;
+ */
+#define SWP_TYPE_BITS 5
+#define __swp_type(x)          (((x).val >> _PAGE_BIT_SWAP_TYPE) \
+                               & ((1UL << SWP_TYPE_BITS) - 1))
+#define __swp_offset(x)                ((x).val >> PTE_RPN_SHIFT)
+#define __swp_entry(type, offset)      ((swp_entry_t) { \
+                                       ((type) << _PAGE_BIT_SWAP_TYPE) \
+                                       | ((offset) << PTE_RPN_SHIFT) })
+/*
+ * swp_entry_t must be independent of pte bits. We build a swp_entry_t from
+ * swap type and offset we get from swap and convert that to pte to find a
+ * matching pte in linux page table.
+ * Clear bits not found in swap entries here.
+ */
+#define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
+#define __swp_entry_to_pte(x)  __pte((x).val | _PAGE_PTE)
+
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SWP_SOFT_DIRTY   (1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
+#else
+#define _PAGE_SWP_SOFT_DIRTY   0UL
+#endif /* CONFIG_MEM_SOFT_DIRTY */
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
+}
+static inline bool pte_swp_soft_dirty(pte_t pte)
+{
+       return !!(pte_val(pte) & _PAGE_SWP_SOFT_DIRTY);
+}
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+       return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
+}
+#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+
+void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
+void pgtable_cache_init(void);
+
+struct page *realmode_pfn_to_page(unsigned long pfn);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
+extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
+extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
+extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+                      pmd_t *pmdp, pmd_t pmd);
+extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+                                pmd_t *pmd);
+extern int has_transparent_hugepage(void);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+       return __pte(pmd_val(pmd));
+}
+
+static inline pmd_t pte_pmd(pte_t pte)
+{
+       return __pmd(pte_val(pte));
+}
+
+static inline pte_t *pmdp_ptep(pmd_t *pmd)
+{
+       return (pte_t *)pmd;
+}
+
+#define pmd_pfn(pmd)           pte_pfn(pmd_pte(pmd))
+#define pmd_dirty(pmd)         pte_dirty(pmd_pte(pmd))
+#define pmd_young(pmd)         pte_young(pmd_pte(pmd))
+#define pmd_mkold(pmd)         pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_wrprotect(pmd)     pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd)       pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_mkyoung(pmd)       pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd)       pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+
+#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
+#define pmd_soft_dirty(pmd)    pte_soft_dirty(pmd_pte(pmd))
+#define pmd_mksoft_dirty(pmd)  pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
+#define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
+#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
+
+#ifdef CONFIG_NUMA_BALANCING
+static inline int pmd_protnone(pmd_t pmd)
+{
+       return pte_protnone(pmd_pte(pmd));
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write(pmd)         pte_write(pmd_pte(pmd))
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+       return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_THP_HUGE));
+}
+
+#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+extern int pmdp_set_access_flags(struct vm_area_struct *vma,
+                                unsigned long address, pmd_t *pmdp,
+                                pmd_t entry, int dirty);
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+                                    unsigned long address, pmd_t *pmdp);
+#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
+                                 unsigned long address, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+                                    unsigned long addr, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+extern void pmdp_splitting_flush(struct vm_area_struct *vma,
+                                unsigned long address, pmd_t *pmdp);
+
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+                                unsigned long address, pmd_t *pmdp);
+#define pmdp_collapse_flush pmdp_collapse_flush
+
+#define __HAVE_ARCH_PGTABLE_DEPOSIT
+extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+                                      pgtable_t pgtable);
+#define __HAVE_ARCH_PGTABLE_WITHDRAW
+extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+
+#define __HAVE_ARCH_PMDP_INVALIDATE
+extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+                           pmd_t *pmdp);
+
+#define pmd_move_must_withdraw pmd_move_must_withdraw
+struct spinlock;
+static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
+                                        struct spinlock *old_pmd_ptl)
+{
+       /*
+        * Archs like ppc64 use pgtable to store per pmd
+        * specific information. So when we switch the pmd,
+        * we should also withdraw and deposit the pgtable
+        */
+       return true;
+}
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ */
diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
new file mode 100644 (file)
index 0000000..8b0f4a2
--- /dev/null
@@ -0,0 +1,29 @@
+#ifndef _ASM_POWERPC_BOOK3S_PGTABLE_H
+#define _ASM_POWERPC_BOOK3S_PGTABLE_H
+
+#ifdef CONFIG_PPC64
+#include <asm/book3s/64/pgtable.h>
+#else
+#include <asm/book3s/32/pgtable.h>
+#endif
+
+#define FIRST_USER_ADDRESS     0UL
+#ifndef __ASSEMBLY__
+/* Insert a PTE, top-level function is out of line. It uses an inline
+ * low level function in the respective pgtable-* files
+ */
+extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+                      pte_t pte);
+
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+                                pte_t *ptep, pte_t entry, int dirty);
+
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                                    unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
+#endif /* __ASSEMBLY__ */
+#endif
index ad6263cffb0fd0b3e09b192748f57f9049ce3cae..d1a8d93cccfd483b8181046434ebc3ef55d8704a 100644 (file)
@@ -18,12 +18,12 @@ __xchg_u32(volatile void *p, unsigned long val)
        unsigned long prev;
 
        __asm__ __volatile__(
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    lwarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
 "      stwcx.  %3,0,%2 \n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        : "=&r" (prev), "+m" (*(volatile unsigned int *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");
@@ -61,12 +61,12 @@ __xchg_u64(volatile void *p, unsigned long val)
        unsigned long prev;
 
        __asm__ __volatile__(
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    ldarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
 "      stdcx.  %3,0,%2 \n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        : "=&r" (prev), "+m" (*(volatile unsigned long *)p)
        : "r" (p), "r" (val)
        : "cc", "memory");
@@ -151,14 +151,14 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
        unsigned int prev;
 
        __asm__ __volatile__ (
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    lwarx   %0,0,%2         # __cmpxchg_u32\n\
        cmpw    0,%0,%3\n\
        bne-    2f\n"
        PPC405_ERR77(0,%2)
 "      stwcx.  %4,0,%2\n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        "\n\
 2:"
        : "=&r" (prev), "+m" (*p)
@@ -197,13 +197,13 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
        unsigned long prev;
 
        __asm__ __volatile__ (
-       PPC_RELEASE_BARRIER
+       PPC_ATOMIC_ENTRY_BARRIER
 "1:    ldarx   %0,0,%2         # __cmpxchg_u64\n\
        cmpd    0,%0,%3\n\
        bne-    2f\n\
        stdcx.  %4,0,%2\n\
        bne-    1b"
-       PPC_ACQUIRE_BARRIER
+       PPC_ATOMIC_EXIT_BARRIER
        "\n\
 2:"
        : "=&r" (prev), "+m" (*p)
index 4398a6cdcf53cb886527c263c6c13c10accda195..2c5c5b47680402389629e97b1f56b5a205f68f6a 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/of.h>
+#include <soc/fsl/qe/qe.h>
 
 /*
  * SPI Parameter RAM common to QE and CPM.
@@ -155,49 +156,6 @@ typedef struct cpm_buf_desc {
  */
 #define BD_I2C_START           (0x0400)
 
-int cpm_muram_init(void);
-
-#if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE)
-unsigned long cpm_muram_alloc(unsigned long size, unsigned long align);
-int cpm_muram_free(unsigned long offset);
-unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
-void __iomem *cpm_muram_addr(unsigned long offset);
-unsigned long cpm_muram_offset(void __iomem *addr);
-dma_addr_t cpm_muram_dma(void __iomem *addr);
-#else
-static inline unsigned long cpm_muram_alloc(unsigned long size,
-                                           unsigned long align)
-{
-       return -ENOSYS;
-}
-
-static inline int cpm_muram_free(unsigned long offset)
-{
-       return -ENOSYS;
-}
-
-static inline unsigned long cpm_muram_alloc_fixed(unsigned long offset,
-                                                 unsigned long size)
-{
-       return -ENOSYS;
-}
-
-static inline void __iomem *cpm_muram_addr(unsigned long offset)
-{
-       return NULL;
-}
-
-static inline unsigned long cpm_muram_offset(void __iomem *addr)
-{
-       return -ENOSYS;
-}
-
-static inline dma_addr_t cpm_muram_dma(void __iomem *addr)
-{
-       return 0;
-}
-#endif /* defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE) */
-
 #ifdef CONFIG_CPM
 int cpm_command(u32 command, u8 opcode);
 #else
index 77f52b26dad6cc200bea3baf63385d6ca1025d02..93ae809fe5ea330bd19aa7ca98ce27b38fabd4dc 100644 (file)
@@ -129,15 +129,6 @@ BEGIN_FTR_SECTION_NESTED(941)                                              \
        mtspr   SPRN_PPR,ra;                                            \
 END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)
 
-/*
- * Increase the priority on systems where PPR save/restore is not
- * implemented/ supported.
- */
-#define HMT_MEDIUM_PPR_DISCARD                                         \
-BEGIN_FTR_SECTION_NESTED(942)                                          \
-       HMT_MEDIUM;                                                     \
-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,0,942)  /*non P7*/              
-
 /*
  * Get an SPR into a register if the CPU has the given feature
  */
@@ -263,17 +254,6 @@ do_kvm_##n:                                                                \
 #define KVM_HANDLER_SKIP(area, h, n)
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
-#define KVMTEST_PR(n)                  __KVMTEST(n)
-#define KVM_HANDLER_PR(area, h, n)     __KVM_HANDLER(area, h, n)
-#define KVM_HANDLER_PR_SKIP(area, h, n)        __KVM_HANDLER_SKIP(area, h, n)
-
-#else
-#define KVMTEST_PR(n)
-#define KVM_HANDLER_PR(area, h, n)
-#define KVM_HANDLER_PR_SKIP(area, h, n)
-#endif
-
 #define NOTEST(n)
 
 /*
@@ -353,27 +333,25 @@ do_kvm_##n:                                                               \
 /*
  * Exception vectors.
  */
-#define STD_EXCEPTION_PSERIES(loc, vec, label)         \
-       . = loc;                                        \
+#define STD_EXCEPTION_PSERIES(vec, label)              \
+       . = vec;                                        \
        .globl label##_pSeries;                         \
 label##_pSeries:                                       \
-       HMT_MEDIUM_PPR_DISCARD;                         \
        SET_SCRATCH0(r13);              /* save r13 */          \
        EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common,    \
-                                EXC_STD, KVMTEST_PR, vec)
+                                EXC_STD, KVMTEST, vec)
 
 /* Version of above for when we have to branch out-of-line */
 #define STD_EXCEPTION_PSERIES_OOL(vec, label)                  \
        .globl label##_pSeries;                                 \
 label##_pSeries:                                               \
-       EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec);        \
+       EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec);   \
        EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_STD)
 
 #define STD_EXCEPTION_HV(loc, vec, label)              \
        . = loc;                                        \
        .globl label##_hv;                              \
 label##_hv:                                            \
-       HMT_MEDIUM_PPR_DISCARD;                         \
        SET_SCRATCH0(r13);      /* save r13 */                  \
        EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common,    \
                                 EXC_HV, KVMTEST, vec)
@@ -389,7 +367,6 @@ label##_hv:                                         \
        . = loc;                                        \
        .globl label##_relon_pSeries;                   \
 label##_relon_pSeries:                                 \
-       HMT_MEDIUM_PPR_DISCARD;                         \
        /* No guest interrupts come through here */     \
        SET_SCRATCH0(r13);              /* save r13 */  \
        EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
@@ -405,7 +382,6 @@ label##_relon_pSeries:                                              \
        . = loc;                                        \
        .globl label##_relon_hv;                        \
 label##_relon_hv:                                      \
-       HMT_MEDIUM_PPR_DISCARD;                         \
        /* No guest interrupts come through here */     \
        SET_SCRATCH0(r13);      /* save r13 */          \
        EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
@@ -436,17 +412,13 @@ label##_relon_hv:                                         \
 #define _SOFTEN_TEST(h, vec)   __SOFTEN_TEST(h, vec)
 
 #define SOFTEN_TEST_PR(vec)                                            \
-       KVMTEST_PR(vec);                                                \
+       KVMTEST(vec);                                                   \
        _SOFTEN_TEST(EXC_STD, vec)
 
 #define SOFTEN_TEST_HV(vec)                                            \
        KVMTEST(vec);                                                   \
        _SOFTEN_TEST(EXC_HV, vec)
 
-#define SOFTEN_TEST_HV_201(vec)                                                \
-       KVMTEST(vec);                                                   \
-       _SOFTEN_TEST(EXC_STD, vec)
-
 #define SOFTEN_NOTEST_PR(vec)          _SOFTEN_TEST(EXC_STD, vec)
 #define SOFTEN_NOTEST_HV(vec)          _SOFTEN_TEST(EXC_HV, vec)
 
@@ -463,7 +435,6 @@ label##_relon_hv:                                           \
        . = loc;                                                        \
        .globl label##_pSeries;                                         \
 label##_pSeries:                                                       \
-       HMT_MEDIUM_PPR_DISCARD;                                         \
        _MASKABLE_EXCEPTION_PSERIES(vec, label,                         \
                                    EXC_STD, SOFTEN_TEST_PR)
 
@@ -481,7 +452,6 @@ label##_hv:                                                         \
        EXCEPTION_PROLOG_PSERIES_1(label##_common, EXC_HV);
 
 #define __MASKABLE_RELON_EXCEPTION_PSERIES(vec, label, h, extra)       \
-       HMT_MEDIUM_PPR_DISCARD;                                         \
        SET_SCRATCH0(r13);    /* save r13 */                            \
        EXCEPTION_PROLOG_0(PACA_EXGEN);                                 \
        __EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec);           \
index e05808a328db24ec39a8eb144e423cb7015f31a8..b0629249778b31ed536613d371d4005dd2e6fc11 100644 (file)
 #define FW_FEATURE_VPHN                ASM_CONST(0x0000000004000000)
 #define FW_FEATURE_XCMO                ASM_CONST(0x0000000008000000)
 #define FW_FEATURE_OPAL                ASM_CONST(0x0000000010000000)
-#define FW_FEATURE_OPALv2      ASM_CONST(0x0000000020000000)
 #define FW_FEATURE_SET_MODE    ASM_CONST(0x0000000040000000)
 #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
 #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
 #define FW_FEATURE_PRRN                ASM_CONST(0x0000000200000000)
-#define FW_FEATURE_OPALv3      ASM_CONST(0x0000000400000000)
 
 #ifndef __ASSEMBLY__
 
@@ -70,8 +68,7 @@ enum {
                FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
                FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
        FW_FEATURE_PSERIES_ALWAYS = 0,
-       FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 |
-               FW_FEATURE_OPALv3,
+       FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL,
        FW_FEATURE_POWERNV_ALWAYS = 0,
        FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
        FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
index 5879fde56f3c91080a585685f42ca6b411b39d24..6c1297ec374cc720f456e2c34f78299f8aa6468f 100644 (file)
@@ -385,6 +385,17 @@ static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
 {
        *(volatile unsigned long __force *)PCI_FIX_ADDR(addr) = v;
 }
+
+/*
+ * Real mode version of the above. stdcix is only supposed to be used
+ * in hypervisor real mode as per the architecture spec.
+ */
+static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
+{
+       __asm__ __volatile__("stdcix %0,0,%1"
+               : : "r" (val), "r" (paddr) : "memory");
+}
+
 #endif /* __powerpc64__ */
 
 /*
index ba3342bbdbdaac2e7015cce107121dfb9a1cc40e..7352d3f212df9f392cdf35a9293aab8dd20628fe 100644 (file)
@@ -21,7 +21,7 @@
  * need for various slices related matters. Note that this isn't the
  * complete pgtable.h but only a portion of it.
  */
-#include <asm/pgtable-ppc64.h>
+#include <asm/book3s/64/pgtable.h>
 #include <asm/bug.h>
 #include <asm/processor.h>
 
similarity index 96%
rename from arch/powerpc/include/asm/pgtable-ppc32.h
rename to arch/powerpc/include/asm/nohash/32/pgtable.h
index 9c326565d498fb1ba8d85e00f0bd20a2350c5a43..c82cbf52d19ea07cef31629c75b6502ea9955dcd 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _ASM_POWERPC_PGTABLE_PPC32_H
-#define _ASM_POWERPC_PGTABLE_PPC32_H
+#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
+#define _ASM_POWERPC_NOHASH_32_PGTABLE_H
 
 #include <asm-generic/pgtable-nopmd.h>
 
@@ -106,17 +106,15 @@ extern int icache_44x_need_flush;
  */
 
 #if defined(CONFIG_40x)
-#include <asm/pte-40x.h>
+#include <asm/nohash/32/pte-40x.h>
 #elif defined(CONFIG_44x)
-#include <asm/pte-44x.h>
+#include <asm/nohash/32/pte-44x.h>
 #elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
-#include <asm/pte-book3e.h>
+#include <asm/nohash/pte-book3e.h>
 #elif defined(CONFIG_FSL_BOOKE)
-#include <asm/pte-fsl-booke.h>
+#include <asm/nohash/32/pte-fsl-booke.h>
 #elif defined(CONFIG_8xx)
-#include <asm/pte-8xx.h>
-#else /* CONFIG_6xx */
-#include <asm/pte-hash32.h>
+#include <asm/nohash/32/pte-8xx.h>
 #endif
 
 /* And here we include common definitions */
@@ -130,7 +128,12 @@ extern int icache_44x_need_flush;
 #define pmd_none(pmd)          (!pmd_val(pmd))
 #define        pmd_bad(pmd)            (pmd_val(pmd) & _PMD_BAD)
 #define        pmd_present(pmd)        (pmd_val(pmd) & _PMD_PRESENT_MASK)
-#define        pmd_clear(pmdp)         do { pmd_val(*(pmdp)) = 0; } while (0)
+static inline void pmd_clear(pmd_t *pmdp)
+{
+       *pmdp = __pmd(0);
+}
+
+
 
 /*
  * When flushing the tlb entry for a page, we also need to flush the hash
@@ -337,4 +340,4 @@ extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep,
 
 #endif /* !__ASSEMBLY__ */
 
-#endif /* _ASM_POWERPC_PGTABLE_PPC32_H */
+#endif /* __ASM_POWERPC_NOHASH_32_PGTABLE_H */
similarity index 95%
rename from arch/powerpc/include/asm/pte-40x.h
rename to arch/powerpc/include/asm/nohash/32/pte-40x.h
index 486b1ef813387975832055c2b2079b3d53bd98d9..9624ebdacc47783ff5bc9654d15972084a876772 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _ASM_POWERPC_PTE_40x_H
-#define _ASM_POWERPC_PTE_40x_H
+#ifndef _ASM_POWERPC_NOHASH_32_PTE_40x_H
+#define _ASM_POWERPC_NOHASH_32_PTE_40x_H
 #ifdef __KERNEL__
 
 /*
@@ -61,4 +61,4 @@
 #define PTE_ATOMIC_UPDATES     1
 
 #endif /* __KERNEL__ */
-#endif /*  _ASM_POWERPC_PTE_40x_H */
+#endif /*  _ASM_POWERPC_NOHASH_32_PTE_40x_H */
similarity index 96%
rename from arch/powerpc/include/asm/pte-44x.h
rename to arch/powerpc/include/asm/nohash/32/pte-44x.h
index 36f75fab23f52414d6c1dda923db8d6199b4cc1b..fdab41c654efc2b79be81f68b6e47056b52965b7 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _ASM_POWERPC_PTE_44x_H
-#define _ASM_POWERPC_PTE_44x_H
+#ifndef _ASM_POWERPC_NOHASH_32_PTE_44x_H
+#define _ASM_POWERPC_NOHASH_32_PTE_44x_H
 #ifdef __KERNEL__
 
 /*
@@ -94,4 +94,4 @@
 
 
 #endif /* __KERNEL__ */
-#endif /*  _ASM_POWERPC_PTE_44x_H */
+#endif /*  _ASM_POWERPC_NOHASH_32_PTE_44x_H */
similarity index 95%
rename from arch/powerpc/include/asm/pte-8xx.h
rename to arch/powerpc/include/asm/nohash/32/pte-8xx.h
index a0e2ba9609760e4108ce9aca0a4fd255fa15b1d1..3742b19196615c62989bf78ebbb0a33b26ee14e2 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _ASM_POWERPC_PTE_8xx_H
-#define _ASM_POWERPC_PTE_8xx_H
+#ifndef _ASM_POWERPC_NOHASH_32_PTE_8xx_H
+#define _ASM_POWERPC_NOHASH_32_PTE_8xx_H
 #ifdef __KERNEL__
 
 /*
@@ -62,4 +62,4 @@
                                 _PAGE_HWWRITE | _PAGE_EXEC)
 
 #endif /* __KERNEL__ */
-#endif /*  _ASM_POWERPC_PTE_8xx_H */
+#endif /*  _ASM_POWERPC_NOHASH_32_PTE_8xx_H */
similarity index 88%
rename from arch/powerpc/include/asm/pte-fsl-booke.h
rename to arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h
index 9f5c3d04a1a3ea3f3da542028b002fe304b9002f..5422d00c614520567ea6ba413927325fe76ee9ff 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _ASM_POWERPC_PTE_FSL_BOOKE_H
-#define _ASM_POWERPC_PTE_FSL_BOOKE_H
+#ifndef _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H
+#define _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H
 #ifdef __KERNEL__
 
 /* PTE bit definitions for Freescale BookE SW loaded TLB MMU based
@@ -37,4 +37,4 @@
 #define PTE_WIMGE_SHIFT (6)
 
 #endif /* __KERNEL__ */
-#endif /*  _ASM_POWERPC_PTE_FSL_BOOKE_H */
+#endif /*  _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H */
similarity index 92%
rename from arch/powerpc/include/asm/pgtable-ppc64-4k.h
rename to arch/powerpc/include/asm/nohash/64/pgtable-4k.h
index 132ee1d482c255696a12b57e9541a57e9b1de19b..fc7d51753f8111f77bc030ef99a1c3c79ef47ebd 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _ASM_POWERPC_PGTABLE_PPC64_4K_H
-#define _ASM_POWERPC_PGTABLE_PPC64_4K_H
+#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
+#define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H
 /*
  * Entries per page directory level.  The PTE level must use a 64b record
  * for each page table entry.  The PMD and PGD level use a 32b record for
 #define pgd_none(pgd)          (!pgd_val(pgd))
 #define pgd_bad(pgd)           (pgd_val(pgd) == 0)
 #define pgd_present(pgd)       (pgd_val(pgd) != 0)
-#define pgd_clear(pgdp)                (pgd_val(*(pgdp)) = 0)
 #define pgd_page_vaddr(pgd)    (pgd_val(pgd) & ~PGD_MASKED_BITS)
 
 #ifndef __ASSEMBLY__
 
+static inline void pgd_clear(pgd_t *pgdp)
+{
+       *pgdp = __pgd(0);
+}
+
 static inline pte_t pgd_pte(pgd_t pgd)
 {
        return __pte(pgd_val(pgd));
@@ -85,4 +89,4 @@ extern struct page *pgd_page(pgd_t pgd);
 #define remap_4k_pfn(vma, addr, pfn, prot)     \
        remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot))
 
-#endif /* _ASM_POWERPC_PGTABLE_PPC64_4K_H */
+#endif /* _ _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H */
similarity index 64%
rename from arch/powerpc/include/asm/pgtable-ppc64-64k.h
rename to arch/powerpc/include/asm/nohash/64/pgtable-64k.h
index 1de35bbd02a64048d03aef0f3bdb08b51d6d8c0a..570fb30be21c0bddd061f86a156b52080b3dcff1 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _ASM_POWERPC_PGTABLE_PPC64_64K_H
-#define _ASM_POWERPC_PGTABLE_PPC64_64K_H
+#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
+#define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H
 
 #include <asm-generic/pgtable-nopud.h>
 
@@ -9,8 +9,19 @@
 #define PUD_INDEX_SIZE 0
 #define PGD_INDEX_SIZE  12
 
+/*
+ * we support 32 fragments per PTE page of 64K size
+ */
+#define PTE_FRAG_NR    32
+/*
+ * We use a 2K PTE page fragment and another 2K for storing
+ * real_pte_t hash index
+ */
+#define PTE_FRAG_SIZE_SHIFT  11
+#define PTE_FRAG_SIZE (1UL << PTE_FRAG_SIZE_SHIFT)
+
 #ifndef __ASSEMBLY__
-#define PTE_TABLE_SIZE (sizeof(real_pte_t) << PTE_INDEX_SIZE)
+#define PTE_TABLE_SIZE PTE_FRAG_SIZE
 #define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE)
 #define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE)
 #endif /* __ASSEMBLY__ */
 #define PGDIR_SIZE     (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK     (~(PGDIR_SIZE-1))
 
-/* Bits to mask out from a PMD to get to the PTE page */
-/* PMDs point to PTE table fragments which are 4K aligned.  */
-#define PMD_MASKED_BITS                0xfff
+/*
+ * Bits to mask out from a PMD to get to the PTE page
+ * PMDs point to PTE table fragments which are PTE_FRAG_SIZE aligned.
+ */
+#define PMD_MASKED_BITS                (PTE_FRAG_SIZE - 1)
 /* Bits to mask out from a PGD/PUD to get to the PMD page */
 #define PUD_MASKED_BITS                0x1ff
 
 #define pgd_pte(pgd)   (pud_pte(((pud_t){ pgd })))
 #define pte_pgd(pte)   ((pgd_t)pte_pud(pte))
 
-#endif /* _ASM_POWERPC_PGTABLE_PPC64_64K_H */
+#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H */
similarity index 51%
rename from arch/powerpc/include/asm/pgtable-ppc64.h
rename to arch/powerpc/include/asm/nohash/64/pgtable.h
index 3245f2d96d4f59e5140348b8c4dddbe836c5dda6..b9f734dd5b81c24a8c2ae00c0cd1c6956d2e08b8 100644 (file)
@@ -1,14 +1,14 @@
-#ifndef _ASM_POWERPC_PGTABLE_PPC64_H_
-#define _ASM_POWERPC_PGTABLE_PPC64_H_
+#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_H
+#define _ASM_POWERPC_NOHASH_64_PGTABLE_H
 /*
  * This file contains the functions and defines necessary to modify and use
  * the ppc64 hashed page table.
  */
 
 #ifdef CONFIG_PPC_64K_PAGES
-#include <asm/pgtable-ppc64-64k.h>
+#include <asm/nohash/64/pgtable-64k.h>
 #else
-#include <asm/pgtable-ppc64-4k.h>
+#include <asm/nohash/64/pgtable-4k.h>
 #endif
 #include <asm/barrier.h>
 
@@ -18,7 +18,7 @@
  * Size of EA range mapped by our pagetables.
  */
 #define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
-                           PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
+                           PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
 #define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /*
  * Include the PTE bits definitions
  */
-#ifdef CONFIG_PPC_BOOK3S
-#include <asm/pte-hash64.h>
-#else
-#include <asm/pte-book3e.h>
-#endif
+#include <asm/nohash/pte-book3e.h>
 #include <asm/pte-common.h>
 
 #ifdef CONFIG_PPC_MM_SLICES
 #endif /* CONFIG_PPC_MM_SLICES */
 
 #ifndef __ASSEMBLY__
-
-/*
- * This is the default implementation of various PTE accessors, it's
- * used in all cases except Book3S with 64K pages where we have a
- * concept of sub-pages
- */
-#ifndef __real_pte
-
-#ifdef CONFIG_STRICT_MM_TYPECHECKS
-#define __real_pte(e,p)                ((real_pte_t){(e)})
-#define __rpte_to_pte(r)       ((r).pte)
-#else
-#define __real_pte(e,p)                (e)
-#define __rpte_to_pte(r)       (__pte(r))
-#endif
-#define __rpte_to_hidx(r,index)        (pte_val(__rpte_to_pte(r)) >> 12)
-
-#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)       \
-       do {                                                             \
-               index = 0;                                               \
-               shift = mmu_psize_defs[psize].shift;                     \
-
-#define pte_iterate_hashed_end() } while(0)
-
-/*
- * We expect this to be called only for user addresses or kernel virtual
- * addresses other than the linear mapping.
- */
-#define pte_pagesize_index(mm, addr, pte)      MMU_PAGE_4K
-
-#endif /* __real_pte */
-
-
 /* pte_clear moved to later in this file */
 
 #define PMD_BAD_BITS           (PTE_TABLE_SIZE-1)
 #define PUD_BAD_BITS           (PMD_TABLE_SIZE-1)
 
-#define pmd_set(pmdp, pmdval)  (pmd_val(*(pmdp)) = (pmdval))
+static inline void pmd_set(pmd_t *pmdp, unsigned long val)
+{
+       *pmdp = __pmd(val);
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+       *pmdp = __pmd(0);
+}
+
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+       return __pte(pmd_val(pmd));
+}
+
 #define pmd_none(pmd)          (!pmd_val(pmd))
 #define        pmd_bad(pmd)            (!is_kernel_addr(pmd_val(pmd)) \
                                 || (pmd_val(pmd) & PMD_BAD_BITS))
 #define        pmd_present(pmd)        (!pmd_none(pmd))
-#define        pmd_clear(pmdp)         (pmd_val(*(pmdp)) = 0)
 #define pmd_page_vaddr(pmd)    (pmd_val(pmd) & ~PMD_MASKED_BITS)
 extern struct page *pmd_page(pmd_t pmd);
 
-#define pud_set(pudp, pudval)  (pud_val(*(pudp)) = (pudval))
+static inline void pud_set(pud_t *pudp, unsigned long val)
+{
+       *pudp = __pud(val);
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+       *pudp = __pud(0);
+}
+
 #define pud_none(pud)          (!pud_val(pud))
 #define        pud_bad(pud)            (!is_kernel_addr(pud_val(pud)) \
                                 || (pud_val(pud) & PUD_BAD_BITS))
 #define pud_present(pud)       (pud_val(pud) != 0)
-#define pud_clear(pudp)                (pud_val(*(pudp)) = 0)
 #define pud_page_vaddr(pud)    (pud_val(pud) & ~PUD_MASKED_BITS)
 
 extern struct page *pud_page(pud_t pud);
@@ -177,9 +161,13 @@ static inline pud_t pte_pud(pte_t pte)
        return __pud(pte_val(pte));
 }
 #define pud_write(pud)         pte_write(pud_pte(pud))
-#define pgd_set(pgdp, pudp)    ({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
 #define pgd_write(pgd)         pte_write(pgd_pte(pgd))
 
+static inline void pgd_set(pgd_t *pgdp, unsigned long val)
+{
+       *pgdp = __pgd(val);
+}
+
 /*
  * Find an entry in a page-table-directory.  We combine the address region
  * (the high order N bits) and the pgd portion of the address.
@@ -373,254 +361,4 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *));
 void pgtable_cache_init(void);
 #endif /* __ASSEMBLY__ */
 
-/*
- * THP pages can't be special. So use the _PAGE_SPECIAL
- */
-#define _PAGE_SPLITTING _PAGE_SPECIAL
-
-/*
- * We need to differentiate between explicit huge page and THP huge
- * page, since THP huge page also need to track real subpage details
- */
-#define _PAGE_THP_HUGE  _PAGE_4K_PFN
-
-/*
- * set of bits not changed in pmd_modify.
- */
-#define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS |              \
-                        _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPLITTING | \
-                        _PAGE_THP_HUGE)
-
-#ifndef __ASSEMBLY__
-/*
- * The linux hugepage PMD now include the pmd entries followed by the address
- * to the stashed pgtable_t. The stashed pgtable_t contains the hpte bits.
- * [ 1 bit secondary | 3 bit hidx | 1 bit valid | 000]. We use one byte per
- * each HPTE entry. With 16MB hugepage and 64K HPTE we need 256 entries and
- * with 4K HPTE we need 4096 entries. Both will fit in a 4K pgtable_t.
- *
- * The last three bits are intentionally left to zero. This memory location
- * are also used as normal page PTE pointers. So if we have any pointers
- * left around while we collapse a hugepage, we need to make sure
- * _PAGE_PRESENT bit of that is zero when we look at them
- */
-static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
-{
-       return (hpte_slot_array[index] >> 3) & 0x1;
-}
-
-static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
-                                          int index)
-{
-       return hpte_slot_array[index] >> 4;
-}
-
-static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
-                                       unsigned int index, unsigned int hidx)
-{
-       hpte_slot_array[index] = hidx << 4 | 0x1 << 3;
-}
-
-struct page *realmode_pfn_to_page(unsigned long pfn);
-
-static inline char *get_hpte_slot_array(pmd_t *pmdp)
-{
-       /*
-        * The hpte hindex is stored in the pgtable whose address is in the
-        * second half of the PMD
-        *
-        * Order this load with the test for pmd_trans_huge in the caller
-        */
-       smp_rmb();
-       return *(char **)(pmdp + PTRS_PER_PMD);
-
-
-}
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
-                                  pmd_t *pmdp, unsigned long old_pmd);
-extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
-extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
-extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
-extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
-                      pmd_t *pmdp, pmd_t pmd);
-extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
-                                pmd_t *pmd);
-/*
- *
- * For core kernel code by design pmd_trans_huge is never run on any hugetlbfs
- * page. The hugetlbfs page table walking and mangling paths are totally
- * separated form the core VM paths and they're differentiated by
- *  VM_HUGETLB being set on vm_flags well before any pmd_trans_huge could run.
- *
- * pmd_trans_huge() is defined as false at build time if
- * CONFIG_TRANSPARENT_HUGEPAGE=n to optimize away code blocks at build
- * time in such case.
- *
- * For ppc64 we need to differntiate from explicit hugepages from THP, because
- * for THP we also track the subpage details at the pmd level. We don't do
- * that for explicit huge pages.
- *
- */
-static inline int pmd_trans_huge(pmd_t pmd)
-{
-       /*
-        * leaf pte for huge page, bottom two bits != 00
-        */
-       return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE);
-}
-
-static inline int pmd_trans_splitting(pmd_t pmd)
-{
-       if (pmd_trans_huge(pmd))
-               return pmd_val(pmd) & _PAGE_SPLITTING;
-       return 0;
-}
-
-extern int has_transparent_hugepage(void);
-#else
-static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
-                                         unsigned long addr, pmd_t *pmdp,
-                                         unsigned long old_pmd)
-{
-
-       WARN(1, "%s called with THP disabled\n", __func__);
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-
-static inline int pmd_large(pmd_t pmd)
-{
-       /*
-        * leaf pte for huge page, bottom two bits != 00
-        */
-       return ((pmd_val(pmd) & 0x3) != 0x0);
-}
-
-static inline pte_t pmd_pte(pmd_t pmd)
-{
-       return __pte(pmd_val(pmd));
-}
-
-static inline pmd_t pte_pmd(pte_t pte)
-{
-       return __pmd(pte_val(pte));
-}
-
-static inline pte_t *pmdp_ptep(pmd_t *pmd)
-{
-       return (pte_t *)pmd;
-}
-
-#define pmd_pfn(pmd)           pte_pfn(pmd_pte(pmd))
-#define pmd_dirty(pmd)         pte_dirty(pmd_pte(pmd))
-#define pmd_young(pmd)         pte_young(pmd_pte(pmd))
-#define pmd_mkold(pmd)         pte_pmd(pte_mkold(pmd_pte(pmd)))
-#define pmd_wrprotect(pmd)     pte_pmd(pte_wrprotect(pmd_pte(pmd)))
-#define pmd_mkdirty(pmd)       pte_pmd(pte_mkdirty(pmd_pte(pmd)))
-#define pmd_mkyoung(pmd)       pte_pmd(pte_mkyoung(pmd_pte(pmd)))
-#define pmd_mkwrite(pmd)       pte_pmd(pte_mkwrite(pmd_pte(pmd)))
-
-#define __HAVE_ARCH_PMD_WRITE
-#define pmd_write(pmd)         pte_write(pmd_pte(pmd))
-
-static inline pmd_t pmd_mkhuge(pmd_t pmd)
-{
-       /* Do nothing, mk_pmd() does this part.  */
-       return pmd;
-}
-
-static inline pmd_t pmd_mknotpresent(pmd_t pmd)
-{
-       pmd_val(pmd) &= ~_PAGE_PRESENT;
-       return pmd;
-}
-
-static inline pmd_t pmd_mksplitting(pmd_t pmd)
-{
-       pmd_val(pmd) |= _PAGE_SPLITTING;
-       return pmd;
-}
-
-#define __HAVE_ARCH_PMD_SAME
-static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
-{
-       return (((pmd_val(pmd_a) ^ pmd_val(pmd_b)) & ~_PAGE_HPTEFLAGS) == 0);
-}
-
-#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
-extern int pmdp_set_access_flags(struct vm_area_struct *vma,
-                                unsigned long address, pmd_t *pmdp,
-                                pmd_t entry, int dirty);
-
-extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
-                                        unsigned long addr,
-                                        pmd_t *pmdp,
-                                        unsigned long clr,
-                                        unsigned long set);
-
-static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
-                                             unsigned long addr, pmd_t *pmdp)
-{
-       unsigned long old;
-
-       if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
-               return 0;
-       old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
-       return ((old & _PAGE_ACCESSED) != 0);
-}
-
-#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
-extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
-                                    unsigned long address, pmd_t *pmdp);
-#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
-extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
-                                 unsigned long address, pmd_t *pmdp);
-
-#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
-extern pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
-                                    unsigned long addr, pmd_t *pmdp);
-
-#define __HAVE_ARCH_PMDP_SET_WRPROTECT
-static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
-                                     pmd_t *pmdp)
-{
-
-       if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
-               return;
-
-       pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);
-}
-
-#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
-extern void pmdp_splitting_flush(struct vm_area_struct *vma,
-                                unsigned long address, pmd_t *pmdp);
-
-extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
-                                unsigned long address, pmd_t *pmdp);
-#define pmdp_collapse_flush pmdp_collapse_flush
-
-#define __HAVE_ARCH_PGTABLE_DEPOSIT
-extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
-                                      pgtable_t pgtable);
-#define __HAVE_ARCH_PGTABLE_WITHDRAW
-extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
-
-#define __HAVE_ARCH_PMDP_INVALIDATE
-extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
-                           pmd_t *pmdp);
-
-#define pmd_move_must_withdraw pmd_move_must_withdraw
-struct spinlock;
-static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
-                                        struct spinlock *old_pmd_ptl)
-{
-       /*
-        * Archs like ppc64 use pgtable to store per pmd
-        * specific information. So when we switch the pmd,
-        * we should also withdraw and deposit the pgtable
-        */
-       return true;
-}
-#endif /* __ASSEMBLY__ */
-#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */
+#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
new file mode 100644 (file)
index 0000000..1263c22
--- /dev/null
@@ -0,0 +1,252 @@
+#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
+#define _ASM_POWERPC_NOHASH_PGTABLE_H
+
+#if defined(CONFIG_PPC64)
+#include <asm/nohash/64/pgtable.h>
+#else
+#include <asm/nohash/32/pgtable.h>
+#endif
+
+#ifndef __ASSEMBLY__
+
+/* Generic accessors to PTE bits */
+static inline int pte_write(pte_t pte)
+{
+       return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO;
+}
+static inline int pte_dirty(pte_t pte)         { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte)         { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_special(pte_t pte)       { return pte_val(pte) & _PAGE_SPECIAL; }
+static inline int pte_none(pte_t pte)          { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
+static inline pgprot_t pte_pgprot(pte_t pte)   { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * These work without NUMA balancing but the kernel does not care. See the
+ * comment in include/asm-generic/pgtable.h . On powerpc, this will only
+ * work for user pages and always return true for kernel pages.
+ */
+static inline int pte_protnone(pte_t pte)
+{
+       return (pte_val(pte) &
+               (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
+}
+
+static inline int pmd_protnone(pmd_t pmd)
+{
+       return pte_protnone(pmd_pte(pmd));
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
+static inline int pte_present(pte_t pte)
+{
+       return pte_val(pte) & _PAGE_PRESENT;
+}
+
+/* Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ *
+ * Even if PTEs can be unsigned long long, a PFN is always an unsigned
+ * long for now.
+ */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
+       return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
+                    pgprot_val(pgprot)); }
+static inline unsigned long pte_pfn(pte_t pte) {
+       return pte_val(pte) >> PTE_RPN_SHIFT; }
+
+/* Generic modifiers for PTE bits */
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+       pte_basic_t ptev;
+
+       ptev = pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE);
+       ptev |= _PAGE_RO;
+       return __pte(ptev);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+       return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE));
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+       return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+       pte_basic_t ptev;
+
+       ptev = pte_val(pte) & ~_PAGE_RO;
+       ptev |= _PAGE_RW;
+       return __pte(ptev);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+       return __pte(pte_val(pte) | _PAGE_SPECIAL);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+       return pte;
+}
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+       return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+/* Insert a PTE, top-level function is out of line. It uses an inline
+ * low level function in the respective pgtable-* files
+ */
+extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+                      pte_t pte);
+
+/* This low level function performs the actual PTE insertion
+ * Setting the PTE depends on the MMU type and other factors. It's
+ * an horrible mess that I'm not going to try to clean up now but
+ * I'm keeping it in one place rather than spread around
+ */
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+                               pte_t *ptep, pte_t pte, int percpu)
+{
+#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
+       /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
+        * helper pte_update() which does an atomic update. We need to do that
+        * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
+        * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
+        * the hash bits instead (ie, same as the non-SMP case)
+        */
+       if (percpu)
+               *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+                             | (pte_val(pte) & ~_PAGE_HASHPTE));
+       else
+               pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
+
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+       /* Second case is 32-bit with 64-bit PTE.  In this case, we
+        * can just store as long as we do the two halves in the right order
+        * with a barrier in between. This is possible because we take care,
+        * in the hash code, to pre-invalidate if the PTE was already hashed,
+        * which synchronizes us with any concurrent invalidation.
+        * In the percpu case, we also fallback to the simple update preserving
+        * the hash bits
+        */
+       if (percpu) {
+               *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+                             | (pte_val(pte) & ~_PAGE_HASHPTE));
+               return;
+       }
+#if _PAGE_HASHPTE != 0
+       if (pte_val(*ptep) & _PAGE_HASHPTE)
+               flush_hash_entry(mm, ptep, addr);
+#endif
+       __asm__ __volatile__("\
+               stw%U0%X0 %2,%0\n\
+               eieio\n\
+               stw%U0%X0 %L2,%1"
+       : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+       : "r" (pte) : "memory");
+
+#elif defined(CONFIG_PPC_STD_MMU_32)
+       /* Third case is 32-bit hash table in UP mode, we need to preserve
+        * the _PAGE_HASHPTE bit since we may not have invalidated the previous
+        * translation in the hash yet (done in a subsequent flush_tlb_xxx())
+        * and see we need to keep track that this PTE needs invalidating
+        */
+       *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+                     | (pte_val(pte) & ~_PAGE_HASHPTE));
+
+#else
+       /* Anything else just stores the PTE normally. That covers all 64-bit
+        * cases, and 32-bit non-hash with 32-bit PTEs.
+        */
+       *ptep = pte;
+
+#ifdef CONFIG_PPC_BOOK3E_64
+       /*
+        * With hardware tablewalk, a sync is needed to ensure that
+        * subsequent accesses see the PTE we just wrote.  Unlike userspace
+        * mappings, we can't tolerate spurious faults, so make sure
+        * the new PTE will be seen the first time.
+        */
+       if (is_kernel_addr(addr))
+               mb();
+#endif
+#endif
+}
+
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+                                pte_t *ptep, pte_t entry, int dirty);
+
+/*
+ * Macro to mark a page protection value as "uncacheable".
+ */
+
+#define _PAGE_CACHE_CTL        (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
+                        _PAGE_WRITETHRU)
+
+#define pgprot_noncached(prot)   (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+                                           _PAGE_NO_CACHE | _PAGE_GUARDED))
+
+#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+                                           _PAGE_NO_CACHE))
+
+#define pgprot_cached(prot)       (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+                                           _PAGE_COHERENT))
+
+#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
+                                           _PAGE_COHERENT | _PAGE_WRITETHRU))
+
+#define pgprot_cached_noncoherent(prot) \
+               (__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))
+
+#define pgprot_writecombine pgprot_noncached_wc
+
+struct file;
+extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                                    unsigned long size, pgprot_t vma_prot);
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+
+#ifdef CONFIG_HUGETLB_PAGE
+static inline int hugepd_ok(hugepd_t hpd)
+{
+       return (hpd.pd > 0);
+}
+
+static inline int pmd_huge(pmd_t pmd)
+{
+       return 0;
+}
+
+static inline int pud_huge(pud_t pud)
+{
+       return 0;
+}
+
+static inline int pgd_huge(pgd_t pgd)
+{
+       return 0;
+}
+#define pgd_huge               pgd_huge
+
+#define is_hugepd(hpd)         (hugepd_ok(hpd))
+#endif
+
+#endif /* __ASSEMBLY__ */
+#endif
similarity index 95%
rename from arch/powerpc/include/asm/pte-book3e.h
rename to arch/powerpc/include/asm/nohash/pte-book3e.h
index 8d8473278d91c37e1bf5b742bbe20f8938a9ed9a..e16807b78edf7bd218980aee68c3b8dec795fe84 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _ASM_POWERPC_PTE_BOOK3E_H
-#define _ASM_POWERPC_PTE_BOOK3E_H
+#ifndef _ASM_POWERPC_NOHASH_PTE_BOOK3E_H
+#define _ASM_POWERPC_NOHASH_PTE_BOOK3E_H
 #ifdef __KERNEL__
 
 /* PTE bit definitions for processors compliant to the Book3E
@@ -84,4 +84,4 @@
 #endif
 
 #endif /* __KERNEL__ */
-#endif /*  _ASM_POWERPC_PTE_FSL_BOOKE_H */
+#endif /*  _ASM_POWERPC_NOHASH_PTE_BOOK3E_H */
index 8374afed9d0a95cb373e94af41d2e2ceeaae9d90..f8faaaeeca1e16d57dd0a4d9dfff3cb73c7e055d 100644 (file)
 #define OPAL_LEDS_GET_INDICATOR                        114
 #define OPAL_LEDS_SET_INDICATOR                        115
 #define OPAL_CEC_REBOOT2                       116
-#define OPAL_LAST                              116
+#define OPAL_CONSOLE_FLUSH                     117
+#define OPAL_LAST                              117
 
 /* Device tree flags */
 
index 800115910e43a39828e80196aff5499e060c7638..07a99e638449acdbbcb9817919d3ce62f0d2b0a9 100644 (file)
@@ -35,6 +35,7 @@ int64_t opal_console_read(int64_t term_number, __be64 *length,
                          uint8_t *buffer);
 int64_t opal_console_write_buffer_space(int64_t term_number,
                                        __be64 *length);
+int64_t opal_console_flush(int64_t term_number);
 int64_t opal_rtc_read(__be32 *year_month_day,
                      __be64 *hour_minute_second_millisecond);
 int64_t opal_rtc_write(uint32_t year_month_day,
@@ -262,6 +263,8 @@ extern int opal_resync_timebase(void);
 
 extern void opal_lpc_init(void);
 
+extern void opal_kmsg_init(void);
+
 extern int opal_event_request(unsigned int opal_event_nr);
 
 struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
index 70bd4381f8e6ac19115dc37d17a66766811b5fcf..546540b910959570cc34d8dfa040ee155885ae00 100644 (file)
@@ -16,6 +16,7 @@
 
 #ifdef CONFIG_PPC64
 
+#include <linux/string.h>
 #include <asm/types.h>
 #include <asm/lppaca.h>
 #include <asm/mmu.h>
@@ -131,7 +132,16 @@ struct paca_struct {
        struct tlb_core_data tcd;
 #endif /* CONFIG_PPC_BOOK3E */
 
-       mm_context_t context;
+#ifdef CONFIG_PPC_BOOK3S
+       mm_context_id_t mm_ctx_id;
+#ifdef CONFIG_PPC_MM_SLICES
+       u64 mm_ctx_low_slices_psize;
+       unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
+#else
+       u16 mm_ctx_user_psize;
+       u16 mm_ctx_sllp;
+#endif
+#endif
 
        /*
         * then miscellaneous read-write fields
@@ -194,6 +204,23 @@ struct paca_struct {
 #endif
 };
 
+#ifdef CONFIG_PPC_BOOK3S
+static inline void copy_mm_to_paca(mm_context_t *context)
+{
+       get_paca()->mm_ctx_id = context->id;
+#ifdef CONFIG_PPC_MM_SLICES
+       get_paca()->mm_ctx_low_slices_psize = context->low_slices_psize;
+       memcpy(&get_paca()->mm_ctx_high_slices_psize,
+              &context->high_slices_psize, SLICE_ARRAY_SIZE);
+#else
+       get_paca()->mm_ctx_user_psize = context->user_psize;
+       get_paca()->mm_ctx_sllp = context->sllp;
+#endif
+}
+#else
+static inline void copy_mm_to_paca(mm_context_t *context){}
+#endif
+
 extern struct paca_struct *paca;
 extern void initialise_paca(struct paca_struct *new_paca, int cpu);
 extern void setup_paca(struct paca_struct *new_paca);
index 3140c19c448c2907f7c9f82bc2e4b815e175fb3f..e34124f6fbf27fd9af63d7da99c61e978b9da65e 100644 (file)
@@ -286,8 +286,11 @@ extern long long virt_phys_offset;
 
 /* PTE level */
 typedef struct { pte_basic_t pte; } pte_t;
-#define pte_val(x)     ((x).pte)
 #define __pte(x)       ((pte_t) { (x) })
+static inline pte_basic_t pte_val(pte_t x)
+{
+       return x.pte;
+}
 
 /* 64k pages additionally define a bigger "real PTE" type that gathers
  * the "second half" part of the PTE for pseudo 64k pages
@@ -301,21 +304,30 @@ typedef struct { pte_t pte; } real_pte_t;
 /* PMD level */
 #ifdef CONFIG_PPC64
 typedef struct { unsigned long pmd; } pmd_t;
-#define pmd_val(x)     ((x).pmd)
 #define __pmd(x)       ((pmd_t) { (x) })
+static inline unsigned long pmd_val(pmd_t x)
+{
+       return x.pmd;
+}
 
 /* PUD level exusts only on 4k pages */
 #ifndef CONFIG_PPC_64K_PAGES
 typedef struct { unsigned long pud; } pud_t;
-#define pud_val(x)     ((x).pud)
 #define __pud(x)       ((pud_t) { (x) })
+static inline unsigned long pud_val(pud_t x)
+{
+       return x.pud;
+}
 #endif /* !CONFIG_PPC_64K_PAGES */
 #endif /* CONFIG_PPC64 */
 
 /* PGD level */
 typedef struct { unsigned long pgd; } pgd_t;
-#define pgd_val(x)     ((x).pgd)
 #define __pgd(x)       ((pgd_t) { (x) })
+static inline unsigned long pgd_val(pgd_t x)
+{
+       return x.pgd;
+}
 
 /* Page protection bits */
 typedef struct { unsigned long pgprot; } pgprot_t;
@@ -329,8 +341,11 @@ typedef struct { unsigned long pgprot; } pgprot_t;
  */
 
 typedef pte_basic_t pte_t;
-#define pte_val(x)     (x)
 #define __pte(x)       (x)
+static inline pte_basic_t pte_val(pte_t pte)
+{
+       return pte;
+}
 
 #if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
 typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
@@ -341,67 +356,42 @@ typedef pte_t real_pte_t;
 
 #ifdef CONFIG_PPC64
 typedef unsigned long pmd_t;
-#define pmd_val(x)     (x)
 #define __pmd(x)       (x)
+static inline unsigned long pmd_val(pmd_t pmd)
+{
+       return pmd;
+}
 
 #ifndef CONFIG_PPC_64K_PAGES
 typedef unsigned long pud_t;
-#define pud_val(x)     (x)
 #define __pud(x)       (x)
+static inline unsigned long pud_val(pud_t pud)
+{
+       return pud;
+}
 #endif /* !CONFIG_PPC_64K_PAGES */
 #endif /* CONFIG_PPC64 */
 
 typedef unsigned long pgd_t;
-#define pgd_val(x)     (x)
-#define pgprot_val(x)  (x)
+#define __pgd(x)       (x)
+static inline unsigned long pgd_val(pgd_t pgd)
+{
+       return pgd;
+}
 
 typedef unsigned long pgprot_t;
-#define __pgd(x)       (x)
+#define pgprot_val(x)  (x)
 #define __pgprot(x)    (x)
 
 #endif
 
 typedef struct { signed long pd; } hugepd_t;
 
-#ifdef CONFIG_HUGETLB_PAGE
-#ifdef CONFIG_PPC_BOOK3S_64
-#ifdef CONFIG_PPC_64K_PAGES
-/*
- * With 64k page size, we have hugepage ptes in the pgd and pmd entries. We don't
- * need to setup hugepage directory for them. Our pte and page directory format
- * enable us to have this enabled. But to avoid errors when implementing new
- * features disable hugepd for 64K. We enable a debug version here, So we catch
- * wrong usage.
- */
-#ifdef CONFIG_DEBUG_VM
-extern int hugepd_ok(hugepd_t hpd);
-#else
-#define hugepd_ok(x)   (0)
-#endif
-#else
-static inline int hugepd_ok(hugepd_t hpd)
-{
-       /*
-        * hugepd pointer, bottom two bits == 00 and next 4 bits
-        * indicate size of table
-        */
-       return (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
-}
-#endif
-#else
-static inline int hugepd_ok(hugepd_t hpd)
-{
-       return (hpd.pd > 0);
-}
-#endif
-
-#define is_hugepd(hpd)               (hugepd_ok(hpd))
-#define pgd_huge pgd_huge
-int pgd_huge(pgd_t pgd);
-#else /* CONFIG_HUGETLB_PAGE */
-#define is_hugepd(pdep)                        0
-#define pgd_huge(pgd)                  0
+#ifndef CONFIG_HUGETLB_PAGE
+#define is_hugepd(pdep)                (0)
+#define pgd_huge(pgd)          (0)
 #endif /* CONFIG_HUGETLB_PAGE */
+
 #define __hugepd(x) ((hugepd_t) { (x) })
 
 struct page;
index 37fc53587bb45b04d06b6601c19411aa6738edb8..54843ca5fa2bfa03871f28a3854b0d87f5685a76 100644 (file)
@@ -205,6 +205,7 @@ struct pci_dn {
 
        int     pci_ext_config_space;   /* for pci devices */
 
+       struct  pci_dev *pcidev;        /* back-pointer to the pci device */
 #ifdef CONFIG_EEH
        struct eeh_dev *edev;           /* eeh device */
 #endif
index 3453bd8dc18f32be59f81751d20f012e2b2480bc..6f8065a7d487f3e72609c98cfd8a348853ee87d8 100644 (file)
@@ -149,4 +149,8 @@ extern void pcibios_setup_phb_io_space(struct pci_controller *hose);
 extern void pcibios_scan_phb(struct pci_controller *hose);
 
 #endif /* __KERNEL__ */
+
+extern struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev);
+extern struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index);
+
 #endif /* __ASM_POWERPC_PCI_H */
index 842846c1b71185b1a5e086e8521ed65342ae9cb0..76d6b9e0c8a94aa2af6e4afc364ca1bcca26f7d8 100644 (file)
@@ -21,16 +21,34 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
 /* #define pgd_populate(mm, pmd, pte)      BUG() */
 
 #ifndef CONFIG_BOOKE
-#define pmd_populate_kernel(mm, pmd, pte)      \
-               (pmd_val(*(pmd)) = __pa(pte) | _PMD_PRESENT)
-#define pmd_populate(mm, pmd, pte)     \
-               (pmd_val(*(pmd)) = (page_to_pfn(pte) << PAGE_SHIFT) | _PMD_PRESENT)
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
+                                      pte_t *pte)
+{
+       *pmdp = __pmd(__pa(pte) | _PMD_PRESENT);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
+                               pgtable_t pte_page)
+{
+       *pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT);
+}
+
 #define pmd_pgtable(pmd) pmd_page(pmd)
 #else
-#define pmd_populate_kernel(mm, pmd, pte)      \
-               (pmd_val(*(pmd)) = (unsigned long)pte | _PMD_PRESENT)
-#define pmd_populate(mm, pmd, pte)     \
-               (pmd_val(*(pmd)) = (unsigned long)lowmem_page_address(pte) | _PMD_PRESENT)
+
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
+                                      pte_t *pte)
+{
+       *pmdp = __pmd((unsigned long)pte | _PMD_PRESENT);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
+                               pgtable_t pte_page)
+{
+       *pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT);
+}
+
 #define pmd_pgtable(pmd) pmd_page(pmd)
 #endif
 
index 4b0be20fcbfdeee22498ea67f7a6b2adb3b55213..69ef28a817335717602ca5dfd654f731e259a3dd 100644 (file)
@@ -53,7 +53,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 #ifndef CONFIG_PPC_64K_PAGES
 
-#define pgd_populate(MM, PGD, PUD)     pgd_set(PGD, PUD)
+#define pgd_populate(MM, PGD, PUD)     pgd_set(PGD, (unsigned long)PUD)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
@@ -71,9 +71,18 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
        pud_set(pud, (unsigned long)pmd);
 }
 
-#define pmd_populate(mm, pmd, pte_page) \
-       pmd_populate_kernel(mm, pmd, page_address(pte_page))
-#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
+static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
+                                      pte_t *pte)
+{
+       pmd_set(pmd, (unsigned long)pte);
+}
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+                               pgtable_t pte_page)
+{
+       pmd_set(pmd, (unsigned long)page_address(pte_page));
+}
+
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
@@ -154,16 +163,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 }
 
 #else /* if CONFIG_PPC_64K_PAGES */
-/*
- * we support 16 fragments per PTE page.
- */
-#define PTE_FRAG_NR    16
-/*
- * We use a 2K PTE page fragment and another 2K for storing
- * real_pte_t hash index
- */
-#define PTE_FRAG_SIZE_SHIFT  12
-#define PTE_FRAG_SIZE (2 * PTRS_PER_PTE * sizeof(pte_t))
 
 extern pte_t *page_table_alloc(struct mm_struct *, unsigned long, int);
 extern void page_table_free(struct mm_struct *, unsigned long *, int);
index b64b4212b71f6fdba013f3f6b805528bcad1a221..ac9fb114e25d4301e439c212b469270edc7ce6d5 100644 (file)
@@ -1,6 +1,5 @@
 #ifndef _ASM_POWERPC_PGTABLE_H
 #define _ASM_POWERPC_PGTABLE_H
-#ifdef __KERNEL__
 
 #ifndef __ASSEMBLY__
 #include <linux/mmdebug.h>
@@ -13,210 +12,20 @@ struct mm_struct;
 
 #endif /* !__ASSEMBLY__ */
 
-#if defined(CONFIG_PPC64)
-#  include <asm/pgtable-ppc64.h>
+#ifdef CONFIG_PPC_BOOK3S
+#include <asm/book3s/pgtable.h>
 #else
-#  include <asm/pgtable-ppc32.h>
-#endif
-
-/*
- * We save the slot number & secondary bit in the second half of the
- * PTE page. We use the 8 bytes per each pte entry.
- */
-#define PTE_PAGE_HIDX_OFFSET (PTRS_PER_PTE * 8)
+#include <asm/nohash/pgtable.h>
+#endif /* !CONFIG_PPC_BOOK3S */
 
 #ifndef __ASSEMBLY__
 
 #include <asm/tlbflush.h>
 
-/* Generic accessors to PTE bits */
-static inline int pte_write(pte_t pte)
-{      return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO; }
-static inline int pte_dirty(pte_t pte)         { return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte)         { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_special(pte_t pte)       { return pte_val(pte) & _PAGE_SPECIAL; }
-static inline int pte_none(pte_t pte)          { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
-static inline pgprot_t pte_pgprot(pte_t pte)   { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
-
-#ifdef CONFIG_NUMA_BALANCING
-/*
- * These work without NUMA balancing but the kernel does not care. See the
- * comment in include/asm-generic/pgtable.h . On powerpc, this will only
- * work for user pages and always return true for kernel pages.
- */
-static inline int pte_protnone(pte_t pte)
-{
-       return (pte_val(pte) &
-               (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
-}
-
-static inline int pmd_protnone(pmd_t pmd)
-{
-       return pte_protnone(pmd_pte(pmd));
-}
-#endif /* CONFIG_NUMA_BALANCING */
-
-static inline int pte_present(pte_t pte)
-{
-       return pte_val(pte) & _PAGE_PRESENT;
-}
-
-/* Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- *
- * Even if PTEs can be unsigned long long, a PFN is always an unsigned
- * long for now.
- */
-static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
-       return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
-                    pgprot_val(pgprot)); }
-static inline unsigned long pte_pfn(pte_t pte) {
-       return pte_val(pte) >> PTE_RPN_SHIFT; }
-
 /* Keep these as a macros to avoid include dependency mess */
 #define pte_page(x)            pfn_to_page(pte_pfn(x))
 #define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
 
-/* Generic modifiers for PTE bits */
-static inline pte_t pte_wrprotect(pte_t pte) {
-       pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE);
-       pte_val(pte) |= _PAGE_RO; return pte; }
-static inline pte_t pte_mkclean(pte_t pte) {
-       pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HWWRITE); return pte; }
-static inline pte_t pte_mkold(pte_t pte) {
-       pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) {
-       pte_val(pte) &= ~_PAGE_RO;
-       pte_val(pte) |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) {
-       pte_val(pte) |= _PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) {
-       pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkspecial(pte_t pte) {
-       pte_val(pte) |= _PAGE_SPECIAL; return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) {
-       return pte; }
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
-       pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
-       return pte;
-}
-
-
-/* Insert a PTE, top-level function is out of line. It uses an inline
- * low level function in the respective pgtable-* files
- */
-extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
-                      pte_t pte);
-
-/* This low level function performs the actual PTE insertion
- * Setting the PTE depends on the MMU type and other factors. It's
- * an horrible mess that I'm not going to try to clean up now but
- * I'm keeping it in one place rather than spread around
- */
-static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
-                               pte_t *ptep, pte_t pte, int percpu)
-{
-#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
-       /* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
-        * helper pte_update() which does an atomic update. We need to do that
-        * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
-        * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
-        * the hash bits instead (ie, same as the non-SMP case)
-        */
-       if (percpu)
-               *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
-                             | (pte_val(pte) & ~_PAGE_HASHPTE));
-       else
-               pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
-
-#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
-       /* Second case is 32-bit with 64-bit PTE.  In this case, we
-        * can just store as long as we do the two halves in the right order
-        * with a barrier in between. This is possible because we take care,
-        * in the hash code, to pre-invalidate if the PTE was already hashed,
-        * which synchronizes us with any concurrent invalidation.
-        * In the percpu case, we also fallback to the simple update preserving
-        * the hash bits
-        */
-       if (percpu) {
-               *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
-                             | (pte_val(pte) & ~_PAGE_HASHPTE));
-               return;
-       }
-#if _PAGE_HASHPTE != 0
-       if (pte_val(*ptep) & _PAGE_HASHPTE)
-               flush_hash_entry(mm, ptep, addr);
-#endif
-       __asm__ __volatile__("\
-               stw%U0%X0 %2,%0\n\
-               eieio\n\
-               stw%U0%X0 %L2,%1"
-       : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
-       : "r" (pte) : "memory");
-
-#elif defined(CONFIG_PPC_STD_MMU_32)
-       /* Third case is 32-bit hash table in UP mode, we need to preserve
-        * the _PAGE_HASHPTE bit since we may not have invalidated the previous
-        * translation in the hash yet (done in a subsequent flush_tlb_xxx())
-        * and see we need to keep track that this PTE needs invalidating
-        */
-       *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
-                     | (pte_val(pte) & ~_PAGE_HASHPTE));
-
-#else
-       /* Anything else just stores the PTE normally. That covers all 64-bit
-        * cases, and 32-bit non-hash with 32-bit PTEs.
-        */
-       *ptep = pte;
-
-#ifdef CONFIG_PPC_BOOK3E_64
-       /*
-        * With hardware tablewalk, a sync is needed to ensure that
-        * subsequent accesses see the PTE we just wrote.  Unlike userspace
-        * mappings, we can't tolerate spurious faults, so make sure
-        * the new PTE will be seen the first time.
-        */
-       if (is_kernel_addr(addr))
-               mb();
-#endif
-#endif
-}
-
-
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
-                                pte_t *ptep, pte_t entry, int dirty);
-
-/*
- * Macro to mark a page protection value as "uncacheable".
- */
-
-#define _PAGE_CACHE_CTL        (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
-                        _PAGE_WRITETHRU)
-
-#define pgprot_noncached(prot)   (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
-                                           _PAGE_NO_CACHE | _PAGE_GUARDED))
-
-#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
-                                           _PAGE_NO_CACHE))
-
-#define pgprot_cached(prot)       (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
-                                           _PAGE_COHERENT))
-
-#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
-                                           _PAGE_COHERENT | _PAGE_WRITETHRU))
-
-#define pgprot_cached_noncoherent(prot) \
-               (__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))
-
-#define pgprot_writecombine pgprot_noncached_wc
-
-struct file;
-extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-                                    unsigned long size, pgprot_t vma_prot);
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
@@ -271,5 +80,4 @@ static inline pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
 }
 #endif /* __ASSEMBLY__ */
 
-#endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_PGTABLE_H */
index 67859edbf8fd39aba27c8f8b7503533d6d9a3271..1b394247afc2dc86c76a26c9b5ed0274ae0de536 100644 (file)
@@ -201,6 +201,23 @@ static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
        return rc;
 }
 
+/*
+ * ptes must be 8*sizeof(unsigned long)
+ */
+static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
+                                   unsigned long *ptes)
+
+{
+       long rc;
+       unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+       rc = plpar_hcall9(H_READ, retbuf, flags | H_READ_4, ptex);
+
+       memcpy(ptes, retbuf, 8*sizeof(unsigned long));
+
+       return rc;
+}
+
 /*
  * plpar_pte_read_4_raw can be called in real mode.
  * ptes must be 8*sizeof(unsigned long)
index dd0fc18d81031c758e0f0c71d25b2dd83d56fb4b..499d9f89435a2dbe1216a58047bb3c1135932746 100644 (file)
@@ -413,24 +413,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
        FTR_SECTION_ELSE_NESTED(848);   \
        mtocrf (FXM), RS;               \
        ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848)
-
-/*
- * PPR restore macros used in entry_64.S
- * Used for P7 or later processors
- */
-#define HMT_MEDIUM_LOW_HAS_PPR                                         \
-BEGIN_FTR_SECTION_NESTED(944)                                          \
-       HMT_MEDIUM_LOW;                                                 \
-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,944)
-
-#define SET_DEFAULT_THREAD_PPR(ra, rb)                                 \
-BEGIN_FTR_SECTION_NESTED(945)                                          \
-       lis     ra,INIT_PPR@highest;    /* default ppr=3 */             \
-       ld      rb,PACACURRENT(r13);                                    \
-       sldi    ra,ra,32;       /* 11- 13 bits are used for ppr */      \
-       std     ra,TASKTHREADPPR(rb);                                   \
-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945)
-
 #endif
 
 /*
index 5afea361beaae9ee917b38bca3ece1d5a070166a..ac2330820b9ae2de3c8fedfe4ffc47987f6ed992 100644 (file)
@@ -88,12 +88,6 @@ struct task_struct;
 void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
 void release_thread(struct task_struct *);
 
-/* Lazy FPU handling on uni-processor */
-extern struct task_struct *last_task_used_math;
-extern struct task_struct *last_task_used_altivec;
-extern struct task_struct *last_task_used_vsx;
-extern struct task_struct *last_task_used_spe;
-
 #ifdef CONFIG_PPC32
 
 #if CONFIG_TASK_SIZE > CONFIG_KERNEL_START
@@ -294,6 +288,7 @@ struct thread_struct {
 #endif
 #ifdef CONFIG_PPC64
        unsigned long   dscr;
+       unsigned long   fscr;
        /*
         * This member element dscr_inherit indicates that the process
         * has explicitly attempted and changed the DSCR register value
@@ -385,8 +380,6 @@ extern int set_endian(struct task_struct *tsk, unsigned int val);
 extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr);
 extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
 
-extern void fp_enable(void);
-extern void vec_enable(void);
 extern void load_fp_state(struct thread_fp_state *fp);
 extern void store_fp_state(struct thread_fp_state *fp);
 extern void load_vr_state(struct thread_vr_state *vr);
index 71537a319fc828474f71fd1855ed127b30d0cce5..1ec67b0430657c590fdbf2b230fc8ca926989e6e 100644 (file)
 #else
 #define _PAGE_RW 0
 #endif
+
+#ifndef _PAGE_PTE
+#define _PAGE_PTE 0
+#endif
+
 #ifndef _PMD_PRESENT_MASK
 #define _PMD_PRESENT_MASK      _PMD_PRESENT
 #endif
diff --git a/arch/powerpc/include/asm/pte-hash64-4k.h b/arch/powerpc/include/asm/pte-hash64-4k.h
deleted file mode 100644 (file)
index c134e80..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-/* To be include by pgtable-hash64.h only */
-
-/* PTE bits */
-#define _PAGE_HASHPTE  0x0400 /* software: pte has an associated HPTE */
-#define _PAGE_SECONDARY 0x8000 /* software: HPTE is in secondary group */
-#define _PAGE_GROUP_IX  0x7000 /* software: HPTE index within group */
-#define _PAGE_F_SECOND  _PAGE_SECONDARY
-#define _PAGE_F_GIX     _PAGE_GROUP_IX
-#define _PAGE_SPECIAL  0x10000 /* software: special page */
-
-/* PTE flags to conserve for HPTE identification */
-#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | \
-                        _PAGE_SECONDARY | _PAGE_GROUP_IX)
-
-/* shift to put page number into pte */
-#define PTE_RPN_SHIFT  (17)
-
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
deleted file mode 100644 (file)
index 4f4ec2a..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-/* To be include by pgtable-hash64.h only */
-
-/* Additional PTE bits (don't change without checking asm in hash_low.S) */
-#define _PAGE_SPECIAL  0x00000400 /* software: special page */
-#define _PAGE_HPTE_SUB 0x0ffff000 /* combo only: sub pages HPTE bits */
-#define _PAGE_HPTE_SUB0        0x08000000 /* combo only: first sub page */
-#define _PAGE_COMBO    0x10000000 /* this is a combo 4k page */
-#define _PAGE_4K_PFN   0x20000000 /* PFN is for a single 4k page */
-
-/* For 64K page, we don't have a separate _PAGE_HASHPTE bit. Instead,
- * we set that to be the whole sub-bits mask. The C code will only
- * test this, so a multi-bit mask will work. For combo pages, this
- * is equivalent as effectively, the old _PAGE_HASHPTE was an OR of
- * all the sub bits. For real 64k pages, we now have the assembly set
- * _PAGE_HPTE_SUB0 in addition to setting the HIDX bits which overlap
- * that mask. This is fine as long as the HIDX bits are never set on
- * a PTE that isn't hashed, which is the case today.
- *
- * A little nit is for the huge page C code, which does the hashing
- * in C, we need to provide which bit to use.
- */
-#define _PAGE_HASHPTE  _PAGE_HPTE_SUB
-
-/* Note the full page bits must be in the same location as for normal
- * 4k pages as the same assembly will be used to insert 64K pages
- * whether the kernel has CONFIG_PPC_64K_PAGES or not
- */
-#define _PAGE_F_SECOND  0x00008000 /* full page: hidx bits */
-#define _PAGE_F_GIX     0x00007000 /* full page: hidx bits */
-
-/* PTE flags to conserve for HPTE identification */
-#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_COMBO)
-
-/* Shift to put page number into pte.
- *
- * That gives us a max RPN of 34 bits, which means a max of 50 bits
- * of addressable physical space, or 46 bits for the special 4k PFNs.
- */
-#define PTE_RPN_SHIFT  (30)
-
-#ifndef __ASSEMBLY__
-
-/*
- * With 64K pages on hash table, we have a special PTE format that
- * uses a second "half" of the page table to encode sub-page information
- * in order to deal with 64K made of 4K HW pages. Thus we override the
- * generic accessors and iterators here
- */
-#define __real_pte __real_pte
-static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
-{
-       real_pte_t rpte;
-
-       rpte.pte = pte;
-       rpte.hidx = 0;
-       if (pte_val(pte) & _PAGE_COMBO) {
-               /*
-                * Make sure we order the hidx load against the _PAGE_COMBO
-                * check. The store side ordering is done in __hash_page_4K
-                */
-               smp_rmb();
-               rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE));
-       }
-       return rpte;
-}
-
-static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
-{
-       if ((pte_val(rpte.pte) & _PAGE_COMBO))
-               return (rpte.hidx >> (index<<2)) & 0xf;
-       return (pte_val(rpte.pte) >> 12) & 0xf;
-}
-
-#define __rpte_to_pte(r)       ((r).pte)
-#define __rpte_sub_valid(rpte, index) \
-       (pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))
-
-/* Trick: we set __end to va + 64k, which happens works for
- * a 16M page as well as we want only one iteration
- */
-#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)    \
-       do {                                                            \
-               unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT));  \
-               unsigned __split = (psize == MMU_PAGE_4K ||             \
-                                   psize == MMU_PAGE_64K_AP);          \
-               shift = mmu_psize_defs[psize].shift;                    \
-               for (index = 0; vpn < __end; index++,                   \
-                            vpn += (1L << (shift - VPN_SHIFT))) {      \
-                       if (!__split || __rpte_sub_valid(rpte, index))  \
-                               do {
-
-#define pte_iterate_hashed_end() } while(0); } } while(0)
-
-#define pte_pagesize_index(mm, addr, pte)      \
-       (((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
-
-#define remap_4k_pfn(vma, addr, pfn, prot)                             \
-       (WARN_ON(((pfn) >= (1UL << (64 - PTE_RPN_SHIFT)))) ? -EINVAL :  \
-               remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE,        \
-                       __pgprot(pgprot_val((prot)) | _PAGE_4K_PFN)))
-
-#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/pte-hash64.h b/arch/powerpc/include/asm/pte-hash64.h
deleted file mode 100644 (file)
index ef612c1..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-#ifndef _ASM_POWERPC_PTE_HASH64_H
-#define _ASM_POWERPC_PTE_HASH64_H
-#ifdef __KERNEL__
-
-/*
- * Common bits between 4K and 64K pages in a linux-style PTE.
- * These match the bits in the (hardware-defined) PowerPC PTE as closely
- * as possible. Additional bits may be defined in pgtable-hash64-*.h
- *
- * Note: We only support user read/write permissions. Supervisor always
- * have full read/write to pages above PAGE_OFFSET (pages below that
- * always use the user access permissions).
- *
- * We could create separate kernel read-only if we used the 3 PP bits
- * combinations that newer processors provide but we currently don't.
- */
-#define _PAGE_PRESENT          0x0001 /* software: pte contains a translation */
-#define _PAGE_USER             0x0002 /* matches one of the PP bits */
-#define _PAGE_BIT_SWAP_TYPE    2
-#define _PAGE_EXEC             0x0004 /* No execute on POWER4 and newer (we invert) */
-#define _PAGE_GUARDED          0x0008
-/* We can derive Memory coherence from _PAGE_NO_CACHE */
-#define _PAGE_NO_CACHE         0x0020 /* I: cache inhibit */
-#define _PAGE_WRITETHRU                0x0040 /* W: cache write-through */
-#define _PAGE_DIRTY            0x0080 /* C: page changed */
-#define _PAGE_ACCESSED         0x0100 /* R: page referenced */
-#define _PAGE_RW               0x0200 /* software: user write access allowed */
-#define _PAGE_BUSY             0x0800 /* software: PTE & hash are busy */
-
-/* No separate kernel read-only */
-#define _PAGE_KERNEL_RW                (_PAGE_RW | _PAGE_DIRTY) /* user access blocked by key */
-#define _PAGE_KERNEL_RO                 _PAGE_KERNEL_RW
-
-/* Strong Access Ordering */
-#define _PAGE_SAO              (_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)
-
-/* No page size encoding in the linux PTE */
-#define _PAGE_PSIZE            0
-
-/* PTEIDX nibble */
-#define _PTEIDX_SECONDARY      0x8
-#define _PTEIDX_GROUP_IX       0x7
-
-/* Hash table based platforms need atomic updates of the linux PTE */
-#define PTE_ATOMIC_UPDATES     1
-
-#ifdef CONFIG_PPC_64K_PAGES
-#include <asm/pte-hash64-64k.h>
-#else
-#include <asm/pte-hash64-4k.h>
-#endif
-
-#endif /* __KERNEL__ */
-#endif /*  _ASM_POWERPC_PTE_HASH64_H */
index 2220f7a60def314be5c8f23e18ceb8982061cb18..c4cb2ffc624ea5b442aec8c83c86e3a1710d9411 100644 (file)
 #define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
                                     : : "r" (v) : "memory")
 #define mtmsr(v)       __mtmsrd((v), 0)
+#define __MTMSR                "mtmsrd"
 #else
 #define mtmsr(v)       asm volatile("mtmsr %0" : \
                                     : "r" ((unsigned long)(v)) \
                                     : "memory")
+#define __MTMSR                "mtmsr"
 #endif
 
+static inline void mtmsr_isync(unsigned long val)
+{
+       asm volatile(__MTMSR " %0; " ASM_FTR_IFCLR("isync", "nop", %1) : :
+                       "r" (val), "i" (CPU_FTR_ARCH_206) : "memory");
+}
+
 #define mfspr(rn)      ({unsigned long rval; \
                        asm volatile("mfspr %0," __stringify(rn) \
                                : "=r" (rval)); rval;})
                                     : "r" ((unsigned long)(v)) \
                                     : "memory")
 
+extern void msr_check_and_set(unsigned long bits);
+extern bool strict_msr_control;
+extern void __msr_check_and_clear(unsigned long bits);
+static inline void msr_check_and_clear(unsigned long bits)
+{
+       if (strict_msr_control)
+               __msr_check_and_clear(bits);
+}
+
 static inline unsigned long mfvtb (void)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
index b77ef369c0f0ea9b3641201c049aa11378788db1..51400baa8d48d68a9192bb9e4557f1caedace8b9 100644 (file)
@@ -334,10 +334,11 @@ extern void (*rtas_flash_term_hook)(int);
 
 extern struct rtas_t rtas;
 
-extern void enter_rtas(unsigned long);
 extern int rtas_token(const char *service);
 extern int rtas_service_present(const char *service);
 extern int rtas_call(int token, int, int, int *, ...);
+void rtas_call_unlocked(struct rtas_args *args, int token, int nargs,
+                       int nret, ...);
 extern void rtas_restart(char *cmd);
 extern void rtas_power_off(void);
 extern void rtas_halt(void);
index 15cca17cba4b9fe47c5598006f9e61214e20295c..5b268b6be74c791cadfa2ea8575e298104bd529a 100644 (file)
@@ -4,6 +4,8 @@
 #ifndef _ASM_POWERPC_SWITCH_TO_H
 #define _ASM_POWERPC_SWITCH_TO_H
 
+#include <asm/reg.h>
+
 struct thread_struct;
 struct task_struct;
 struct pt_regs;
@@ -12,74 +14,59 @@ extern struct task_struct *__switch_to(struct task_struct *,
        struct task_struct *);
 #define switch_to(prev, next, last)    ((last) = __switch_to((prev), (next)))
 
-struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
                                   struct thread_struct *next);
-#ifdef CONFIG_PPC_BOOK3S_64
-static inline void save_early_sprs(struct thread_struct *prev)
-{
-       if (cpu_has_feature(CPU_FTR_ARCH_207S))
-               prev->tar = mfspr(SPRN_TAR);
-       if (cpu_has_feature(CPU_FTR_DSCR))
-               prev->dscr = mfspr(SPRN_DSCR);
-}
-#else
-static inline void save_early_sprs(struct thread_struct *prev) {}
-#endif
 
-extern void enable_kernel_fp(void);
-extern void enable_kernel_altivec(void);
-extern void enable_kernel_vsx(void);
-extern int emulate_altivec(struct pt_regs *);
-extern void __giveup_vsx(struct task_struct *);
-extern void giveup_vsx(struct task_struct *);
-extern void enable_kernel_spe(void);
-extern void giveup_spe(struct task_struct *);
-extern void load_up_spe(struct task_struct *);
 extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
-#ifndef CONFIG_SMP
-extern void discard_lazy_cpu_state(void);
-#else
-static inline void discard_lazy_cpu_state(void)
-{
-}
-#endif
+extern int emulate_altivec(struct pt_regs *);
+
+extern void flush_all_to_thread(struct task_struct *);
+extern void giveup_all(struct task_struct *);
 
 #ifdef CONFIG_PPC_FPU
+extern void enable_kernel_fp(void);
 extern void flush_fp_to_thread(struct task_struct *);
 extern void giveup_fpu(struct task_struct *);
+extern void __giveup_fpu(struct task_struct *);
+static inline void disable_kernel_fp(void)
+{
+       msr_check_and_clear(MSR_FP);
+}
 #else
 static inline void flush_fp_to_thread(struct task_struct *t) { }
-static inline void giveup_fpu(struct task_struct *t) { }
 #endif
 
 #ifdef CONFIG_ALTIVEC
+extern void enable_kernel_altivec(void);
 extern void flush_altivec_to_thread(struct task_struct *);
 extern void giveup_altivec(struct task_struct *);
-extern void giveup_altivec_notask(void);
-#else
-static inline void flush_altivec_to_thread(struct task_struct *t)
-{
-}
-static inline void giveup_altivec(struct task_struct *t)
+extern void __giveup_altivec(struct task_struct *);
+static inline void disable_kernel_altivec(void)
 {
+       msr_check_and_clear(MSR_VEC);
 }
 #endif
 
 #ifdef CONFIG_VSX
+extern void enable_kernel_vsx(void);
 extern void flush_vsx_to_thread(struct task_struct *);
-#else
-static inline void flush_vsx_to_thread(struct task_struct *t)
+extern void giveup_vsx(struct task_struct *);
+extern void __giveup_vsx(struct task_struct *);
+static inline void disable_kernel_vsx(void)
 {
+       msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
 #endif
 
 #ifdef CONFIG_SPE
+extern void enable_kernel_spe(void);
 extern void flush_spe_to_thread(struct task_struct *);
-#else
-static inline void flush_spe_to_thread(struct task_struct *t)
+extern void giveup_spe(struct task_struct *);
+extern void __giveup_spe(struct task_struct *);
+static inline void disable_kernel_spe(void)
 {
+       msr_check_and_clear(MSR_SPE);
 }
 #endif
 
index e682a7143edb767826705243df3613cc85e305ab..c50868681f9ead40b285cdc4bb411e43123fb3ff 100644 (file)
@@ -44,7 +44,7 @@ static inline void isync(void)
        MAKE_LWSYNC_SECTION_ENTRY(97, __lwsync_fixup);
 #define PPC_ACQUIRE_BARRIER     "\n" stringify_in_c(__PPC_ACQUIRE_BARRIER)
 #define PPC_RELEASE_BARRIER     stringify_in_c(LWSYNC) "\n"
-#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(LWSYNC) "\n"
+#define PPC_ATOMIC_ENTRY_BARRIER "\n" stringify_in_c(sync) "\n"
 #define PPC_ATOMIC_EXIT_BARRIER         "\n" stringify_in_c(sync) "\n"
 #else
 #define PPC_ACQUIRE_BARRIER
index 10fc784a2ad4cbf687ce28b1d2bc2d6c06fed5b4..2d7109a8d2961f440193e47ecc2602a5729e018e 100644 (file)
@@ -27,7 +27,6 @@ extern struct clock_event_device decrementer_clockevent;
 
 struct rtc_time;
 extern void to_tm(int tim, struct rtc_time * tm);
-extern void GregorianDay(struct rtc_time *tm);
 extern void tick_broadcast_ipi_handler(void);
 
 extern void generic_calibrate_decr(void);
index 4b6b8ace18e086729932bd5635811167a6491a2b..6a5ace5fa0c8a8bbf83a73a5fca8fb0150d5d3e8 100644 (file)
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls          379
+#define NR_syscalls            379
 
 #define __NR__exit __NR_exit
-#define NR_syscalls    __NR_syscalls
 
 #ifndef __ASSEMBLY__
 
index b73a8199f161cca520e325f073da7c45f45961a2..1afe90ade595e161016af3ca712b65f9990407f4 100644 (file)
@@ -41,7 +41,7 @@
 #include <linux/unistd.h>
 #include <linux/time.h>
 
-#define SYSCALL_MAP_SIZE      ((__NR_syscalls + 31) / 32)
+#define SYSCALL_MAP_SIZE      ((NR_syscalls + 31) / 32)
 
 /*
  * So here is the ppc64 backward compatible version
index 43686043e29734b47b183882417e309e617d1039..8dde19962a5b49fbce13685656911c8f47622f3f 100644 (file)
@@ -43,5 +43,7 @@
 #define PPC_FEATURE2_TAR               0x04000000
 #define PPC_FEATURE2_VEC_CRYPTO                0x02000000
 #define PPC_FEATURE2_HTM_NOSC          0x01000000
+#define PPC_FEATURE2_ARCH_3_00         0x00800000 /* ISA 3.00 */
+#define PPC_FEATURE2_HAS_IEEE128       0x00400000 /* VSX IEEE Binary Float 128-bit */
 
 #endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */
index 59dad113897b0b65f6c616df175a596d901dab77..c2d21d11c2d2c0c55554ce4faa153b88d006ea0f 100644 (file)
@@ -295,6 +295,8 @@ do {                                                                        \
 #define R_PPC64_TLSLD          108
 #define R_PPC64_TOCSAVE                109
 
+#define R_PPC64_ENTRY          118
+
 #define R_PPC64_REL16          249
 #define R_PPC64_REL16_LO       250
 #define R_PPC64_REL16_HI       251
index 86150fbb42c39111c7440523f482dbe55e0bd4d9..8e7cb8e2b21ac87b8ef60f141af16773085728e4 100644 (file)
@@ -960,6 +960,7 @@ int fix_alignment(struct pt_regs *regs)
                        preempt_disable();
                        enable_kernel_fp();
                        cvt_df(&data.dd, (float *)&data.x32.low32);
+                       disable_kernel_fp();
                        preempt_enable();
 #else
                        return 0;
@@ -1000,6 +1001,7 @@ int fix_alignment(struct pt_regs *regs)
                preempt_disable();
                enable_kernel_fp();
                cvt_fd((float *)&data.x32.low32, &data.dd);
+               disable_kernel_fp();
                preempt_enable();
 #else
                return 0;
index 221d584d089f9418abfa1087cffd9d0b0117c090..07cebc3514f34337b734153b02f836e28c402d4f 100644 (file)
@@ -185,14 +185,16 @@ int main(void)
        DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
        DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
        DEFINE(PACAIRQHAPPENED, offsetof(struct paca_struct, irq_happened));
-       DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
+#ifdef CONFIG_PPC_BOOK3S
+       DEFINE(PACACONTEXTID, offsetof(struct paca_struct, mm_ctx_id));
 #ifdef CONFIG_PPC_MM_SLICES
        DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
-                                           context.low_slices_psize));
+                                           mm_ctx_low_slices_psize));
        DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct,
-                                           context.high_slices_psize));
+                                           mm_ctx_high_slices_psize));
        DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
 #endif /* CONFIG_PPC_MM_SLICES */
+#endif
 
 #ifdef CONFIG_PPC_BOOK3E
        DEFINE(PACAPGD, offsetof(struct paca_struct, pgd));
@@ -222,7 +224,7 @@ int main(void)
 #ifdef CONFIG_PPC_MM_SLICES
        DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp));
 #else
-       DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
+       DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, mm_ctx_sllp));
 #endif /* CONFIG_PPC_MM_SLICES */
        DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
        DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
index a94f155db78e83d67fca2e967ba43585f18aeda8..0d525ce3717fb4a3587af904d8450ed2100a56d4 100644 (file)
@@ -223,7 +223,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 
        beq-    1f
        ACCOUNT_CPU_USER_EXIT(r11, r12)
-       HMT_MEDIUM_LOW_HAS_PPR
+
+BEGIN_FTR_SECTION
+       HMT_MEDIUM_LOW
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
        ld      r13,GPR13(r1)   /* only restore r13 if returning to usermode */
 1:     ld      r2,GPR2(r1)
        ld      r1,GPR1(r1)
@@ -312,7 +316,13 @@ syscall_exit_work:
        subi    r12,r12,TI_FLAGS
 
 4:     /* Anything else left to do? */
-       SET_DEFAULT_THREAD_PPR(r3, r10)         /* Set thread.ppr = 3 */
+BEGIN_FTR_SECTION
+       lis     r3,INIT_PPR@highest     /* Set thread.ppr = 3 */
+       ld      r10,PACACURRENT(r13)
+       sldi    r3,r3,32        /* bits 11-13 are used for ppr */
+       std     r3,TASKTHREADPPR(r10)
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
        andi.   r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
        beq     ret_from_except_lite
 
@@ -452,43 +462,11 @@ _GLOBAL(_switch)
        /* r3-r13 are caller saved -- Cort */
        SAVE_8GPRS(14, r1)
        SAVE_10GPRS(22, r1)
-       mflr    r20             /* Return to switch caller */
-       mfmsr   r22
-       li      r0, MSR_FP
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-       oris    r0,r0,MSR_VSX@h /* Disable VSX */
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
-       oris    r0,r0,MSR_VEC@h /* Disable altivec */
-       mfspr   r24,SPRN_VRSAVE /* save vrsave register value */
-       std     r24,THREAD_VRSAVE(r3)
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
-       and.    r0,r0,r22
-       beq+    1f
-       andc    r22,r22,r0
-       MTMSRD(r22)
-       isync
-1:     std     r20,_NIP(r1)
+       std     r0,_NIP(r1)     /* Return to switch caller */
        mfcr    r23
        std     r23,_CCR(r1)
        std     r1,KSP(r3)      /* Set old stack pointer */
 
-#ifdef CONFIG_PPC_BOOK3S_64
-BEGIN_FTR_SECTION
-       /* Event based branch registers */
-       mfspr   r0, SPRN_BESCR
-       std     r0, THREAD_BESCR(r3)
-       mfspr   r0, SPRN_EBBHR
-       std     r0, THREAD_EBBHR(r3)
-       mfspr   r0, SPRN_EBBRR
-       std     r0, THREAD_EBBRR(r3)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-#endif
-
 #ifdef CONFIG_SMP
        /* We need a sync somewhere here to make sure that if the
         * previous task gets rescheduled on another CPU, it sees all
@@ -576,47 +554,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
        mr      r1,r8           /* start using new stack pointer */
        std     r7,PACAKSAVE(r13)
 
-#ifdef CONFIG_PPC_BOOK3S_64
-BEGIN_FTR_SECTION
-       /* Event based branch registers */
-       ld      r0, THREAD_BESCR(r4)
-       mtspr   SPRN_BESCR, r0
-       ld      r0, THREAD_EBBHR(r4)
-       mtspr   SPRN_EBBHR, r0
-       ld      r0, THREAD_EBBRR(r4)
-       mtspr   SPRN_EBBRR, r0
-
-       ld      r0,THREAD_TAR(r4)
-       mtspr   SPRN_TAR,r0
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-#endif
-
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
-       ld      r0,THREAD_VRSAVE(r4)
-       mtspr   SPRN_VRSAVE,r0          /* if G4, restore VRSAVE reg */
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_PPC64
-BEGIN_FTR_SECTION
-       lwz     r6,THREAD_DSCR_INHERIT(r4)
-       ld      r0,THREAD_DSCR(r4)
-       cmpwi   r6,0
-       bne     1f
-       ld      r0,PACA_DSCR_DEFAULT(r13)
-1:
-BEGIN_FTR_SECTION_NESTED(70)
-       mfspr   r8, SPRN_FSCR
-       rldimi  r8, r6, FSCR_DSCR_LG, (63 - FSCR_DSCR_LG)
-       mtspr   SPRN_FSCR, r8
-END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
-       cmpd    r0,r25
-       beq     2f
-       mtspr   SPRN_DSCR,r0
-2:
-END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
-#endif
-
        ld      r6,_CCR(r1)
        mtcrf   0xFF,r6
 
index 0a0399c2af119c1c63efe094b1404f3250045769..7716cebf4b8ea086171ab326ba72079a758e2e79 100644 (file)
@@ -96,7 +96,6 @@ __start_interrupts:
 
        .globl system_reset_pSeries;
 system_reset_pSeries:
-       HMT_MEDIUM_PPR_DISCARD
        SET_SCRATCH0(r13)
 #ifdef CONFIG_PPC_P7_NAP
 BEGIN_FTR_SECTION
@@ -164,7 +163,6 @@ machine_check_pSeries_1:
         * some code path might still want to branch into the original
         * vector
         */
-       HMT_MEDIUM_PPR_DISCARD
        SET_SCRATCH0(r13)               /* save r13 */
 #ifdef CONFIG_PPC_P7_NAP
 BEGIN_FTR_SECTION
@@ -199,7 +197,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
        . = 0x300
        .globl data_access_pSeries
 data_access_pSeries:
-       HMT_MEDIUM_PPR_DISCARD
        SET_SCRATCH0(r13)
        EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
                                 KVMTEST, 0x300)
@@ -207,7 +204,6 @@ data_access_pSeries:
        . = 0x380
        .globl data_access_slb_pSeries
 data_access_slb_pSeries:
-       HMT_MEDIUM_PPR_DISCARD
        SET_SCRATCH0(r13)
        EXCEPTION_PROLOG_0(PACA_EXSLB)
        EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
@@ -234,15 +230,14 @@ data_access_slb_pSeries:
        bctr
 #endif
 
-       STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
+       STD_EXCEPTION_PSERIES(0x400, instruction_access)
 
        . = 0x480
        .globl instruction_access_slb_pSeries
 instruction_access_slb_pSeries:
-       HMT_MEDIUM_PPR_DISCARD
        SET_SCRATCH0(r13)
        EXCEPTION_PROLOG_0(PACA_EXSLB)
-       EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
+       EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x480)
        std     r3,PACA_EXSLB+EX_R3(r13)
        mfspr   r3,SPRN_SRR0            /* SRR0 is faulting address */
 #ifdef __DISABLED__
@@ -269,25 +264,24 @@ instruction_access_slb_pSeries:
        .globl hardware_interrupt_hv;
 hardware_interrupt_pSeries:
 hardware_interrupt_hv:
-       HMT_MEDIUM_PPR_DISCARD
        BEGIN_FTR_SECTION
                _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
                                            EXC_HV, SOFTEN_TEST_HV)
                KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
        FTR_SECTION_ELSE
                _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
-                                           EXC_STD, SOFTEN_TEST_HV_201)
+                                           EXC_STD, SOFTEN_TEST_PR)
                KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
        ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 
-       STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
+       STD_EXCEPTION_PSERIES(0x600, alignment)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x600)
 
-       STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
+       STD_EXCEPTION_PSERIES(0x700, program_check)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x700)
 
-       STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
+       STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x800)
 
        . = 0x900
        .globl decrementer_pSeries
@@ -297,10 +291,10 @@ decrementer_pSeries:
        STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
 
        MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xa00)
 
-       STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
+       STD_EXCEPTION_PSERIES(0xb00, trap_0b)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xb00)
 
        . = 0xc00
        .globl  system_call_pSeries
@@ -331,8 +325,8 @@ system_call_pSeries:
        SYSCALL_PSERIES_3
        KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
 
-       STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
+       STD_EXCEPTION_PSERIES(0xd00, single_step)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xd00)
 
        /* At 0xe??? we have a bunch of hypervisor exceptions, we branch
         * out of line to handle them
@@ -407,13 +401,12 @@ hv_facility_unavailable_trampoline:
        KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
 #endif /* CONFIG_CBE_RAS */
 
-       STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
-       KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
+       STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
+       KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
 
        . = 0x1500
        .global denorm_exception_hv
 denorm_exception_hv:
-       HMT_MEDIUM_PPR_DISCARD
        mtspr   SPRN_SPRG_HSCRATCH0,r13
        EXCEPTION_PROLOG_0(PACA_EXGEN)
        EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
@@ -435,8 +428,8 @@ denorm_exception_hv:
        KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
 #endif /* CONFIG_CBE_RAS */
 
-       STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
+       STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x1700)
 
 #ifdef CONFIG_CBE_RAS
        STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
@@ -527,7 +520,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 machine_check_pSeries:
        .globl machine_check_fwnmi
 machine_check_fwnmi:
-       HMT_MEDIUM_PPR_DISCARD
        SET_SCRATCH0(r13)               /* save r13 */
        EXCEPTION_PROLOG_0(PACA_EXMC)
 machine_check_pSeries_0:
@@ -536,9 +528,9 @@ machine_check_pSeries_0:
        KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
        KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
        KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
-       KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x400)
+       KVM_HANDLER(PACA_EXSLB, EXC_STD, 0x480)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x900)
        KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
 
 #ifdef CONFIG_PPC_DENORMALISATION
@@ -621,13 +613,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 
        /* moved from 0xf00 */
        STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf00)
        STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf20)
        STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf40)
        STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
-       KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
+       KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf60)
        STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
        KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
 
@@ -711,7 +703,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
        .globl system_reset_fwnmi
       .align 7
 system_reset_fwnmi:
-       HMT_MEDIUM_PPR_DISCARD
        SET_SCRATCH0(r13)               /* save r13 */
        EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
                                 NOTEST, 0x100)
@@ -1556,29 +1547,19 @@ do_hash_page:
        lwz     r0,TI_PREEMPT(r11)      /* If we're in an "NMI" */
        andis.  r0,r0,NMI_MASK@h        /* (i.e. an irq when soft-disabled) */
        bne     77f                     /* then don't call hash_page now */
-       /*
-        * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
-        * accessing a userspace segment (even from the kernel). We assume
-        * kernel addresses always have the high bit set.
-        */
-       rlwinm  r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
-       rotldi  r0,r3,15                /* Move high bit into MSR_PR posn */
-       orc     r0,r12,r0               /* MSR_PR | ~high_bit */
-       rlwimi  r4,r0,32-13,30,30       /* becomes _PAGE_USER access bit */
-       ori     r4,r4,1                 /* add _PAGE_PRESENT */
-       rlwimi  r4,r5,22+2,31-2,31-2    /* Set _PAGE_EXEC if trap is 0x400 */
 
        /*
         * r3 contains the faulting address
-        * r4 contains the required access permissions
+        * r4 msr
         * r5 contains the trap number
         * r6 contains dsisr
         *
         * at return r3 = 0 for success, 1 for page fault, negative for error
         */
+        mr     r4,r12
        ld      r6,_DSISR(r1)
-       bl      hash_page               /* build HPTE if possible */
-       cmpdi   r3,0                    /* see if hash_page succeeded */
+       bl      __hash_page             /* build HPTE if possible */
+        cmpdi  r3,0                    /* see if __hash_page succeeded */
 
        /* Success */
        beq     fast_exc_return_irq     /* Return from exception on success */
index 9ad236e5d2c9d072540f1167107076e883af9ac3..2117eaca3d288232a735325f2a31bd2a3c80e4cf 100644 (file)
@@ -73,29 +73,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        MTFSF_L(fr0)
        REST_32FPVSRS(0, R4, R7)
 
-       /* FP/VSX off again */
-       MTMSRD(r6)
-       SYNC
-
        blr
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
-/*
- * Enable use of the FPU, and VSX if possible, for the caller.
- */
-_GLOBAL(fp_enable)
-       mfmsr   r3
-       ori     r3,r3,MSR_FP
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-       oris    r3,r3,MSR_VSX@h
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif
-       SYNC
-       MTMSRD(r3)
-       isync                   /* (not necessary for arch 2.02 and later) */
-       blr
-
 /*
  * Load state from memory into FP registers including FPSCR.
  * Assumes the caller has enabled FP in the MSR.
@@ -136,31 +116,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        SYNC
        MTMSRD(r5)                      /* enable use of fpu now */
        isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_fpu in switch_to.
- */
-#ifndef CONFIG_SMP
-       LOAD_REG_ADDRBASE(r3, last_task_used_math)
-       toreal(r3)
-       PPC_LL  r4,ADDROFF(last_task_used_math)(r3)
-       PPC_LCMPI       0,r4,0
-       beq     1f
-       toreal(r4)
-       addi    r4,r4,THREAD            /* want last_task_used_math->thread */
-       addi    r10,r4,THREAD_FPSTATE
-       SAVE_32FPVSRS(0, R5, R10)
-       mffs    fr0
-       stfd    fr0,FPSTATE_FPSCR(r10)
-       PPC_LL  r5,PT_REGS(r4)
-       toreal(r5)
-       PPC_LL  r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-       li      r10,MSR_FP|MSR_FE0|MSR_FE1
-       andc    r4,r4,r10               /* disable FP for previous task */
-       PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
        /* enable use of FP after return */
 #ifdef CONFIG_PPC32
        mfspr   r5,SPRN_SPRG_THREAD     /* current task's THREAD (phys) */
@@ -179,36 +134,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        lfd     fr0,FPSTATE_FPSCR(r10)
        MTFSF_L(fr0)
        REST_32FPVSRS(0, R4, R10)
-#ifndef CONFIG_SMP
-       subi    r4,r5,THREAD
-       fromreal(r4)
-       PPC_STL r4,ADDROFF(last_task_used_math)(r3)
-#endif /* CONFIG_SMP */
        /* restore registers and return */
        /* we haven't used ctr or xer or lr */
        blr
 
 /*
- * giveup_fpu(tsk)
+ * __giveup_fpu(tsk)
  * Disable FP for the task given as the argument,
  * and save the floating-point registers in its thread_struct.
  * Enables the FPU for use in the kernel on return.
  */
-_GLOBAL(giveup_fpu)
-       mfmsr   r5
-       ori     r5,r5,MSR_FP
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-       oris    r5,r5,MSR_VSX@h
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif
-       SYNC_601
-       ISYNC_601
-       MTMSRD(r5)                      /* enable use of fpu now */
-       SYNC_601
-       isync
-       PPC_LCMPI       0,r3,0
-       beqlr-                          /* if no previous owner, done */
+_GLOBAL(__giveup_fpu)
        addi    r3,r3,THREAD            /* want THREAD of task */
        PPC_LL  r6,THREAD_FPSAVEAREA(r3)
        PPC_LL  r5,PT_REGS(r3)
@@ -230,11 +166,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
        andc    r4,r4,r3                /* disable FP for previous task */
        PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 1:
-#ifndef CONFIG_SMP
-       li      r5,0
-       LOAD_REG_ADDRBASE(r4,last_task_used_math)
-       PPC_STL r5,ADDROFF(last_task_used_math)(r4)
-#endif /* CONFIG_SMP */
        blr
 
 /*
index fffd1f96bb1d0d81319ceb72ddcbf7ad15b82880..f705171b924b9389c5b16979ce52fd90171ecffa 100644 (file)
@@ -857,29 +857,6 @@ _GLOBAL(load_up_spe)
        oris    r5,r5,MSR_SPE@h
        mtmsr   r5                      /* enable use of SPE now */
        isync
-/*
- * For SMP, we don't do lazy SPE switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_spe in switch_to.
- */
-#ifndef CONFIG_SMP
-       lis     r3,last_task_used_spe@ha
-       lwz     r4,last_task_used_spe@l(r3)
-       cmpi    0,r4,0
-       beq     1f
-       addi    r4,r4,THREAD    /* want THREAD of last_task_used_spe */
-       SAVE_32EVRS(0,r10,r4,THREAD_EVR0)
-       evxor   evr10, evr10, evr10     /* clear out evr10 */
-       evmwumiaa evr10, evr10, evr10   /* evr10 <- ACC = 0 * 0 + ACC */
-       li      r5,THREAD_ACC
-       evstddx evr10, r4, r5           /* save off accumulator */
-       lwz     r5,PT_REGS(r4)
-       lwz     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-       lis     r10,MSR_SPE@h
-       andc    r4,r4,r10       /* disable SPE for previous task */
-       stw     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* !CONFIG_SMP */
        /* enable use of SPE after return */
        oris    r9,r9,MSR_SPE@h
        mfspr   r5,SPRN_SPRG_THREAD     /* current task's THREAD (phys) */
@@ -889,10 +866,6 @@ _GLOBAL(load_up_spe)
        evlddx  evr4,r10,r5
        evmra   evr4,evr4
        REST_32EVRS(0,r10,r5,THREAD_EVR0)
-#ifndef CONFIG_SMP
-       subi    r4,r5,THREAD
-       stw     r4,last_task_used_spe@l(r3)
-#endif /* !CONFIG_SMP */
        blr
 
 /*
@@ -1011,16 +984,10 @@ _GLOBAL(__setup_ehv_ivors)
 
 #ifdef CONFIG_SPE
 /*
- * extern void giveup_spe(struct task_struct *prev)
+ * extern void __giveup_spe(struct task_struct *prev)
  *
  */
-_GLOBAL(giveup_spe)
-       mfmsr   r5
-       oris    r5,r5,MSR_SPE@h
-       mtmsr   r5                      /* enable use of SPE now */
-       isync
-       cmpi    0,r3,0
-       beqlr-                          /* if no previous owner, done */
+_GLOBAL(__giveup_spe)
        addi    r3,r3,THREAD            /* want THREAD of task */
        lwz     r5,PT_REGS(r3)
        cmpi    0,r5,0
@@ -1035,11 +1002,6 @@ _GLOBAL(giveup_spe)
        andc    r4,r4,r3                /* disable SPE for previous task */
        stw     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 1:
-#ifndef CONFIG_SMP
-       li      r5,0
-       lis     r4,last_task_used_spe@ha
-       stw     r5,last_task_used_spe@l(r4)
-#endif /* !CONFIG_SMP */
        blr
 #endif /* CONFIG_SPE */
 
index 112ccf4975620f899a4747c98a13cd09260b5598..cf4fb5429cf12fe097590d570d7c9a6b0bce2571 100644 (file)
@@ -89,13 +89,6 @@ _GLOBAL(power7_powersave_common)
        std     r0,_LINK(r1)
        std     r0,_NIP(r1)
 
-#ifndef CONFIG_SMP
-       /* Make sure FPU, VSX etc... are flushed as we may lose
-        * state when going to nap mode
-        */
-       bl      discard_lazy_cpu_state
-#endif /* CONFIG_SMP */
-
        /* Hard disable interrupts */
        mfmsr   r9
        rldicl  r9,r9,48,1
index ed3ab509facac91ebe8625570d9f15c203b45f20..be8edd67f05be889d971d714e4ef53a40d410081 100644 (file)
@@ -743,6 +743,8 @@ relocate_new_kernel:
        /* Check for 47x cores */
        mfspr   r3,SPRN_PVR
        srwi    r3,r3,16
+       cmplwi  cr0,r3,PVR_476FPE@h
+       beq     setup_map_47x
        cmplwi  cr0,r3,PVR_476@h
        beq     setup_map_47x
        cmplwi  cr0,r3,PVR_476_ISS@h
index 68384514506b7725346d4b1cefd61048459efa24..59663af9315fc16123cd4aacb090a6efd8cbc33d 100644 (file)
@@ -635,6 +635,33 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                         */
                        break;
 
+               case R_PPC64_ENTRY:
+                       /*
+                        * Optimize ELFv2 large code model entry point if
+                        * the TOC is within 2GB range of current location.
+                        */
+                       value = my_r2(sechdrs, me) - (unsigned long)location;
+                       if (value + 0x80008000 > 0xffffffff)
+                               break;
+                       /*
+                        * Check for the large code model prolog sequence:
+                        *      ld r2, ...(r12)
+                        *      add r2, r2, r12
+                        */
+                       if ((((uint32_t *)location)[0] & ~0xfffc)
+                           != 0xe84c0000)
+                               break;
+                       if (((uint32_t *)location)[1] != 0x7c426214)
+                               break;
+                       /*
+                        * If found, replace it with:
+                        *      addis r2, r12, (.TOC.-func)@ha
+                        *      addi r2, r12, (.TOC.-func)@l
+                        */
+                       ((uint32_t *)location)[0] = 0x3c4c0000 + PPC_HA(value);
+                       ((uint32_t *)location)[1] = 0x38420000 + PPC_LO(value);
+                       break;
+
                case R_PPC64_REL16_HA:
                        /* Subtract location pointer */
                        value -= (unsigned long)location;
index 202963ee013a81c76b62dcbd6bc59c0c8cbc9844..41e1607e800caf9ff5277f08609dab49566123e6 100644 (file)
@@ -19,13 +19,11 @@ EXPORT_SYMBOL(_mcount);
 #endif
 
 #ifdef CONFIG_PPC_FPU
-EXPORT_SYMBOL(giveup_fpu);
 EXPORT_SYMBOL(load_fp_state);
 EXPORT_SYMBOL(store_fp_state);
 #endif
 
 #ifdef CONFIG_ALTIVEC
-EXPORT_SYMBOL(giveup_altivec);
 EXPORT_SYMBOL(load_vr_state);
 EXPORT_SYMBOL(store_vr_state);
 #endif
@@ -34,10 +32,6 @@ EXPORT_SYMBOL(store_vr_state);
 EXPORT_SYMBOL_GPL(__giveup_vsx);
 #endif
 
-#ifdef CONFIG_SPE
-EXPORT_SYMBOL(giveup_spe);
-#endif
-
 #ifdef CONFIG_EPAPR_PARAVIRT
 EXPORT_SYMBOL(epapr_hypercall_start);
 #endif
index 646bf4d222c1caeda5c7851de74fbbe0ad0d65d9..dccc87e8fee5e6544de0d8fc732a97aa14f45907 100644 (file)
 
 extern unsigned long _get_SP(void);
 
-#ifndef CONFIG_SMP
-struct task_struct *last_task_used_math = NULL;
-struct task_struct *last_task_used_altivec = NULL;
-struct task_struct *last_task_used_vsx = NULL;
-struct task_struct *last_task_used_spe = NULL;
-#endif
-
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-void giveup_fpu_maybe_transactional(struct task_struct *tsk)
+static void check_if_tm_restore_required(struct task_struct *tsk)
 {
        /*
         * If we are saving the current thread's registers, and the
@@ -89,34 +82,67 @@ void giveup_fpu_maybe_transactional(struct task_struct *tsk)
                tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
                set_thread_flag(TIF_RESTORE_TM);
        }
+}
+#else
+static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
+bool strict_msr_control;
+EXPORT_SYMBOL(strict_msr_control);
+
+static int __init enable_strict_msr_control(char *str)
+{
+       strict_msr_control = true;
+       pr_info("Enabling strict facility control\n");
 
-       giveup_fpu(tsk);
+       return 0;
 }
+early_param("ppc_strict_facility_enable", enable_strict_msr_control);
 
-void giveup_altivec_maybe_transactional(struct task_struct *tsk)
+void msr_check_and_set(unsigned long bits)
 {
-       /*
-        * If we are saving the current thread's registers, and the
-        * thread is in a transactional state, set the TIF_RESTORE_TM
-        * bit so that we know to restore the registers before
-        * returning to userspace.
-        */
-       if (tsk == current && tsk->thread.regs &&
-           MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
-           !test_thread_flag(TIF_RESTORE_TM)) {
-               tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
-               set_thread_flag(TIF_RESTORE_TM);
-       }
+       unsigned long oldmsr = mfmsr();
+       unsigned long newmsr;
 
-       giveup_altivec(tsk);
+       newmsr = oldmsr | bits;
+
+#ifdef CONFIG_VSX
+       if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
+               newmsr |= MSR_VSX;
+#endif
+
+       if (oldmsr != newmsr)
+               mtmsr_isync(newmsr);
 }
 
-#else
-#define giveup_fpu_maybe_transactional(tsk)    giveup_fpu(tsk)
-#define giveup_altivec_maybe_transactional(tsk)        giveup_altivec(tsk)
-#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+void __msr_check_and_clear(unsigned long bits)
+{
+       unsigned long oldmsr = mfmsr();
+       unsigned long newmsr;
+
+       newmsr = oldmsr & ~bits;
+
+#ifdef CONFIG_VSX
+       if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
+               newmsr &= ~MSR_VSX;
+#endif
+
+       if (oldmsr != newmsr)
+               mtmsr_isync(newmsr);
+}
+EXPORT_SYMBOL(__msr_check_and_clear);
 
 #ifdef CONFIG_PPC_FPU
+void giveup_fpu(struct task_struct *tsk)
+{
+       check_if_tm_restore_required(tsk);
+
+       msr_check_and_set(MSR_FP);
+       __giveup_fpu(tsk);
+       msr_check_and_clear(MSR_FP);
+}
+EXPORT_SYMBOL(giveup_fpu);
+
 /*
  * Make sure the floating-point register state in the
  * the thread_struct is up to date for task tsk.
@@ -134,52 +160,56 @@ void flush_fp_to_thread(struct task_struct *tsk)
                 */
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_FP) {
-#ifdef CONFIG_SMP
                        /*
                         * This should only ever be called for current or
                         * for a stopped child process.  Since we save away
-                        * the FP register state on context switch on SMP,
+                        * the FP register state on context switch,
                         * there is something wrong if a stopped child appears
                         * to still have its FP state in the CPU registers.
                         */
                        BUG_ON(tsk != current);
-#endif
-                       giveup_fpu_maybe_transactional(tsk);
+                       giveup_fpu(tsk);
                }
                preempt_enable();
        }
 }
 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
-#endif /* CONFIG_PPC_FPU */
 
 void enable_kernel_fp(void)
 {
        WARN_ON(preemptible());
 
-#ifdef CONFIG_SMP
-       if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
-               giveup_fpu_maybe_transactional(current);
-       else
-               giveup_fpu(NULL);       /* just enables FP for kernel */
-#else
-       giveup_fpu_maybe_transactional(last_task_used_math);
-#endif /* CONFIG_SMP */
+       msr_check_and_set(MSR_FP);
+
+       if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
+               check_if_tm_restore_required(current);
+               __giveup_fpu(current);
+       }
 }
 EXPORT_SYMBOL(enable_kernel_fp);
+#endif /* CONFIG_PPC_FPU */
 
 #ifdef CONFIG_ALTIVEC
+void giveup_altivec(struct task_struct *tsk)
+{
+       check_if_tm_restore_required(tsk);
+
+       msr_check_and_set(MSR_VEC);
+       __giveup_altivec(tsk);
+       msr_check_and_clear(MSR_VEC);
+}
+EXPORT_SYMBOL(giveup_altivec);
+
 void enable_kernel_altivec(void)
 {
        WARN_ON(preemptible());
 
-#ifdef CONFIG_SMP
-       if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
-               giveup_altivec_maybe_transactional(current);
-       else
-               giveup_altivec_notask();
-#else
-       giveup_altivec_maybe_transactional(last_task_used_altivec);
-#endif /* CONFIG_SMP */
+       msr_check_and_set(MSR_VEC);
+
+       if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
+               check_if_tm_restore_required(current);
+               __giveup_altivec(current);
+       }
 }
 EXPORT_SYMBOL(enable_kernel_altivec);
 
@@ -192,10 +222,8 @@ void flush_altivec_to_thread(struct task_struct *tsk)
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VEC) {
-#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
-#endif
-                       giveup_altivec_maybe_transactional(tsk);
+                       giveup_altivec(tsk);
                }
                preempt_enable();
        }
@@ -204,37 +232,43 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
-void enable_kernel_vsx(void)
+void giveup_vsx(struct task_struct *tsk)
 {
-       WARN_ON(preemptible());
+       check_if_tm_restore_required(tsk);
 
-#ifdef CONFIG_SMP
-       if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
-               giveup_vsx(current);
-       else
-               giveup_vsx(NULL);       /* just enable vsx for kernel - force */
-#else
-       giveup_vsx(last_task_used_vsx);
-#endif /* CONFIG_SMP */
+       msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
+       if (tsk->thread.regs->msr & MSR_FP)
+               __giveup_fpu(tsk);
+       if (tsk->thread.regs->msr & MSR_VEC)
+               __giveup_altivec(tsk);
+       __giveup_vsx(tsk);
+       msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
 }
-EXPORT_SYMBOL(enable_kernel_vsx);
+EXPORT_SYMBOL(giveup_vsx);
 
-void giveup_vsx(struct task_struct *tsk)
+void enable_kernel_vsx(void)
 {
-       giveup_fpu_maybe_transactional(tsk);
-       giveup_altivec_maybe_transactional(tsk);
-       __giveup_vsx(tsk);
+       WARN_ON(preemptible());
+
+       msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
+
+       if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
+               check_if_tm_restore_required(current);
+               if (current->thread.regs->msr & MSR_FP)
+                       __giveup_fpu(current);
+               if (current->thread.regs->msr & MSR_VEC)
+                       __giveup_altivec(current);
+               __giveup_vsx(current);
+       }
 }
-EXPORT_SYMBOL(giveup_vsx);
+EXPORT_SYMBOL(enable_kernel_vsx);
 
 void flush_vsx_to_thread(struct task_struct *tsk)
 {
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VSX) {
-#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
-#endif
                        giveup_vsx(tsk);
                }
                preempt_enable();
@@ -244,19 +278,26 @@ EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_SPE
+void giveup_spe(struct task_struct *tsk)
+{
+       check_if_tm_restore_required(tsk);
+
+       msr_check_and_set(MSR_SPE);
+       __giveup_spe(tsk);
+       msr_check_and_clear(MSR_SPE);
+}
+EXPORT_SYMBOL(giveup_spe);
 
 void enable_kernel_spe(void)
 {
        WARN_ON(preemptible());
 
-#ifdef CONFIG_SMP
-       if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
-               giveup_spe(current);
-       else
-               giveup_spe(NULL);       /* just enable SPE for kernel - force */
-#else
-       giveup_spe(last_task_used_spe);
-#endif /* __SMP __ */
+       msr_check_and_set(MSR_SPE);
+
+       if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
+               check_if_tm_restore_required(current);
+               __giveup_spe(current);
+       }
 }
 EXPORT_SYMBOL(enable_kernel_spe);
 
@@ -265,9 +306,7 @@ void flush_spe_to_thread(struct task_struct *tsk)
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_SPE) {
-#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
-#endif
                        tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
                        giveup_spe(tsk);
                }
@@ -276,31 +315,81 @@ void flush_spe_to_thread(struct task_struct *tsk)
 }
 #endif /* CONFIG_SPE */
 
-#ifndef CONFIG_SMP
-/*
- * If we are doing lazy switching of CPU state (FP, altivec or SPE),
- * and the current task has some state, discard it.
- */
-void discard_lazy_cpu_state(void)
+static unsigned long msr_all_available;
+
+static int __init init_msr_all_available(void)
 {
-       preempt_disable();
-       if (last_task_used_math == current)
-               last_task_used_math = NULL;
+#ifdef CONFIG_PPC_FPU
+       msr_all_available |= MSR_FP;
+#endif
 #ifdef CONFIG_ALTIVEC
-       if (last_task_used_altivec == current)
-               last_task_used_altivec = NULL;
-#endif /* CONFIG_ALTIVEC */
+       if (cpu_has_feature(CPU_FTR_ALTIVEC))
+               msr_all_available |= MSR_VEC;
+#endif
 #ifdef CONFIG_VSX
-       if (last_task_used_vsx == current)
-               last_task_used_vsx = NULL;
-#endif /* CONFIG_VSX */
+       if (cpu_has_feature(CPU_FTR_VSX))
+               msr_all_available |= MSR_VSX;
+#endif
 #ifdef CONFIG_SPE
-       if (last_task_used_spe == current)
-               last_task_used_spe = NULL;
+       if (cpu_has_feature(CPU_FTR_SPE))
+               msr_all_available |= MSR_SPE;
 #endif
-       preempt_enable();
+
+       return 0;
+}
+early_initcall(init_msr_all_available);
+
+void giveup_all(struct task_struct *tsk)
+{
+       unsigned long usermsr;
+
+       if (!tsk->thread.regs)
+               return;
+
+       usermsr = tsk->thread.regs->msr;
+
+       if ((usermsr & msr_all_available) == 0)
+               return;
+
+       msr_check_and_set(msr_all_available);
+
+#ifdef CONFIG_PPC_FPU
+       if (usermsr & MSR_FP)
+               __giveup_fpu(tsk);
+#endif
+#ifdef CONFIG_ALTIVEC
+       if (usermsr & MSR_VEC)
+               __giveup_altivec(tsk);
+#endif
+#ifdef CONFIG_VSX
+       if (usermsr & MSR_VSX)
+               __giveup_vsx(tsk);
+#endif
+#ifdef CONFIG_SPE
+       if (usermsr & MSR_SPE)
+               __giveup_spe(tsk);
+#endif
+
+       msr_check_and_clear(msr_all_available);
+}
+EXPORT_SYMBOL(giveup_all);
+
+void flush_all_to_thread(struct task_struct *tsk)
+{
+       if (tsk->thread.regs) {
+               preempt_disable();
+               BUG_ON(tsk != current);
+               giveup_all(tsk);
+
+#ifdef CONFIG_SPE
+               if (tsk->thread.regs->msr & MSR_SPE)
+                       tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
+#endif
+
+               preempt_enable();
+       }
 }
-#endif /* CONFIG_SMP */
+EXPORT_SYMBOL(flush_all_to_thread);
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 void do_send_trap(struct pt_regs *regs, unsigned long address,
@@ -744,13 +833,15 @@ void restore_tm_state(struct pt_regs *regs)
        msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
        msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
        if (msr_diff & MSR_FP) {
-               fp_enable();
+               msr_check_and_set(MSR_FP);
                load_fp_state(&current->thread.fp_state);
+               msr_check_and_clear(MSR_FP);
                regs->msr |= current->thread.fpexc_mode;
        }
        if (msr_diff & MSR_VEC) {
-               vec_enable();
+               msr_check_and_set(MSR_VEC);
                load_vr_state(&current->thread.vr_state);
+               msr_check_and_clear(MSR_VEC);
        }
        regs->msr |= msr_diff;
 }
@@ -760,112 +851,87 @@ void restore_tm_state(struct pt_regs *regs)
 #define __switch_to_tm(prev)
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
-struct task_struct *__switch_to(struct task_struct *prev,
-       struct task_struct *new)
+static inline void save_sprs(struct thread_struct *t)
 {
-       struct thread_struct *new_thread, *old_thread;
-       struct task_struct *last;
-#ifdef CONFIG_PPC_BOOK3S_64
-       struct ppc64_tlb_batch *batch;
+#ifdef CONFIG_ALTIVEC
+       if (cpu_has_feature(CPU_FTR_ALTIVEC))
+               t->vrsave = mfspr(SPRN_VRSAVE);
 #endif
+#ifdef CONFIG_PPC_BOOK3S_64
+       if (cpu_has_feature(CPU_FTR_DSCR))
+               t->dscr = mfspr(SPRN_DSCR);
 
-       WARN_ON(!irqs_disabled());
+       if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+               t->bescr = mfspr(SPRN_BESCR);
+               t->ebbhr = mfspr(SPRN_EBBHR);
+               t->ebbrr = mfspr(SPRN_EBBRR);
 
-       /* Back up the TAR and DSCR across context switches.
-        * Note that the TAR is not available for use in the kernel.  (To
-        * provide this, the TAR should be backed up/restored on exception
-        * entry/exit instead, and be in pt_regs.  FIXME, this should be in
-        * pt_regs anyway (for debug).)
-        * Save the TAR and DSCR here before we do treclaim/trecheckpoint as
-        * these will change them.
-        */
-       save_early_sprs(&prev->thread);
+               t->fscr = mfspr(SPRN_FSCR);
 
-       __switch_to_tm(prev);
+               /*
+                * Note that the TAR is not available for use in the kernel.
+                * (To provide this, the TAR should be backed up/restored on
+                * exception entry/exit instead, and be in pt_regs.  FIXME,
+                * this should be in pt_regs anyway (for debug).)
+                */
+               t->tar = mfspr(SPRN_TAR);
+       }
+#endif
+}
 
-#ifdef CONFIG_SMP
-       /* avoid complexity of lazy save/restore of fpu
-        * by just saving it every time we switch out if
-        * this task used the fpu during the last quantum.
-        *
-        * If it tries to use the fpu again, it'll trap and
-        * reload its fp regs.  So we don't have to do a restore
-        * every switch, just a save.
-        *  -- Cort
-        */
-       if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
-               giveup_fpu(prev);
+static inline void restore_sprs(struct thread_struct *old_thread,
+                               struct thread_struct *new_thread)
+{
 #ifdef CONFIG_ALTIVEC
-       /*
-        * If the previous thread used altivec in the last quantum
-        * (thus changing altivec regs) then save them.
-        * We used to check the VRSAVE register but not all apps
-        * set it, so we don't rely on it now (and in fact we need
-        * to save & restore VSCR even if VRSAVE == 0).  -- paulus
-        *
-        * On SMP we always save/restore altivec regs just to avoid the
-        * complexity of changing processors.
-        *  -- Cort
-        */
-       if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
-               giveup_altivec(prev);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
-       if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
-               /* VMX and FPU registers are already save here */
-               __giveup_vsx(prev);
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
-       /*
-        * If the previous thread used spe in the last quantum
-        * (thus changing spe regs) then save them.
-        *
-        * On SMP we always save/restore spe regs just to avoid the
-        * complexity of changing processors.
-        */
-       if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
-               giveup_spe(prev);
-#endif /* CONFIG_SPE */
+       if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
+           old_thread->vrsave != new_thread->vrsave)
+               mtspr(SPRN_VRSAVE, new_thread->vrsave);
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+       if (cpu_has_feature(CPU_FTR_DSCR)) {
+               u64 dscr = get_paca()->dscr_default;
+               u64 fscr = old_thread->fscr & ~FSCR_DSCR;
 
-#else  /* CONFIG_SMP */
-#ifdef CONFIG_ALTIVEC
-       /* Avoid the trap.  On smp this this never happens since
-        * we don't set last_task_used_altivec -- Cort
-        */
-       if (new->thread.regs && last_task_used_altivec == new)
-               new->thread.regs->msr |= MSR_VEC;
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
-       if (new->thread.regs && last_task_used_vsx == new)
-               new->thread.regs->msr |= MSR_VSX;
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
-       /* Avoid the trap.  On smp this this never happens since
-        * we don't set last_task_used_spe
-        */
-       if (new->thread.regs && last_task_used_spe == new)
-               new->thread.regs->msr |= MSR_SPE;
-#endif /* CONFIG_SPE */
+               if (new_thread->dscr_inherit) {
+                       dscr = new_thread->dscr;
+                       fscr |= FSCR_DSCR;
+               }
 
-#endif /* CONFIG_SMP */
+               if (old_thread->dscr != dscr)
+                       mtspr(SPRN_DSCR, dscr);
 
-#ifdef CONFIG_PPC_ADV_DEBUG_REGS
-       switch_booke_debug_regs(&new->thread.debug);
-#else
-/*
- * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
- * schedule DABR
- */
-#ifndef CONFIG_HAVE_HW_BREAKPOINT
-       if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
-               __set_breakpoint(&new->thread.hw_brk);
-#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+               if (old_thread->fscr != fscr)
+                       mtspr(SPRN_FSCR, fscr);
+       }
+
+       if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+               if (old_thread->bescr != new_thread->bescr)
+                       mtspr(SPRN_BESCR, new_thread->bescr);
+               if (old_thread->ebbhr != new_thread->ebbhr)
+                       mtspr(SPRN_EBBHR, new_thread->ebbhr);
+               if (old_thread->ebbrr != new_thread->ebbrr)
+                       mtspr(SPRN_EBBRR, new_thread->ebbrr);
+
+               if (old_thread->tar != new_thread->tar)
+                       mtspr(SPRN_TAR, new_thread->tar);
+       }
 #endif
+}
 
+struct task_struct *__switch_to(struct task_struct *prev,
+       struct task_struct *new)
+{
+       struct thread_struct *new_thread, *old_thread;
+       struct task_struct *last;
+#ifdef CONFIG_PPC_BOOK3S_64
+       struct ppc64_tlb_batch *batch;
+#endif
 
        new_thread = &new->thread;
        old_thread = &current->thread;
 
+       WARN_ON(!irqs_disabled());
+
 #ifdef CONFIG_PPC64
        /*
         * Collect processor utilization data per process
@@ -890,6 +956,30 @@ struct task_struct *__switch_to(struct task_struct *prev,
        }
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
+#ifdef CONFIG_PPC_ADV_DEBUG_REGS
+       switch_booke_debug_regs(&new->thread.debug);
+#else
+/*
+ * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
+ * schedule DABR
+ */
+#ifndef CONFIG_HAVE_HW_BREAKPOINT
+       if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
+               __set_breakpoint(&new->thread.hw_brk);
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+#endif
+
+       /*
+        * We need to save SPRs before treclaim/trecheckpoint as these will
+        * change a number of them.
+        */
+       save_sprs(&prev->thread);
+
+       __switch_to_tm(prev);
+
+       /* Save FPU, Altivec, VSX and SPE state */
+       giveup_all(prev);
+
        /*
         * We can't take a PMU exception inside _switch() since there is a
         * window where the kernel stack SLB and the kernel stack are out
@@ -899,6 +989,15 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
        tm_recheckpoint_new_task(new);
 
+       /*
+        * Call restore_sprs() before calling _switch(). If we move it after
+        * _switch() then we miss out on calling it for new tasks. The reason
+        * for this is we manually create a stack frame for new tasks that
+        * directly returns through ret_from_fork() or
+        * ret_from_kernel_thread(). See copy_thread() for details.
+        */
+       restore_sprs(old_thread, new_thread);
+
        last = _switch(old_thread, new_thread);
 
 #ifdef CONFIG_PPC_BOOK3S_64
@@ -952,10 +1051,12 @@ static void show_instructions(struct pt_regs *regs)
        printk("\n");
 }
 
-static struct regbit {
+struct regbit {
        unsigned long bit;
        const char *name;
-} msr_bits[] = {
+};
+
+static struct regbit msr_bits[] = {
 #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
        {MSR_SF,        "SF"},
        {MSR_HV,        "HV"},
@@ -985,16 +1086,49 @@ static struct regbit {
        {0,             NULL}
 };
 
-static void printbits(unsigned long val, struct regbit *bits)
+static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
 {
-       const char *sep = "";
+       const char *s = "";
 
-       printk("<");
        for (; bits->bit; ++bits)
                if (val & bits->bit) {
-                       printk("%s%s", sep, bits->name);
-                       sep = ",";
+                       printk("%s%s", s, bits->name);
+                       s = sep;
                }
+}
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+static struct regbit msr_tm_bits[] = {
+       {MSR_TS_T,      "T"},
+       {MSR_TS_S,      "S"},
+       {MSR_TM,        "E"},
+       {0,             NULL}
+};
+
+static void print_tm_bits(unsigned long val)
+{
+/*
+ * This only prints something if at least one of the TM bit is set.
+ * Inside the TM[], the output means:
+ *   E: Enabled                (bit 32)
+ *   S: Suspended      (bit 33)
+ *   T: Transactional  (bit 34)
+ */
+       if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
+               printk(",TM[");
+               print_bits(val, msr_tm_bits, "");
+               printk("]");
+       }
+}
+#else
+static void print_tm_bits(unsigned long val) {}
+#endif
+
+static void print_msr_bits(unsigned long val)
+{
+       printk("<");
+       print_bits(val, msr_bits, ",");
+       print_tm_bits(val);
        printk(">");
 }
 
@@ -1019,7 +1153,7 @@ void show_regs(struct pt_regs * regs)
        printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
               regs, regs->trap, print_tainted(), init_utsname()->release);
        printk("MSR: "REG" ", regs->msr);
-       printbits(regs->msr, msr_bits);
+       print_msr_bits(regs->msr);
        printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
        trap = TRAP(regs);
        if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
@@ -1061,13 +1195,10 @@ void show_regs(struct pt_regs * regs)
 
 void exit_thread(void)
 {
-       discard_lazy_cpu_state();
 }
 
 void flush_thread(void)
 {
-       discard_lazy_cpu_state();
-
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(current);
 #else /* CONFIG_HAVE_HW_BREAKPOINT */
@@ -1086,10 +1217,7 @@ release_thread(struct task_struct *t)
  */
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
-       flush_fp_to_thread(src);
-       flush_altivec_to_thread(src);
-       flush_vsx_to_thread(src);
-       flush_spe_to_thread(src);
+       flush_all_to_thread(src);
        /*
         * Flush TM state out so we can copy it.  __switch_to_tm() does this
         * flush but it removes the checkpointed state from the current CPU and
@@ -1212,7 +1340,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 #ifdef CONFIG_PPC64 
        if (cpu_has_feature(CPU_FTR_DSCR)) {
                p->thread.dscr_inherit = current->thread.dscr_inherit;
-               p->thread.dscr = current->thread.dscr;
+               p->thread.dscr = mfspr(SPRN_DSCR);
        }
        if (cpu_has_feature(CPU_FTR_HAS_PPR))
                p->thread.ppr = INIT_PPR;
@@ -1305,7 +1433,6 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
                regs->msr = MSR_USER32;
        }
 #endif
-       discard_lazy_cpu_state();
 #ifdef CONFIG_VSX
        current->thread.used_vsr = 0;
 #endif
index 92dea8df6b26d178cb4c3eca6b5fc0b4e1890019..da5192590c445f89c2a8466128b570c87a133470 100644 (file)
@@ -389,6 +389,7 @@ static void __init prom_printf(const char *format, ...)
                        break;
                }
        }
+       va_end(args);
 }
 
 
index 737c0d0b53ac43dcdc41f09b0a3177cf8b4e389b..30a03c03fe734a8c80828fc4260fe3679ab63e85 100644 (file)
@@ -60,6 +60,7 @@ struct pt_regs_offset {
 #define STR(s) #s                      /* convert to string */
 #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
 #define GPR_OFFSET_NAME(num)   \
+       {.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
        {.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
 #define REG_OFFSET_END {.name = NULL, .offset = 0}
 
index 5a753fae8265ae8fc2f9e2e99152c0192fd62b9d..28736ff27fea1090dc888e4ef73a6d235ce2989b 100644 (file)
@@ -44,6 +44,9 @@
 #include <asm/mmu.h>
 #include <asm/topology.h>
 
+/* This is here deliberately so it's only used in this file */
+void enter_rtas(unsigned long);
+
 struct rtas_t rtas = {
        .lock = __ARCH_SPIN_LOCK_UNLOCKED
 };
@@ -93,21 +96,13 @@ static void unlock_rtas(unsigned long flags)
  */
 static void call_rtas_display_status(unsigned char c)
 {
-       struct rtas_args *args = &rtas.args;
        unsigned long s;
 
        if (!rtas.base)
                return;
-       s = lock_rtas();
-
-       args->token = cpu_to_be32(10);
-       args->nargs = cpu_to_be32(1);
-       args->nret  = cpu_to_be32(1);
-       args->rets  = &(args->args[1]);
-       args->args[0] = cpu_to_be32(c);
-
-       enter_rtas(__pa(args));
 
+       s = lock_rtas();
+       rtas_call_unlocked(&rtas.args, 10, 1, 1, NULL, c);
        unlock_rtas(s);
 }
 
@@ -418,6 +413,36 @@ static char *__fetch_rtas_last_error(char *altbuf)
 #define get_errorlog_buffer()          NULL
 #endif
 
+
+static void
+va_rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret,
+                     va_list list)
+{
+       int i;
+
+       args->token = cpu_to_be32(token);
+       args->nargs = cpu_to_be32(nargs);
+       args->nret  = cpu_to_be32(nret);
+       args->rets  = &(args->args[nargs]);
+
+       for (i = 0; i < nargs; ++i)
+               args->args[i] = cpu_to_be32(va_arg(list, __u32));
+
+       for (i = 0; i < nret; ++i)
+               args->rets[i] = 0;
+
+       enter_rtas(__pa(args));
+}
+
+void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...)
+{
+       va_list list;
+
+       va_start(list, nret);
+       va_rtas_call_unlocked(args, token, nargs, nret, list);
+       va_end(list);
+}
+
 int rtas_call(int token, int nargs, int nret, int *outputs, ...)
 {
        va_list list;
@@ -431,22 +456,14 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
                return -1;
 
        s = lock_rtas();
+
+       /* We use the global rtas args buffer */
        rtas_args = &rtas.args;
 
-       rtas_args->token = cpu_to_be32(token);
-       rtas_args->nargs = cpu_to_be32(nargs);
-       rtas_args->nret  = cpu_to_be32(nret);
-       rtas_args->rets  = &(rtas_args->args[nargs]);
        va_start(list, outputs);
-       for (i = 0; i < nargs; ++i)
-               rtas_args->args[i] = cpu_to_be32(va_arg(list, __u32));
+       va_rtas_call_unlocked(rtas_args, token, nargs, nret, list);
        va_end(list);
 
-       for (i = 0; i < nret; ++i)
-               rtas_args->rets[i] = 0;
-
-       enter_rtas(__pa(rtas_args));
-
        /* A -1 return code indicates that the last command couldn't
           be completed due to a hardware error. */
        if (be32_to_cpu(rtas_args->rets[0]) == -1)
index ef7c24e84a623882c8f98dbc93aa7ab318a4ff68..b6aa378aff636800a668ac67e525827fefd1ae76 100644 (file)
@@ -458,7 +458,7 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
         * contains valid data
         */
        if (current->thread.used_vsr && ctx_has_vsx_region) {
-               __giveup_vsx(current);
+               flush_vsx_to_thread(current);
                if (copy_vsx_to_user(&frame->mc_vsregs, current))
                        return 1;
                msr |= MSR_VSX;
@@ -606,7 +606,7 @@ static int save_tm_user_regs(struct pt_regs *regs,
         * contains valid data
         */
        if (current->thread.used_vsr) {
-               __giveup_vsx(current);
+               flush_vsx_to_thread(current);
                if (copy_vsx_to_user(&frame->mc_vsregs, current))
                        return 1;
                if (msr & MSR_VSX) {
@@ -687,15 +687,6 @@ static long restore_user_regs(struct pt_regs *regs,
        if (sig)
                regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
 
-       /*
-        * Do this before updating the thread state in
-        * current->thread.fpr/vr/evr.  That way, if we get preempted
-        * and another task grabs the FPU/Altivec/SPE, it won't be
-        * tempted to save the current CPU state into the thread_struct
-        * and corrupt what we are writing there.
-        */
-       discard_lazy_cpu_state();
-
 #ifdef CONFIG_ALTIVEC
        /*
         * Force the process to reload the altivec registers from
@@ -798,15 +789,6 @@ static long restore_tm_user_regs(struct pt_regs *regs,
        /* Restore the previous little-endian mode */
        regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
 
-       /*
-        * Do this before updating the thread state in
-        * current->thread.fpr/vr/evr.  That way, if we get preempted
-        * and another task grabs the FPU/Altivec/SPE, it won't be
-        * tempted to save the current CPU state into the thread_struct
-        * and corrupt what we are writing there.
-        */
-       discard_lazy_cpu_state();
-
 #ifdef CONFIG_ALTIVEC
        regs->msr &= ~MSR_VEC;
        if (msr & MSR_VEC) {
index c676ecec0869b26216e87483f54c2c6906ea6d01..25520794aa37905f3509c13955dcbf2134c0f36b 100644 (file)
@@ -147,7 +147,7 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
         * VMX data.
         */
        if (current->thread.used_vsr && ctx_has_vsx_region) {
-               __giveup_vsx(current);
+               flush_vsx_to_thread(current);
                v_regs += ELF_NVRREG;
                err |= copy_vsx_to_user(v_regs, current);
                /* set MSR_VSX in the MSR value in the frame to
@@ -270,7 +270,7 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
         * VMX data.
         */
        if (current->thread.used_vsr) {
-               __giveup_vsx(current);
+               flush_vsx_to_thread(current);
                v_regs += ELF_NVRREG;
                tm_v_regs += ELF_NVRREG;
 
@@ -349,15 +349,6 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
        if (set != NULL)
                err |=  __get_user(set->sig[0], &sc->oldmask);
 
-       /*
-        * Do this before updating the thread state in
-        * current->thread.fpr/vr.  That way, if we get preempted
-        * and another task grabs the FPU/Altivec, it won't be
-        * tempted to save the current CPU state into the thread_struct
-        * and corrupt what we are writing there.
-        */
-       discard_lazy_cpu_state();
-
        /*
         * Force reload of FP/VEC.
         * This has to be done before copying stuff into current->thread.fpr/vr
@@ -468,15 +459,6 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
        err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
        err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);
 
-       /*
-        * Do this before updating the thread state in
-        * current->thread.fpr/vr.  That way, if we get preempted
-        * and another task grabs the FPU/Altivec, it won't be
-        * tempted to save the current CPU state into the thread_struct
-        * and corrupt what we are writing there.
-        */
-       discard_lazy_cpu_state();
-
        /*
         * Force reload of FP/VEC.
         * This has to be done before copying stuff into current->thread.fpr/vr
index ea43a347a1044c37a800d7f78487e6f238078c94..4f24606afc3f5e414642e1fe5d9c6982e50810e0 100644 (file)
@@ -61,3 +61,10 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
        save_context_stack(trace, tsk->thread.ksp, tsk, 0);
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void
+save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+       save_context_stack(trace, regs->gpr[1], current, 0);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_regs);
index eae33e10b65fb4a1b8858872427ce16bbbf85634..6669b17525129f8c39ab7b9de8b89fb38adb4354 100644 (file)
@@ -20,9 +20,7 @@ void save_processor_state(void)
         * flush out all the special registers so we don't need
         * to save them in the snapshot
         */
-       flush_fp_to_thread(current);
-       flush_altivec_to_thread(current);
-       flush_spe_to_thread(current);
+       flush_all_to_thread(current);
 
 #ifdef CONFIG_PPC64
        hard_irq_disable();
index 2384129f5893093f82bcd2f8896e2186127ecb49..55323a620cfe2c026ee1adc79cf390b9ab511c3d 100644 (file)
@@ -57,4 +57,4 @@
 
 START_TABLE
 #include <asm/systbl.h>
-END_TABLE __NR_syscalls
+END_TABLE NR_syscalls
index 19415e7674a50fb21091e485b22703242835513b..31b6e7c358ca9ae6329e6363f29bfd1b31465642 100644 (file)
@@ -16,7 +16,7 @@ awk   'BEGIN { num = -1; }    # Ignore the beginning of the file
        /^START_TABLE/ { num = 0; next; }
        /^END_TABLE/ {
                if (num != $2) {
-                       printf "__NR_syscalls (%s) is not one more than the last syscall (%s)\n",
+                       printf "NR_syscalls (%s) is not one more than the last syscall (%s)\n",
                                $2, num - 1;
                        exit(1);
                }
index 1be1092c72042b9d4fbf91d601ded3f23669c730..81b0900a39eef095a917a4471f4b09753c988ede 100644 (file)
@@ -1002,38 +1002,6 @@ static int month_days[12] = {
        31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
 };
 
-/*
- * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
- */
-void GregorianDay(struct rtc_time * tm)
-{
-       int leapsToDate;
-       int lastYear;
-       int day;
-       int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
-
-       lastYear = tm->tm_year - 1;
-
-       /*
-        * Number of leap corrections to apply up to end of last year
-        */
-       leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;
-
-       /*
-        * This year is a leap year if it is divisible by 4 except when it is
-        * divisible by 100 unless it is divisible by 400
-        *
-        * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
-        */
-       day = tm->tm_mon > 2 && leapyear(tm->tm_year);
-
-       day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
-                  tm->tm_mday;
-
-       tm->tm_wday = day % 7;
-}
-EXPORT_SYMBOL_GPL(GregorianDay);
-
 void to_tm(int tim, struct rtc_time * tm)
 {
        register int    i;
@@ -1064,9 +1032,9 @@ void to_tm(int tim, struct rtc_time * tm)
        tm->tm_mday = day + 1;
 
        /*
-        * Determine the day of week
+        * No-one uses the day of the week.
         */
-       GregorianDay(tm);
+       tm->tm_wday = -1;
 }
 EXPORT_SYMBOL(to_tm);
 
index 37de90f8a845c9ef8f1410100a0d7d3b05b93195..b6becc795bb559399d1a9d300e2251600a8c5aba 100644 (file)
@@ -1313,13 +1313,6 @@ void nonrecoverable_exception(struct pt_regs *regs)
        die("nonrecoverable exception", regs, SIGKILL);
 }
 
-void trace_syscall(struct pt_regs *regs)
-{
-       printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
-              current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
-              regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
-}
-
 void kernel_fp_unavailable_exception(struct pt_regs *regs)
 {
        enum ctx_state prev_state = exception_enter();
index b457bfa2843603f9c920236372effe6304da3bed..def1b8b5e6c14b701b0ee83be25e4196eccdb7d3 100644 (file)
@@ -671,7 +671,7 @@ static void __init vdso_setup_syscall_map(void)
        extern unsigned long sys_ni_syscall;
 
 
-       for (i = 0; i < __NR_syscalls; i++) {
+       for (i = 0; i < NR_syscalls; i++) {
 #ifdef CONFIG_PPC64
                if (sys_call_table[i*2] != sys_ni_syscall)
                        vdso_data->syscall_map_64[i >> 5] |=
index 59cf5f452879bef8729a7b58797694b6adac1f49..3745113fcc652d8ca3e66692aaab7d87f7ea9338 100644 (file)
@@ -61,7 +61,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
        addi    r3,r3,CFG_SYSCALL_MAP32
        cmpli   cr0,r4,0
        beqlr
-       li      r0,__NR_syscalls
+       li      r0,NR_syscalls
        stw     r0,0(r4)
        crclr   cr0*4+so
        blr
index 2f01c4a0d8a037ca65cacd9af51022846661fcb0..184a6ba7f2831f1e814f934992b23272922eca13 100644 (file)
@@ -62,7 +62,7 @@ V_FUNCTION_BEGIN(__kernel_get_syscall_map)
        cmpli   cr0,r4,0
        crclr   cr0*4+so
        beqlr
-       li      r0,__NR_syscalls
+       li      r0,NR_syscalls
        stw     r0,0(r4)
        blr
   .cfi_endproc
index f5c80d567d8d188734b9f2230622f843b5ae57f3..162d0f7149419f50a7324e8e02086ab03bee993b 100644 (file)
@@ -29,23 +29,9 @@ _GLOBAL(do_load_up_transact_altivec)
        addi    r10,r3,THREAD_TRANSACT_VRSTATE
        REST_32VRS(0,r4,r10)
 
-       /* Disable VEC again. */
-       MTMSRD(r6)
-       isync
-
        blr
 #endif
 
-/*
- * Enable use of VMX/Altivec for the caller.
- */
-_GLOBAL(vec_enable)
-       mfmsr   r3
-       oris    r3,r3,MSR_VEC@h
-       MTMSRD(r3)
-       isync
-       blr
-
 /*
  * Load state from memory into VMX registers including VSCR.
  * Assumes the caller has enabled VMX in the MSR.
@@ -84,39 +70,6 @@ _GLOBAL(load_up_altivec)
        MTMSRD(r5)                      /* enable use of AltiVec now */
        isync
 
-/*
- * For SMP, we don't do lazy VMX switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another.  Instead we call giveup_altvec in switch_to.
- * VRSAVE isn't dealt with here, that is done in the normal context
- * switch code. Note that we could rely on vrsave value to eventually
- * avoid saving all of the VREGs here...
- */
-#ifndef CONFIG_SMP
-       LOAD_REG_ADDRBASE(r3, last_task_used_altivec)
-       toreal(r3)
-       PPC_LL  r4,ADDROFF(last_task_used_altivec)(r3)
-       PPC_LCMPI       0,r4,0
-       beq     1f
-
-       /* Save VMX state to last_task_used_altivec's THREAD struct */
-       toreal(r4)
-       addi    r4,r4,THREAD
-       addi    r6,r4,THREAD_VRSTATE
-       SAVE_32VRS(0,r5,r6)
-       mfvscr  v0
-       li      r10,VRSTATE_VSCR
-       stvx    v0,r10,r6
-       /* Disable VMX for last_task_used_altivec */
-       PPC_LL  r5,PT_REGS(r4)
-       toreal(r5)
-       PPC_LL  r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-       lis     r10,MSR_VEC@h
-       andc    r4,r4,r10
-       PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
-
        /* Hack: if we get an altivec unavailable trap with VRSAVE
         * set to all zeros, we assume this is a broken application
         * that fails to set it properly, and thus we switch it to
@@ -145,39 +98,15 @@ _GLOBAL(load_up_altivec)
        lvx     v0,r10,r6
        mtvscr  v0
        REST_32VRS(0,r4,r6)
-#ifndef CONFIG_SMP
-       /* Update last_task_used_altivec to 'current' */
-       subi    r4,r5,THREAD            /* Back to 'current' */
-       fromreal(r4)
-       PPC_STL r4,ADDROFF(last_task_used_altivec)(r3)
-#endif /* CONFIG_SMP */
        /* restore registers and return */
        blr
 
-_GLOBAL(giveup_altivec_notask)
-       mfmsr   r3
-       andis.  r4,r3,MSR_VEC@h
-       bnelr                           /* Already enabled? */
-       oris    r3,r3,MSR_VEC@h
-       SYNC
-       MTMSRD(r3)                      /* enable use of VMX now */
-       isync
-       blr
-
 /*
- * giveup_altivec(tsk)
+ * __giveup_altivec(tsk)
  * Disable VMX for the task given as the argument,
  * and save the vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
  */
-_GLOBAL(giveup_altivec)
-       mfmsr   r5
-       oris    r5,r5,MSR_VEC@h
-       SYNC
-       MTMSRD(r5)                      /* enable use of VMX now */
-       isync
-       PPC_LCMPI       0,r3,0
-       beqlr                           /* if no previous owner, done */
+_GLOBAL(__giveup_altivec)
        addi    r3,r3,THREAD            /* want THREAD of task */
        PPC_LL  r7,THREAD_VRSAVEAREA(r3)
        PPC_LL  r5,PT_REGS(r3)
@@ -203,11 +132,6 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
        andc    r4,r4,r3                /* disable FP for previous task */
        PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 1:
-#ifndef CONFIG_SMP
-       li      r5,0
-       LOAD_REG_ADDRBASE(r4,last_task_used_altivec)
-       PPC_STL r5,ADDROFF(last_task_used_altivec)(r4)
-#endif /* CONFIG_SMP */
        blr
 
 #ifdef CONFIG_VSX
@@ -230,20 +154,6 @@ _GLOBAL(load_up_vsx)
        andis.  r5,r12,MSR_VEC@h
        beql+   load_up_altivec         /* skip if already loaded */
 
-#ifndef CONFIG_SMP
-       ld      r3,last_task_used_vsx@got(r2)
-       ld      r4,0(r3)
-       cmpdi   0,r4,0
-       beq     1f
-       /* Disable VSX for last_task_used_vsx */
-       addi    r4,r4,THREAD
-       ld      r5,PT_REGS(r4)
-       ld      r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-       lis     r6,MSR_VSX@h
-       andc    r6,r4,r6
-       std     r6,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
        ld      r4,PACACURRENT(r13)
        addi    r4,r4,THREAD            /* Get THREAD */
        li      r6,1
@@ -251,27 +161,14 @@ _GLOBAL(load_up_vsx)
        /* enable use of VSX after return */
        oris    r12,r12,MSR_VSX@h
        std     r12,_MSR(r1)
-#ifndef CONFIG_SMP
-       /* Update last_task_used_vsx to 'current' */
-       ld      r4,PACACURRENT(r13)
-       std     r4,0(r3)
-#endif /* CONFIG_SMP */
        b       fast_exception_return
 
 /*
  * __giveup_vsx(tsk)
  * Disable VSX for the task given as the argument.
  * Does NOT save vsx registers.
- * Enables the VSX for use in the kernel on return.
  */
 _GLOBAL(__giveup_vsx)
-       mfmsr   r5
-       oris    r5,r5,MSR_VSX@h
-       mtmsrd  r5                      /* enable use of VSX now */
-       isync
-
-       cmpdi   0,r3,0
-       beqlr-                          /* if no previous owner, done */
        addi    r3,r3,THREAD            /* want THREAD of task */
        ld      r5,PT_REGS(r3)
        cmpdi   0,r5,0
@@ -281,11 +178,6 @@ _GLOBAL(__giveup_vsx)
        andc    r4,r4,r3                /* disable VSX for previous task */
        std     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
 1:
-#ifndef CONFIG_SMP
-       li      r5,0
-       ld      r4,last_task_used_vsx@got(r2)
-       std     r5,0(r4)
-#endif /* CONFIG_SMP */
        blr
 
 #endif /* CONFIG_VSX */
index 6b352691b8c99cb8c46ba7cc0f80798edeff457d..cff207b72c46ae0b54cf48db5e4a3aa828f2fc29 100644 (file)
@@ -2700,9 +2700,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
                        goto out;
        }
 
-       flush_fp_to_thread(current);
-       flush_altivec_to_thread(current);
-       flush_vsx_to_thread(current);
+       flush_all_to_thread(current);
+
        vcpu->arch.wqp = &vcpu->arch.vcore->wq;
        vcpu->arch.pgdir = current->mm->pgd;
        vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
index a759d9adb0b6f8218c38c52520dfe3cdfa70c884..eab96cfe82fa08772999704d07686a6dd61fa307 100644 (file)
@@ -1265,6 +1265,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
        if (rcomp)
                kvmppc_set_cr(vcpu, cr);
 
+       disable_kernel_fp();
        preempt_enable();
 
        return emulated;
index 70fb08da416dd60a7c52a10521b766c3733888fe..95bceca8f40e870fe933e3f6744b250326fca7a5 100644 (file)
@@ -751,6 +751,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                preempt_disable();
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
+               disable_kernel_fp();
                t->fp_save_area = &vcpu->arch.fp;
                preempt_enable();
        }
@@ -760,6 +761,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                preempt_disable();
                enable_kernel_altivec();
                load_vr_state(&vcpu->arch.vr);
+               disable_kernel_altivec();
                t->vr_save_area = &vcpu->arch.vr;
                preempt_enable();
 #endif
@@ -788,6 +790,7 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
                preempt_disable();
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
+               disable_kernel_fp();
                preempt_enable();
        }
 #ifdef CONFIG_ALTIVEC
@@ -795,6 +798,7 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
                preempt_disable();
                enable_kernel_altivec();
                load_vr_state(&vcpu->arch.vr);
+               disable_kernel_altivec();
                preempt_enable();
        }
 #endif
@@ -1486,21 +1490,8 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                goto out;
        /* interrupts now hard-disabled */
 
-       /* Save FPU state in thread_struct */
-       if (current->thread.regs->msr & MSR_FP)
-               giveup_fpu(current);
-
-#ifdef CONFIG_ALTIVEC
-       /* Save Altivec state in thread_struct */
-       if (current->thread.regs->msr & MSR_VEC)
-               giveup_altivec(current);
-#endif
-
-#ifdef CONFIG_VSX
-       /* Save VSX state in thread_struct */
-       if (current->thread.regs->msr & MSR_VSX)
-               __giveup_vsx(current);
-#endif
+       /* Save FPU, Altivec and VSX state */
+       giveup_all(current);
 
        /* Preload FPU if it's enabled */
        if (kvmppc_get_msr(vcpu) & MSR_FP)
index fd5875179e5c0e6738a1e7867a75f05cbe1fa0c8..778ef86e187eca40d63585a54b3d0ff4161ba613 100644 (file)
@@ -98,6 +98,7 @@ void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
        preempt_disable();
        enable_kernel_spe();
        kvmppc_save_guest_spe(vcpu);
+       disable_kernel_spe();
        vcpu->arch.shadow_msr &= ~MSR_SPE;
        preempt_enable();
 }
@@ -107,6 +108,7 @@ static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
        preempt_disable();
        enable_kernel_spe();
        kvmppc_load_guest_spe(vcpu);
+       disable_kernel_spe();
        vcpu->arch.shadow_msr |= MSR_SPE;
        preempt_enable();
 }
@@ -141,6 +143,7 @@ static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
        if (!(current->thread.regs->msr & MSR_FP)) {
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
+               disable_kernel_fp();
                current->thread.fp_save_area = &vcpu->arch.fp;
                current->thread.regs->msr |= MSR_FP;
        }
@@ -182,6 +185,7 @@ static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
                if (!(current->thread.regs->msr & MSR_VEC)) {
                        enable_kernel_altivec();
                        load_vr_state(&vcpu->arch.vr);
+                       disable_kernel_altivec();
                        current->thread.vr_save_area = &vcpu->arch.vr;
                        current->thread.regs->msr |= MSR_VEC;
                }
index ac93a3bd27300f9d058f45a2df4838379ecde52e..b27e030fc9f865be037729f2aa46d1c3e1d35c38 100644 (file)
@@ -46,6 +46,7 @@ int enter_vmx_usercopy(void)
  */
 int exit_vmx_usercopy(void)
 {
+       disable_kernel_altivec();
        pagefault_enable();
        preempt_enable();
        return 0;
@@ -70,6 +71,7 @@ int enter_vmx_copy(void)
  */
 void *exit_vmx_copy(void *dest)
 {
+       disable_kernel_altivec();
        preempt_enable();
        return dest;
 }
index e905f7c2ea7bf9ce5a370a7442daa6b2210a38ac..07f49f1568e5eacccf5ea19929d572ff2eb68cfc 100644 (file)
@@ -74,6 +74,7 @@ void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
                v2 += 4;
        } while (--lines > 0);
 
+       disable_kernel_altivec();
        preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_2);
@@ -102,6 +103,7 @@ void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
                v3 += 4;
        } while (--lines > 0);
 
+       disable_kernel_altivec();
        preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_3);
@@ -135,6 +137,7 @@ void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
                v4 += 4;
        } while (--lines > 0);
 
+       disable_kernel_altivec();
        preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_4);
@@ -172,6 +175,7 @@ void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
                v5 += 4;
        } while (--lines > 0);
 
+       disable_kernel_altivec();
        preempt_enable();
 }
 EXPORT_SYMBOL(xor_altivec_5);
index 5810967511d4d01ddef3b8ec96de9cf2ab40a01b..31a5d42df8c9af2ea0af8d62420b4e073fff3f29 100644 (file)
@@ -110,10 +110,10 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
                unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_HWWRITE;
 
                pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
-               pmd_val(*pmdp++) = val;
-               pmd_val(*pmdp++) = val;
-               pmd_val(*pmdp++) = val;
-               pmd_val(*pmdp++) = val;
+               *pmdp++ = __pmd(val);
+               *pmdp++ = __pmd(val);
+               *pmdp++ = __pmd(val);
+               *pmdp++ = __pmd(val);
 
                v += LARGE_PAGE_SIZE_16M;
                p += LARGE_PAGE_SIZE_16M;
@@ -125,7 +125,7 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
                unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_HWWRITE;
 
                pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
-               pmd_val(*pmdp) = val;
+               *pmdp = __pmd(val);
 
                v += LARGE_PAGE_SIZE_4M;
                p += LARGE_PAGE_SIZE_4M;
index 3eb73a38220de34379de3bad092b06d40f051f53..1ffeda85c08615f6f1583995608892f1058fb284 100644 (file)
@@ -14,10 +14,13 @@ obj-$(CONFIG_PPC_MMU_NOHASH)        += mmu_context_nohash.o tlb_nohash.o \
 obj-$(CONFIG_PPC_BOOK3E)       += tlb_low_$(CONFIG_WORD_SIZE)e.o
 hash64-$(CONFIG_PPC_NATIVE)    := hash_native_64.o
 obj-$(CONFIG_PPC_STD_MMU_64)   += hash_utils_64.o slb_low.o slb.o $(hash64-y)
-obj-$(CONFIG_PPC_STD_MMU_32)   += ppc_mmu_32.o
-obj-$(CONFIG_PPC_STD_MMU)      += hash_low_$(CONFIG_WORD_SIZE).o \
-                                  tlb_hash$(CONFIG_WORD_SIZE).o \
+obj-$(CONFIG_PPC_STD_MMU_32)   += ppc_mmu_32.o hash_low_32.o
+obj-$(CONFIG_PPC_STD_MMU)      += tlb_hash$(CONFIG_WORD_SIZE).o \
                                   mmu_context_hash$(CONFIG_WORD_SIZE).o
+ifeq ($(CONFIG_PPC_STD_MMU_64),y)
+obj-$(CONFIG_PPC_4K_PAGES)     += hash64_4k.o
+obj-$(CONFIG_PPC_64K_PAGES)    += hash64_64k.o
+endif
 obj-$(CONFIG_PPC_ICSWX)                += icswx.o
 obj-$(CONFIG_PPC_ICSWX_PID)    += icswx_pid.o
 obj-$(CONFIG_40x)              += 40x_mmu.o
diff --git a/arch/powerpc/mm/hash64_4k.c b/arch/powerpc/mm/hash64_4k.c
new file mode 100644 (file)
index 0000000..e7c0454
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Copyright IBM Corporation, 2015
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#include <linux/mm.h>
+#include <asm/machdep.h>
+#include <asm/mmu.h>
+
+int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
+                  pte_t *ptep, unsigned long trap, unsigned long flags,
+                  int ssize, int subpg_prot)
+{
+       unsigned long hpte_group;
+       unsigned long rflags, pa;
+       unsigned long old_pte, new_pte;
+       unsigned long vpn, hash, slot;
+       unsigned long shift = mmu_psize_defs[MMU_PAGE_4K].shift;
+
+       /*
+        * atomically mark the linux large page PTE busy and dirty
+        */
+       do {
+               pte_t pte = READ_ONCE(*ptep);
+
+               old_pte = pte_val(pte);
+               /* If PTE busy, retry the access */
+               if (unlikely(old_pte & _PAGE_BUSY))
+                       return 0;
+               /* If PTE permissions don't match, take page fault */
+               if (unlikely(access & ~old_pte))
+                       return 1;
+               /*
+                * Try to lock the PTE, add ACCESSED and DIRTY if it was
+                * a write access. Since this is 4K insert of 64K page size
+                * also add _PAGE_COMBO
+                */
+               new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE;
+               if (access & _PAGE_RW)
+                       new_pte |= _PAGE_DIRTY;
+       } while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
+                                         old_pte, new_pte));
+       /*
+        * PP bits. _PAGE_USER is already PP bit 0x2, so we only
+        * need to add in 0x1 if it's a read-only user page
+        */
+       rflags = htab_convert_pte_flags(new_pte);
+
+       if (!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
+           !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+               rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
+
+       vpn  = hpt_vpn(ea, vsid, ssize);
+       if (unlikely(old_pte & _PAGE_HASHPTE)) {
+               /*
+                * There MIGHT be an HPTE for this pte
+                */
+               hash = hpt_hash(vpn, shift, ssize);
+               if (old_pte & _PAGE_F_SECOND)
+                       hash = ~hash;
+               slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+               slot += (old_pte & _PAGE_F_GIX) >> _PAGE_F_GIX_SHIFT;
+
+               if (ppc_md.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_4K,
+                                        MMU_PAGE_4K, ssize, flags) == -1)
+                       old_pte &= ~_PAGE_HPTEFLAGS;
+       }
+
+       if (likely(!(old_pte & _PAGE_HASHPTE))) {
+
+               pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
+               hash = hpt_hash(vpn, shift, ssize);
+
+repeat:
+               hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+
+               /* Insert into the hash table, primary slot */
+               slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+                                 MMU_PAGE_4K, MMU_PAGE_4K, ssize);
+               /*
+                * Primary is full, try the secondary
+                */
+               if (unlikely(slot == -1)) {
+                       hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+                       slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
+                                                 rflags, HPTE_V_SECONDARY,
+                                                 MMU_PAGE_4K, MMU_PAGE_4K, ssize);
+                       if (slot == -1) {
+                               if (mftb() & 0x1)
+                                       hpte_group = ((hash & htab_hash_mask) *
+                                                     HPTES_PER_GROUP) & ~0x7UL;
+                               ppc_md.hpte_remove(hpte_group);
+                               /*
+                                * FIXME!! Should be try the group from which we removed ?
+                                */
+                               goto repeat;
+                       }
+               }
+               /*
+                * Hypervisor failure. Restore old pmd and return -1
+                * similar to __hash_page_*
+                */
+               if (unlikely(slot == -2)) {
+                       *ptep = __pte(old_pte);
+                       hash_failure_debug(ea, access, vsid, trap, ssize,
+                                          MMU_PAGE_4K, MMU_PAGE_4K, old_pte);
+                       return -1;
+               }
+               new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
+               new_pte |= (slot << _PAGE_F_GIX_SHIFT) & (_PAGE_F_SECOND | _PAGE_F_GIX);
+       }
+       *ptep = __pte(new_pte & ~_PAGE_BUSY);
+       return 0;
+}
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
new file mode 100644 (file)
index 0000000..0762c1e
--- /dev/null
@@ -0,0 +1,322 @@
+/*
+ * Copyright IBM Corporation, 2015
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#include <linux/mm.h>
+#include <asm/machdep.h>
+#include <asm/mmu.h>
+/*
+ * index from 0 - 15
+ */
+bool __rpte_sub_valid(real_pte_t rpte, unsigned long index)
+{
+       unsigned long g_idx;
+       unsigned long ptev = pte_val(rpte.pte);
+
+       g_idx = (ptev & _PAGE_COMBO_VALID) >> _PAGE_F_GIX_SHIFT;
+       index = index >> 2;
+       if (g_idx & (0x1 << index))
+               return true;
+       else
+               return false;
+}
+/*
+ * index from 0 - 15
+ */
+static unsigned long mark_subptegroup_valid(unsigned long ptev, unsigned long index)
+{
+       unsigned long g_idx;
+
+       if (!(ptev & _PAGE_COMBO))
+               return ptev;
+       index = index >> 2;
+       g_idx = 0x1 << index;
+
+       return ptev | (g_idx << _PAGE_F_GIX_SHIFT);
+}
+
+int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
+                  pte_t *ptep, unsigned long trap, unsigned long flags,
+                  int ssize, int subpg_prot)
+{
+       real_pte_t rpte;
+       unsigned long *hidxp;
+       unsigned long hpte_group;
+       unsigned int subpg_index;
+       unsigned long rflags, pa, hidx;
+       unsigned long old_pte, new_pte, subpg_pte;
+       unsigned long vpn, hash, slot;
+       unsigned long shift = mmu_psize_defs[MMU_PAGE_4K].shift;
+
+       /*
+        * atomically mark the linux large page PTE busy and dirty
+        */
+       do {
+               pte_t pte = READ_ONCE(*ptep);
+
+               old_pte = pte_val(pte);
+               /* If PTE busy, retry the access */
+               if (unlikely(old_pte & _PAGE_BUSY))
+                       return 0;
+               /* If PTE permissions don't match, take page fault */
+               if (unlikely(access & ~old_pte))
+                       return 1;
+               /*
+                * Try to lock the PTE, add ACCESSED and DIRTY if it was
+                * a write access. Since this is 4K insert of 64K page size
+                * also add _PAGE_COMBO
+                */
+               new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED | _PAGE_COMBO;
+               if (access & _PAGE_RW)
+                       new_pte |= _PAGE_DIRTY;
+       } while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
+                                         old_pte, new_pte));
+       /*
+        * Handle the subpage protection bits
+        */
+       subpg_pte = new_pte & ~subpg_prot;
+       rflags = htab_convert_pte_flags(subpg_pte);
+
+       if (!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
+           !cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
+
+               /*
+                * No CPU has hugepages but lacks no execute, so we
+                * don't need to worry about that case
+                */
+               rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
+       }
+
+       subpg_index = (ea & (PAGE_SIZE - 1)) >> shift;
+       vpn  = hpt_vpn(ea, vsid, ssize);
+       rpte = __real_pte(__pte(old_pte), ptep);
+       /*
+        *None of the sub 4k page is hashed
+        */
+       if (!(old_pte & _PAGE_HASHPTE))
+               goto htab_insert_hpte;
+       /*
+        * Check if the pte was already inserted into the hash table
+        * as a 64k HW page, and invalidate the 64k HPTE if so.
+        */
+       if (!(old_pte & _PAGE_COMBO)) {
+               flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags);
+               old_pte &= ~_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND;
+               goto htab_insert_hpte;
+       }
+       /*
+        * Check for sub page valid and update
+        */
+       if (__rpte_sub_valid(rpte, subpg_index)) {
+               int ret;
+
+               hash = hpt_hash(vpn, shift, ssize);
+               hidx = __rpte_to_hidx(rpte, subpg_index);
+               if (hidx & _PTEIDX_SECONDARY)
+                       hash = ~hash;
+               slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+               slot += hidx & _PTEIDX_GROUP_IX;
+
+               ret = ppc_md.hpte_updatepp(slot, rflags, vpn,
+                                          MMU_PAGE_4K, MMU_PAGE_4K,
+                                          ssize, flags);
+               /*
+                *if we failed because typically the HPTE wasn't really here
+                * we try an insertion.
+                */
+               if (ret == -1)
+                       goto htab_insert_hpte;
+
+               *ptep = __pte(new_pte & ~_PAGE_BUSY);
+               return 0;
+       }
+
+htab_insert_hpte:
+       /*
+        * handle _PAGE_4K_PFN case
+        */
+       if (old_pte & _PAGE_4K_PFN) {
+               /*
+                * All the sub 4k page have the same
+                * physical address.
+                */
+               pa = pte_pfn(__pte(old_pte)) << HW_PAGE_SHIFT;
+       } else {
+               pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
+               pa += (subpg_index << shift);
+       }
+       hash = hpt_hash(vpn, shift, ssize);
+repeat:
+       hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+
+       /* Insert into the hash table, primary slot */
+       slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+                                 MMU_PAGE_4K, MMU_PAGE_4K, ssize);
+       /*
+        * Primary is full, try the secondary
+        */
+       if (unlikely(slot == -1)) {
+               hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+               slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
+                                         rflags, HPTE_V_SECONDARY,
+                                         MMU_PAGE_4K, MMU_PAGE_4K, ssize);
+               if (slot == -1) {
+                       if (mftb() & 0x1)
+                               hpte_group = ((hash & htab_hash_mask) *
+                                             HPTES_PER_GROUP) & ~0x7UL;
+                       ppc_md.hpte_remove(hpte_group);
+                       /*
+                        * FIXME!! Should be try the group from which we removed ?
+                        */
+                       goto repeat;
+               }
+       }
+       /*
+        * Hypervisor failure. Restore old pmd and return -1
+        * similar to __hash_page_*
+        */
+       if (unlikely(slot == -2)) {
+               *ptep = __pte(old_pte);
+               hash_failure_debug(ea, access, vsid, trap, ssize,
+                                  MMU_PAGE_4K, MMU_PAGE_4K, old_pte);
+               return -1;
+       }
+       /*
+        * Insert slot number & secondary bit in PTE second half,
+        * clear _PAGE_BUSY and set appropriate HPTE slot bit
+        * Since we have _PAGE_BUSY set on ptep, we can be sure
+        * nobody is undating hidx.
+        */
+       hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
+       rpte.hidx &= ~(0xfUL << (subpg_index << 2));
+       *hidxp = rpte.hidx  | (slot << (subpg_index << 2));
+       new_pte = mark_subptegroup_valid(new_pte, subpg_index);
+       new_pte |=  _PAGE_HASHPTE;
+       /*
+        * check __real_pte for details on matching smp_rmb()
+        */
+       smp_wmb();
+       *ptep = __pte(new_pte & ~_PAGE_BUSY);
+       return 0;
+}
+
+int __hash_page_64K(unsigned long ea, unsigned long access,
+                   unsigned long vsid, pte_t *ptep, unsigned long trap,
+                   unsigned long flags, int ssize)
+{
+
+       unsigned long hpte_group;
+       unsigned long rflags, pa;
+       unsigned long old_pte, new_pte;
+       unsigned long vpn, hash, slot;
+       unsigned long shift = mmu_psize_defs[MMU_PAGE_64K].shift;
+
+       /*
+        * atomically mark the linux large page PTE busy and dirty
+        */
+       do {
+               pte_t pte = READ_ONCE(*ptep);
+
+               old_pte = pte_val(pte);
+               /* If PTE busy, retry the access */
+               if (unlikely(old_pte & _PAGE_BUSY))
+                       return 0;
+               /* If PTE permissions don't match, take page fault */
+               if (unlikely(access & ~old_pte))
+                       return 1;
+               /*
+                * Check if PTE has the cache-inhibit bit set
+                * If so, bail out and refault as a 4k page
+                */
+               if (!mmu_has_feature(MMU_FTR_CI_LARGE_PAGE) &&
+                   unlikely(old_pte & _PAGE_NO_CACHE))
+                       return 0;
+               /*
+                * Try to lock the PTE, add ACCESSED and DIRTY if it was
+                * a write access. Since this is 4K insert of 64K page size
+                * also add _PAGE_COMBO
+                */
+               new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
+               if (access & _PAGE_RW)
+                       new_pte |= _PAGE_DIRTY;
+       } while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
+                                         old_pte, new_pte));
+
+       rflags = htab_convert_pte_flags(new_pte);
+
+       if (!cpu_has_feature(CPU_FTR_NOEXECUTE) &&
+           !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+               rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
+
+       vpn  = hpt_vpn(ea, vsid, ssize);
+       if (unlikely(old_pte & _PAGE_HASHPTE)) {
+               /*
+                * There MIGHT be an HPTE for this pte
+                */
+               hash = hpt_hash(vpn, shift, ssize);
+               if (old_pte & _PAGE_F_SECOND)
+                       hash = ~hash;
+               slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+               slot += (old_pte & _PAGE_F_GIX) >> _PAGE_F_GIX_SHIFT;
+
+               if (ppc_md.hpte_updatepp(slot, rflags, vpn, MMU_PAGE_64K,
+                                        MMU_PAGE_64K, ssize, flags) == -1)
+                       old_pte &= ~_PAGE_HPTEFLAGS;
+       }
+
+       if (likely(!(old_pte & _PAGE_HASHPTE))) {
+
+               pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
+               hash = hpt_hash(vpn, shift, ssize);
+
+repeat:
+               hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+
+               /* Insert into the hash table, primary slot */
+               slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+                                 MMU_PAGE_64K, MMU_PAGE_64K, ssize);
+               /*
+                * Primary is full, try the secondary
+                */
+               if (unlikely(slot == -1)) {
+                       hpte_group = ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
+                       slot = ppc_md.hpte_insert(hpte_group, vpn, pa,
+                                                 rflags, HPTE_V_SECONDARY,
+                                                 MMU_PAGE_64K, MMU_PAGE_64K, ssize);
+                       if (slot == -1) {
+                               if (mftb() & 0x1)
+                                       hpte_group = ((hash & htab_hash_mask) *
+                                                     HPTES_PER_GROUP) & ~0x7UL;
+                               ppc_md.hpte_remove(hpte_group);
+                               /*
+                                * FIXME!! Should be try the group from which we removed ?
+                                */
+                               goto repeat;
+                       }
+               }
+               /*
+                * Hypervisor failure. Restore old pmd and return -1
+                * similar to __hash_page_*
+                */
+               if (unlikely(slot == -2)) {
+                       *ptep = __pte(old_pte);
+                       hash_failure_debug(ea, access, vsid, trap, ssize,
+                                          MMU_PAGE_64K, MMU_PAGE_64K, old_pte);
+                       return -1;
+               }
+               new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
+               new_pte |= (slot << _PAGE_F_GIX_SHIFT) & (_PAGE_F_SECOND | _PAGE_F_GIX);
+       }
+       *ptep = __pte(new_pte & ~_PAGE_BUSY);
+       return 0;
+}
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
deleted file mode 100644 (file)
index 3b49e32..0000000
+++ /dev/null
@@ -1,1003 +0,0 @@
-/*
- * ppc64 MMU hashtable management routines
- *
- * (c) Copyright IBM Corp. 2003, 2005
- *
- * Maintained by: Benjamin Herrenschmidt
- *                <benh@kernel.crashing.org>
- *
- * This file is covered by the GNU Public Licence v2 as
- * described in the kernel's COPYING file.
- */
-
-#include <asm/reg.h>
-#include <asm/pgtable.h>
-#include <asm/mmu.h>
-#include <asm/page.h>
-#include <asm/types.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-#include <asm/cputable.h>
-
-       .text
-
-/*
- * Stackframe:
- *             
- *         +-> Back chain                      (SP + 256)
- *         |   General register save area      (SP + 112)
- *         |   Parameter save area             (SP + 48)
- *         |   TOC save area                   (SP + 40)
- *         |   link editor doubleword          (SP + 32)
- *         |   compiler doubleword             (SP + 24)
- *         |   LR save area                    (SP + 16)
- *         |   CR save area                    (SP + 8)
- * SP ---> +-- Back chain                      (SP + 0)
- */
-
-#ifndef CONFIG_PPC_64K_PAGES
-
-/*****************************************************************************
- *                                                                           *
- *           4K SW & 4K HW pages implementation                              *
- *                                                                           *
- *****************************************************************************/
-
-
-/*
- * _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
- *              pte_t *ptep, unsigned long trap, unsigned long flags,
- *              int ssize)
- *
- * Adds a 4K page to the hash table in a segment of 4K pages only
- */
-
-_GLOBAL(__hash_page_4K)
-       mflr    r0
-       std     r0,16(r1)
-       stdu    r1,-STACKFRAMESIZE(r1)
-       /* Save all params that we need after a function call */
-       std     r6,STK_PARAM(R6)(r1)
-       std     r8,STK_PARAM(R8)(r1)
-       std     r9,STK_PARAM(R9)(r1)
-       
-       /* Save non-volatile registers.
-        * r31 will hold "old PTE"
-        * r30 is "new PTE"
-        * r29 is vpn
-        * r28 is a hash value
-        * r27 is hashtab mask (maybe dynamic patched instead ?)
-        */
-       std     r27,STK_REG(R27)(r1)
-       std     r28,STK_REG(R28)(r1)
-       std     r29,STK_REG(R29)(r1)
-       std     r30,STK_REG(R30)(r1)
-       std     r31,STK_REG(R31)(r1)
-       
-       /* Step 1:
-        *
-        * Check permissions, atomically mark the linux PTE busy
-        * and hashed.
-        */ 
-1:
-       ldarx   r31,0,r6
-       /* Check access rights (access & ~(pte_val(*ptep))) */
-       andc.   r0,r4,r31
-       bne-    htab_wrong_access
-       /* Check if PTE is busy */
-       andi.   r0,r31,_PAGE_BUSY
-       /* If so, just bail out and refault if needed. Someone else
-        * is changing this PTE anyway and might hash it.
-        */
-       bne-    htab_bail_ok
-
-       /* Prepare new PTE value (turn access RW into DIRTY, then
-        * add BUSY,HASHPTE and ACCESSED)
-        */
-       rlwinm  r30,r4,32-9+7,31-7,31-7 /* _PAGE_RW -> _PAGE_DIRTY */
-       or      r30,r30,r31
-       ori     r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
-       /* Write the linux PTE atomically (setting busy) */
-       stdcx.  r30,0,r6
-       bne-    1b
-       isync
-
-       /* Step 2:
-        *
-        * Insert/Update the HPTE in the hash table. At this point,
-        * r4 (access) is re-useable, we use it for the new HPTE flags
-        */
-
-BEGIN_FTR_SECTION
-       cmpdi   r9,0                    /* check segment size */
-       bne     3f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
-       /* Calc vpn and put it in r29 */
-       sldi    r29,r5,SID_SHIFT - VPN_SHIFT
-       rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
-       or      r29,r28,r29
-       /*
-        * Calculate hash value for primary slot and store it in r28
-        * r3 = va, r5 = vsid
-        * r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
-        */
-       rldicl  r0,r3,64-12,48
-       xor     r28,r5,r0               /* hash */
-       b       4f
-
-3:     /* Calc vpn and put it in r29 */
-       sldi    r29,r5,SID_SHIFT_1T - VPN_SHIFT
-       rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
-       or      r29,r28,r29
-
-       /*
-        * calculate hash value for primary slot and
-        * store it in r28 for 1T segment
-        * r3 = va, r5 = vsid
-        */
-       sldi    r28,r5,25               /* vsid << 25 */
-       /* r0 =  (va >> 12) & ((1ul << (40 - 12)) -1) */
-       rldicl  r0,r3,64-12,36
-       xor     r28,r28,r5              /* vsid ^ ( vsid << 25) */
-       xor     r28,r28,r0              /* hash */
-
-       /* Convert linux PTE bits into HW equivalents */
-4:     andi.   r3,r30,0x1fe            /* Get basic set of flags */
-       xori    r3,r3,HPTE_R_N          /* _PAGE_EXEC -> NOEXEC */
-       rlwinm  r0,r30,32-9+1,30,30     /* _PAGE_RW -> _PAGE_USER (r0) */
-       rlwinm  r4,r30,32-7+1,30,30     /* _PAGE_DIRTY -> _PAGE_USER (r4) */
-       and     r0,r0,r4                /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
-       andc    r0,r30,r0               /* r0 = pte & ~r0 */
-       rlwimi  r3,r0,32-1,31,31        /* Insert result into PP lsb */
-       /*
-        * Always add "C" bit for perf. Memory coherence is always enabled
-        */
-       ori     r3,r3,HPTE_R_C | HPTE_R_M
-
-       /* We eventually do the icache sync here (maybe inline that
-        * code rather than call a C function...) 
-        */
-BEGIN_FTR_SECTION
-       mr      r4,r30
-       mr      r5,r7
-       bl      hash_page_do_lazy_icache
-END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
-
-       /* At this point, r3 contains new PP bits, save them in
-        * place of "access" in the param area (sic)
-        */
-       std     r3,STK_PARAM(R4)(r1)
-
-       /* Get htab_hash_mask */
-       ld      r4,htab_hash_mask@got(2)
-       ld      r27,0(r4)       /* htab_hash_mask -> r27 */
-
-       /* Check if we may already be in the hashtable, in this case, we
-        * go to out-of-line code to try to modify the HPTE
-        */
-       andi.   r0,r31,_PAGE_HASHPTE
-       bne     htab_modify_pte
-
-htab_insert_pte:
-       /* Clear hpte bits in new pte (we also clear BUSY btw) and
-        * add _PAGE_HASHPTE
-        */
-       lis     r0,_PAGE_HPTEFLAGS@h
-       ori     r0,r0,_PAGE_HPTEFLAGS@l
-       andc    r30,r30,r0
-       ori     r30,r30,_PAGE_HASHPTE
-
-       /* physical address r5 */
-       rldicl  r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
-       sldi    r5,r5,PAGE_SHIFT
-
-       /* Calculate primary group hash */
-       and     r0,r28,r27
-       rldicr  r3,r0,3,63-3            /* r3 = (hash & mask) << 3 */
-
-       /* Call ppc_md.hpte_insert */
-       ld      r6,STK_PARAM(R4)(r1)    /* Retrieve new pp bits */
-       mr      r4,r29                  /* Retrieve vpn */
-       li      r7,0                    /* !bolted, !secondary */
-       li      r8,MMU_PAGE_4K          /* page size */
-       li      r9,MMU_PAGE_4K          /* actual page size */
-       ld      r10,STK_PARAM(R9)(r1)   /* segment size */
-.globl htab_call_hpte_insert1
-htab_call_hpte_insert1:
-       bl      .                       /* Patched by htab_finish_init() */
-       cmpdi   0,r3,0
-       bge     htab_pte_insert_ok      /* Insertion successful */
-       cmpdi   0,r3,-2                 /* Critical failure */
-       beq-    htab_pte_insert_failure
-
-       /* Now try secondary slot */
-       
-       /* physical address r5 */
-       rldicl  r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
-       sldi    r5,r5,PAGE_SHIFT
-
-       /* Calculate secondary group hash */
-       andc    r0,r27,r28
-       rldicr  r3,r0,3,63-3    /* r0 = (~hash & mask) << 3 */
-       
-       /* Call ppc_md.hpte_insert */
-       ld      r6,STK_PARAM(R4)(r1)    /* Retrieve new pp bits */
-       mr      r4,r29                  /* Retrieve vpn */
-       li      r7,HPTE_V_SECONDARY     /* !bolted, secondary */
-       li      r8,MMU_PAGE_4K          /* page size */
-       li      r9,MMU_PAGE_4K          /* actual page size */
-       ld      r10,STK_PARAM(R9)(r1)   /* segment size */
-.globl htab_call_hpte_insert2
-htab_call_hpte_insert2:
-       bl      .                       /* Patched by htab_finish_init() */
-       cmpdi   0,r3,0
-       bge+    htab_pte_insert_ok      /* Insertion successful */
-       cmpdi   0,r3,-2                 /* Critical failure */
-       beq-    htab_pte_insert_failure
-
-       /* Both are full, we need to evict something */
-       mftb    r0
-       /* Pick a random group based on TB */
-       andi.   r0,r0,1
-       mr      r5,r28
-       bne     2f
-       not     r5,r5
-2:     and     r0,r5,r27
-       rldicr  r3,r0,3,63-3    /* r0 = (hash & mask) << 3 */   
-       /* Call ppc_md.hpte_remove */
-.globl htab_call_hpte_remove
-htab_call_hpte_remove:
-       bl      .                       /* Patched by htab_finish_init() */
-
-       /* Try all again */
-       b       htab_insert_pte 
-
-htab_bail_ok:
-       li      r3,0
-       b       htab_bail
-
-htab_pte_insert_ok:
-       /* Insert slot number & secondary bit in PTE */
-       rldimi  r30,r3,12,63-15
-               
-       /* Write out the PTE with a normal write
-        * (maybe add eieio may be good still ?)
-        */
-htab_write_out_pte:
-       ld      r6,STK_PARAM(R6)(r1)
-       std     r30,0(r6)
-       li      r3, 0
-htab_bail:
-       ld      r27,STK_REG(R27)(r1)
-       ld      r28,STK_REG(R28)(r1)
-       ld      r29,STK_REG(R29)(r1)
-       ld      r30,STK_REG(R30)(r1)
-       ld      r31,STK_REG(R31)(r1)
-       addi    r1,r1,STACKFRAMESIZE
-       ld      r0,16(r1)
-       mtlr    r0
-       blr
-
-htab_modify_pte:
-       /* Keep PP bits in r4 and slot idx from the PTE around in r3 */
-       mr      r4,r3
-       rlwinm  r3,r31,32-12,29,31
-
-       /* Secondary group ? if yes, get a inverted hash value */
-       mr      r5,r28
-       andi.   r0,r31,_PAGE_SECONDARY
-       beq     1f
-       not     r5,r5
-1:
-       /* Calculate proper slot value for ppc_md.hpte_updatepp */
-       and     r0,r5,r27
-       rldicr  r0,r0,3,63-3    /* r0 = (hash & mask) << 3 */
-       add     r3,r0,r3        /* add slot idx */
-
-       /* Call ppc_md.hpte_updatepp */
-       mr      r5,r29                  /* vpn */
-       li      r6,MMU_PAGE_4K          /* base page size */
-       li      r7,MMU_PAGE_4K          /* actual page size */
-       ld      r8,STK_PARAM(R9)(r1)    /* segment size */
-       ld      r9,STK_PARAM(R8)(r1)    /* get "flags" param */
-.globl htab_call_hpte_updatepp
-htab_call_hpte_updatepp:
-       bl      .                       /* Patched by htab_finish_init() */
-
-       /* if we failed because typically the HPTE wasn't really here
-        * we try an insertion. 
-        */
-       cmpdi   0,r3,-1
-       beq-    htab_insert_pte
-
-       /* Clear the BUSY bit and Write out the PTE */
-       li      r0,_PAGE_BUSY
-       andc    r30,r30,r0
-       b       htab_write_out_pte
-
-htab_wrong_access:
-       /* Bail out clearing reservation */
-       stdcx.  r31,0,r6
-       li      r3,1
-       b       htab_bail
-
-htab_pte_insert_failure:
-       /* Bail out restoring old PTE */
-       ld      r6,STK_PARAM(R6)(r1)
-       std     r31,0(r6)
-       li      r3,-1
-       b       htab_bail
-
-
-#else /* CONFIG_PPC_64K_PAGES */
-
-
-/*****************************************************************************
- *                                                                           *
- *           64K SW & 4K or 64K HW in a 4K segment pages implementation      *
- *                                                                           *
- *****************************************************************************/
-
-/* _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
- *              pte_t *ptep, unsigned long trap, unsigned local flags,
- *              int ssize, int subpg_prot)
- */
-
-/*
- * For now, we do NOT implement Admixed pages
- */
-_GLOBAL(__hash_page_4K)
-       mflr    r0
-       std     r0,16(r1)
-       stdu    r1,-STACKFRAMESIZE(r1)
-       /* Save all params that we need after a function call */
-       std     r6,STK_PARAM(R6)(r1)
-       std     r8,STK_PARAM(R8)(r1)
-       std     r9,STK_PARAM(R9)(r1)
-
-       /* Save non-volatile registers.
-        * r31 will hold "old PTE"
-        * r30 is "new PTE"
-        * r29 is vpn
-        * r28 is a hash value
-        * r27 is hashtab mask (maybe dynamic patched instead ?)
-        * r26 is the hidx mask
-        * r25 is the index in combo page
-        */
-       std     r25,STK_REG(R25)(r1)
-       std     r26,STK_REG(R26)(r1)
-       std     r27,STK_REG(R27)(r1)
-       std     r28,STK_REG(R28)(r1)
-       std     r29,STK_REG(R29)(r1)
-       std     r30,STK_REG(R30)(r1)
-       std     r31,STK_REG(R31)(r1)
-
-       /* Step 1:
-        *
-        * Check permissions, atomically mark the linux PTE busy
-        * and hashed.
-        */
-1:
-       ldarx   r31,0,r6
-       /* Check access rights (access & ~(pte_val(*ptep))) */
-       andc.   r0,r4,r31
-       bne-    htab_wrong_access
-       /* Check if PTE is busy */
-       andi.   r0,r31,_PAGE_BUSY
-       /* If so, just bail out and refault if needed. Someone else
-        * is changing this PTE anyway and might hash it.
-        */
-       bne-    htab_bail_ok
-       /* Prepare new PTE value (turn access RW into DIRTY, then
-        * add BUSY and ACCESSED)
-        */
-       rlwinm  r30,r4,32-9+7,31-7,31-7 /* _PAGE_RW -> _PAGE_DIRTY */
-       or      r30,r30,r31
-       ori     r30,r30,_PAGE_BUSY | _PAGE_ACCESSED
-       oris    r30,r30,_PAGE_COMBO@h
-       /* Write the linux PTE atomically (setting busy) */
-       stdcx.  r30,0,r6
-       bne-    1b
-       isync
-
-       /* Step 2:
-        *
-        * Insert/Update the HPTE in the hash table. At this point,
-        * r4 (access) is re-useable, we use it for the new HPTE flags
-        */
-
-       /* Load the hidx index */
-       rldicl  r25,r3,64-12,60
-
-BEGIN_FTR_SECTION
-       cmpdi   r9,0                    /* check segment size */
-       bne     3f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
-       /* Calc vpn and put it in r29 */
-       sldi    r29,r5,SID_SHIFT - VPN_SHIFT
-       /*
-        * clrldi r3,r3,64 - SID_SHIFT -->  ea & 0xfffffff
-        * srdi  r28,r3,VPN_SHIFT
-        */
-       rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
-       or      r29,r28,r29
-       /*
-        * Calculate hash value for primary slot and store it in r28
-        * r3 = va, r5 = vsid
-        * r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
-        */
-       rldicl  r0,r3,64-12,48
-       xor     r28,r5,r0               /* hash */
-       b       4f
-
-3:     /* Calc vpn and put it in r29 */
-       sldi    r29,r5,SID_SHIFT_1T - VPN_SHIFT
-       /*
-        * clrldi r3,r3,64 - SID_SHIFT_1T -->  ea & 0xffffffffff
-        * srdi r28,r3,VPN_SHIFT
-        */
-       rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
-       or      r29,r28,r29
-
-       /*
-        * Calculate hash value for primary slot and
-        * store it in r28  for 1T segment
-        * r3 = va, r5 = vsid
-        */
-       sldi    r28,r5,25               /* vsid << 25 */
-       /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */
-       rldicl  r0,r3,64-12,36
-       xor     r28,r28,r5              /* vsid ^ ( vsid << 25) */
-       xor     r28,r28,r0              /* hash */
-
-       /* Convert linux PTE bits into HW equivalents */
-4:
-#ifdef CONFIG_PPC_SUBPAGE_PROT
-       andc    r10,r30,r10
-       andi.   r3,r10,0x1fe            /* Get basic set of flags */
-       rlwinm  r0,r10,32-9+1,30,30     /* _PAGE_RW -> _PAGE_USER (r0) */
-#else
-       andi.   r3,r30,0x1fe            /* Get basic set of flags */
-       rlwinm  r0,r30,32-9+1,30,30     /* _PAGE_RW -> _PAGE_USER (r0) */
-#endif
-       xori    r3,r3,HPTE_R_N          /* _PAGE_EXEC -> NOEXEC */
-       rlwinm  r4,r30,32-7+1,30,30     /* _PAGE_DIRTY -> _PAGE_USER (r4) */
-       and     r0,r0,r4                /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
-       andc    r0,r3,r0                /* r0 = pte & ~r0 */
-       rlwimi  r3,r0,32-1,31,31        /* Insert result into PP lsb */
-       /*
-        * Always add "C" bit for perf. Memory coherence is always enabled
-        */
-       ori     r3,r3,HPTE_R_C | HPTE_R_M
-
-       /* We eventually do the icache sync here (maybe inline that
-        * code rather than call a C function...)
-        */
-BEGIN_FTR_SECTION
-       mr      r4,r30
-       mr      r5,r7
-       bl      hash_page_do_lazy_icache
-END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
-
-       /* At this point, r3 contains new PP bits, save them in
-        * place of "access" in the param area (sic)
-        */
-       std     r3,STK_PARAM(R4)(r1)
-
-       /* Get htab_hash_mask */
-       ld      r4,htab_hash_mask@got(2)
-       ld      r27,0(r4)       /* htab_hash_mask -> r27 */
-
-       /* Check if we may already be in the hashtable, in this case, we
-        * go to out-of-line code to try to modify the HPTE. We look for
-        * the bit at (1 >> (index + 32))
-        */
-       rldicl. r0,r31,64-12,48
-       li      r26,0                   /* Default hidx */
-       beq     htab_insert_pte
-
-       /*
-        * Check if the pte was already inserted into the hash table
-        * as a 64k HW page, and invalidate the 64k HPTE if so.
-        */
-       andis.  r0,r31,_PAGE_COMBO@h
-       beq     htab_inval_old_hpte
-
-       ld      r6,STK_PARAM(R6)(r1)
-       ori     r26,r6,PTE_PAGE_HIDX_OFFSET /* Load the hidx mask. */
-       ld      r26,0(r26)
-       addi    r5,r25,36               /* Check actual HPTE_SUB bit, this */
-       rldcr.  r0,r31,r5,0             /* must match pgtable.h definition */
-       bne     htab_modify_pte
-
-htab_insert_pte:
-       /* real page number in r5, PTE RPN value + index */
-       andis.  r0,r31,_PAGE_4K_PFN@h
-       srdi    r5,r31,PTE_RPN_SHIFT
-       bne-    htab_special_pfn
-       sldi    r5,r5,PAGE_FACTOR
-       add     r5,r5,r25
-htab_special_pfn:
-       sldi    r5,r5,HW_PAGE_SHIFT
-
-       /* Calculate primary group hash */
-       and     r0,r28,r27
-       rldicr  r3,r0,3,63-3            /* r0 = (hash & mask) << 3 */
-
-       /* Call ppc_md.hpte_insert */
-       ld      r6,STK_PARAM(R4)(r1)    /* Retrieve new pp bits */
-       mr      r4,r29                  /* Retrieve vpn */
-       li      r7,0                    /* !bolted, !secondary */
-       li      r8,MMU_PAGE_4K          /* page size */
-       li      r9,MMU_PAGE_4K          /* actual page size */
-       ld      r10,STK_PARAM(R9)(r1)   /* segment size */
-.globl htab_call_hpte_insert1
-htab_call_hpte_insert1:
-       bl      .                       /* patched by htab_finish_init() */
-       cmpdi   0,r3,0
-       bge     htab_pte_insert_ok      /* Insertion successful */
-       cmpdi   0,r3,-2                 /* Critical failure */
-       beq-    htab_pte_insert_failure
-
-       /* Now try secondary slot */
-
-       /* real page number in r5, PTE RPN value + index */
-       andis.  r0,r31,_PAGE_4K_PFN@h
-       srdi    r5,r31,PTE_RPN_SHIFT
-       bne-    3f
-       sldi    r5,r5,PAGE_FACTOR
-       add     r5,r5,r25
-3:     sldi    r5,r5,HW_PAGE_SHIFT
-
-       /* Calculate secondary group hash */
-       andc    r0,r27,r28
-       rldicr  r3,r0,3,63-3            /* r0 = (~hash & mask) << 3 */
-
-       /* Call ppc_md.hpte_insert */
-       ld      r6,STK_PARAM(R4)(r1)    /* Retrieve new pp bits */
-       mr      r4,r29                  /* Retrieve vpn */
-       li      r7,HPTE_V_SECONDARY     /* !bolted, secondary */
-       li      r8,MMU_PAGE_4K          /* page size */
-       li      r9,MMU_PAGE_4K          /* actual page size */
-       ld      r10,STK_PARAM(R9)(r1)   /* segment size */
-.globl htab_call_hpte_insert2
-htab_call_hpte_insert2:
-       bl      .                       /* patched by htab_finish_init() */
-       cmpdi   0,r3,0
-       bge+    htab_pte_insert_ok      /* Insertion successful */
-       cmpdi   0,r3,-2                 /* Critical failure */
-       beq-    htab_pte_insert_failure
-
-       /* Both are full, we need to evict something */
-       mftb    r0
-       /* Pick a random group based on TB */
-       andi.   r0,r0,1
-       mr      r5,r28
-       bne     2f
-       not     r5,r5
-2:     and     r0,r5,r27
-       rldicr  r3,r0,3,63-3            /* r0 = (hash & mask) << 3 */
-       /* Call ppc_md.hpte_remove */
-.globl htab_call_hpte_remove
-htab_call_hpte_remove:
-       bl      .                       /* patched by htab_finish_init() */
-
-       /* Try all again */
-       b       htab_insert_pte
-
-       /*
-        * Call out to C code to invalidate an 64k HW HPTE that is
-        * useless now that the segment has been switched to 4k pages.
-        */
-htab_inval_old_hpte:
-       mr      r3,r29                  /* vpn */
-       mr      r4,r31                  /* PTE.pte */
-       li      r5,0                    /* PTE.hidx */
-       li      r6,MMU_PAGE_64K         /* psize */
-       ld      r7,STK_PARAM(R9)(r1)    /* ssize */
-       ld      r8,STK_PARAM(R8)(r1)    /* flags */
-       bl      flush_hash_page
-       /* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */
-       lis     r0,_PAGE_HPTE_SUB@h
-       ori     r0,r0,_PAGE_HPTE_SUB@l
-       andc    r30,r30,r0
-       b       htab_insert_pte
-       
-htab_bail_ok:
-       li      r3,0
-       b       htab_bail
-
-htab_pte_insert_ok:
-       /* Insert slot number & secondary bit in PTE second half,
-        * clear _PAGE_BUSY and set approriate HPTE slot bit
-        */
-       ld      r6,STK_PARAM(R6)(r1)
-       li      r0,_PAGE_BUSY
-       andc    r30,r30,r0
-       /* HPTE SUB bit */
-       li      r0,1
-       subfic  r5,r25,27               /* Must match bit position in */
-       sld     r0,r0,r5                /* pgtable.h */
-       or      r30,r30,r0
-       /* hindx */
-       sldi    r5,r25,2
-       sld     r3,r3,r5
-       li      r4,0xf
-       sld     r4,r4,r5
-       andc    r26,r26,r4
-       or      r26,r26,r3
-       ori     r5,r6,PTE_PAGE_HIDX_OFFSET
-       std     r26,0(r5)
-       lwsync
-       std     r30,0(r6)
-       li      r3, 0
-htab_bail:
-       ld      r25,STK_REG(R25)(r1)
-       ld      r26,STK_REG(R26)(r1)
-       ld      r27,STK_REG(R27)(r1)
-       ld      r28,STK_REG(R28)(r1)
-       ld      r29,STK_REG(R29)(r1)
-       ld      r30,STK_REG(R30)(r1)
-       ld      r31,STK_REG(R31)(r1)
-       addi    r1,r1,STACKFRAMESIZE
-       ld      r0,16(r1)
-       mtlr    r0
-       blr
-
-htab_modify_pte:
-       /* Keep PP bits in r4 and slot idx from the PTE around in r3 */
-       mr      r4,r3
-       sldi    r5,r25,2
-       srd     r3,r26,r5
-
-       /* Secondary group ? if yes, get a inverted hash value */
-       mr      r5,r28
-       andi.   r0,r3,0x8 /* page secondary ? */
-       beq     1f
-       not     r5,r5
-1:     andi.   r3,r3,0x7 /* extract idx alone */
-
-       /* Calculate proper slot value for ppc_md.hpte_updatepp */
-       and     r0,r5,r27
-       rldicr  r0,r0,3,63-3    /* r0 = (hash & mask) << 3 */
-       add     r3,r0,r3        /* add slot idx */
-
-       /* Call ppc_md.hpte_updatepp */
-       mr      r5,r29                  /* vpn */
-       li      r6,MMU_PAGE_4K          /* base page size */
-       li      r7,MMU_PAGE_4K          /* actual page size */
-       ld      r8,STK_PARAM(R9)(r1)    /* segment size */
-       ld      r9,STK_PARAM(R8)(r1)    /* get "flags" param */
-.globl htab_call_hpte_updatepp
-htab_call_hpte_updatepp:
-       bl      .                       /* patched by htab_finish_init() */
-
-       /* if we failed because typically the HPTE wasn't really here
-        * we try an insertion.
-        */
-       cmpdi   0,r3,-1
-       beq-    htab_insert_pte
-
-       /* Clear the BUSY bit and Write out the PTE */
-       li      r0,_PAGE_BUSY
-       andc    r30,r30,r0
-       ld      r6,STK_PARAM(R6)(r1)
-       std     r30,0(r6)
-       li      r3,0
-       b       htab_bail
-
-htab_wrong_access:
-       /* Bail out clearing reservation */
-       stdcx.  r31,0,r6
-       li      r3,1
-       b       htab_bail
-
-htab_pte_insert_failure:
-       /* Bail out restoring old PTE */
-       ld      r6,STK_PARAM(R6)(r1)
-       std     r31,0(r6)
-       li      r3,-1
-       b       htab_bail
-
-#endif /* CONFIG_PPC_64K_PAGES */
-
-#ifdef CONFIG_PPC_64K_PAGES
-
-/*****************************************************************************
- *                                                                           *
- *           64K SW & 64K HW in a 64K segment pages implementation           *
- *                                                                           *
- *****************************************************************************/
-
-_GLOBAL(__hash_page_64K)
-       mflr    r0
-       std     r0,16(r1)
-       stdu    r1,-STACKFRAMESIZE(r1)
-       /* Save all params that we need after a function call */
-       std     r6,STK_PARAM(R6)(r1)
-       std     r8,STK_PARAM(R8)(r1)
-       std     r9,STK_PARAM(R9)(r1)
-
-       /* Save non-volatile registers.
-        * r31 will hold "old PTE"
-        * r30 is "new PTE"
-        * r29 is vpn
-        * r28 is a hash value
-        * r27 is hashtab mask (maybe dynamic patched instead ?)
-        */
-       std     r27,STK_REG(R27)(r1)
-       std     r28,STK_REG(R28)(r1)
-       std     r29,STK_REG(R29)(r1)
-       std     r30,STK_REG(R30)(r1)
-       std     r31,STK_REG(R31)(r1)
-
-       /* Step 1:
-        *
-        * Check permissions, atomically mark the linux PTE busy
-        * and hashed.
-        */
-1:
-       ldarx   r31,0,r6
-       /* Check access rights (access & ~(pte_val(*ptep))) */
-       andc.   r0,r4,r31
-       bne-    ht64_wrong_access
-       /* Check if PTE is busy */
-       andi.   r0,r31,_PAGE_BUSY
-       /* If so, just bail out and refault if needed. Someone else
-        * is changing this PTE anyway and might hash it.
-        */
-       bne-    ht64_bail_ok
-BEGIN_FTR_SECTION
-       /* Check if PTE has the cache-inhibit bit set */
-       andi.   r0,r31,_PAGE_NO_CACHE
-       /* If so, bail out and refault as a 4k page */
-       bne-    ht64_bail_ok
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_CI_LARGE_PAGE)
-       /* Prepare new PTE value (turn access RW into DIRTY, then
-        * add BUSY and ACCESSED)
-        */
-       rlwinm  r30,r4,32-9+7,31-7,31-7 /* _PAGE_RW -> _PAGE_DIRTY */
-       or      r30,r30,r31
-       ori     r30,r30,_PAGE_BUSY | _PAGE_ACCESSED
-       /* Write the linux PTE atomically (setting busy) */
-       stdcx.  r30,0,r6
-       bne-    1b
-       isync
-
-       /* Step 2:
-        *
-        * Insert/Update the HPTE in the hash table. At this point,
-        * r4 (access) is re-useable, we use it for the new HPTE flags
-        */
-
-BEGIN_FTR_SECTION
-       cmpdi   r9,0                    /* check segment size */
-       bne     3f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
-       /* Calc vpn and put it in r29 */
-       sldi    r29,r5,SID_SHIFT - VPN_SHIFT
-       rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
-       or      r29,r28,r29
-
-       /* Calculate hash value for primary slot and store it in r28
-        * r3 = va, r5 = vsid
-        * r0 = (va >> 16) & ((1ul << (28 - 16)) -1)
-        */
-       rldicl  r0,r3,64-16,52
-       xor     r28,r5,r0               /* hash */
-       b       4f
-
-3:     /* Calc vpn and put it in r29 */
-       sldi    r29,r5,SID_SHIFT_1T - VPN_SHIFT
-       rldicl  r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
-       or      r29,r28,r29
-       /*
-        * calculate hash value for primary slot and
-        * store it in r28 for 1T segment
-        * r3 = va, r5 = vsid
-        */
-       sldi    r28,r5,25               /* vsid << 25 */
-       /* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */
-       rldicl  r0,r3,64-16,40
-       xor     r28,r28,r5              /* vsid ^ ( vsid << 25) */
-       xor     r28,r28,r0              /* hash */
-
-       /* Convert linux PTE bits into HW equivalents */
-4:     andi.   r3,r30,0x1fe            /* Get basic set of flags */
-       xori    r3,r3,HPTE_R_N          /* _PAGE_EXEC -> NOEXEC */
-       rlwinm  r0,r30,32-9+1,30,30     /* _PAGE_RW -> _PAGE_USER (r0) */
-       rlwinm  r4,r30,32-7+1,30,30     /* _PAGE_DIRTY -> _PAGE_USER (r4) */
-       and     r0,r0,r4                /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
-       andc    r0,r30,r0               /* r0 = pte & ~r0 */
-       rlwimi  r3,r0,32-1,31,31        /* Insert result into PP lsb */
-       /*
-        * Always add "C" bit for perf. Memory coherence is always enabled
-        */
-       ori     r3,r3,HPTE_R_C | HPTE_R_M
-
-       /* We eventually do the icache sync here (maybe inline that
-        * code rather than call a C function...)
-        */
-BEGIN_FTR_SECTION
-       mr      r4,r30
-       mr      r5,r7
-       bl      hash_page_do_lazy_icache
-END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
-
-       /* At this point, r3 contains new PP bits, save them in
-        * place of "access" in the param area (sic)
-        */
-       std     r3,STK_PARAM(R4)(r1)
-
-       /* Get htab_hash_mask */
-       ld      r4,htab_hash_mask@got(2)
-       ld      r27,0(r4)       /* htab_hash_mask -> r27 */
-
-       /* Check if we may already be in the hashtable, in this case, we
-        * go to out-of-line code to try to modify the HPTE
-        */
-       rldicl. r0,r31,64-12,48
-       bne     ht64_modify_pte
-
-ht64_insert_pte:
-       /* Clear hpte bits in new pte (we also clear BUSY btw) and
-        * add _PAGE_HPTE_SUB0
-        */
-       lis     r0,_PAGE_HPTEFLAGS@h
-       ori     r0,r0,_PAGE_HPTEFLAGS@l
-       andc    r30,r30,r0
-#ifdef CONFIG_PPC_64K_PAGES
-       oris    r30,r30,_PAGE_HPTE_SUB0@h
-#else
-       ori     r30,r30,_PAGE_HASHPTE
-#endif
-       /* Phyical address in r5 */
-       rldicl  r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
-       sldi    r5,r5,PAGE_SHIFT
-
-       /* Calculate primary group hash */
-       and     r0,r28,r27
-       rldicr  r3,r0,3,63-3    /* r0 = (hash & mask) << 3 */
-
-       /* Call ppc_md.hpte_insert */
-       ld      r6,STK_PARAM(R4)(r1)    /* Retrieve new pp bits */
-       mr      r4,r29                  /* Retrieve vpn */
-       li      r7,0                    /* !bolted, !secondary */
-       li      r8,MMU_PAGE_64K
-       li      r9,MMU_PAGE_64K         /* actual page size */
-       ld      r10,STK_PARAM(R9)(r1)   /* segment size */
-.globl ht64_call_hpte_insert1
-ht64_call_hpte_insert1:
-       bl      .                       /* patched by htab_finish_init() */
-       cmpdi   0,r3,0
-       bge     ht64_pte_insert_ok      /* Insertion successful */
-       cmpdi   0,r3,-2                 /* Critical failure */
-       beq-    ht64_pte_insert_failure
-
-       /* Now try secondary slot */
-
-       /* Phyical address in r5 */
-       rldicl  r5,r31,64-PTE_RPN_SHIFT,PTE_RPN_SHIFT
-       sldi    r5,r5,PAGE_SHIFT
-
-       /* Calculate secondary group hash */
-       andc    r0,r27,r28
-       rldicr  r3,r0,3,63-3    /* r0 = (~hash & mask) << 3 */
-
-       /* Call ppc_md.hpte_insert */
-       ld      r6,STK_PARAM(R4)(r1)    /* Retrieve new pp bits */
-       mr      r4,r29                  /* Retrieve vpn */
-       li      r7,HPTE_V_SECONDARY     /* !bolted, secondary */
-       li      r8,MMU_PAGE_64K
-       li      r9,MMU_PAGE_64K         /* actual page size */
-       ld      r10,STK_PARAM(R9)(r1)   /* segment size */
-.globl ht64_call_hpte_insert2
-ht64_call_hpte_insert2:
-       bl      .                       /* patched by htab_finish_init() */
-       cmpdi   0,r3,0
-       bge+    ht64_pte_insert_ok      /* Insertion successful */
-       cmpdi   0,r3,-2                 /* Critical failure */
-       beq-    ht64_pte_insert_failure
-
-       /* Both are full, we need to evict something */
-       mftb    r0
-       /* Pick a random group based on TB */
-       andi.   r0,r0,1
-       mr      r5,r28
-       bne     2f
-       not     r5,r5
-2:     and     r0,r5,r27
-       rldicr  r3,r0,3,63-3    /* r0 = (hash & mask) << 3 */
-       /* Call ppc_md.hpte_remove */
-.globl ht64_call_hpte_remove
-ht64_call_hpte_remove:
-       bl      .                       /* patched by htab_finish_init() */
-
-       /* Try all again */
-       b       ht64_insert_pte
-
-ht64_bail_ok:
-       li      r3,0
-       b       ht64_bail
-
-ht64_pte_insert_ok:
-       /* Insert slot number & secondary bit in PTE */
-       rldimi  r30,r3,12,63-15
-
-       /* Write out the PTE with a normal write
-        * (maybe add eieio may be good still ?)
-        */
-ht64_write_out_pte:
-       ld      r6,STK_PARAM(R6)(r1)
-       std     r30,0(r6)
-       li      r3, 0
-ht64_bail:
-       ld      r27,STK_REG(R27)(r1)
-       ld      r28,STK_REG(R28)(r1)
-       ld      r29,STK_REG(R29)(r1)
-       ld      r30,STK_REG(R30)(r1)
-       ld      r31,STK_REG(R31)(r1)
-       addi    r1,r1,STACKFRAMESIZE
-       ld      r0,16(r1)
-       mtlr    r0
-       blr
-
-ht64_modify_pte:
-       /* Keep PP bits in r4 and slot idx from the PTE around in r3 */
-       mr      r4,r3
-       rlwinm  r3,r31,32-12,29,31
-
-       /* Secondary group ? if yes, get a inverted hash value */
-       mr      r5,r28
-       andi.   r0,r31,_PAGE_F_SECOND
-       beq     1f
-       not     r5,r5
-1:
-       /* Calculate proper slot value for ppc_md.hpte_updatepp */
-       and     r0,r5,r27
-       rldicr  r0,r0,3,63-3    /* r0 = (hash & mask) << 3 */
-       add     r3,r0,r3        /* add slot idx */
-
-       /* Call ppc_md.hpte_updatepp */
-       mr      r5,r29                  /* vpn */
-       li      r6,MMU_PAGE_64K         /* base page size */
-       li      r7,MMU_PAGE_64K         /* actual page size */
-       ld      r8,STK_PARAM(R9)(r1)    /* segment size */
-       ld      r9,STK_PARAM(R8)(r1)    /* get "flags" param */
-.globl ht64_call_hpte_updatepp
-ht64_call_hpte_updatepp:
-       bl      .                       /* patched by htab_finish_init() */
-
-       /* if we failed because typically the HPTE wasn't really here
-        * we try an insertion.
-        */
-       cmpdi   0,r3,-1
-       beq-    ht64_insert_pte
-
-       /* Clear the BUSY bit and Write out the PTE */
-       li      r0,_PAGE_BUSY
-       andc    r30,r30,r0
-       b       ht64_write_out_pte
-
-ht64_wrong_access:
-       /* Bail out clearing reservation */
-       stdcx.  r31,0,r6
-       li      r3,1
-       b       ht64_bail
-
-ht64_pte_insert_failure:
-       /* Bail out restoring old PTE */
-       ld      r6,STK_PARAM(R6)(r1)
-       std     r31,0(r6)
-       li      r3,-1
-       b       ht64_bail
-
-
-#endif /* CONFIG_PPC_64K_PAGES */
-
-
-/*****************************************************************************
- *                                                                           *
- *           Huge pages implementation is in hugetlbpage.c                   *
- *                                                                           *
- *****************************************************************************/
index c8822af10a587389999473171db475eb5462714b..8eaac81347fdb43c8722884318bc176dba957940 100644 (file)
@@ -429,6 +429,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
        local_irq_restore(flags);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void native_hugepage_invalidate(unsigned long vsid,
                                       unsigned long addr,
                                       unsigned char *hpte_slot_array,
@@ -482,6 +483,15 @@ static void native_hugepage_invalidate(unsigned long vsid,
        }
        local_irq_restore(flags);
 }
+#else
+static void native_hugepage_invalidate(unsigned long vsid,
+                                      unsigned long addr,
+                                      unsigned char *hpte_slot_array,
+                                      int psize, int ssize, int local)
+{
+       WARN(1, "%s called without THP support\n", __func__);
+}
+#endif
 
 static inline int __hpte_actual_psize(unsigned int lp, int psize)
 {
index 7f9616f7c4797fb680ae21380516bdc4a70876e7..ba59d5977f3498d4c9c4e42faeff5111f925d2a5 100644 (file)
@@ -159,24 +159,41 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
        },
 };
 
-static unsigned long htab_convert_pte_flags(unsigned long pteflags)
+unsigned long htab_convert_pte_flags(unsigned long pteflags)
 {
-       unsigned long rflags = pteflags & 0x1fa;
+       unsigned long rflags = 0;
 
        /* _PAGE_EXEC -> NOEXEC */
        if ((pteflags & _PAGE_EXEC) == 0)
                rflags |= HPTE_R_N;
-
-       /* PP bits. PAGE_USER is already PP bit 0x2, so we only
-        * need to add in 0x1 if it's a read-only user page
+       /*
+        * PP bits:
+        * Linux uses SLB key 0 for kernel and 1 for user.
+        * Kernel areas are mapped by PP bits 00
+        * and there is no kernel RO (_PAGE_KERNEL_RO).
+        * User areas are mapped by 0x2 and read-only by
+        * 0x3.
         */
-       if ((pteflags & _PAGE_USER) && !((pteflags & _PAGE_RW) &&
-                                        (pteflags & _PAGE_DIRTY)))
-               rflags |= 1;
+       if (pteflags & _PAGE_USER) {
+               rflags |= 0x2;
+               if (!((pteflags & _PAGE_RW) && (pteflags & _PAGE_DIRTY)))
+                       rflags |= 0x1;
+       }
        /*
         * Always add "C" bit for perf. Memory coherence is always enabled
         */
-       return rflags | HPTE_R_C | HPTE_R_M;
+       rflags |=  HPTE_R_C | HPTE_R_M;
+       /*
+        * Add in WIG bits
+        */
+       if (pteflags & _PAGE_WRITETHRU)
+               rflags |= HPTE_R_W;
+       if (pteflags & _PAGE_NO_CACHE)
+               rflags |= HPTE_R_I;
+       if (pteflags & _PAGE_GUARDED)
+               rflags |= HPTE_R_G;
+
+       return rflags;
 }
 
 int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
@@ -629,46 +646,6 @@ int remove_section_mapping(unsigned long start, unsigned long end)
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-extern u32 htab_call_hpte_insert1[];
-extern u32 htab_call_hpte_insert2[];
-extern u32 htab_call_hpte_remove[];
-extern u32 htab_call_hpte_updatepp[];
-extern u32 ht64_call_hpte_insert1[];
-extern u32 ht64_call_hpte_insert2[];
-extern u32 ht64_call_hpte_remove[];
-extern u32 ht64_call_hpte_updatepp[];
-
-static void __init htab_finish_init(void)
-{
-#ifdef CONFIG_PPC_64K_PAGES
-       patch_branch(ht64_call_hpte_insert1,
-               ppc_function_entry(ppc_md.hpte_insert),
-               BRANCH_SET_LINK);
-       patch_branch(ht64_call_hpte_insert2,
-               ppc_function_entry(ppc_md.hpte_insert),
-               BRANCH_SET_LINK);
-       patch_branch(ht64_call_hpte_remove,
-               ppc_function_entry(ppc_md.hpte_remove),
-               BRANCH_SET_LINK);
-       patch_branch(ht64_call_hpte_updatepp,
-               ppc_function_entry(ppc_md.hpte_updatepp),
-               BRANCH_SET_LINK);
-#endif /* CONFIG_PPC_64K_PAGES */
-
-       patch_branch(htab_call_hpte_insert1,
-               ppc_function_entry(ppc_md.hpte_insert),
-               BRANCH_SET_LINK);
-       patch_branch(htab_call_hpte_insert2,
-               ppc_function_entry(ppc_md.hpte_insert),
-               BRANCH_SET_LINK);
-       patch_branch(htab_call_hpte_remove,
-               ppc_function_entry(ppc_md.hpte_remove),
-               BRANCH_SET_LINK);
-       patch_branch(htab_call_hpte_updatepp,
-               ppc_function_entry(ppc_md.hpte_updatepp),
-               BRANCH_SET_LINK);
-}
-
 static void __init htab_initialize(void)
 {
        unsigned long table;
@@ -815,7 +792,6 @@ static void __init htab_initialize(void)
                                         mmu_linear_psize, mmu_kernel_ssize));
        }
 
-       htab_finish_init();
 
        DBG(" <- htab_initialize()\n");
 }
@@ -877,11 +853,11 @@ static unsigned int get_paca_psize(unsigned long addr)
        unsigned long index, mask_index;
 
        if (addr < SLICE_LOW_TOP) {
-               lpsizes = get_paca()->context.low_slices_psize;
+               lpsizes = get_paca()->mm_ctx_low_slices_psize;
                index = GET_LOW_SLICE_INDEX(addr);
                return (lpsizes >> (index * 4)) & 0xF;
        }
-       hpsizes = get_paca()->context.high_slices_psize;
+       hpsizes = get_paca()->mm_ctx_high_slices_psize;
        index = GET_HIGH_SLICE_INDEX(addr);
        mask_index = index & 0x1;
        return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF;
@@ -890,7 +866,7 @@ static unsigned int get_paca_psize(unsigned long addr)
 #else
 unsigned int get_paca_psize(unsigned long addr)
 {
-       return get_paca()->context.user_psize;
+       return get_paca()->mm_ctx_user_psize;
 }
 #endif
 
@@ -906,7 +882,8 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
        slice_set_range_psize(mm, addr, 1, MMU_PAGE_4K);
        copro_flush_all_slbs(mm);
        if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {
-               get_paca()->context = mm->context;
+
+               copy_mm_to_paca(&mm->context);
                slb_flush_and_rebolt();
        }
 }
@@ -973,7 +950,7 @@ static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
 {
        if (user_region) {
                if (psize != get_paca_psize(ea)) {
-                       get_paca()->context = mm->context;
+                       copy_mm_to_paca(&mm->context);
                        slb_flush_and_rebolt();
                }
        } else if (get_paca()->vmalloc_sllp !=
@@ -1148,9 +1125,10 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
                }
        }
 
+#endif /* CONFIG_PPC_64K_PAGES */
+
        if (current->mm == mm)
                check_paca_psize(ea, mm, psize, user_region);
-#endif /* CONFIG_PPC_64K_PAGES */
 
 #ifdef CONFIG_PPC_64K_PAGES
        if (psize == MMU_PAGE_64K)
@@ -1203,6 +1181,35 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
 }
 EXPORT_SYMBOL_GPL(hash_page);
 
+int __hash_page(unsigned long ea, unsigned long msr, unsigned long trap,
+               unsigned long dsisr)
+{
+       unsigned long access = _PAGE_PRESENT;
+       unsigned long flags = 0;
+       struct mm_struct *mm = current->mm;
+
+       if (REGION_ID(ea) == VMALLOC_REGION_ID)
+               mm = &init_mm;
+
+       if (dsisr & DSISR_NOHPTE)
+               flags |= HPTE_NOHPTE_UPDATE;
+
+       if (dsisr & DSISR_ISSTORE)
+               access |= _PAGE_RW;
+       /*
+        * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
+        * accessing a userspace segment (even from the kernel). We assume
+        * kernel addresses always have the high bit set.
+        */
+       if ((msr & MSR_PR) || (REGION_ID(ea) == USER_REGION_ID))
+               access |= _PAGE_USER;
+
+       if (trap == 0x400)
+               access |= _PAGE_EXEC;
+
+       return hash_page_mm(mm, ea, access, trap, flags);
+}
+
 void hash_preload(struct mm_struct *mm, unsigned long ea,
                  unsigned long access, unsigned long trap)
 {
index 4d87122cf6a725805d3de6bdf46cc44b9d3f91ef..baf1301ded0ccf4ae5c3280923aa65985f42d8fe 100644 (file)
@@ -54,18 +54,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
                        new_pmd |= _PAGE_DIRTY;
        } while (old_pmd != __cmpxchg_u64((unsigned long *)pmdp,
                                          old_pmd, new_pmd));
-       /*
-        * PP bits. _PAGE_USER is already PP bit 0x2, so we only
-        * need to add in 0x1 if it's a read-only user page
-        */
-       rflags = new_pmd & _PAGE_USER;
-       if ((new_pmd & _PAGE_USER) && !((new_pmd & _PAGE_RW) &&
-                                          (new_pmd & _PAGE_DIRTY)))
-               rflags |= 0x1;
-       /*
-        * _PAGE_EXEC -> HW_NO_EXEC since it's inverted
-        */
-       rflags |= ((new_pmd & _PAGE_EXEC) ? 0 : HPTE_R_N);
+       rflags = htab_convert_pte_flags(new_pmd);
 
 #if 0
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
@@ -82,7 +71,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
         */
        shift = mmu_psize_defs[psize].shift;
        index = (ea & ~HPAGE_PMD_MASK) >> shift;
-       BUG_ON(index >= 4096);
+       BUG_ON(index >= PTE_FRAG_SIZE);
 
        vpn = hpt_vpn(ea, vsid, ssize);
        hpte_slot_array = get_hpte_slot_array(pmdp);
@@ -131,13 +120,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
                pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT;
                new_pmd |= _PAGE_HASHPTE;
 
-               /* Add in WIMG bits */
-               rflags |= (new_pmd & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
-                                     _PAGE_GUARDED));
-               /*
-                * enable the memory coherence always
-                */
-               rflags |= HPTE_R_M;
 repeat:
                hpte_group = ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
 
index ba47aaf33a4bf19c19859fc0de1ed1ad05703c53..7e6d0880813fe9e363a7e9edd2f5278ef10e4a9e 100644 (file)
@@ -51,6 +51,48 @@ static inline int mmu_get_tsize(int psize)
        return mmu_psize_defs[psize].enc;
 }
 
+#if defined(CONFIG_PPC_FSL_BOOK3E) && defined(CONFIG_PPC64)
+#include <asm/paca.h>
+
+static inline void book3e_tlb_lock(void)
+{
+       struct paca_struct *paca = get_paca();
+       unsigned long tmp;
+       int token = smp_processor_id() + 1;
+
+       asm volatile("1: lbarx %0, 0, %1;"
+                    "cmpwi %0, 0;"
+                    "bne 2f;"
+                    "stbcx. %2, 0, %1;"
+                    "bne 1b;"
+                    "b 3f;"
+                    "2: lbzx %0, 0, %1;"
+                    "cmpwi %0, 0;"
+                    "bne 2b;"
+                    "b 1b;"
+                    "3:"
+                    : "=&r" (tmp)
+                    : "r" (&paca->tcd_ptr->lock), "r" (token)
+                    : "memory");
+}
+
+static inline void book3e_tlb_unlock(void)
+{
+       struct paca_struct *paca = get_paca();
+
+       isync();
+       paca->tcd_ptr->lock = 0;
+}
+#else
+static inline void book3e_tlb_lock(void)
+{
+}
+
+static inline void book3e_tlb_unlock(void)
+{
+}
+#endif
+
 static inline int book3e_tlb_exists(unsigned long ea, unsigned long pid)
 {
        int found = 0;
@@ -109,7 +151,10 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
         */
        local_irq_save(flags);
 
+       book3e_tlb_lock();
+
        if (unlikely(book3e_tlb_exists(ea, mm->context.id))) {
+               book3e_tlb_unlock();
                local_irq_restore(flags);
                return;
        }
@@ -141,6 +186,7 @@ void book3e_hugetlb_preload(struct vm_area_struct *vma, unsigned long ea,
 
        asm volatile ("tlbwe");
 
+       book3e_tlb_unlock();
        local_irq_restore(flags);
 }
 
index d94b1af53a93620bb7888902dd3f0cd40b46e384..e2138c7ae70fed97740cd74a84ce41b5ac29baa9 100644 (file)
@@ -59,10 +59,8 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                        new_pte |= _PAGE_DIRTY;
        } while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
                                         old_pte, new_pte));
+       rflags = htab_convert_pte_flags(new_pte);
 
-       rflags = 0x2 | (!(new_pte & _PAGE_RW));
-       /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
-       rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
        sz = ((1UL) << shift);
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                /* No CPU has hugepages but lacks no execute, so we
@@ -91,18 +89,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
 
                /* clear HPTE slot informations in new PTE */
-#ifdef CONFIG_PPC_64K_PAGES
-               new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
-#else
                new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
-#endif
-               /* Add in WIMG bits */
-               rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
-                                     _PAGE_COHERENT | _PAGE_GUARDED));
-               /*
-                * enable the memory coherence always
-                */
-               rflags |= HPTE_R_M;
 
                slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
                                             mmu_psize, ssize);
@@ -127,3 +114,21 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
        *ptep = __pte(new_pte & ~_PAGE_BUSY);
        return 0;
 }
+
+#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_DEBUG_VM)
+/*
+ * This enables us to catch the wrong page directory format
+ * Moved here so that we can use WARN() in the call.
+ */
+int hugepd_ok(hugepd_t hpd)
+{
+       bool is_hugepd;
+
+       /*
+        * We should not find this format in page directory, warn otherwise.
+        */
+       is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
+       WARN(is_hugepd, "Found wrong page directory format\n");
+       return 0;
+}
+#endif
index 9833fee493ec414be50c241153889d7ac4259402..61b8b7ccea4f55882ccf00f000879dda6e2dd493 100644 (file)
@@ -53,78 +53,6 @@ static unsigned nr_gpages;
 
 #define hugepd_none(hpd)       ((hpd).pd == 0)
 
-#ifdef CONFIG_PPC_BOOK3S_64
-/*
- * At this point we do the placement change only for BOOK3S 64. This would
- * possibly work on other subarchs.
- */
-
-/*
- * We have PGD_INDEX_SIZ = 12 and PTE_INDEX_SIZE = 8, so that we can have
- * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD;
- *
- * Defined in such a way that we can optimize away code block at build time
- * if CONFIG_HUGETLB_PAGE=n.
- */
-int pmd_huge(pmd_t pmd)
-{
-       /*
-        * leaf pte for huge page, bottom two bits != 00
-        */
-       return ((pmd_val(pmd) & 0x3) != 0x0);
-}
-
-int pud_huge(pud_t pud)
-{
-       /*
-        * leaf pte for huge page, bottom two bits != 00
-        */
-       return ((pud_val(pud) & 0x3) != 0x0);
-}
-
-int pgd_huge(pgd_t pgd)
-{
-       /*
-        * leaf pte for huge page, bottom two bits != 00
-        */
-       return ((pgd_val(pgd) & 0x3) != 0x0);
-}
-
-#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_DEBUG_VM)
-/*
- * This enables us to catch the wrong page directory format
- * Moved here so that we can use WARN() in the call.
- */
-int hugepd_ok(hugepd_t hpd)
-{
-       bool is_hugepd;
-
-       /*
-        * We should not find this format in page directory, warn otherwise.
-        */
-       is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
-       WARN(is_hugepd, "Found wrong page directory format\n");
-       return 0;
-}
-#endif
-
-#else
-int pmd_huge(pmd_t pmd)
-{
-       return 0;
-}
-
-int pud_huge(pud_t pud)
-{
-       return 0;
-}
-
-int pgd_huge(pgd_t pgd)
-{
-       return 0;
-}
-#endif
-
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
        /* Only called for hugetlbfs pages, hence can ignore THP */
@@ -966,8 +894,8 @@ void flush_dcache_icache_hugepage(struct page *page)
  * We have 4 cases for pgds and pmds:
  * (1) invalid (all zeroes)
  * (2) pointer to next table, as normal; bottom 6 bits == 0
- * (3) leaf pte for huge page, bottom two bits != 00
- * (4) hugepd pointer, bottom two bits == 00, next 4 bits indicate size of table
+ * (3) leaf pte for huge page _PAGE_PTE set
+ * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
  *
  * So long as we atomically load page table pointers we are safe against teardown,
  * we can follow the address down to the the page and take a ref on it.
index d747dd7bc90b72ff7a7756fa745c2027da6c793d..379a6a90644be155483e76ddd45a2bcdfdb28c2a 100644 (file)
@@ -87,11 +87,7 @@ static void pgd_ctor(void *addr)
 
 static void pmd_ctor(void *addr)
 {
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       memset(addr, 0, PMD_TABLE_SIZE * 2);
-#else
        memset(addr, 0, PMD_TABLE_SIZE);
-#endif
 }
 
 struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];
index 83dfcb55ffef69b2f5921212ccdaf6d18b49da52..83dfd7925c72c95efa47766c1806ef3e4a39126c 100644 (file)
@@ -179,6 +179,10 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
         */
        VM_WARN_ON((pte_val(*ptep) & (_PAGE_PRESENT | _PAGE_USER)) ==
                (_PAGE_PRESENT | _PAGE_USER));
+       /*
+        * Add the _PAGE_PTE bit when trying to set a pte
+        */
+       pte = __pte(pte_val(pte) | _PAGE_PTE);
 
        /* Note: mm->context.id might not yet have been assigned as
         * this context might not have been activated yet when this
index e92cb2146b1862668ade6f9b8ec8bc94a13a209f..ea6bc31debb05562cf235d0696f5aaadbc698982 100644 (file)
@@ -359,7 +359,7 @@ struct page *pud_page(pud_t pud)
 struct page *pmd_page(pmd_t pmd)
 {
        if (pmd_trans_huge(pmd) || pmd_huge(pmd))
-               return pfn_to_page(pmd_pfn(pmd));
+               return pte_page(pmd_pte(pmd));
        return virt_to_page(pmd_page_vaddr(pmd));
 }
 
@@ -625,7 +625,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma,
        "1:     ldarx   %0,0,%3\n\
                andi.   %1,%0,%6\n\
                bne-    1b \n\
-               ori     %1,%0,%4 \n\
+               oris    %1,%0,%4@h \n\
                stdcx.  %1,0,%3 \n\
                bne-    1b"
        : "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
@@ -759,22 +759,15 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
 
 static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
 {
-       pmd_val(pmd) |= pgprot_val(pgprot);
-       return pmd;
+       return __pmd(pmd_val(pmd) | pgprot_val(pgprot));
 }
 
 pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
 {
-       pmd_t pmd;
-       /*
-        * For a valid pte, we would have _PAGE_PRESENT always
-        * set. We use this to check THP page at pmd level.
-        * leaf pte for huge page, bottom two bits != 00
-        */
-       pmd_val(pmd) = pfn << PTE_RPN_SHIFT;
-       pmd_val(pmd) |= _PAGE_THP_HUGE;
-       pmd = pmd_set_protbits(pmd, pgprot);
-       return pmd;
+       unsigned long pmdv;
+
+       pmdv = pfn << PTE_RPN_SHIFT;
+       return pmd_set_protbits(__pmd(pmdv), pgprot);
 }
 
 pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
@@ -784,10 +777,11 @@ pmd_t mk_pmd(struct page *page, pgprot_t pgprot)
 
 pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
+       unsigned long pmdv;
 
-       pmd_val(pmd) &= _HPAGE_CHG_MASK;
-       pmd = pmd_set_protbits(pmd, newprot);
-       return pmd;
+       pmdv = pmd_val(pmd);
+       pmdv &= _HPAGE_CHG_MASK;
+       return pmd_set_protbits(__pmd(pmdv), newprot);
 }
 
 /*
index 515730e499fe663b7dbe2ba00c759e06e916d59d..825b6873391f9654f402d1b1a5ede306e5febe60 100644 (file)
@@ -228,7 +228,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
                asm volatile("slbie %0" : : "r" (slbie_data));
 
        get_paca()->slb_cache_ptr = 0;
-       get_paca()->context = mm->context;
+       copy_mm_to_paca(&mm->context);
 
        /*
         * preload some userspace segments into the SLB.
index 0f432a702870fa96a9d35b41110e0b6b4e99406e..42954f0b47aced31454d44103180df9202527fb6 100644 (file)
@@ -185,8 +185,7 @@ static void slice_flush_segments(void *parm)
        if (mm != current->active_mm)
                return;
 
-       /* update the paca copy of the context struct */
-       get_paca()->context = current->active_mm->context;
+       copy_mm_to_paca(&current->active_mm->context);
 
        local_irq_save(flags);
        slb_flush_and_rebolt();
index bf4c4473abb96b1c39eb0d6b04e4decc0a1c2a98..4bc6bbbe9ada59658c4d39e38697538f895274c5 100644 (file)
@@ -37,8 +37,8 @@
 #include <asm/udbg.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
-#include <asm/qe.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/qe_ic.h>
 
 #include "mpc83xx.h"
 
@@ -136,8 +136,6 @@ static void __init mpc83xx_km_setup_arch(void)
        mpc83xx_setup_pci();
 
 #ifdef CONFIG_QUICC_ENGINE
-       qe_reset();
-
        np = of_find_node_by_name(NULL, "par_io");
        if (np != NULL) {
                par_io_init(np);
index ef9d01a049c16755ddf2616f4a9babfec0867de5..7e923cad56cf62ce577d43aeb31d88cc6958ef05 100644 (file)
@@ -17,7 +17,7 @@
 #include <asm/io.h>
 #include <asm/hw_irq.h>
 #include <asm/ipic.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe_ic.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 
index 8d762203eeffa103345c2d7acd09a4b6077ccc46..a973b2ae5df6c9320e3914ea10239c4ed59259e4 100644 (file)
@@ -36,8 +36,8 @@
 #include <asm/udbg.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
-#include <asm/qe.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/qe_ic.h>
 
 #include "mpc83xx.h"
 
@@ -74,8 +74,6 @@ static void __init mpc832x_sys_setup_arch(void)
        mpc83xx_setup_pci();
 
 #ifdef CONFIG_QUICC_ENGINE
-       qe_reset();
-
        if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
                par_io_init(np);
                of_node_put(np);
index eff5baabc3fbf1600dec624782778fa68fbef448..ea2b87d202cadf88aa453784d38e236dc91c783f 100644 (file)
@@ -25,8 +25,8 @@
 #include <asm/time.h>
 #include <asm/ipic.h>
 #include <asm/udbg.h>
-#include <asm/qe.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/qe_ic.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 
@@ -203,8 +203,6 @@ static void __init mpc832x_rdb_setup_arch(void)
        mpc83xx_setup_pci();
 
 #ifdef CONFIG_QUICC_ENGINE
-       qe_reset();
-
        if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
                par_io_init(np);
                of_node_put(np);
index 1a26d2f83401964b49006abb1d7d3836e351d124..dd70b85f56d41d3d8ad1f49ab266ab4c4de40d6b 100644 (file)
@@ -44,8 +44,8 @@
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 #include <sysdev/simple_gpio.h>
-#include <asm/qe.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/qe_ic.h>
 
 #include "mpc83xx.h"
 
@@ -82,8 +82,6 @@ static void __init mpc836x_mds_setup_arch(void)
        mpc83xx_setup_pci();
 
 #ifdef CONFIG_QUICC_ENGINE
-       qe_reset();
-
        if ((np = of_find_node_by_name(NULL, "par_io")) != NULL) {
                par_io_init(np);
                of_node_put(np);
index b63b42d11d6c92e4d0e476100df85ead02d49ccf..4cd7153a6c889068b2afe99123f53b85d3016d82 100644 (file)
@@ -20,8 +20,8 @@
 #include <asm/time.h>
 #include <asm/ipic.h>
 #include <asm/udbg.h>
-#include <asm/qe.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/qe_ic.h>
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 
@@ -35,9 +35,6 @@ static void __init mpc836x_rdk_setup_arch(void)
                ppc_md.progress("mpc836x_rdk_setup_arch()", 0);
 
        mpc83xx_setup_pci();
-#ifdef CONFIG_QUICC_ENGINE
-       qe_reset();
-#endif
 }
 
 /*
index f0927e58af2579aa9adf082cf7912b23933887f4..dcfafd6b91ee1f48ced0620557039441d9f30618 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/pci.h>
 #include <asm/mpic.h>
 #include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
 #include <asm/udbg.h>
 
 #include "mpc85xx.h"
@@ -46,10 +47,12 @@ static void __init bsc913x_qds_setup_arch(void)
        mpc85xx_smp_init();
 #endif
 
+       fsl_pci_assign_primary();
+
        pr_info("bsc913x board from Freescale Semiconductor\n");
 }
 
-machine_device_initcall(bsc9132_qds, mpc85xx_common_publish_devices);
+machine_arch_initcall(bsc9132_qds, mpc85xx_common_publish_devices);
 
 /*
  * Called very early, device-tree isn't unflattened
@@ -67,6 +70,9 @@ define_machine(bsc9132_qds) {
        .probe                  = bsc9132_qds_probe,
        .setup_arch             = bsc913x_qds_setup_arch,
        .init_IRQ               = bsc913x_qds_pic_init,
+#ifdef CONFIG_PCI
+       .pcibios_fixup_bus      = fsl_pcibios_fixup_bus,
+#endif
        .get_irq                = mpic_get_irq,
        .restart                = fsl_rstcr_restart,
        .calibrate_decr         = generic_calibrate_decr,
index 23791de7b688f91aa4b87770a1c5ae40931137a4..949f22c86e61c97bce0e9dd0eaab64c4be24d8da 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
 
-#include <asm/qe.h>
+#include <soc/fsl/qe/qe.h>
 #include <sysdev/cpm2_pic.h>
 
 #include "mpc85xx.h"
@@ -105,7 +105,6 @@ void __init mpc85xx_qe_init(void)
                return;
        }
 
-       qe_reset();
        of_node_put(np);
 
 }
index 46d05c94add60217761e326b8695da303e63e690..a2b0bc859de0c230ddc6bfb352d5cf6f730c6b18 100644 (file)
@@ -27,7 +27,7 @@
 #include <asm/udbg.h>
 #include <asm/mpic.h>
 #include <asm/ehv_pic.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe_ic.h>
 
 #include <linux/of_platform.h>
 #include <sysdev/fsl_soc.h>
index 7d12a19aa7eecddddb58cff630478faa8f87893d..de72a5f464b1e50ea1d277934385eef89d634a83 100644 (file)
 
 #include "mpc85xx.h"
 
-#ifdef CONFIG_PCI
-static int mpc85xx_exclude_device(struct pci_controller *hose,
-                                  u_char bus, u_char devfn)
-{
-       if (bus == 0 && PCI_SLOT(devfn) == 0)
-               return PCIBIOS_DEVICE_NOT_FOUND;
-       else
-               return PCIBIOS_SUCCESSFUL;
-}
-#endif /* CONFIG_PCI */
-
 static void __init mpc85xx_ads_pic_init(void)
 {
        struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
@@ -145,10 +134,6 @@ static void __init mpc85xx_ads_setup_arch(void)
        init_ioports();
 #endif
 
-#ifdef CONFIG_PCI
-       ppc_md.pci_exclude_device = mpc85xx_exclude_device;
-#endif
-
        fsl_pci_assign_primary();
 }
 
index f0be439ceaaada4e583c4959dcbf0f4cf744b6da..f61cbe235581ad92951188fc470f14058b47f224 100644 (file)
@@ -48,8 +48,8 @@
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
 #include <sysdev/simple_gpio.h>
-#include <asm/qe.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/qe_ic.h>
 #include <asm/mpic.h>
 #include <asm/swiotlb.h>
 #include "smp.h"
index 50dcc00a0f5a0dd2cab3d2a5835b0270d92dc7f7..3f4dad133338801c96f262e017d9392128ec3ebb 100644 (file)
@@ -26,8 +26,8 @@
 #include <asm/prom.h>
 #include <asm/udbg.h>
 #include <asm/mpic.h>
-#include <asm/qe.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/qe_ic.h>
 
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
index 892e613519cc12939d5626049d4cdea941deac8c..71bc255b432420e22407adcf1af445b0424760e0 100644 (file)
@@ -22,8 +22,8 @@
 #include <asm/pci-bridge.h>
 #include <asm/udbg.h>
 #include <asm/mpic.h>
-#include <asm/qe.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/qe_ic.h>
 
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
index b7f9c408bf24fea333a3e80d1810be2463a90ace..46a3533d3acb251a853d831b746ead4c2cf5dbe8 100644 (file)
@@ -272,17 +272,6 @@ config TAU_AVERAGE
 
          If in doubt, say N here.
 
-config QUICC_ENGINE
-       bool "Freescale QUICC Engine (QE) Support"
-       depends on FSL_SOC && PPC32
-       select PPC_LIB_RHEAP
-       select CRC32
-       help
-         The QUICC Engine (QE) is a new generation of communications
-         coprocessors on Freescale embedded CPUs (akin to CPM in older chips).
-         Selecting this option means that you wish to build a kernel
-         for a machine with a QE coprocessor.
-
 config QE_GPIO
        bool "QE GPIO support"
        depends on QUICC_ENGINE
@@ -295,7 +284,6 @@ config CPM2
        bool "Enable support for the CPM2 (Communications Processor Module)"
        depends on (FSL_SOC_BOOKE && PPC32) || 8260
        select CPM
-       select PPC_LIB_RHEAP
        select PPC_PCI_CHOICE
        select ARCH_REQUIRE_GPIOLIB
        help
@@ -325,6 +313,7 @@ config FSL_ULI1575
 
 config CPM
        bool
+       select GENERIC_ALLOCATOR
 
 config OF_RTC
        bool
index 429fc59d2a476c0f0ca66589121d772909ec45b4..d9088f0b8fcc56f76f0e1f29787a83864efbcbc9 100644 (file)
@@ -33,11 +33,6 @@ config PPC_IBM_CELL_BLADE
        select PPC_UDBG_16550
        select UDBG_RTAS_CONSOLE
 
-config PPC_CELL_QPACE
-       bool "IBM Cell - QPACE"
-       depends on PPC64 && PPC_BOOK3S && CPU_BIG_ENDIAN
-       select PPC_CELL_COMMON
-
 config AXON_MSI
        bool
        depends on PPC_IBM_CELL_BLADE && PCI_MSI
index 34699bddfdddbfc375222617775d267d9d926775..00464305763d70c6f2c9783aa55c24b100132e65 100644 (file)
@@ -11,7 +11,6 @@ obj-$(CONFIG_PPC_IBM_CELL_POWERBUTTON)        += cbe_powerbutton.o
 
 ifeq ($(CONFIG_SMP),y)
 obj-$(CONFIG_PPC_CELL_NATIVE)          += smp.o
-obj-$(CONFIG_PPC_CELL_QPACE)           += smp.o
 endif
 
 # needed only when building loadable spufs.ko
@@ -26,6 +25,3 @@ obj-$(CONFIG_SPU_BASE)                        += spu_callbacks.o spu_base.o \
                                           spufs/
 
 obj-$(CONFIG_AXON_MSI)                 += axon_msi.o
-
-# qpace setup
-obj-$(CONFIG_PPC_CELL_QPACE)           += qpace_setup.o
diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c
deleted file mode 100644 (file)
index d328140..0000000
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- *  linux/arch/powerpc/platforms/cell/qpace_setup.c
- *
- *  Copyright (C) 1995  Linus Torvalds
- *  Adapted from 'alpha' version by Gary Thomas
- *  Modified by Cort Dougan (cort@cs.nmt.edu)
- *  Modified by PPC64 Team, IBM Corp
- *  Modified by Cell Team, IBM Deutschland Entwicklung GmbH
- *  Modified by Benjamin Krill <ben@codiert.org>, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/console.h>
-#include <linux/of_platform.h>
-
-#include <asm/mmu.h>
-#include <asm/processor.h>
-#include <asm/io.h>
-#include <asm/kexec.h>
-#include <asm/pgtable.h>
-#include <asm/prom.h>
-#include <asm/rtas.h>
-#include <asm/dma.h>
-#include <asm/machdep.h>
-#include <asm/time.h>
-#include <asm/cputable.h>
-#include <asm/irq.h>
-#include <asm/spu.h>
-#include <asm/spu_priv1.h>
-#include <asm/udbg.h>
-#include <asm/cell-regs.h>
-
-#include "interrupt.h"
-#include "pervasive.h"
-#include "ras.h"
-
-static void qpace_show_cpuinfo(struct seq_file *m)
-{
-       struct device_node *root;
-       const char *model = "";
-
-       root = of_find_node_by_path("/");
-       if (root)
-               model = of_get_property(root, "model", NULL);
-       seq_printf(m, "machine\t\t: CHRP %s\n", model);
-       of_node_put(root);
-}
-
-static void qpace_progress(char *s, unsigned short hex)
-{
-       printk("*** %04x : %s\n", hex, s ? s : "");
-}
-
-static const struct of_device_id qpace_bus_ids[] __initconst = {
-       { .type = "soc", },
-       { .compatible = "soc", },
-       { .type = "spider", },
-       { .type = "axon", },
-       { .type = "plb5", },
-       { .type = "plb4", },
-       { .type = "opb", },
-       { .type = "ebc", },
-       {},
-};
-
-static int __init qpace_publish_devices(void)
-{
-       int node;
-
-       /* Publish OF platform devices for southbridge IOs */
-       of_platform_bus_probe(NULL, qpace_bus_ids, NULL);
-
-       /* There is no device for the MIC memory controller, thus we create
-        * a platform device for it to attach the EDAC driver to.
-        */
-       for_each_online_node(node) {
-               if (cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(node)) == NULL)
-                       continue;
-               platform_device_register_simple("cbe-mic", node, NULL, 0);
-       }
-
-       return 0;
-}
-machine_subsys_initcall(qpace, qpace_publish_devices);
-
-static void __init qpace_setup_arch(void)
-{
-#ifdef CONFIG_SPU_BASE
-       spu_priv1_ops = &spu_priv1_mmio_ops;
-       spu_management_ops = &spu_management_of_ops;
-#endif
-
-       cbe_regs_init();
-
-#ifdef CONFIG_CBE_RAS
-       cbe_ras_init();
-#endif
-
-#ifdef CONFIG_SMP
-       smp_init_cell();
-#endif
-
-       /* init to some ~sane value until calibrate_delay() runs */
-       loops_per_jiffy = 50000000;
-
-       cbe_pervasive_init();
-#ifdef CONFIG_DUMMY_CONSOLE
-       conswitchp = &dummy_con;
-#endif
-}
-
-static int __init qpace_probe(void)
-{
-       unsigned long root = of_get_flat_dt_root();
-
-       if (!of_flat_dt_is_compatible(root, "IBM,QPACE"))
-               return 0;
-
-       hpte_init_native();
-       pm_power_off = rtas_power_off;
-
-       return 1;
-}
-
-define_machine(qpace) {
-       .name                   = "QPACE",
-       .probe                  = qpace_probe,
-       .setup_arch             = qpace_setup_arch,
-       .show_cpuinfo           = qpace_show_cpuinfo,
-       .restart                = rtas_restart,
-       .halt                   = rtas_halt,
-       .get_boot_time          = rtas_get_boot_time,
-       .get_rtc_time           = rtas_get_rtc_time,
-       .set_rtc_time           = rtas_set_rtc_time,
-       .calibrate_decr         = generic_calibrate_decr,
-       .progress               = qpace_progress,
-       .init_IRQ               = iic_init_IRQ,
-};
index 4ddf769a64e589adbfdba180b86021b357730897..9f79004e6d6f614d562990eaffcb69a00c23cce8 100644 (file)
@@ -326,7 +326,7 @@ static int spu_process_callback(struct spu_context *ctx)
        spu_ret = -ENOSYS;
        npc += 4;
 
-       if (s.nr_ret < __NR_syscalls) {
+       if (s.nr_ret < NR_syscalls) {
                spu_release(ctx);
                /* do actual system call from here */
                spu_ret = spu_sys_callback(&s);
index b4a369dac3a8aea479580187f7ace7fbe5ce8704..81799d70a1eeb1bb26cf38cab5fafab35cf94f53 100644 (file)
@@ -77,7 +77,7 @@ void maple_get_rtc_time(struct rtc_time *tm)
        if ((tm->tm_year + 1900) < 1970)
                tm->tm_year += 100;
 
-       GregorianDay(tm);
+       tm->tm_wday = -1;
 }
 
 int maple_set_rtc_time(struct rtc_time *tm)
index 76f5013c35e5c1cedb9f70b09401b3cc76ec5ab5..c3c9bbb3573ae6bcbc0ff77e73bd68305bf50f43 100644 (file)
@@ -84,6 +84,7 @@ static void __init bootx_printf(const char *format, ...)
                        break;
                }
        }
+       va_end(args);
 }
 #else /* CONFIG_BOOTX_TEXT */
 static void __init bootx_printf(const char *format, ...) {}
index 6f4f8b060def53cc1afc55b80ffd3f230766bb81..9815463450331686134219814663e091b0763d93 100644 (file)
@@ -258,13 +258,14 @@ static unsigned int pmac_pic_get_irq(void)
 #ifdef CONFIG_XMON
 static struct irqaction xmon_action = {
        .handler        = xmon_irq,
-       .flags          = 0,
+       .flags          = IRQF_NO_THREAD,
        .name           = "NMI - XMON"
 };
 #endif
 
 static struct irqaction gatwick_cascade_action = {
        .handler        = gatwick_action,
+       .flags          = IRQF_NO_THREAD,
        .name           = "cascade",
 };
 
index 1c8cdb6250e7c15fb0b26620e0ba43756008cfbc..f1516b5ecec94c04740256396b293b53157923df 100644 (file)
@@ -2,9 +2,10 @@ obj-y                  += setup.o opal-wrappers.o opal.o opal-async.o idle.o
 obj-y                  += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
 obj-y                  += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o
 obj-y                  += opal-msglog.o opal-hmi.o opal-power.o opal-irqchip.o
+obj-y                  += opal-kmsg.o
 
 obj-$(CONFIG_SMP)      += smp.o subcore.o subcore-asm.o
-obj-$(CONFIG_PCI)      += pci.o pci-p5ioc2.o pci-ioda.o
+obj-$(CONFIG_PCI)      += pci.o pci-p5ioc2.o pci-ioda.o npu-dma.o
 obj-$(CONFIG_EEH)      += eeh-powernv.o
 obj-$(CONFIG_PPC_SCOM) += opal-xscom.o
 obj-$(CONFIG_MEMORY_FAILURE)   += opal-memory-errors.o
index e1c90725522a1a8bf263ed3b7bb0c25e1317518a..5f152b95ca0c8493536d787a51d741ae628adb8a 100644 (file)
@@ -48,8 +48,8 @@ static int pnv_eeh_init(void)
        struct pci_controller *hose;
        struct pnv_phb *phb;
 
-       if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
-               pr_warn("%s: OPALv3 is required !\n",
+       if (!firmware_has_feature(FW_FEATURE_OPAL)) {
+               pr_warn("%s: OPAL is required !\n",
                        __func__);
                return -EINVAL;
        }
index 59d735d2e5c053853d0a1795a54787a006b42130..15bfbcd5debc2db9e8e44930b66f5cdda5717a39 100644 (file)
@@ -242,7 +242,7 @@ static int __init pnv_init_idle_states(void)
        if (cpuidle_disable != IDLE_NO_OVERRIDE)
                goto out;
 
-       if (!firmware_has_feature(FW_FEATURE_OPALv3))
+       if (!firmware_has_feature(FW_FEATURE_OPAL))
                goto out;
 
        power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
new file mode 100644 (file)
index 0000000..e85aa90
--- /dev/null
@@ -0,0 +1,348 @@
+/*
+ * This file implements the DMA operations for NVLink devices. The NPU
+ * devices all point to the same iommu table as the parent PCI device.
+ *
+ * Copyright Alistair Popple, IBM Corporation 2015.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <linux/memblock.h>
+
+#include <asm/iommu.h>
+#include <asm/pnv-pci.h>
+#include <asm/msi_bitmap.h>
+#include <asm/opal.h>
+
+#include "powernv.h"
+#include "pci.h"
+
+/*
+ * Other types of TCE cache invalidation are not functional in the
+ * hardware.
+ */
+#define TCE_KILL_INVAL_ALL PPC_BIT(0)
+
+static struct pci_dev *get_pci_dev(struct device_node *dn)
+{
+       return PCI_DN(dn)->pcidev;
+}
+
+/* Given a NPU device get the associated PCI device. */
+struct pci_dev *pnv_pci_get_gpu_dev(struct pci_dev *npdev)
+{
+       struct device_node *dn;
+       struct pci_dev *gpdev;
+
+       /* Get assoicated PCI device */
+       dn = of_parse_phandle(npdev->dev.of_node, "ibm,gpu", 0);
+       if (!dn)
+               return NULL;
+
+       gpdev = get_pci_dev(dn);
+       of_node_put(dn);
+
+       return gpdev;
+}
+EXPORT_SYMBOL(pnv_pci_get_gpu_dev);
+
+/* Given the real PCI device get a linked NPU device. */
+struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
+{
+       struct device_node *dn;
+       struct pci_dev *npdev;
+
+       /* Get assoicated PCI device */
+       dn = of_parse_phandle(gpdev->dev.of_node, "ibm,npu", index);
+       if (!dn)
+               return NULL;
+
+       npdev = get_pci_dev(dn);
+       of_node_put(dn);
+
+       return npdev;
+}
+EXPORT_SYMBOL(pnv_pci_get_npu_dev);
+
+#define NPU_DMA_OP_UNSUPPORTED()                                       \
+       dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
+               __func__)
+
+static void *dma_npu_alloc(struct device *dev, size_t size,
+                          dma_addr_t *dma_handle, gfp_t flag,
+                          struct dma_attrs *attrs)
+{
+       NPU_DMA_OP_UNSUPPORTED();
+       return NULL;
+}
+
+static void dma_npu_free(struct device *dev, size_t size,
+                        void *vaddr, dma_addr_t dma_handle,
+                        struct dma_attrs *attrs)
+{
+       NPU_DMA_OP_UNSUPPORTED();
+}
+
+static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
+                                  unsigned long offset, size_t size,
+                                  enum dma_data_direction direction,
+                                  struct dma_attrs *attrs)
+{
+       NPU_DMA_OP_UNSUPPORTED();
+       return 0;
+}
+
+static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
+                         int nelems, enum dma_data_direction direction,
+                         struct dma_attrs *attrs)
+{
+       NPU_DMA_OP_UNSUPPORTED();
+       return 0;
+}
+
+static int dma_npu_dma_supported(struct device *dev, u64 mask)
+{
+       NPU_DMA_OP_UNSUPPORTED();
+       return 0;
+}
+
+static u64 dma_npu_get_required_mask(struct device *dev)
+{
+       NPU_DMA_OP_UNSUPPORTED();
+       return 0;
+}
+
+struct dma_map_ops dma_npu_ops = {
+       .map_page               = dma_npu_map_page,
+       .map_sg                 = dma_npu_map_sg,
+       .alloc                  = dma_npu_alloc,
+       .free                   = dma_npu_free,
+       .dma_supported          = dma_npu_dma_supported,
+       .get_required_mask      = dma_npu_get_required_mask,
+};
+
+/*
+ * Returns the PE assoicated with the PCI device of the given
+ * NPU. Returns the linked pci device if pci_dev != NULL.
+ */
+static struct pnv_ioda_pe *get_gpu_pci_dev_and_pe(struct pnv_ioda_pe *npe,
+                                                 struct pci_dev **gpdev)
+{
+       struct pnv_phb *phb;
+       struct pci_controller *hose;
+       struct pci_dev *pdev;
+       struct pnv_ioda_pe *pe;
+       struct pci_dn *pdn;
+
+       if (npe->flags & PNV_IODA_PE_PEER) {
+               pe = npe->peers[0];
+               pdev = pe->pdev;
+       } else {
+               pdev = pnv_pci_get_gpu_dev(npe->pdev);
+               if (!pdev)
+                       return NULL;
+
+               pdn = pci_get_pdn(pdev);
+               if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
+                       return NULL;
+
+               hose = pci_bus_to_host(pdev->bus);
+               phb = hose->private_data;
+               pe = &phb->ioda.pe_array[pdn->pe_number];
+       }
+
+       if (gpdev)
+               *gpdev = pdev;
+
+       return pe;
+}
+
+void pnv_npu_tce_invalidate_entire(struct pnv_ioda_pe *npe)
+{
+       struct pnv_phb *phb = npe->phb;
+
+       if (WARN_ON(phb->type != PNV_PHB_NPU ||
+                   !phb->ioda.tce_inval_reg ||
+                   !(npe->flags & PNV_IODA_PE_DEV)))
+               return;
+
+       mb(); /* Ensure previous TCE table stores are visible */
+       __raw_writeq(cpu_to_be64(TCE_KILL_INVAL_ALL),
+               phb->ioda.tce_inval_reg);
+}
+
+void pnv_npu_tce_invalidate(struct pnv_ioda_pe *npe,
+                               struct iommu_table *tbl,
+                               unsigned long index,
+                               unsigned long npages,
+                               bool rm)
+{
+       struct pnv_phb *phb = npe->phb;
+
+       /* We can only invalidate the whole cache on NPU */
+       unsigned long val = TCE_KILL_INVAL_ALL;
+
+       if (WARN_ON(phb->type != PNV_PHB_NPU ||
+                   !phb->ioda.tce_inval_reg ||
+                   !(npe->flags & PNV_IODA_PE_DEV)))
+               return;
+
+       mb(); /* Ensure previous TCE table stores are visible */
+       if (rm)
+               __raw_rm_writeq(cpu_to_be64(val),
+                 (__be64 __iomem *) phb->ioda.tce_inval_reg_phys);
+       else
+               __raw_writeq(cpu_to_be64(val),
+                       phb->ioda.tce_inval_reg);
+}
+
+void pnv_npu_init_dma_pe(struct pnv_ioda_pe *npe)
+{
+       struct pnv_ioda_pe *gpe;
+       struct pci_dev *gpdev;
+       int i, avail = -1;
+
+       if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
+               return;
+
+       gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
+       if (!gpe)
+               return;
+
+       for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) {
+               /* Nothing to do if the PE is already connected. */
+               if (gpe->peers[i] == npe)
+                       return;
+
+               if (!gpe->peers[i])
+                       avail = i;
+       }
+
+       if (WARN_ON(avail < 0))
+               return;
+
+       gpe->peers[avail] = npe;
+       gpe->flags |= PNV_IODA_PE_PEER;
+
+       /*
+        * We assume that the NPU devices only have a single peer PE
+        * (the GPU PCIe device PE).
+        */
+       npe->peers[0] = gpe;
+       npe->flags |= PNV_IODA_PE_PEER;
+}
+
+/*
+ * For the NPU we want to point the TCE table at the same table as the
+ * real PCI device.
+ */
+static void pnv_npu_disable_bypass(struct pnv_ioda_pe *npe)
+{
+       struct pnv_phb *phb = npe->phb;
+       struct pci_dev *gpdev;
+       struct pnv_ioda_pe *gpe;
+       void *addr;
+       unsigned int size;
+       int64_t rc;
+
+       /*
+        * Find the assoicated PCI devices and get the dma window
+        * information from there.
+        */
+       if (!npe->pdev || !(npe->flags & PNV_IODA_PE_DEV))
+               return;
+
+       gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
+       if (!gpe)
+               return;
+
+       addr = (void *)gpe->table_group.tables[0]->it_base;
+       size = gpe->table_group.tables[0]->it_size << 3;
+       rc = opal_pci_map_pe_dma_window(phb->opal_id, npe->pe_number,
+                                       npe->pe_number, 1, __pa(addr),
+                                       size, 0x1000);
+       if (rc != OPAL_SUCCESS)
+               pr_warn("%s: Error %lld setting DMA window on PHB#%d-PE#%d\n",
+                       __func__, rc, phb->hose->global_number, npe->pe_number);
+
+       /*
+        * We don't initialise npu_pe->tce32_table as we always use
+        * dma_npu_ops which are nops.
+        */
+       set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
+}
+
+/*
+ * Enable/disable bypass mode on the NPU. The NPU only supports one
+ * window per link, so bypass needs to be explicity enabled or
+ * disabled. Unlike for a PHB3 bypass and non-bypass modes can't be
+ * active at the same time.
+ */
+int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe, bool enable)
+{
+       struct pnv_phb *phb = npe->phb;
+       int64_t rc = 0;
+
+       if (phb->type != PNV_PHB_NPU || !npe->pdev)
+               return -EINVAL;
+
+       if (enable) {
+               /* Enable the bypass window */
+               phys_addr_t top = memblock_end_of_DRAM();
+
+               npe->tce_bypass_base = 0;
+               top = roundup_pow_of_two(top);
+               dev_info(&npe->pdev->dev, "Enabling bypass for PE %d\n",
+                        npe->pe_number);
+               rc = opal_pci_map_pe_dma_window_real(phb->opal_id,
+                                       npe->pe_number, npe->pe_number,
+                                       npe->tce_bypass_base, top);
+       } else {
+               /*
+                * Disable the bypass window by replacing it with the
+                * TCE32 window.
+                */
+               pnv_npu_disable_bypass(npe);
+       }
+
+       return rc;
+}
+
+int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask)
+{
+       struct pci_controller *hose = pci_bus_to_host(npdev->bus);
+       struct pnv_phb *phb = hose->private_data;
+       struct pci_dn *pdn = pci_get_pdn(npdev);
+       struct pnv_ioda_pe *npe, *gpe;
+       struct pci_dev *gpdev;
+       uint64_t top;
+       bool bypass = false;
+
+       if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
+               return -ENXIO;
+
+       /* We only do bypass if it's enabled on the linked device */
+       npe = &phb->ioda.pe_array[pdn->pe_number];
+       gpe = get_gpu_pci_dev_and_pe(npe, &gpdev);
+       if (!gpe)
+               return -ENODEV;
+
+       if (gpe->tce_bypass_enabled) {
+               top = gpe->tce_bypass_base + memblock_end_of_DRAM() - 1;
+               bypass = (dma_mask >= top);
+       }
+
+       if (bypass)
+               dev_info(&npdev->dev, "Using 64-bit DMA iommu bypass\n");
+       else
+               dev_info(&npdev->dev, "Using 32-bit DMA via iommu\n");
+
+       pnv_npu_dma_set_bypass(npe, bypass);
+       *npdev->dev.dma_mask = dma_mask;
+
+       return 0;
+}
diff --git a/arch/powerpc/platforms/powernv/opal-kmsg.c b/arch/powerpc/platforms/powernv/opal-kmsg.c
new file mode 100644 (file)
index 0000000..6f1214d
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * kmsg dumper that ensures the OPAL console fully flushes panic messages
+ *
+ * Author: Russell Currey <ruscur@russell.cc>
+ *
+ * Copyright 2015 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kmsg_dump.h>
+
+#include <asm/opal.h>
+#include <asm/opal-api.h>
+
+/*
+ * Console output is controlled by OPAL firmware.  The kernel regularly calls
+ * OPAL_POLL_EVENTS, which flushes some console output.  In a panic state,
+ * however, the kernel no longer calls OPAL_POLL_EVENTS and the panic message
+ * may not be completely printed.  This function does not actually dump the
+ * message, it just ensures that OPAL completely flushes the console buffer.
+ */
+static void force_opal_console_flush(struct kmsg_dumper *dumper,
+                                    enum kmsg_dump_reason reason)
+{
+       int i;
+       int64_t ret;
+
+       /*
+        * Outside of a panic context the pollers will continue to run,
+        * so we don't need to do any special flushing.
+        */
+       if (reason != KMSG_DUMP_PANIC)
+               return;
+
+       if (opal_check_token(OPAL_CONSOLE_FLUSH)) {
+               ret = opal_console_flush(0);
+
+               if (ret == OPAL_UNSUPPORTED || ret == OPAL_PARAMETER)
+                       return;
+
+               /* Incrementally flush until there's nothing left */
+               while (opal_console_flush(0) != OPAL_SUCCESS);
+       } else {
+               /*
+                * If OPAL_CONSOLE_FLUSH is not implemented in the firmware,
+                * the console can still be flushed by calling the polling
+                * function enough times to flush the buffer.  We don't know
+                * how much output still needs to be flushed, but we can be
+                * generous since the kernel is in panic and doesn't need
+                * to do much else.
+                */
+               printk(KERN_NOTICE "opal: OPAL_CONSOLE_FLUSH missing.\n");
+               for (i = 0; i < 1024; i++) {
+                       opal_poll_events(NULL);
+               }
+       }
+}
+
+static struct kmsg_dumper opal_kmsg_dumper = {
+       .dump = force_opal_console_flush
+};
+
+void __init opal_kmsg_init(void)
+{
+       int rc;
+
+       /* Add our dumper to the list */
+       rc = kmsg_dump_register(&opal_kmsg_dumper);
+       if (rc != 0)
+               pr_err("opal: kmsg_dump_register failed; returned %d\n", rc);
+}
index 4ece8e40dd5414868ef9934d7a809578c37f2743..e315e704cca7100cc22ee9f335701659684e744d 100644 (file)
@@ -434,7 +434,6 @@ static const struct of_device_id opal_prd_match[] = {
 static struct platform_driver opal_prd_driver = {
        .driver = {
                .name           = "opal-prd",
-               .owner          = THIS_MODULE,
                .of_match_table = opal_prd_match,
        },
        .probe  = opal_prd_probe,
index 37dbee15769fdcb42bc9271e9de81991a79f1ffd..f8868864f373ed5eecba333a5b1a2b83e38e0ec6 100644 (file)
@@ -31,8 +31,7 @@ static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm)
        tm->tm_hour     = bcd2bin((h_m_s_ms >> 56) & 0xff);
        tm->tm_min      = bcd2bin((h_m_s_ms >> 48) & 0xff);
        tm->tm_sec      = bcd2bin((h_m_s_ms >> 40) & 0xff);
-
-        GregorianDay(tm);
+       tm->tm_wday     = -1;
 }
 
 unsigned long __init opal_get_boot_time(void)
@@ -51,7 +50,7 @@ unsigned long __init opal_get_boot_time(void)
                rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
                if (rc == OPAL_BUSY_EVENT)
                        opal_poll_events(NULL);
-               else
+               else if (rc == OPAL_BUSY)
                        mdelay(10);
        }
        if (rc != OPAL_SUCCESS)
index b7a464fef7a77be52671a5ef0679f62453cdbede..e45b88a5d7e0f7ed20f68516050a7e8a8c37fa1d 100644 (file)
@@ -301,3 +301,4 @@ OPAL_CALL(opal_flash_erase,                 OPAL_FLASH_ERASE);
 OPAL_CALL(opal_prd_msg,                                OPAL_PRD_MSG);
 OPAL_CALL(opal_leds_get_ind,                   OPAL_LEDS_GET_INDICATOR);
 OPAL_CALL(opal_leds_set_ind,                   OPAL_LEDS_SET_INDICATOR);
+OPAL_CALL(opal_console_flush,                  OPAL_CONSOLE_FLUSH);
index 7634d1c62299b44063169989ef32a27c9ae5ce9d..d0ac535cf5d7c783ad0ee0ded359d2eeb48ccd68 100644 (file)
@@ -126,7 +126,7 @@ static const struct scom_controller opal_scom_controller = {
 
 static int opal_xscom_init(void)
 {
-       if (firmware_has_feature(FW_FEATURE_OPALv3))
+       if (firmware_has_feature(FW_FEATURE_OPAL))
                scom_init(&opal_scom_controller);
        return 0;
 }
index 57cffb80bc36a85d975dc993235c02efda1d7be8..4e0da5af94a124db13fb524b7cce79f9a1704035 100644 (file)
@@ -98,16 +98,11 @@ int __init early_init_dt_scan_opal(unsigned long node,
        pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%d)\n",
                 opal.size, sizep, runtimesz);
 
-       powerpc_firmware_features |= FW_FEATURE_OPAL;
        if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
-               powerpc_firmware_features |= FW_FEATURE_OPALv2;
-               powerpc_firmware_features |= FW_FEATURE_OPALv3;
-               pr_info("OPAL V3 detected !\n");
-       } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
-               powerpc_firmware_features |= FW_FEATURE_OPALv2;
-               pr_info("OPAL V2 detected !\n");
+               powerpc_firmware_features |= FW_FEATURE_OPAL;
+               pr_info("OPAL detected !\n");
        } else {
-               pr_info("OPAL V1 detected !\n");
+               panic("OPAL != V3 detected, no longer supported.\n");
        }
 
        /* Reinit all cores with the right endian */
@@ -352,17 +347,15 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
         * enough room and be done with it
         */
        spin_lock_irqsave(&opal_write_lock, flags);
-       if (firmware_has_feature(FW_FEATURE_OPALv2)) {
-               rc = opal_console_write_buffer_space(vtermno, &olen);
-               len = be64_to_cpu(olen);
-               if (rc || len < total_len) {
-                       spin_unlock_irqrestore(&opal_write_lock, flags);
-                       /* Closed -> drop characters */
-                       if (rc)
-                               return total_len;
-                       opal_poll_events(NULL);
-                       return -EAGAIN;
-               }
+       rc = opal_console_write_buffer_space(vtermno, &olen);
+       len = be64_to_cpu(olen);
+       if (rc || len < total_len) {
+               spin_unlock_irqrestore(&opal_write_lock, flags);
+               /* Closed -> drop characters */
+               if (rc)
+                       return total_len;
+               opal_poll_events(NULL);
+               return -EAGAIN;
        }
 
        /* We still try to handle partial completions, though they
@@ -555,7 +548,7 @@ bool opal_mce_check_early_recovery(struct pt_regs *regs)
                goto out;
 
        if ((regs->nip >= opal.base) &&
-                       (regs->nip <= (opal.base + opal.size)))
+                       (regs->nip < (opal.base + opal.size)))
                recover_addr = find_recovery_address(regs->nip);
 
        /*
@@ -696,10 +689,7 @@ static int __init opal_init(void)
        }
 
        /* Register OPAL consoles if any ports */
-       if (firmware_has_feature(FW_FEATURE_OPALv2))
-               consoles = of_find_node_by_path("/ibm,opal/consoles");
-       else
-               consoles = of_node_get(opal_node);
+       consoles = of_find_node_by_path("/ibm,opal/consoles");
        if (consoles) {
                for_each_child_of_node(consoles, np) {
                        if (strcmp(np->name, "serial"))
@@ -758,6 +748,9 @@ static int __init opal_init(void)
        opal_pdev_init(opal_node, "ibm,opal-flash");
        opal_pdev_init(opal_node, "ibm,opal-prd");
 
+       /* Initialise OPAL kmsg dumper for flushing console on panic */
+       opal_kmsg_init();
+
        return 0;
 }
 machine_subsys_initcall(powernv, opal_init);
index 414fd1a00fda85b243dcb2839a4929b09be4a52c..573ae1994097fb91e15e3f7f6351fe1e73b35c59 100644 (file)
@@ -116,16 +116,6 @@ static int __init iommu_setup(char *str)
 }
 early_param("iommu", iommu_setup);
 
-/*
- * stdcix is only supposed to be used in hypervisor real mode as per
- * the architecture spec
- */
-static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
-{
-       __asm__ __volatile__("stdcix %0,0,%1"
-               : : "r" (val), "r" (paddr) : "memory");
-}
-
 static inline bool pnv_pci_is_mem_pref_64(unsigned long flags)
 {
        return ((flags & (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)) ==
@@ -344,7 +334,7 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
                return;
        }
 
-       if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
+       if (!firmware_has_feature(FW_FEATURE_OPAL)) {
                pr_info("  Firmware too old to support M64 window\n");
                return;
        }
@@ -357,6 +347,7 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
        }
 
        res = &hose->mem_resources[1];
+       res->name = dn->full_name;
        res->start = of_translate_address(dn, r + 2);
        res->end = res->start + of_read_number(r + 4, 2) - 1;
        res->flags = (IORESOURCE_MEM | IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
@@ -780,8 +771,12 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
                return -ENXIO;
        }
 
-       /* Configure PELTV */
-       pnv_ioda_set_peltv(phb, pe, true);
+       /*
+        * Configure PELTV. NPUs don't have a PELTV table so skip
+        * configuration on them.
+        */
+       if (phb->type != PNV_PHB_NPU)
+               pnv_ioda_set_peltv(phb, pe, true);
 
        /* Setup reverse map */
        for (rid = pe->rid; rid < rid_end; rid++)
@@ -924,7 +919,6 @@ static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
 }
 #endif /* CONFIG_PCI_IOV */
 
-#if 0
 static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
 {
        struct pci_controller *hose = pci_bus_to_host(dev->bus);
@@ -941,11 +935,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
        if (pdn->pe_number != IODA_INVALID_PE)
                return NULL;
 
-       /* PE#0 has been pre-set */
-       if (dev->bus->number == 0)
-               pe_num = 0;
-       else
-               pe_num = pnv_ioda_alloc_pe(phb);
+       pe_num = pnv_ioda_alloc_pe(phb);
        if (pe_num == IODA_INVALID_PE) {
                pr_warning("%s: Not enough PE# available, disabling device\n",
                           pci_name(dev));
@@ -963,6 +953,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
        pci_dev_get(dev);
        pdn->pcidev = dev;
        pdn->pe_number = pe_num;
+       pe->flags = PNV_IODA_PE_DEV;
        pe->pdev = dev;
        pe->pbus = NULL;
        pe->tce32_seg = -1;
@@ -993,7 +984,6 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
 
        return pe;
 }
-#endif /* Useful for SRIOV case */
 
 static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
 {
@@ -1007,6 +997,7 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
                                pci_name(dev));
                        continue;
                }
+               pdn->pcidev = dev;
                pdn->pe_number = pe->pe_number;
                pe->dma_weight += pnv_ioda_dma_weight(dev);
                if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
@@ -1083,6 +1074,77 @@ static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, bool all)
        pnv_ioda_link_pe_by_weight(phb, pe);
 }
 
+static struct pnv_ioda_pe *pnv_ioda_setup_npu_PE(struct pci_dev *npu_pdev)
+{
+       int pe_num, found_pe = false, rc;
+       long rid;
+       struct pnv_ioda_pe *pe;
+       struct pci_dev *gpu_pdev;
+       struct pci_dn *npu_pdn;
+       struct pci_controller *hose = pci_bus_to_host(npu_pdev->bus);
+       struct pnv_phb *phb = hose->private_data;
+
+       /*
+        * Due to a hardware erratum, PE#0 on the NPU is reserved for
+        * error handling. This means we only have three PEs remaining
+        * which need to be assigned to four links, implying some
+        * links must share PEs.
+        *
+        * To achieve this we assign PEs such that NPUs linking the
+        * same GPU get assigned the same PE.
+        */
+       gpu_pdev = pnv_pci_get_gpu_dev(npu_pdev);
+       for (pe_num = 0; pe_num < phb->ioda.total_pe; pe_num++) {
+               pe = &phb->ioda.pe_array[pe_num];
+               if (!pe->pdev)
+                       continue;
+
+               if (pnv_pci_get_gpu_dev(pe->pdev) == gpu_pdev) {
+                       /*
+                        * This device has the same peer GPU so should
+                        * be assigned the same PE as the existing
+                        * peer NPU.
+                        */
+                       dev_info(&npu_pdev->dev,
+                               "Associating to existing PE %d\n", pe_num);
+                       pci_dev_get(npu_pdev);
+                       npu_pdn = pci_get_pdn(npu_pdev);
+                       rid = npu_pdev->bus->number << 8 | npu_pdn->devfn;
+                       npu_pdn->pcidev = npu_pdev;
+                       npu_pdn->pe_number = pe_num;
+                       pe->dma_weight += pnv_ioda_dma_weight(npu_pdev);
+                       phb->ioda.pe_rmap[rid] = pe->pe_number;
+
+                       /* Map the PE to this link */
+                       rc = opal_pci_set_pe(phb->opal_id, pe_num, rid,
+                                       OpalPciBusAll,
+                                       OPAL_COMPARE_RID_DEVICE_NUMBER,
+                                       OPAL_COMPARE_RID_FUNCTION_NUMBER,
+                                       OPAL_MAP_PE);
+                       WARN_ON(rc != OPAL_SUCCESS);
+                       found_pe = true;
+                       break;
+               }
+       }
+
+       if (!found_pe)
+               /*
+                * Could not find an existing PE so allocate a new
+                * one.
+                */
+               return pnv_ioda_setup_dev_PE(npu_pdev);
+       else
+               return pe;
+}
+
+static void pnv_ioda_setup_npu_PEs(struct pci_bus *bus)
+{
+       struct pci_dev *pdev;
+
+       list_for_each_entry(pdev, &bus->devices, bus_list)
+               pnv_ioda_setup_npu_PE(pdev);
+}
+
 static void pnv_ioda_setup_PEs(struct pci_bus *bus)
 {
        struct pci_dev *dev;
@@ -1119,7 +1181,17 @@ static void pnv_pci_ioda_setup_PEs(void)
                if (phb->reserve_m64_pe)
                        phb->reserve_m64_pe(hose->bus, NULL, true);
 
-               pnv_ioda_setup_PEs(hose->bus);
+               /*
+                * On NPU PHB, we expect separate PEs for individual PCI
+                * functions. PCI bus dependent PEs are required for the
+                * remaining types of PHBs.
+                */
+               if (phb->type == PNV_PHB_NPU) {
+                       /* PE#0 is needed for error reporting */
+                       pnv_ioda_reserve_pe(phb, 0);
+                       pnv_ioda_setup_npu_PEs(hose->bus);
+               } else
+                       pnv_ioda_setup_PEs(hose->bus);
        }
 }
 
@@ -1578,6 +1650,8 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
        struct pnv_ioda_pe *pe;
        uint64_t top;
        bool bypass = false;
+       struct pci_dev *linked_npu_dev;
+       int i;
 
        if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
                return -ENODEV;;
@@ -1596,6 +1670,18 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
                set_dma_ops(&pdev->dev, &dma_iommu_ops);
        }
        *pdev->dev.dma_mask = dma_mask;
+
+       /* Update peer npu devices */
+       if (pe->flags & PNV_IODA_PE_PEER)
+               for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) {
+                       if (!pe->peers[i])
+                               continue;
+
+                       linked_npu_dev = pe->peers[i]->pdev;
+                       if (dma_get_mask(&linked_npu_dev->dev) != dma_mask)
+                               dma_set_mask(&linked_npu_dev->dev, dma_mask);
+               }
+
        return 0;
 }
 
@@ -1740,12 +1826,23 @@ static inline void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_ioda_pe *pe)
        /* 01xb - invalidate TCEs that match the specified PE# */
        unsigned long val = (0x4ull << 60) | (pe->pe_number & 0xFF);
        struct pnv_phb *phb = pe->phb;
+       struct pnv_ioda_pe *npe;
+       int i;
 
        if (!phb->ioda.tce_inval_reg)
                return;
 
        mb(); /* Ensure above stores are visible */
        __raw_writeq(cpu_to_be64(val), phb->ioda.tce_inval_reg);
+
+       if (pe->flags & PNV_IODA_PE_PEER)
+               for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) {
+                       npe = pe->peers[i];
+                       if (!npe || npe->phb->type != PNV_PHB_NPU)
+                               continue;
+
+                       pnv_npu_tce_invalidate_entire(npe);
+               }
 }
 
 static void pnv_pci_ioda2_do_tce_invalidate(unsigned pe_number, bool rm,
@@ -1780,15 +1877,28 @@ static void pnv_pci_ioda2_tce_invalidate(struct iommu_table *tbl,
        struct iommu_table_group_link *tgl;
 
        list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
+               struct pnv_ioda_pe *npe;
                struct pnv_ioda_pe *pe = container_of(tgl->table_group,
                                struct pnv_ioda_pe, table_group);
                __be64 __iomem *invalidate = rm ?
                        (__be64 __iomem *)pe->phb->ioda.tce_inval_reg_phys :
                        pe->phb->ioda.tce_inval_reg;
+               int i;
 
                pnv_pci_ioda2_do_tce_invalidate(pe->pe_number, rm,
                        invalidate, tbl->it_page_shift,
                        index, npages);
+
+               if (pe->flags & PNV_IODA_PE_PEER)
+                       /* Invalidate PEs using the same TCE table */
+                       for (i = 0; i < PNV_IODA_MAX_PEER_PES; i++) {
+                               npe = pe->peers[i];
+                               if (!npe || npe->phb->type != PNV_PHB_NPU)
+                                       continue;
+
+                               pnv_npu_tce_invalidate(npe, tbl, index,
+                                                       npages, rm);
+                       }
        }
 }
 
@@ -2436,10 +2546,17 @@ static void pnv_ioda_setup_dma(struct pnv_phb *phb)
                        pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
                                pe->dma_weight, segs);
                        pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
-               } else {
+               } else if (phb->type == PNV_PHB_IODA2) {
                        pe_info(pe, "Assign DMA32 space\n");
                        segs = 0;
                        pnv_pci_ioda2_setup_dma_pe(phb, pe);
+               } else if (phb->type == PNV_PHB_NPU) {
+                       /*
+                        * We initialise the DMA space for an NPU PHB
+                        * after setup of the PHB is complete as we
+                        * point the NPU TVT to the same location
+                        * as the PHB3 TVT.
+                        */
                }
 
                remaining -= segs;
@@ -2881,6 +2998,11 @@ static void pnv_pci_ioda_setup_seg(void)
 
        list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
                phb = hose->private_data;
+
+               /* NPU PHB does not support IO or MMIO segmentation */
+               if (phb->type == PNV_PHB_NPU)
+                       continue;
+
                list_for_each_entry(pe, &phb->ioda.pe_list, list) {
                        pnv_ioda_setup_pe_seg(hose, pe);
                }
@@ -2920,6 +3042,27 @@ static void pnv_pci_ioda_create_dbgfs(void)
 #endif /* CONFIG_DEBUG_FS */
 }
 
+static void pnv_npu_ioda_fixup(void)
+{
+       bool enable_bypass;
+       struct pci_controller *hose, *tmp;
+       struct pnv_phb *phb;
+       struct pnv_ioda_pe *pe;
+
+       list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+               phb = hose->private_data;
+               if (phb->type != PNV_PHB_NPU)
+                       continue;
+
+               list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
+                       enable_bypass = dma_get_mask(&pe->pdev->dev) ==
+                               DMA_BIT_MASK(64);
+                       pnv_npu_init_dma_pe(pe);
+                       pnv_npu_dma_set_bypass(pe, enable_bypass);
+               }
+       }
+}
+
 static void pnv_pci_ioda_fixup(void)
 {
        pnv_pci_ioda_setup_PEs();
@@ -2932,6 +3075,9 @@ static void pnv_pci_ioda_fixup(void)
        eeh_init();
        eeh_addr_cache_build();
 #endif
+
+       /* Link NPU IODA tables to their PCI devices. */
+       pnv_npu_ioda_fixup();
 }
 
 /*
@@ -3046,6 +3192,19 @@ static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
        .shutdown = pnv_pci_ioda_shutdown,
 };
 
+static const struct pci_controller_ops pnv_npu_ioda_controller_ops = {
+       .dma_dev_setup = pnv_pci_dma_dev_setup,
+#ifdef CONFIG_PCI_MSI
+       .setup_msi_irqs = pnv_setup_msi_irqs,
+       .teardown_msi_irqs = pnv_teardown_msi_irqs,
+#endif
+       .enable_device_hook = pnv_pci_enable_device_hook,
+       .window_alignment = pnv_pci_window_alignment,
+       .reset_secondary_bus = pnv_pci_reset_secondary_bus,
+       .dma_set_mask = pnv_npu_dma_set_mask,
+       .shutdown = pnv_pci_ioda_shutdown,
+};
+
 static void __init pnv_pci_init_ioda_phb(struct device_node *np,
                                         u64 hub_id, int ioda_type)
 {
@@ -3101,6 +3260,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
                phb->model = PNV_PHB_MODEL_P7IOC;
        else if (of_device_is_compatible(np, "ibm,power8-pciex"))
                phb->model = PNV_PHB_MODEL_PHB3;
+       else if (of_device_is_compatible(np, "ibm,power8-npu-pciex"))
+               phb->model = PNV_PHB_MODEL_NPU;
        else
                phb->model = PNV_PHB_MODEL_UNKNOWN;
 
@@ -3201,7 +3362,11 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
         * the child P2P bridges) can form individual PE.
         */
        ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
-       hose->controller_ops = pnv_pci_ioda_controller_ops;
+
+       if (phb->type == PNV_PHB_NPU)
+               hose->controller_ops = pnv_npu_ioda_controller_ops;
+       else
+               hose->controller_ops = pnv_pci_ioda_controller_ops;
 
 #ifdef CONFIG_PCI_IOV
        ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
@@ -3236,6 +3401,11 @@ void __init pnv_pci_init_ioda2_phb(struct device_node *np)
        pnv_pci_init_ioda_phb(np, 0, PNV_PHB_IODA2);
 }
 
+void __init pnv_pci_init_npu_phb(struct device_node *np)
+{
+       pnv_pci_init_ioda_phb(np, 0, PNV_PHB_NPU);
+}
+
 void __init pnv_pci_init_ioda_hub(struct device_node *np)
 {
        struct device_node *phbn;
index f2dd7723424034a0626c28ca2cc9762f79123984..2f55c86df703554bfd9541a44f8ba26bec072729 100644 (file)
@@ -1,8 +1,6 @@
 /*
  * Support PCI/PCIe on PowerNV platforms
  *
- * Currently supports only P5IOC2
- *
  * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
  *
  * This program is free software; you can redistribute it and/or
@@ -807,6 +805,10 @@ void __init pnv_pci_init(void)
        for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
                pnv_pci_init_ioda2_phb(np);
 
+       /* Look for NPU PHBs */
+       for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
+               pnv_pci_init_npu_phb(np);
+
        /* Setup the linkage between OF nodes and PHBs */
        pci_devs_phb_init();
 
index c8ff50e907662c0407477e9ff79f7531e6d6b6b2..7f56313e8d7223dfd9f22b927c807b20516cf941 100644 (file)
@@ -7,6 +7,7 @@ enum pnv_phb_type {
        PNV_PHB_P5IOC2  = 0,
        PNV_PHB_IODA1   = 1,
        PNV_PHB_IODA2   = 2,
+       PNV_PHB_NPU     = 3,
 };
 
 /* Precise PHB model for error management */
@@ -15,6 +16,7 @@ enum pnv_phb_model {
        PNV_PHB_MODEL_P5IOC2,
        PNV_PHB_MODEL_P7IOC,
        PNV_PHB_MODEL_PHB3,
+       PNV_PHB_MODEL_NPU,
 };
 
 #define PNV_PCI_DIAG_BUF_SIZE  8192
@@ -24,6 +26,7 @@ enum pnv_phb_model {
 #define PNV_IODA_PE_MASTER     (1 << 3)        /* Master PE in compound case   */
 #define PNV_IODA_PE_SLAVE      (1 << 4)        /* Slave PE in compound case    */
 #define PNV_IODA_PE_VF         (1 << 5)        /* PE for one VF                */
+#define PNV_IODA_PE_PEER       (1 << 6)        /* PE has peers                 */
 
 /* Data associated with a PE, including IOMMU tracking etc.. */
 struct pnv_phb;
@@ -31,6 +34,9 @@ struct pnv_ioda_pe {
        unsigned long           flags;
        struct pnv_phb          *phb;
 
+#define PNV_IODA_MAX_PEER_PES  8
+       struct pnv_ioda_pe      *peers[PNV_IODA_MAX_PEER_PES];
+
        /* A PE can be associated with a single device or an
         * entire bus (& children). In the former case, pdev
         * is populated, in the later case, pbus is.
@@ -229,6 +235,7 @@ extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
 extern void pnv_pci_init_p5ioc2_hub(struct device_node *np);
 extern void pnv_pci_init_ioda_hub(struct device_node *np);
 extern void pnv_pci_init_ioda2_phb(struct device_node *np);
+extern void pnv_pci_init_npu_phb(struct device_node *np);
 extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
                                        __be64 *startp, __be64 *endp, bool rm);
 extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
@@ -238,4 +245,16 @@ extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
 extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
 extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
 
+/* Nvlink functions */
+extern void pnv_npu_tce_invalidate_entire(struct pnv_ioda_pe *npe);
+extern void pnv_npu_tce_invalidate(struct pnv_ioda_pe *npe,
+                                      struct iommu_table *tbl,
+                                      unsigned long index,
+                                      unsigned long npages,
+                                      bool rm);
+extern void pnv_npu_init_dma_pe(struct pnv_ioda_pe *npe);
+extern void pnv_npu_setup_dma_pe(struct pnv_ioda_pe *npe);
+extern int pnv_npu_dma_set_bypass(struct pnv_ioda_pe *npe, bool enabled);
+extern int pnv_npu_dma_set_mask(struct pci_dev *npdev, u64 dma_mask);
+
 #endif /* __POWERNV_PCI_H */
index a9a8fa37a555f5e9b6e3ed835265bc6426b7d85f..1acb0c72d9231d980080af3acbae7120b720c67c 100644 (file)
@@ -90,12 +90,8 @@ static void pnv_show_cpuinfo(struct seq_file *m)
        if (root)
                model = of_get_property(root, "model", NULL);
        seq_printf(m, "machine\t\t: PowerNV %s\n", model);
-       if (firmware_has_feature(FW_FEATURE_OPALv3))
-               seq_printf(m, "firmware\t: OPAL v3\n");
-       else if (firmware_has_feature(FW_FEATURE_OPALv2))
-               seq_printf(m, "firmware\t: OPAL v2\n");
-       else if (firmware_has_feature(FW_FEATURE_OPAL))
-               seq_printf(m, "firmware\t: OPAL v1\n");
+       if (firmware_has_feature(FW_FEATURE_OPAL))
+               seq_printf(m, "firmware\t: OPAL\n");
        else
                seq_printf(m, "firmware\t: BML\n");
        of_node_put(root);
@@ -224,9 +220,9 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
 {
        xics_kexec_teardown_cpu(secondary);
 
-       /* On OPAL v3, we return all CPUs to firmware */
+       /* On OPAL, we return all CPUs to firmware */
 
-       if (!firmware_has_feature(FW_FEATURE_OPALv3))
+       if (!firmware_has_feature(FW_FEATURE_OPAL))
                return;
 
        if (secondary) {
index ca264833ee64d5c7a55035c66c21454d7b4d0b14..ad7b1a3dbed0970cd7c5c474d5f78eb90565822e 100644 (file)
@@ -61,14 +61,15 @@ static int pnv_smp_kick_cpu(int nr)
        unsigned long start_here =
                        __pa(ppc_function_entry(generic_secondary_smp_init));
        long rc;
+       uint8_t status;
 
        BUG_ON(nr < 0 || nr >= NR_CPUS);
 
        /*
-        * If we already started or OPALv2 is not supported, we just
+        * If we already started or OPAL is not supported, we just
         * kick the CPU via the PACA
         */
-       if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2))
+       if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPAL))
                goto kick;
 
        /*
@@ -77,55 +78,42 @@ static int pnv_smp_kick_cpu(int nr)
         * first time. OPAL v3 allows us to query OPAL to know if it
         * has the CPUs, so we do that
         */
-       if (firmware_has_feature(FW_FEATURE_OPALv3)) {
-               uint8_t status;
-
-               rc = opal_query_cpu_status(pcpu, &status);
-               if (rc != OPAL_SUCCESS) {
-                       pr_warn("OPAL Error %ld querying CPU %d state\n",
-                               rc, nr);
-                       return -ENODEV;
-               }
+       rc = opal_query_cpu_status(pcpu, &status);
+       if (rc != OPAL_SUCCESS) {
+               pr_warn("OPAL Error %ld querying CPU %d state\n", rc, nr);
+               return -ENODEV;
+       }
 
-               /*
-                * Already started, just kick it, probably coming from
-                * kexec and spinning
-                */
-               if (status == OPAL_THREAD_STARTED)
-                       goto kick;
+       /*
+        * Already started, just kick it, probably coming from
+        * kexec and spinning
+        */
+       if (status == OPAL_THREAD_STARTED)
+               goto kick;
 
-               /*
-                * Available/inactive, let's kick it
-                */
-               if (status == OPAL_THREAD_INACTIVE) {
-                       pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n",
-                                nr, pcpu);
-                       rc = opal_start_cpu(pcpu, start_here);
-                       if (rc != OPAL_SUCCESS) {
-                               pr_warn("OPAL Error %ld starting CPU %d\n",
-                                       rc, nr);
-                               return -ENODEV;
-                       }
-               } else {
-                       /*
-                        * An unavailable CPU (or any other unknown status)
-                        * shouldn't be started. It should also
-                        * not be in the possible map but currently it can
-                        * happen
-                        */
-                       pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
-                                " (status %d)...\n", nr, pcpu, status);
+       /*
+        * Available/inactive, let's kick it
+        */
+       if (status == OPAL_THREAD_INACTIVE) {
+               pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
+               rc = opal_start_cpu(pcpu, start_here);
+               if (rc != OPAL_SUCCESS) {
+                       pr_warn("OPAL Error %ld starting CPU %d\n", rc, nr);
                        return -ENODEV;
                }
        } else {
                /*
-                * On OPAL v2, we just kick it and hope for the best,
-                * we must not test the error from opal_start_cpu() or
-                * we would fail to get CPUs from kexec.
+                * An unavailable CPU (or any other unknown status)
+                * shouldn't be started. It should also
+                * not be in the possible map but currently it can
+                * happen
                 */
-               opal_start_cpu(pcpu, start_here);
+               pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
+                        " (status %d)...\n", nr, pcpu, status);
+               return -ENODEV;
        }
- kick:
+
+kick:
        return smp_generic_kick_cpu(nr);
 }
 
index f244dcb4f2cf01c35e1f1025a01fbaaf866de7b6..2b93ae8d557a321edb5d22be7f5dc9a1cfda10a9 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/of.h>
 
 #include "of_helpers.h"
-#include "offline_states.h"
 #include "pseries.h"
 
 #include <asm/prom.h>
@@ -338,185 +337,6 @@ int dlpar_release_drc(u32 drc_index)
        return 0;
 }
 
-#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
-
-static int dlpar_online_cpu(struct device_node *dn)
-{
-       int rc = 0;
-       unsigned int cpu;
-       int len, nthreads, i;
-       const __be32 *intserv;
-       u32 thread;
-
-       intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
-       if (!intserv)
-               return -EINVAL;
-
-       nthreads = len / sizeof(u32);
-
-       cpu_maps_update_begin();
-       for (i = 0; i < nthreads; i++) {
-               thread = be32_to_cpu(intserv[i]);
-               for_each_present_cpu(cpu) {
-                       if (get_hard_smp_processor_id(cpu) != thread)
-                               continue;
-                       BUG_ON(get_cpu_current_state(cpu)
-                                       != CPU_STATE_OFFLINE);
-                       cpu_maps_update_done();
-                       rc = device_online(get_cpu_device(cpu));
-                       if (rc)
-                               goto out;
-                       cpu_maps_update_begin();
-
-                       break;
-               }
-               if (cpu == num_possible_cpus())
-                       printk(KERN_WARNING "Could not find cpu to online "
-                              "with physical id 0x%x\n", thread);
-       }
-       cpu_maps_update_done();
-
-out:
-       return rc;
-
-}
-
-static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
-{
-       struct device_node *dn, *parent;
-       u32 drc_index;
-       int rc;
-
-       rc = kstrtou32(buf, 0, &drc_index);
-       if (rc)
-               return -EINVAL;
-
-       rc = dlpar_acquire_drc(drc_index);
-       if (rc)
-               return -EINVAL;
-
-       parent = of_find_node_by_path("/cpus");
-       if (!parent)
-               return -ENODEV;
-
-       dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
-       of_node_put(parent);
-       if (!dn) {
-               dlpar_release_drc(drc_index);
-               return -EINVAL;
-       }
-
-       rc = dlpar_attach_node(dn);
-       if (rc) {
-               dlpar_release_drc(drc_index);
-               dlpar_free_cc_nodes(dn);
-               return rc;
-       }
-
-       rc = dlpar_online_cpu(dn);
-       if (rc)
-               return rc;
-
-       return count;
-}
-
-static int dlpar_offline_cpu(struct device_node *dn)
-{
-       int rc = 0;
-       unsigned int cpu;
-       int len, nthreads, i;
-       const __be32 *intserv;
-       u32 thread;
-
-       intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
-       if (!intserv)
-               return -EINVAL;
-
-       nthreads = len / sizeof(u32);
-
-       cpu_maps_update_begin();
-       for (i = 0; i < nthreads; i++) {
-               thread = be32_to_cpu(intserv[i]);
-               for_each_present_cpu(cpu) {
-                       if (get_hard_smp_processor_id(cpu) != thread)
-                               continue;
-
-                       if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
-                               break;
-
-                       if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
-                               set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
-                               cpu_maps_update_done();
-                               rc = device_offline(get_cpu_device(cpu));
-                               if (rc)
-                                       goto out;
-                               cpu_maps_update_begin();
-                               break;
-
-                       }
-
-                       /*
-                        * The cpu is in CPU_STATE_INACTIVE.
-                        * Upgrade it's state to CPU_STATE_OFFLINE.
-                        */
-                       set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
-                       BUG_ON(plpar_hcall_norets(H_PROD, thread)
-                                                               != H_SUCCESS);
-                       __cpu_die(cpu);
-                       break;
-               }
-               if (cpu == num_possible_cpus())
-                       printk(KERN_WARNING "Could not find cpu to offline "
-                              "with physical id 0x%x\n", thread);
-       }
-       cpu_maps_update_done();
-
-out:
-       return rc;
-
-}
-
-static ssize_t dlpar_cpu_release(const char *buf, size_t count)
-{
-       struct device_node *dn;
-       u32 drc_index;
-       int rc;
-
-       dn = of_find_node_by_path(buf);
-       if (!dn)
-               return -EINVAL;
-
-       rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
-       if (rc) {
-               of_node_put(dn);
-               return -EINVAL;
-       }
-
-       rc = dlpar_offline_cpu(dn);
-       if (rc) {
-               of_node_put(dn);
-               return -EINVAL;
-       }
-
-       rc = dlpar_release_drc(drc_index);
-       if (rc) {
-               of_node_put(dn);
-               return rc;
-       }
-
-       rc = dlpar_detach_node(dn);
-       if (rc) {
-               dlpar_acquire_drc(drc_index);
-               return rc;
-       }
-
-       of_node_put(dn);
-
-       return count;
-}
-
-#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
-
 static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
 {
        int rc;
@@ -536,6 +356,9 @@ static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
        case PSERIES_HP_ELOG_RESOURCE_MEM:
                rc = dlpar_memory(hp_elog);
                break;
+       case PSERIES_HP_ELOG_RESOURCE_CPU:
+               rc = dlpar_cpu(hp_elog);
+               break;
        default:
                pr_warn_ratelimited("Invalid resource (%d) specified\n",
                                    hp_elog->resource);
@@ -565,6 +388,9 @@ static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
        if (!strncmp(arg, "memory", 6)) {
                hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
                arg += strlen("memory ");
+       } else if (!strncmp(arg, "cpu", 3)) {
+               hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_CPU;
+               arg += strlen("cpu ");
        } else {
                pr_err("Invalid resource specified: \"%s\"\n", buf);
                rc = -EINVAL;
@@ -624,16 +450,7 @@ static CLASS_ATTR(dlpar, S_IWUSR, NULL, dlpar_store);
 
 static int __init pseries_dlpar_init(void)
 {
-       int rc;
-
-#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
-       ppc_md.cpu_probe = dlpar_cpu_probe;
-       ppc_md.cpu_release = dlpar_cpu_release;
-#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
-
-       rc = sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
-
-       return rc;
+       return sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
 }
 machine_device_initcall(pseries, pseries_dlpar_init);
 
index 62475440fd453105b1df12c26dc8fad32a020b8f..32274f72fe3fc361baa3f44e71e84a683bb9dfe3 100644 (file)
  *      2 of the License, or (at your option) any later version.
  */
 
+#define pr_fmt(fmt)     "pseries-hotplug-cpu: " fmt
+
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/sched.h>       /* for idle_task_exit */
 #include <linux/cpu.h>
 #include <linux/of.h>
+#include <linux/slab.h>
 #include <asm/prom.h>
 #include <asm/rtas.h>
 #include <asm/firmware.h>
@@ -32,6 +35,7 @@
 #include <asm/xics.h>
 #include <asm/plpar_wrappers.h>
 
+#include "pseries.h"
 #include "offline_states.h"
 
 /* This version can't take the spinlock, because it never returns */
@@ -88,13 +92,7 @@ void set_default_offline_state(int cpu)
 
 static void rtas_stop_self(void)
 {
-       static struct rtas_args args = {
-               .nargs = 0,
-               .nret = cpu_to_be32(1),
-               .rets = &args.args[0],
-       };
-
-       args.token = cpu_to_be32(rtas_stop_self_token);
+       static struct rtas_args args;
 
        local_irq_disable();
 
@@ -102,7 +100,8 @@ static void rtas_stop_self(void)
 
        printk("cpu %u (hwid %u) Ready to die...\n",
               smp_processor_id(), hard_smp_processor_id());
-       enter_rtas(__pa(&args));
+
+       rtas_call_unlocked(&args, rtas_stop_self_token, 0, 1, NULL);
 
        panic("Alas, I survived.\n");
 }
@@ -339,6 +338,536 @@ static void pseries_remove_processor(struct device_node *np)
        cpu_maps_update_done();
 }
 
+static int dlpar_online_cpu(struct device_node *dn)
+{
+       int rc = 0;
+       unsigned int cpu;
+       int len, nthreads, i;
+       const __be32 *intserv;
+       u32 thread;
+
+       intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
+       if (!intserv)
+               return -EINVAL;
+
+       nthreads = len / sizeof(u32);
+
+       cpu_maps_update_begin();
+       for (i = 0; i < nthreads; i++) {
+               thread = be32_to_cpu(intserv[i]);
+               for_each_present_cpu(cpu) {
+                       if (get_hard_smp_processor_id(cpu) != thread)
+                               continue;
+                       BUG_ON(get_cpu_current_state(cpu)
+                                       != CPU_STATE_OFFLINE);
+                       cpu_maps_update_done();
+                       rc = device_online(get_cpu_device(cpu));
+                       if (rc)
+                               goto out;
+                       cpu_maps_update_begin();
+
+                       break;
+               }
+               if (cpu == num_possible_cpus())
+                       printk(KERN_WARNING "Could not find cpu to online "
+                              "with physical id 0x%x\n", thread);
+       }
+       cpu_maps_update_done();
+
+out:
+       return rc;
+
+}
+
+static bool dlpar_cpu_exists(struct device_node *parent, u32 drc_index)
+{
+       struct device_node *child = NULL;
+       u32 my_drc_index;
+       bool found;
+       int rc;
+
+       /* Assume cpu doesn't exist */
+       found = false;
+
+       for_each_child_of_node(parent, child) {
+               rc = of_property_read_u32(child, "ibm,my-drc-index",
+                                         &my_drc_index);
+               if (rc)
+                       continue;
+
+               if (my_drc_index == drc_index) {
+                       of_node_put(child);
+                       found = true;
+                       break;
+               }
+       }
+
+       return found;
+}
+
+static bool valid_cpu_drc_index(struct device_node *parent, u32 drc_index)
+{
+       bool found = false;
+       int rc, index;
+
+       index = 0;
+       while (!found) {
+               u32 drc;
+
+               rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
+                                               index++, &drc);
+               if (rc)
+                       break;
+
+               if (drc == drc_index)
+                       found = true;
+       }
+
+       return found;
+}
+
+static ssize_t dlpar_cpu_add(u32 drc_index)
+{
+       struct device_node *dn, *parent;
+       int rc, saved_rc;
+
+       pr_debug("Attempting to add CPU, drc index: %x\n", drc_index);
+
+       parent = of_find_node_by_path("/cpus");
+       if (!parent) {
+               pr_warn("Failed to find CPU root node \"/cpus\"\n");
+               return -ENODEV;
+       }
+
+       if (dlpar_cpu_exists(parent, drc_index)) {
+               of_node_put(parent);
+               pr_warn("CPU with drc index %x already exists\n", drc_index);
+               return -EINVAL;
+       }
+
+       if (!valid_cpu_drc_index(parent, drc_index)) {
+               of_node_put(parent);
+               pr_warn("Cannot find CPU (drc index %x) to add.\n", drc_index);
+               return -EINVAL;
+       }
+
+       rc = dlpar_acquire_drc(drc_index);
+       if (rc) {
+               pr_warn("Failed to acquire DRC, rc: %d, drc index: %x\n",
+                       rc, drc_index);
+               of_node_put(parent);
+               return -EINVAL;
+       }
+
+       dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
+       of_node_put(parent);
+       if (!dn) {
+               pr_warn("Failed call to configure-connector, drc index: %x\n",
+                       drc_index);
+               dlpar_release_drc(drc_index);
+               return -EINVAL;
+       }
+
+       rc = dlpar_attach_node(dn);
+       if (rc) {
+               saved_rc = rc;
+               pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n",
+                       dn->name, rc, drc_index);
+
+               rc = dlpar_release_drc(drc_index);
+               if (!rc)
+                       dlpar_free_cc_nodes(dn);
+
+               return saved_rc;
+       }
+
+       rc = dlpar_online_cpu(dn);
+       if (rc) {
+               saved_rc = rc;
+               pr_warn("Failed to online cpu %s, rc: %d, drc index: %x\n",
+                       dn->name, rc, drc_index);
+
+               rc = dlpar_detach_node(dn);
+               if (!rc)
+                       dlpar_release_drc(drc_index);
+
+               return saved_rc;
+       }
+
+       pr_debug("Successfully added CPU %s, drc index: %x\n", dn->name,
+                drc_index);
+       return rc;
+}
+
+static int dlpar_offline_cpu(struct device_node *dn)
+{
+       int rc = 0;
+       unsigned int cpu;
+       int len, nthreads, i;
+       const __be32 *intserv;
+       u32 thread;
+
+       intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s", &len);
+       if (!intserv)
+               return -EINVAL;
+
+       nthreads = len / sizeof(u32);
+
+       cpu_maps_update_begin();
+       for (i = 0; i < nthreads; i++) {
+               thread = be32_to_cpu(intserv[i]);
+               for_each_present_cpu(cpu) {
+                       if (get_hard_smp_processor_id(cpu) != thread)
+                               continue;
+
+                       if (get_cpu_current_state(cpu) == CPU_STATE_OFFLINE)
+                               break;
+
+                       if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
+                               set_preferred_offline_state(cpu,
+                                                           CPU_STATE_OFFLINE);
+                               cpu_maps_update_done();
+                               rc = device_offline(get_cpu_device(cpu));
+                               if (rc)
+                                       goto out;
+                               cpu_maps_update_begin();
+                               break;
+
+                       }
+
+                       /*
+                        * The cpu is in CPU_STATE_INACTIVE.
+                        * Upgrade its state to CPU_STATE_OFFLINE.
+                        */
+                       set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
+                       BUG_ON(plpar_hcall_norets(H_PROD, thread)
+                                                               != H_SUCCESS);
+                       __cpu_die(cpu);
+                       break;
+               }
+               if (cpu == num_possible_cpus())
+                       printk(KERN_WARNING "Could not find cpu to offline with physical id 0x%x\n", thread);
+       }
+       cpu_maps_update_done();
+
+out:
+       return rc;
+
+}
+
+static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
+{
+       int rc;
+
+       pr_debug("Attemping to remove CPU %s, drc index: %x\n",
+                dn->name, drc_index);
+
+       rc = dlpar_offline_cpu(dn);
+       if (rc) {
+               pr_warn("Failed to offline CPU %s, rc: %d\n", dn->name, rc);
+               return -EINVAL;
+       }
+
+       rc = dlpar_release_drc(drc_index);
+       if (rc) {
+               pr_warn("Failed to release drc (%x) for CPU %s, rc: %d\n",
+                       drc_index, dn->name, rc);
+               dlpar_online_cpu(dn);
+               return rc;
+       }
+
+       rc = dlpar_detach_node(dn);
+       if (rc) {
+               int saved_rc = rc;
+
+               pr_warn("Failed to detach CPU %s, rc: %d", dn->name, rc);
+
+               rc = dlpar_acquire_drc(drc_index);
+               if (!rc)
+                       dlpar_online_cpu(dn);
+
+               return saved_rc;
+       }
+
+       pr_debug("Successfully removed CPU, drc index: %x\n", drc_index);
+       return 0;
+}
+
+static struct device_node *cpu_drc_index_to_dn(u32 drc_index)
+{
+       struct device_node *dn;
+       u32 my_index;
+       int rc;
+
+       for_each_node_by_type(dn, "cpu") {
+               rc = of_property_read_u32(dn, "ibm,my-drc-index", &my_index);
+               if (rc)
+                       continue;
+
+               if (my_index == drc_index)
+                       break;
+       }
+
+       return dn;
+}
+
+static int dlpar_cpu_remove_by_index(u32 drc_index)
+{
+       struct device_node *dn;
+       int rc;
+
+       dn = cpu_drc_index_to_dn(drc_index);
+       if (!dn) {
+               pr_warn("Cannot find CPU (drc index %x) to remove\n",
+                       drc_index);
+               return -ENODEV;
+       }
+
+       rc = dlpar_cpu_remove(dn, drc_index);
+       of_node_put(dn);
+       return rc;
+}
+
+static int find_dlpar_cpus_to_remove(u32 *cpu_drcs, int cpus_to_remove)
+{
+       struct device_node *dn;
+       int cpus_found = 0;
+       int rc;
+
+       /* We want to find cpus_to_remove + 1 CPUs to ensure we do not
+        * remove the last CPU.
+        */
+       for_each_node_by_type(dn, "cpu") {
+               cpus_found++;
+
+               if (cpus_found > cpus_to_remove) {
+                       of_node_put(dn);
+                       break;
+               }
+
+               /* Note that cpus_found is always 1 ahead of the index
+                * into the cpu_drcs array, so we use cpus_found - 1
+                */
+               rc = of_property_read_u32(dn, "ibm,my-drc-index",
+                                         &cpu_drcs[cpus_found - 1]);
+               if (rc) {
+                       pr_warn("Error occurred getting drc-index for %s\n",
+                               dn->name);
+                       of_node_put(dn);
+                       return -1;
+               }
+       }
+
+       if (cpus_found < cpus_to_remove) {
+               pr_warn("Failed to find enough CPUs (%d of %d) to remove\n",
+                       cpus_found, cpus_to_remove);
+       } else if (cpus_found == cpus_to_remove) {
+               pr_warn("Cannot remove all CPUs\n");
+       }
+
+       return cpus_found;
+}
+
+static int dlpar_cpu_remove_by_count(u32 cpus_to_remove)
+{
+       u32 *cpu_drcs;
+       int cpus_found;
+       int cpus_removed = 0;
+       int i, rc;
+
+       pr_debug("Attempting to hot-remove %d CPUs\n", cpus_to_remove);
+
+       cpu_drcs = kcalloc(cpus_to_remove, sizeof(*cpu_drcs), GFP_KERNEL);
+       if (!cpu_drcs)
+               return -EINVAL;
+
+       cpus_found = find_dlpar_cpus_to_remove(cpu_drcs, cpus_to_remove);
+       if (cpus_found <= cpus_to_remove) {
+               kfree(cpu_drcs);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < cpus_to_remove; i++) {
+               rc = dlpar_cpu_remove_by_index(cpu_drcs[i]);
+               if (rc)
+                       break;
+
+               cpus_removed++;
+       }
+
+       if (cpus_removed != cpus_to_remove) {
+               pr_warn("CPU hot-remove failed, adding back removed CPUs\n");
+
+               for (i = 0; i < cpus_removed; i++)
+                       dlpar_cpu_add(cpu_drcs[i]);
+
+               rc = -EINVAL;
+       } else {
+               rc = 0;
+       }
+
+       kfree(cpu_drcs);
+       return rc;
+}
+
+static int find_dlpar_cpus_to_add(u32 *cpu_drcs, u32 cpus_to_add)
+{
+       struct device_node *parent;
+       int cpus_found = 0;
+       int index, rc;
+
+       parent = of_find_node_by_path("/cpus");
+       if (!parent) {
+               pr_warn("Could not find CPU root node in device tree\n");
+               kfree(cpu_drcs);
+               return -1;
+       }
+
+       /* Search the ibm,drc-indexes array for possible CPU drcs to
+        * add. Note that the format of the ibm,drc-indexes array is
+        * the number of entries in the array followed by the array
+        * of drc values so we start looking at index = 1.
+        */
+       index = 1;
+       while (cpus_found < cpus_to_add) {
+               u32 drc;
+
+               rc = of_property_read_u32_index(parent, "ibm,drc-indexes",
+                                               index++, &drc);
+               if (rc)
+                       break;
+
+               if (dlpar_cpu_exists(parent, drc))
+                       continue;
+
+               cpu_drcs[cpus_found++] = drc;
+       }
+
+       of_node_put(parent);
+       return cpus_found;
+}
+
+static int dlpar_cpu_add_by_count(u32 cpus_to_add)
+{
+       u32 *cpu_drcs;
+       int cpus_added = 0;
+       int cpus_found;
+       int i, rc;
+
+       pr_debug("Attempting to hot-add %d CPUs\n", cpus_to_add);
+
+       cpu_drcs = kcalloc(cpus_to_add, sizeof(*cpu_drcs), GFP_KERNEL);
+       if (!cpu_drcs)
+               return -EINVAL;
+
+       cpus_found = find_dlpar_cpus_to_add(cpu_drcs, cpus_to_add);
+       if (cpus_found < cpus_to_add) {
+               pr_warn("Failed to find enough CPUs (%d of %d) to add\n",
+                       cpus_found, cpus_to_add);
+               kfree(cpu_drcs);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < cpus_to_add; i++) {
+               rc = dlpar_cpu_add(cpu_drcs[i]);
+               if (rc)
+                       break;
+
+               cpus_added++;
+       }
+
+       if (cpus_added < cpus_to_add) {
+               pr_warn("CPU hot-add failed, removing any added CPUs\n");
+
+               for (i = 0; i < cpus_added; i++)
+                       dlpar_cpu_remove_by_index(cpu_drcs[i]);
+
+               rc = -EINVAL;
+       } else {
+               rc = 0;
+       }
+
+       kfree(cpu_drcs);
+       return rc;
+}
+
+int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
+{
+       u32 count, drc_index;
+       int rc;
+
+       count = hp_elog->_drc_u.drc_count;
+       drc_index = hp_elog->_drc_u.drc_index;
+
+       lock_device_hotplug();
+
+       switch (hp_elog->action) {
+       case PSERIES_HP_ELOG_ACTION_REMOVE:
+               if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
+                       rc = dlpar_cpu_remove_by_count(count);
+               else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
+                       rc = dlpar_cpu_remove_by_index(drc_index);
+               else
+                       rc = -EINVAL;
+               break;
+       case PSERIES_HP_ELOG_ACTION_ADD:
+               if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
+                       rc = dlpar_cpu_add_by_count(count);
+               else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
+                       rc = dlpar_cpu_add(drc_index);
+               else
+                       rc = -EINVAL;
+               break;
+       default:
+               pr_err("Invalid action (%d) specified\n", hp_elog->action);
+               rc = -EINVAL;
+               break;
+       }
+
+       unlock_device_hotplug();
+       return rc;
+}
+
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+
+static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
+{
+       u32 drc_index;
+       int rc;
+
+       rc = kstrtou32(buf, 0, &drc_index);
+       if (rc)
+               return -EINVAL;
+
+       rc = dlpar_cpu_add(drc_index);
+
+       return rc ? rc : count;
+}
+
+static ssize_t dlpar_cpu_release(const char *buf, size_t count)
+{
+       struct device_node *dn;
+       u32 drc_index;
+       int rc;
+
+       dn = of_find_node_by_path(buf);
+       if (!dn)
+               return -EINVAL;
+
+       rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
+       if (rc) {
+               of_node_put(dn);
+               return -EINVAL;
+       }
+
+       rc = dlpar_cpu_remove(dn, drc_index);
+       of_node_put(dn);
+
+       return rc ? rc : count;
+}
+
+#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
+
 static int pseries_smp_notifier(struct notifier_block *nb,
                                unsigned long action, void *data)
 {
@@ -385,6 +914,11 @@ static int __init pseries_cpu_hotplug_init(void)
        int cpu;
        int qcss_tok;
 
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+       ppc_md.cpu_probe = dlpar_cpu_probe;
+       ppc_md.cpu_release = dlpar_cpu_release;
+#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
+
        for_each_node_by_name(np, "interrupt-controller") {
                typep = of_get_property(np, "compatible", NULL);
                if (strstr(typep, "open-pic")) {
index b7a67e3d2201e4d5988e9cb3df651651c12024c8..477290ad855e50a5f0c5da5065ba4b174b77fcfa 100644 (file)
@@ -315,48 +315,48 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
        return 0;
 }
 
-static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot)
+static long __pSeries_lpar_hpte_find(unsigned long want_v, unsigned long hpte_group)
 {
-       unsigned long dword0;
-       unsigned long lpar_rc;
-       unsigned long dummy_word1;
-       unsigned long flags;
+       long lpar_rc;
+       unsigned long i, j;
+       struct {
+               unsigned long pteh;
+               unsigned long ptel;
+       } ptes[4];
 
-       /* Read 1 pte at a time                        */
-       /* Do not need RPN to logical page translation */
-       /* No cross CEC PFT access                     */
-       flags = 0;
+       for (i = 0; i < HPTES_PER_GROUP; i += 4, hpte_group += 4) {
 
-       lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1);
+               lpar_rc = plpar_pte_read_4(0, hpte_group, (void *)ptes);
+               if (lpar_rc != H_SUCCESS)
+                       continue;
 
-       BUG_ON(lpar_rc != H_SUCCESS);
+               for (j = 0; j < 4; j++) {
+                       if (HPTE_V_COMPARE(ptes[j].pteh, want_v) &&
+                           (ptes[j].pteh & HPTE_V_VALID))
+                               return i + j;
+               }
+       }
 
-       return dword0;
+       return -1;
 }
 
 static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize)
 {
-       unsigned long hash;
-       unsigned long i;
        long slot;
-       unsigned long want_v, hpte_v;
+       unsigned long hash;
+       unsigned long want_v;
+       unsigned long hpte_group;
 
        hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
        want_v = hpte_encode_avpn(vpn, psize, ssize);
 
        /* Bolted entries are always in the primary group */
-       slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
-       for (i = 0; i < HPTES_PER_GROUP; i++) {
-               hpte_v = pSeries_lpar_hpte_getword0(slot);
-
-               if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
-                       /* HPTE matches */
-                       return slot;
-               ++slot;
-       }
-
-       return -1;
-} 
+       hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+       slot = __pSeries_lpar_hpte_find(want_v, hpte_group);
+       if (slot < 0)
+               return -1;
+       return hpte_group + slot;
+}
 
 static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp,
                                             unsigned long ea,
@@ -396,6 +396,7 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
        BUG_ON(lpar_rc != H_SUCCESS);
 }
 
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /*
  * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
  * to make sure that we avoid bouncing the hypervisor tlbie lock.
@@ -494,6 +495,15 @@ static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
                __pSeries_lpar_hugepage_invalidate(slot_array, vpn_array,
                                                   index, psize, ssize);
 }
+#else
+static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
+                                            unsigned long addr,
+                                            unsigned char *hpte_slot_array,
+                                            int psize, int ssize, int local)
+{
+       WARN(1, "%s called without THP support\n", __func__);
+}
+#endif
 
 static void pSeries_lpar_hpte_removebolted(unsigned long ea,
                                           int psize, int ssize)
index 8411c27293e447c3e7b60871180319d1c124d0f6..7aa83f00ac620d2554a04363f3dc2500c09ad211 100644 (file)
@@ -73,6 +73,15 @@ static inline int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
 }
 #endif
 
+#ifdef CONFIG_HOTPLUG_CPU
+int dlpar_cpu(struct pseries_hp_errorlog *hp_elog);
+#else
+static inline int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
+{
+       return -EOPNOTSUPP;
+}
+#endif
+
 /* PCI root bridge prepare function override for pseries */
 struct pci_host_bridge;
 int pseries_root_bridge_prepare(struct pci_host_bridge *bridge);
index 3b6647e574b6d0f3fbabd7780f176fbcfef7f4f5..9a3e27b863ce518451f7eb0ecd33b4f15760bb57 100644 (file)
@@ -40,6 +40,9 @@ static int ras_check_exception_token;
 #define EPOW_SENSOR_TOKEN      9
 #define EPOW_SENSOR_INDEX      0
 
+/* EPOW events counter variable */
+static int num_epow_events;
+
 static irqreturn_t ras_epow_interrupt(int irq, void *dev_id);
 static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
 
@@ -82,32 +85,30 @@ static void handle_system_shutdown(char event_modifier)
 {
        switch (event_modifier) {
        case EPOW_SHUTDOWN_NORMAL:
-               pr_emerg("Firmware initiated power off");
+               pr_emerg("Power off requested\n");
                orderly_poweroff(true);
                break;
 
        case EPOW_SHUTDOWN_ON_UPS:
-               pr_emerg("Loss of power reported by firmware, system is "
-                       "running on UPS/battery");
-               pr_emerg("Check RTAS error log for details");
+               pr_emerg("Loss of system power detected. System is running on"
+                        " UPS/battery. Check RTAS error log for details\n");
                orderly_poweroff(true);
                break;
 
        case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
-               pr_emerg("Loss of system critical functions reported by "
-                       "firmware");
-               pr_emerg("Check RTAS error log for details");
+               pr_emerg("Loss of system critical functions detected. Check"
+                        " RTAS error log for details\n");
                orderly_poweroff(true);
                break;
 
        case EPOW_SHUTDOWN_AMBIENT_TEMPERATURE_TOO_HIGH:
-               pr_emerg("Ambient temperature too high reported by firmware");
-               pr_emerg("Check RTAS error log for details");
+               pr_emerg("High ambient temperature detected. Check RTAS"
+                        " error log for details\n");
                orderly_poweroff(true);
                break;
 
        default:
-               pr_err("Unknown power/cooling shutdown event (modifier %d)",
+               pr_err("Unknown power/cooling shutdown event (modifier = %d)\n",
                        event_modifier);
        }
 }
@@ -145,17 +146,20 @@ static void rtas_parse_epow_errlog(struct rtas_error_log *log)
 
        switch (action_code) {
        case EPOW_RESET:
-               pr_err("Non critical power or cooling issue cleared");
+               if (num_epow_events) {
+                       pr_info("Non critical power/cooling issue cleared\n");
+                       num_epow_events--;
+               }
                break;
 
        case EPOW_WARN_COOLING:
-               pr_err("Non critical cooling issue reported by firmware");
-               pr_err("Check RTAS error log for details");
+               pr_info("Non-critical cooling issue detected. Check RTAS error"
+                       " log for details\n");
                break;
 
        case EPOW_WARN_POWER:
-               pr_err("Non critical power issue reported by firmware");
-               pr_err("Check RTAS error log for details");
+               pr_info("Non-critical power issue detected. Check RTAS error"
+                       " log for details\n");
                break;
 
        case EPOW_SYSTEM_SHUTDOWN:
@@ -163,23 +167,27 @@ static void rtas_parse_epow_errlog(struct rtas_error_log *log)
                break;
 
        case EPOW_SYSTEM_HALT:
-               pr_emerg("Firmware initiated power off");
+               pr_emerg("Critical power/cooling issue detected. Check RTAS"
+                        " error log for details. Powering off.\n");
                orderly_poweroff(true);
                break;
 
        case EPOW_MAIN_ENCLOSURE:
        case EPOW_POWER_OFF:
-               pr_emerg("Critical power/cooling issue reported by firmware");
-               pr_emerg("Check RTAS error log for details");
-               pr_emerg("Immediate power off");
+               pr_emerg("System about to lose power. Check RTAS error log "
+                        " for details. Powering off immediately.\n");
                emergency_sync();
                kernel_power_off();
                break;
 
        default:
-               pr_err("Unknown power/cooling event (action code %d)",
+               pr_err("Unknown power/cooling event (action code  = %d)\n",
                        action_code);
        }
+
+       /* Increment epow events counter variable */
+       if (action_code != EPOW_RESET)
+               num_epow_events++;
 }
 
 /* Handle environmental and power warning (EPOW) interrupts. */
@@ -249,13 +257,12 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
        log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, fatal);
 
        if (fatal) {
-               pr_emerg("Fatal hardware error reported by firmware");
-               pr_emerg("Check RTAS error log for details");
-               pr_emerg("Immediate power off");
+               pr_emerg("Fatal hardware error detected. Check RTAS error"
+                        " log for details. Powering off immediately\n");
                emergency_sync();
                kernel_power_off();
        } else {
-               pr_err("Recoverable hardware error reported by firmware");
+               pr_err("Recoverable hardware error detected\n");
        }
 
        spin_unlock(&ras_log_buf_lock);
index 5b492a6438ffa8723ca9e78feaa17d92bff57da2..bd6bd729969c87b8faa213857c743217a1f4c2ad 100644 (file)
@@ -26,7 +26,6 @@ obj-$(CONFIG_FSL_85XX_CACHE_SRAM)     += fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o
 obj-$(CONFIG_SIMPLE_GPIO)      += simple_gpio.o
 obj-$(CONFIG_FSL_RIO)          += fsl_rio.o fsl_rmu.o
 obj-$(CONFIG_TSI108_BRIDGE)    += tsi108_pci.o tsi108_dev.o
-obj-$(CONFIG_QUICC_ENGINE)     += qe_lib/
 mv64x60-$(CONFIG_PCI)          += mv64x60_pci.o
 obj-$(CONFIG_MV64X60)          += $(mv64x60-y) mv64x60_pic.o mv64x60_dev.o \
                                   mv64x60_udbg.o
index 7a399b4d60a03a5a83d091113cfb257c45f92b39..c713b349d967946fe1b95e8ae9ff1ec18f7761bc 100644 (file)
@@ -313,6 +313,7 @@ static const struct of_device_id axon_ram_device_id[] = {
        },
        {}
 };
+MODULE_DEVICE_TABLE(of, axon_ram_device_id);
 
 static struct platform_driver axon_ram_driver = {
        .probe          = axon_ram_probe,
index e00a5ee58fd71f5af33d105b41bb2f2897b2b098..9d32465eddb13a172fdeb977c44e154658d86ef5 100644 (file)
@@ -27,8 +27,8 @@
 
 #include <asm/udbg.h>
 #include <asm/io.h>
-#include <asm/rheap.h>
 #include <asm/cpm.h>
+#include <soc/fsl/qe/qe.h>
 
 #include <mm/mmu_decl.h>
 
@@ -65,162 +65,6 @@ void __init udbg_init_cpm(void)
 }
 #endif
 
-static spinlock_t cpm_muram_lock;
-static rh_block_t cpm_boot_muram_rh_block[16];
-static rh_info_t cpm_muram_info;
-static u8 __iomem *muram_vbase;
-static phys_addr_t muram_pbase;
-
-/* Max address size we deal with */
-#define OF_MAX_ADDR_CELLS      4
-
-int cpm_muram_init(void)
-{
-       struct device_node *np;
-       struct resource r;
-       u32 zero[OF_MAX_ADDR_CELLS] = {};
-       resource_size_t max = 0;
-       int i = 0;
-       int ret = 0;
-
-       if (muram_pbase)
-               return 0;
-
-       spin_lock_init(&cpm_muram_lock);
-       /* initialize the info header */
-       rh_init(&cpm_muram_info, 1,
-               sizeof(cpm_boot_muram_rh_block) /
-               sizeof(cpm_boot_muram_rh_block[0]),
-               cpm_boot_muram_rh_block);
-
-       np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
-       if (!np) {
-               /* try legacy bindings */
-               np = of_find_node_by_name(NULL, "data-only");
-               if (!np) {
-                       printk(KERN_ERR "Cannot find CPM muram data node");
-                       ret = -ENODEV;
-                       goto out;
-               }
-       }
-
-       muram_pbase = of_translate_address(np, zero);
-       if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
-               printk(KERN_ERR "Cannot translate zero through CPM muram node");
-               ret = -ENODEV;
-               goto out;
-       }
-
-       while (of_address_to_resource(np, i++, &r) == 0) {
-               if (r.end > max)
-                       max = r.end;
-
-               rh_attach_region(&cpm_muram_info, r.start - muram_pbase,
-                                resource_size(&r));
-       }
-
-       muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
-       if (!muram_vbase) {
-               printk(KERN_ERR "Cannot map CPM muram");
-               ret = -ENOMEM;
-       }
-
-out:
-       of_node_put(np);
-       return ret;
-}
-
-/**
- * cpm_muram_alloc - allocate the requested size worth of multi-user ram
- * @size: number of bytes to allocate
- * @align: requested alignment, in bytes
- *
- * This function returns an offset into the muram area.
- * Use cpm_dpram_addr() to get the virtual address of the area.
- * Use cpm_muram_free() to free the allocation.
- */
-unsigned long cpm_muram_alloc(unsigned long size, unsigned long align)
-{
-       unsigned long start;
-       unsigned long flags;
-
-       spin_lock_irqsave(&cpm_muram_lock, flags);
-       cpm_muram_info.alignment = align;
-       start = rh_alloc(&cpm_muram_info, size, "commproc");
-       if (!IS_ERR_VALUE(start))
-               memset_io(cpm_muram_addr(start), 0, size);
-       spin_unlock_irqrestore(&cpm_muram_lock, flags);
-
-       return start;
-}
-EXPORT_SYMBOL(cpm_muram_alloc);
-
-/**
- * cpm_muram_free - free a chunk of multi-user ram
- * @offset: The beginning of the chunk as returned by cpm_muram_alloc().
- */
-int cpm_muram_free(unsigned long offset)
-{
-       int ret;
-       unsigned long flags;
-
-       spin_lock_irqsave(&cpm_muram_lock, flags);
-       ret = rh_free(&cpm_muram_info, offset);
-       spin_unlock_irqrestore(&cpm_muram_lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(cpm_muram_free);
-
-/**
- * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
- * @offset: the offset into the muram area to reserve
- * @size: the number of bytes to reserve
- *
- * This function returns "start" on success, -ENOMEM on failure.
- * Use cpm_dpram_addr() to get the virtual address of the area.
- * Use cpm_muram_free() to free the allocation.
- */
-unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
-{
-       unsigned long start;
-       unsigned long flags;
-
-       spin_lock_irqsave(&cpm_muram_lock, flags);
-       cpm_muram_info.alignment = 1;
-       start = rh_alloc_fixed(&cpm_muram_info, offset, size, "commproc");
-       spin_unlock_irqrestore(&cpm_muram_lock, flags);
-
-       return start;
-}
-EXPORT_SYMBOL(cpm_muram_alloc_fixed);
-
-/**
- * cpm_muram_addr - turn a muram offset into a virtual address
- * @offset: muram offset to convert
- */
-void __iomem *cpm_muram_addr(unsigned long offset)
-{
-       return muram_vbase + offset;
-}
-EXPORT_SYMBOL(cpm_muram_addr);
-
-unsigned long cpm_muram_offset(void __iomem *addr)
-{
-       return addr - (void __iomem *)muram_vbase;
-}
-EXPORT_SYMBOL(cpm_muram_offset);
-
-/**
- * cpm_muram_dma - turn a muram virtual address into a DMA address
- * @offset: virtual address from cpm_muram_addr() to convert
- */
-dma_addr_t cpm_muram_dma(void __iomem *addr)
-{
-       return muram_pbase + ((u8 __iomem *)addr - muram_vbase);
-}
-EXPORT_SYMBOL(cpm_muram_dma);
-
 #if defined(CONFIG_CPM2) || defined(CONFIG_8xx_GPIO)
 
 struct cpm2_ioports {
index 38138cf8d33e2213804dad3e903c4037a59db605..47f781059eeb667b3114f1990e5ce0863a8490f3 100644 (file)
@@ -243,8 +243,6 @@ static irqreturn_t fsl_lbc_ctrl_irq(int irqno, void *data)
        if (status & LTESR_CS)
                dev_err(ctrl->dev, "Chip select error: "
                        "LTESR 0x%08X\n", status);
-       if (status & LTESR_UPM)
-               ;
        if (status & LTESR_FCT) {
                dev_err(ctrl->dev, "FCM command time-out: "
                        "LTESR 0x%08X\n", status);
index a1ac80b3041a9519523480a470c121d6db4cd3ca..c69e88e91459f256c6ba568eaac050666c920403 100644 (file)
@@ -218,6 +218,19 @@ static void setup_pci_atmu(struct pci_controller *hose)
         */
        setup_inbound = !is_kdump();
 
+       if (of_device_is_compatible(hose->dn, "fsl,bsc9132-pcie")) {
+               /*
+                * BSC9132 Rev1.0 has an issue where all the PEX inbound
+                * windows have implemented the default target value as 0xf
+                * for CCSR space. In all Freescale legacy devices the target
+                * of 0xf is reserved for local memory space. 9132 Rev1.0
+                * now has local memory space mapped to target 0x0 instead of
+                * 0xf. Hence adding a workaround to remove the target 0xf
+                * defined for memory space from Inbound window attributes.
+                */
+               piwar &= ~PIWAR_TGI_LOCAL;
+       }
+
        if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
                if (in_be32(&pci->block_rev1) >= PCIE_IP_REV_2_2) {
                        win_idx = 2;
index 786bf01691c9802420e7e6f40d4b6415cd18cfb5..07a8508cb7fae6cd1d0a742eb0c1d4c19769eeac 100644 (file)
@@ -320,6 +320,7 @@ static inline void disable_surveillance(void)
 #ifdef CONFIG_PPC_PSERIES
        /* Since this can't be a module, args should end up below 4GB. */
        static struct rtas_args args;
+       int token;
 
        /*
         * At this point we have got all the cpus we can into
@@ -328,17 +329,12 @@ static inline void disable_surveillance(void)
         * If we did try to take rtas.lock there would be a
         * real possibility of deadlock.
         */
-       args.token = rtas_token("set-indicator");
-       if (args.token == RTAS_UNKNOWN_SERVICE)
+       token = rtas_token("set-indicator");
+       if (token == RTAS_UNKNOWN_SERVICE)
                return;
-       args.token = cpu_to_be32(args.token);
-       args.nargs = cpu_to_be32(3);
-       args.nret = cpu_to_be32(1);
-       args.rets = &args.args[3];
-       args.args[0] = cpu_to_be32(SURVEILLANCE_TOKEN);
-       args.args[1] = 0;
-       args.args[2] = 0;
-       enter_rtas(__pa(&args));
+
+       rtas_call_unlocked(&args, token, 3, 1, NULL, SURVEILLANCE_TOKEN, 0, 0);
+
 #endif /* CONFIG_PPC_PSERIES */
 }
 
@@ -1522,6 +1518,8 @@ static void excprint(struct pt_regs *fp)
 
        if (trap == 0x700)
                print_bug_trap(fp);
+
+       printf(linux_banner);
 }
 
 static void prregs(struct pt_regs *fp)
index cb501386eb6ef6beee5f91b6cd54840d31b38611..547890fd9572179eba4c6ce81caf5bbc8b06b947 100644 (file)
@@ -586,7 +586,7 @@ static int __init powernv_cpufreq_init(void)
        int rc = 0;
 
        /* Don't probe on pseries (guest) platforms */
-       if (!firmware_has_feature(FW_FEATURE_OPALv3))
+       if (!firmware_has_feature(FW_FEATURE_OPAL))
                return -ENODEV;
 
        /* Discover pstates from device tree and init */
index 845bafcfa7929fd66cbb5eb966b16e0f3320de7f..e12dc30d8864e007fa5b70cc2969a1f00c9cf2b2 100644 (file)
@@ -264,7 +264,7 @@ static int powernv_idle_probe(void)
        if (cpuidle_disable != IDLE_NO_OVERRIDE)
                return -ENODEV;
 
-       if (firmware_has_feature(FW_FEATURE_OPALv3)) {
+       if (firmware_has_feature(FW_FEATURE_OPAL)) {
                cpuidle_state_table = powernv_states;
                /* Device tree can indicate more idle states */
                max_idle_state = powernv_add_idle_states();
index 263af709e53604ee5a049f707d2e9f5795031d1d..022c7ab7351a08c4f6afdff324e0b51eb1214bb1 100644 (file)
@@ -83,10 +83,10 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
 
        preempt_disable();
        pagefault_disable();
-       enable_kernel_altivec();
        enable_kernel_vsx();
        ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
        ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+       disable_kernel_vsx();
        pagefault_enable();
        preempt_enable();
 
@@ -103,9 +103,9 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
        } else {
                preempt_disable();
                pagefault_disable();
-               enable_kernel_altivec();
                enable_kernel_vsx();
                aes_p8_encrypt(src, dst, &ctx->enc_key);
+               disable_kernel_vsx();
                pagefault_enable();
                preempt_enable();
        }
@@ -120,9 +120,9 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
        } else {
                preempt_disable();
                pagefault_disable();
-               enable_kernel_altivec();
                enable_kernel_vsx();
                aes_p8_decrypt(src, dst, &ctx->dec_key);
+               disable_kernel_vsx();
                pagefault_enable();
                preempt_enable();
        }
index 78a978613ca83261c5a2a327e18dc54f9adc516d..495577b6d31b33d72792bb7f0f14b3d0a2c66b7e 100644 (file)
@@ -84,10 +84,10 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
 
        preempt_disable();
        pagefault_disable();
-       enable_kernel_altivec();
        enable_kernel_vsx();
        ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
        ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+       disable_kernel_vsx();
        pagefault_enable();
        preempt_enable();
 
@@ -115,7 +115,6 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
        } else {
                preempt_disable();
                pagefault_disable();
-               enable_kernel_altivec();
                enable_kernel_vsx();
 
                blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -129,6 +128,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
                        ret = blkcipher_walk_done(desc, &walk, nbytes);
                }
 
+               disable_kernel_vsx();
                pagefault_enable();
                preempt_enable();
        }
@@ -156,7 +156,6 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
        } else {
                preempt_disable();
                pagefault_disable();
-               enable_kernel_altivec();
                enable_kernel_vsx();
 
                blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -170,6 +169,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
                        ret = blkcipher_walk_done(desc, &walk, nbytes);
                }
 
+               disable_kernel_vsx();
                pagefault_enable();
                preempt_enable();
        }
index 1febc4f1d9affb6380cf6b4fa2b9abc6e1c43dae..0a3c1b04cf3c6057fe667ec95e10588906613935 100644 (file)
@@ -81,9 +81,9 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
        struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
 
        pagefault_disable();
-       enable_kernel_altivec();
        enable_kernel_vsx();
        ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+       disable_kernel_vsx();
        pagefault_enable();
 
        ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
@@ -100,9 +100,9 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
        unsigned int nbytes = walk->nbytes;
 
        pagefault_disable();
-       enable_kernel_altivec();
        enable_kernel_vsx();
        aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
+       disable_kernel_vsx();
        pagefault_enable();
 
        crypto_xor(keystream, src, nbytes);
@@ -133,7 +133,6 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
                ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
                while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
                        pagefault_disable();
-                       enable_kernel_altivec();
                        enable_kernel_vsx();
                        aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
                                                    walk.dst.virt.addr,
@@ -142,6 +141,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
                                                    AES_BLOCK_SIZE,
                                                    &ctx->enc_key,
                                                    walk.iv);
+                       disable_kernel_vsx();
                        pagefault_enable();
 
                        /* We need to update IV mostly for last bytes/round */
index 2183a2e77641e0682ca113951430765bcbcca4fc..6c999cb01b804839b4565a75a248063c0330dda3 100644 (file)
@@ -118,10 +118,9 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
 
        preempt_disable();
        pagefault_disable();
-       enable_kernel_altivec();
        enable_kernel_vsx();
-       enable_kernel_fp();
        gcm_init_p8(ctx->htable, (const u64 *) key);
+       disable_kernel_vsx();
        pagefault_enable();
        preempt_enable();
        return crypto_shash_setkey(ctx->fallback, key, keylen);
@@ -149,11 +148,10 @@ static int p8_ghash_update(struct shash_desc *desc,
                               GHASH_DIGEST_SIZE - dctx->bytes);
                        preempt_disable();
                        pagefault_disable();
-                       enable_kernel_altivec();
                        enable_kernel_vsx();
-                       enable_kernel_fp();
                        gcm_ghash_p8(dctx->shash, ctx->htable,
                                     dctx->buffer, GHASH_DIGEST_SIZE);
+                       disable_kernel_vsx();
                        pagefault_enable();
                        preempt_enable();
                        src += GHASH_DIGEST_SIZE - dctx->bytes;
@@ -164,10 +162,9 @@ static int p8_ghash_update(struct shash_desc *desc,
                if (len) {
                        preempt_disable();
                        pagefault_disable();
-                       enable_kernel_altivec();
                        enable_kernel_vsx();
-                       enable_kernel_fp();
                        gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
+                       disable_kernel_vsx();
                        pagefault_enable();
                        preempt_enable();
                        src += len;
@@ -195,11 +192,10 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
                                dctx->buffer[i] = 0;
                        preempt_disable();
                        pagefault_disable();
-                       enable_kernel_altivec();
                        enable_kernel_vsx();
-                       enable_kernel_fp();
                        gcm_ghash_p8(dctx->shash, ctx->htable,
                                     dctx->buffer, GHASH_DIGEST_SIZE);
+                       disable_kernel_vsx();
                        pagefault_enable();
                        preempt_enable();
                        dctx->bytes = 0;
index 048901a1111a339d21ddfc3eaf92844f381e4142..caaec654d7ea028e001c0bbdaee4bd01c3a2a292 100644 (file)
@@ -582,6 +582,7 @@ static struct of_device_id rackmeter_match[] = {
        { .name = "i2s" },
        { }
 };
+MODULE_DEVICE_TABLE(of, rackmeter_match);
 
 static struct macio_driver rackmeter_driver = {
        .driver = {
index f9512bfa6c3c7d86ec14da2309b54804b882f057..01ee736fe0efc820f980f1e2acf1e20a453e26b2 100644 (file)
@@ -425,8 +425,9 @@ static int __init via_pmu_start(void)
                        gpio_irq = irq_of_parse_and_map(gpio_node, 0);
 
                if (gpio_irq != NO_IRQ) {
-                       if (request_irq(gpio_irq, gpio1_interrupt, IRQF_TIMER,
-                                       "GPIO1 ADB", (void *)0))
+                       if (request_irq(gpio_irq, gpio1_interrupt,
+                                       IRQF_NO_SUSPEND, "GPIO1 ADB",
+                                       (void *)0))
                                printk(KERN_ERR "pmu: can't get irq %d"
                                       " (GPIO1)\n", gpio_irq);
                        else
index 6982f603fadc51a20154f38cbf1f82d95125f8df..be2ac5ce349f0d5cad3fdfd8220645321ab622c3 100644 (file)
@@ -1,4 +1,5 @@
-ccflags-y := -Werror -Wno-unused-const-variable
+ccflags-y                      := $(call cc-disable-warning, unused-const-variable)
+ccflags-$(CONFIG_PPC_WERROR)   += -Werror
 
 cxl-y                          += main.o file.o irq.o fault.o native.o
 cxl-y                          += context.o sysfs.o debugfs.o pci.o trace.o
index 103baf0e0c5bfd9aa23537adf12f6035bb427d86..ea3eeb7011e1770e24f6287bac51826611bd0859 100644 (file)
@@ -25,7 +25,6 @@ struct cxl_context *cxl_dev_context_init(struct pci_dev *dev)
 
        afu = cxl_pci_to_afu(dev);
 
-       get_device(&afu->dev);
        ctx = cxl_context_alloc();
        if (IS_ERR(ctx)) {
                rc = PTR_ERR(ctx);
@@ -61,7 +60,6 @@ err_mapping:
 err_ctx:
        kfree(ctx);
 err_dev:
-       put_device(&afu->dev);
        return ERR_PTR(rc);
 }
 EXPORT_SYMBOL_GPL(cxl_dev_context_init);
@@ -87,8 +85,6 @@ int cxl_release_context(struct cxl_context *ctx)
        if (ctx->status >= STARTED)
                return -EBUSY;
 
-       put_device(&ctx->afu->dev);
-
        cxl_context_free(ctx);
 
        return 0;
@@ -176,7 +172,7 @@ int cxl_start_context(struct cxl_context *ctx, u64 wed,
 
        if (task) {
                ctx->pid = get_task_pid(task, PIDTYPE_PID);
-               get_pid(ctx->pid);
+               ctx->glpid = get_task_pid(task->group_leader, PIDTYPE_PID);
                kernel = false;
        }
 
index 2faa1270d085b15f92e185f8f389f5790390fbef..262b88eac414a3e40d8edf42fc9fdffc7b54a6c0 100644 (file)
@@ -42,7 +42,7 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
        spin_lock_init(&ctx->sste_lock);
        ctx->afu = afu;
        ctx->master = master;
-       ctx->pid = NULL; /* Set in start work ioctl */
+       ctx->pid = ctx->glpid = NULL; /* Set in start work ioctl */
        mutex_init(&ctx->mapping_lock);
        ctx->mapping = mapping;
 
@@ -97,6 +97,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
        ctx->pe = i;
        ctx->elem = &ctx->afu->spa[i];
        ctx->pe_inserted = false;
+
+
+       /*
+        * take a ref on the afu so that it stays alive at least until
+        * this context is reclaimed inside reclaim_ctx.
+        */
+       cxl_afu_get(afu);
        return 0;
 }
 
@@ -211,7 +217,11 @@ int __detach_context(struct cxl_context *ctx)
        WARN_ON(cxl_detach_process(ctx) &&
                cxl_adapter_link_ok(ctx->afu->adapter));
        flush_work(&ctx->fault_work); /* Only needed for dedicated process */
+
+       /* release the reference to the group leader and mm handling pid */
        put_pid(ctx->pid);
+       put_pid(ctx->glpid);
+
        cxl_ctx_put();
        return 0;
 }
@@ -278,6 +288,9 @@ static void reclaim_ctx(struct rcu_head *rcu)
        if (ctx->irq_bitmap)
                kfree(ctx->irq_bitmap);
 
+       /* Drop ref to the afu device taken during cxl_context_init */
+       cxl_afu_put(ctx->afu);
+
        kfree(ctx);
 }
 
index 0cfb9c129f273cbdf0a408c6b5d3008bd308596d..a521bc72cec2d83c88332703d6fe9f5795093401 100644 (file)
@@ -403,6 +403,18 @@ struct cxl_afu {
        bool enabled;
 };
 
+/* AFU refcount management */
+static inline struct cxl_afu *cxl_afu_get(struct cxl_afu *afu)
+{
+
+       return (get_device(&afu->dev) == NULL) ? NULL : afu;
+}
+
+static inline void  cxl_afu_put(struct cxl_afu *afu)
+{
+       put_device(&afu->dev);
+}
+
 
 struct cxl_irq_name {
        struct list_head list;
@@ -433,6 +445,9 @@ struct cxl_context {
        unsigned int sst_size, sst_lru;
 
        wait_queue_head_t wq;
+       /* pid of the group leader associated with the pid */
+       struct pid *glpid;
+       /* use mm context associated with this pid for ds faults */
        struct pid *pid;
        spinlock_t lock; /* Protects pending_irq_mask, pending_fault and fault_addr */
        /* Only used in PR mode */
index 25a5418c55cb897e245a8faf8f728f059f5756fa..81c3f75b73308c08e578aa8948930565ae1944d0 100644 (file)
@@ -166,13 +166,92 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
        cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
 }
 
+/*
+ * Returns the mm_struct corresponding to the context ctx via ctx->pid
+ * In case the task has exited we use the task group leader accessible
+ * via ctx->glpid to find the next task in the thread group that has a
+ * valid  mm_struct associated with it. If a task with valid mm_struct
+ * is found the ctx->pid is updated to use the task struct for subsequent
+ * translations. In case no valid mm_struct is found in the task group to
+ * service the fault a NULL is returned.
+ */
+static struct mm_struct *get_mem_context(struct cxl_context *ctx)
+{
+       struct task_struct *task = NULL;
+       struct mm_struct *mm = NULL;
+       struct pid *old_pid = ctx->pid;
+
+       if (old_pid == NULL) {
+               pr_warn("%s: Invalid context for pe=%d\n",
+                        __func__, ctx->pe);
+               return NULL;
+       }
+
+       task = get_pid_task(old_pid, PIDTYPE_PID);
+
+       /*
+        * pid_alive may look racy but this saves us from costly
+        * get_task_mm when the task is a zombie. In worst case
+        * we may think a task is alive, which is about to die
+        * but get_task_mm will return NULL.
+        */
+       if (task != NULL && pid_alive(task))
+               mm = get_task_mm(task);
+
+       /* release the task struct that was taken earlier */
+       if (task)
+               put_task_struct(task);
+       else
+               pr_devel("%s: Context owning pid=%i for pe=%i dead\n",
+                       __func__, pid_nr(old_pid), ctx->pe);
+
+       /*
+        * If we couldn't find the mm context then use the group
+        * leader to iterate over the task group and find a task
+        * that gives us mm_struct.
+        */
+       if (unlikely(mm == NULL && ctx->glpid != NULL)) {
+
+               rcu_read_lock();
+               task = pid_task(ctx->glpid, PIDTYPE_PID);
+               if (task)
+                       do {
+                               mm = get_task_mm(task);
+                               if (mm) {
+                                       ctx->pid = get_task_pid(task,
+                                                               PIDTYPE_PID);
+                                       break;
+                               }
+                               task = next_thread(task);
+                       } while (task && !thread_group_leader(task));
+               rcu_read_unlock();
+
+               /* check if we switched pid */
+               if (ctx->pid != old_pid) {
+                       if (mm)
+                               pr_devel("%s:pe=%i switch pid %i->%i\n",
+                                        __func__, ctx->pe, pid_nr(old_pid),
+                                        pid_nr(ctx->pid));
+                       else
+                               pr_devel("%s:Cannot find mm for pid=%i\n",
+                                        __func__, pid_nr(old_pid));
+
+                       /* drop the reference to older pid */
+                       put_pid(old_pid);
+               }
+       }
+
+       return mm;
+}
+
+
+
 void cxl_handle_fault(struct work_struct *fault_work)
 {
        struct cxl_context *ctx =
                container_of(fault_work, struct cxl_context, fault_work);
        u64 dsisr = ctx->dsisr;
        u64 dar = ctx->dar;
-       struct task_struct *task = NULL;
        struct mm_struct *mm = NULL;
 
        if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
@@ -195,17 +274,17 @@ void cxl_handle_fault(struct work_struct *fault_work)
                "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);
 
        if (!ctx->kernel) {
-               if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
-                       pr_devel("cxl_handle_fault unable to get task %i\n",
-                                pid_nr(ctx->pid));
+
+               mm = get_mem_context(ctx);
+               /* indicates all the threads in the task group have exited */
+               if (mm == NULL) {
+                       pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
+                                __func__, ctx->pe, pid_nr(ctx->pid));
                        cxl_ack_ae(ctx);
                        return;
-               }
-               if (!(mm = get_task_mm(task))) {
-                       pr_devel("cxl_handle_fault unable to get mm %i\n",
-                                pid_nr(ctx->pid));
-                       cxl_ack_ae(ctx);
-                       goto out;
+               } else {
+                       pr_devel("Handling page fault for pe=%d pid=%i\n",
+                                ctx->pe, pid_nr(ctx->pid));
                }
        }
 
@@ -218,33 +297,22 @@ void cxl_handle_fault(struct work_struct *fault_work)
 
        if (mm)
                mmput(mm);
-out:
-       if (task)
-               put_task_struct(task);
 }
 
 static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
 {
-       int rc;
-       struct task_struct *task;
        struct mm_struct *mm;
 
-       if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
-               pr_devel("cxl_prefault_one unable to get task %i\n",
-                        pid_nr(ctx->pid));
-               return;
-       }
-       if (!(mm = get_task_mm(task))) {
+       mm = get_mem_context(ctx);
+       if (mm == NULL) {
                pr_devel("cxl_prefault_one unable to get mm %i\n",
                         pid_nr(ctx->pid));
-               put_task_struct(task);
                return;
        }
 
-       rc = cxl_fault_segment(ctx, mm, ea);
+       cxl_fault_segment(ctx, mm, ea);
 
        mmput(mm);
-       put_task_struct(task);
 }
 
 static u64 next_segment(u64 ea, u64 vsid)
@@ -263,18 +331,13 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
        struct copro_slb slb;
        struct vm_area_struct *vma;
        int rc;
-       struct task_struct *task;
        struct mm_struct *mm;
 
-       if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
-               pr_devel("cxl_prefault_vma unable to get task %i\n",
-                        pid_nr(ctx->pid));
-               return;
-       }
-       if (!(mm = get_task_mm(task))) {
+       mm = get_mem_context(ctx);
+       if (mm == NULL) {
                pr_devel("cxl_prefault_vm unable to get mm %i\n",
                         pid_nr(ctx->pid));
-               goto out1;
+               return;
        }
 
        down_read(&mm->mmap_sem);
@@ -295,8 +358,6 @@ static void cxl_prefault_vma(struct cxl_context *ctx)
        up_read(&mm->mmap_sem);
 
        mmput(mm);
-out1:
-       put_task_struct(task);
 }
 
 void cxl_prefault(struct cxl_context *ctx, u64 wed)
index 7ccd2998be92b8b3f7cdca2a0acbf3f9586d0f34..783337d22f36ab4c1841ba5635de5bc5fc193594 100644 (file)
@@ -67,7 +67,13 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
                spin_unlock(&adapter->afu_list_lock);
                goto err_put_adapter;
        }
-       get_device(&afu->dev);
+
+       /*
+        * taking a ref to the afu so that it doesn't go away
+        * for rest of the function. This ref is released before
+        * we return.
+        */
+       cxl_afu_get(afu);
        spin_unlock(&adapter->afu_list_lock);
 
        if (!afu->current_mode)
@@ -90,13 +96,12 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
        file->private_data = ctx;
        cxl_ctx_get();
 
-       /* Our ref on the AFU will now hold the adapter */
-       put_device(&adapter->dev);
-
-       return 0;
+       /* indicate success */
+       rc = 0;
 
 err_put_afu:
-       put_device(&afu->dev);
+       /* release the ref taken earlier */
+       cxl_afu_put(afu);
 err_put_adapter:
        put_device(&adapter->dev);
        return rc;
@@ -131,8 +136,6 @@ int afu_release(struct inode *inode, struct file *file)
                mutex_unlock(&ctx->mapping_lock);
        }
 
-       put_device(&ctx->afu->dev);
-
        /*
         * At this point all bottom halves have finished and we should be
         * getting no more IRQs from the hardware for this context.  Once it's
@@ -198,8 +201,12 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
         * where a process (master, some daemon, etc) has opened the chardev on
         * behalf of another process, so the AFU's mm gets bound to the process
         * that performs this ioctl and not the process that opened the file.
+        * Also we grab the PID of the group leader so that if the task that
+        * has performed the attach operation exits the mm context of the
+        * process is still accessible.
         */
-       ctx->pid = get_pid(get_task_pid(current, PIDTYPE_PID));
+       ctx->pid = get_task_pid(current, PIDTYPE_PID);
+       ctx->glpid = get_task_pid(current->group_leader, PIDTYPE_PID);
 
        trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr);
 
index 85761d7eb333173040204a7a5593bf2c7cf06485..4c1903f781fc1793bb7ddf7205d138e60ea2133a 100644 (file)
@@ -138,6 +138,7 @@ static const struct pci_device_id cxl_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
        { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
        { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
+       { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), },
        { PCI_DEVICE_CLASS(0x120000, ~0), },
 
        { }
index c241e15cacb1f022e766a1280208f8cb6dfbd176..cbd4331fb45cb00368173e446b8619d852386a14 100644 (file)
@@ -203,7 +203,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
        mask <<= shift;
        val <<= shift;
 
-       v = (in_le32(ioaddr) & ~mask) || (val & mask);
+       v = (in_le32(ioaddr) & ~mask) | (val & mask);
 
        out_le32(ioaddr, v);
        return PCIBIOS_SUCCESSFUL;
index 622005abf8595519734c26bb9ea1cb15e17ba4ba..f3c63dce1e30223813ce9637cc89f703e53d7a35 100644 (file)
@@ -29,7 +29,7 @@
 
 #include <asm/io.h>
 #if IS_ENABLED(CONFIG_UCC_GETH)
-#include <asm/ucc.h>   /* for ucc_set_qe_mux_mii_mng() */
+#include <soc/fsl/qe/ucc.h>
 #endif
 
 #include "gianfar.h"
index cbddbe2d0429ad3b2cb8d402b14493ef04321ecb..5bf1ade2831577741004cf2a1ef3c3ab423c6c55 100644 (file)
 #include <asm/uaccess.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
-#include <asm/ucc.h>
-#include <asm/ucc_fast.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_fast.h>
 #include <asm/machdep.h>
 
 #include "ucc_geth.h"
index 75f337163ce3c685fab0909516d5cb44055fcfa1..5da19b440a6a80b52ef5366d65b1bba679ecfc83 100644 (file)
 #include <linux/list.h>
 #include <linux/if_ether.h>
 
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
 
-#include <asm/ucc.h>
-#include <asm/ucc_fast.h>
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_fast.h>
 
 #define DRV_DESC "QE UCC Gigabit Ethernet Controller"
 #define DRV_NAME "ucc_geth"
index df39ce02a99d33f9b5c606b187ab374567290dae..9c18d6fd8107599914558b0d3fe87a80ac38fb1c 100644 (file)
@@ -40,7 +40,7 @@ static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm)
        tm->tm_min  = bcd2bin((h_m_s_ms >> 48) & 0xff);
        tm->tm_sec  = bcd2bin((h_m_s_ms >> 40) & 0xff);
 
-       GregorianDay(tm);
+       tm->tm_wday = -1;
 }
 
 static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
index 4e853ed2c82b937ebd8fb6cf4c22288a122fca3a..ad0df75fab6ed0aa5da02c5685ee925b4ad1807f 100644 (file)
@@ -1,6 +1,7 @@
 menu "SOC (System On Chip) specific Drivers"
 
 source "drivers/soc/brcmstb/Kconfig"
+source "drivers/soc/fsl/qe/Kconfig"
 source "drivers/soc/mediatek/Kconfig"
 source "drivers/soc/qcom/Kconfig"
 source "drivers/soc/rockchip/Kconfig"
index f2ba2e932ae10c5d2cda1de269b826b9875a4a5c..9536b804424a88dcc3b1d3585081c83d4bdf1ce6 100644 (file)
@@ -4,6 +4,7 @@
 
 obj-$(CONFIG_SOC_BRCMSTB)      += brcmstb/
 obj-$(CONFIG_MACH_DOVE)                += dove/
+obj-y                          += fsl/
 obj-$(CONFIG_ARCH_MEDIATEK)    += mediatek/
 obj-$(CONFIG_ARCH_QCOM)                += qcom/
 obj-$(CONFIG_ARCH_ROCKCHIP)            += rockchip/
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
new file mode 100644 (file)
index 0000000..203307f
--- /dev/null
@@ -0,0 +1,6 @@
+#
+# Makefile for the Linux Kernel SOC fsl specific device drivers
+#
+
+obj-$(CONFIG_QUICC_ENGINE)             += qe/
+obj-$(CONFIG_CPM)                      += qe/
similarity index 54%
rename from arch/powerpc/sysdev/qe_lib/Kconfig
rename to drivers/soc/fsl/qe/Kconfig
index 3c251993bacd502992ca2b9188993b5e69802eaf..20978f2058a67c9241bbacbef965a5f27aa60a44 100644 (file)
@@ -2,6 +2,17 @@
 # QE Communication options
 #
 
+config QUICC_ENGINE
+       bool "Freescale QUICC Engine (QE) Support"
+       depends on FSL_SOC && PPC32
+       select GENERIC_ALLOCATOR
+       select CRC32
+       help
+         The QUICC Engine (QE) is a new generation of communications
+         coprocessors on Freescale embedded CPUs (akin to CPM in older chips).
+         Selecting this option means that you wish to build a kernel
+         for a machine with a QE coprocessor.
+
 config UCC_SLOW
        bool
        default y if SERIAL_QE
similarity index 69%
rename from arch/powerpc/sysdev/qe_lib/Makefile
rename to drivers/soc/fsl/qe/Makefile
index f1855c185291aa923b02dff0b668ae2e061c8f34..ffac5410c5c7fef25bc7bf00e36fe7e96699848c 100644 (file)
@@ -1,8 +1,8 @@
 #
 # Makefile for the linux ppc-specific parts of QE
 #
-obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_ic.o qe_io.o
-
+obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_common.o qe_ic.o qe_io.o
+obj-$(CONFIG_CPM)      += qe_common.o
 obj-$(CONFIG_UCC)      += ucc.o
 obj-$(CONFIG_UCC_SLOW) += ucc_slow.o
 obj-$(CONFIG_UCC_FAST) += ucc_fast.o
similarity index 99%
rename from arch/powerpc/sysdev/qe_lib/gpio.c
rename to drivers/soc/fsl/qe/gpio.c
index 521e67a49dc4075355c4f78811c0b877ab48b8d2..aa5c11acf212d708130771bf5545c0c81452360f 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/gpio.h>
 #include <linux/slab.h>
 #include <linux/export.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/qe.h>
 
 struct qe_gpio_chip {
        struct of_mm_gpio_chip mm_gc;
similarity index 98%
rename from arch/powerpc/sysdev/qe_lib/qe.c
rename to drivers/soc/fsl/qe/qe.c
index c2518cdb7ddb6e94822138e7c702d9242b444f86..709fc63809e5ca2fa28aeb0ad8be171cba783b4e 100644 (file)
@@ -31,8 +31,8 @@
 #include <asm/irq.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
 #include <asm/prom.h>
 #include <asm/rheap.h>
 
@@ -671,6 +671,19 @@ unsigned int qe_get_num_of_snums(void)
 }
 EXPORT_SYMBOL(qe_get_num_of_snums);
 
+static int __init qe_init(void)
+{
+       struct device_node *np;
+
+       np = of_find_compatible_node(NULL, NULL, "fsl,qe");
+       if (!np)
+               return -ENODEV;
+       qe_reset();
+       of_node_put(np);
+       return 0;
+}
+subsys_initcall(qe_init);
+
 #if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx)
 static int qe_resume(struct platform_device *ofdev)
 {
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
new file mode 100644 (file)
index 0000000..419fa5b
--- /dev/null
@@ -0,0 +1,235 @@
+/*
+ * Common CPM code
+ *
+ * Author: Scott Wood <scottwood@freescale.com>
+ *
+ * Copyright 2007-2008,2010 Freescale Semiconductor, Inc.
+ *
+ * Some parts derived from commproc.c/cpm2_common.c, which is:
+ * Copyright (c) 1997 Dan error_act (dmalek@jlc.net)
+ * Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com>
+ * Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com)
+ * 2006 (c) MontaVista Software, Inc.
+ * Vitaly Bordug <vbordug@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+#include <linux/genalloc.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/of_device.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <soc/fsl/qe/qe.h>
+
+static struct gen_pool *muram_pool;
+static spinlock_t cpm_muram_lock;
+static u8 __iomem *muram_vbase;
+static phys_addr_t muram_pbase;
+
+struct muram_block {
+       struct list_head head;
+       unsigned long start;
+       int size;
+};
+
+static LIST_HEAD(muram_block_list);
+
+/* max address size we deal with */
+#define OF_MAX_ADDR_CELLS      4
+#define GENPOOL_OFFSET         (4096 * 8)
+
+int cpm_muram_init(void)
+{
+       struct device_node *np;
+       struct resource r;
+       u32 zero[OF_MAX_ADDR_CELLS] = {};
+       resource_size_t max = 0;
+       int i = 0;
+       int ret = 0;
+
+       if (muram_pbase)
+               return 0;
+
+       spin_lock_init(&cpm_muram_lock);
+       np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
+       if (!np) {
+               /* try legacy bindings */
+               np = of_find_node_by_name(NULL, "data-only");
+               if (!np) {
+                       pr_err("Cannot find CPM muram data node");
+                       ret = -ENODEV;
+                       goto out_muram;
+               }
+       }
+
+       muram_pool = gen_pool_create(0, -1);
+       muram_pbase = of_translate_address(np, zero);
+       if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
+               pr_err("Cannot translate zero through CPM muram node");
+               ret = -ENODEV;
+               goto out_pool;
+       }
+
+       while (of_address_to_resource(np, i++, &r) == 0) {
+               if (r.end > max)
+                       max = r.end;
+               ret = gen_pool_add(muram_pool, r.start - muram_pbase +
+                                  GENPOOL_OFFSET, resource_size(&r), -1);
+               if (ret) {
+                       pr_err("QE: couldn't add muram to pool!\n");
+                       goto out_pool;
+               }
+       }
+
+       muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
+       if (!muram_vbase) {
+               pr_err("Cannot map QE muram");
+               ret = -ENOMEM;
+               goto out_pool;
+       }
+       goto out_muram;
+out_pool:
+       gen_pool_destroy(muram_pool);
+out_muram:
+       of_node_put(np);
+       return ret;
+}
+
+/*
+ * cpm_muram_alloc - allocate the requested size worth of multi-user ram
+ * @size: number of bytes to allocate
+ * @align: requested alignment, in bytes
+ *
+ * This function returns an offset into the muram area.
+ * Use cpm_dpram_addr() to get the virtual address of the area.
+ * Use cpm_muram_free() to free the allocation.
+ */
+unsigned long cpm_muram_alloc(unsigned long size, unsigned long align)
+{
+       unsigned long start;
+       unsigned long flags;
+       struct genpool_data_align muram_pool_data;
+
+       spin_lock_irqsave(&cpm_muram_lock, flags);
+       muram_pool_data.align = align;
+       start = cpm_muram_alloc_common(size, gen_pool_first_fit_align,
+                                      &muram_pool_data);
+       spin_unlock_irqrestore(&cpm_muram_lock, flags);
+       return start;
+}
+EXPORT_SYMBOL(cpm_muram_alloc);
+
+/**
+ * cpm_muram_free - free a chunk of multi-user ram
+ * @offset: The beginning of the chunk as returned by cpm_muram_alloc().
+ */
+int cpm_muram_free(unsigned long offset)
+{
+       unsigned long flags;
+       int size;
+       struct muram_block *tmp;
+
+       size = 0;
+       spin_lock_irqsave(&cpm_muram_lock, flags);
+       list_for_each_entry(tmp, &muram_block_list, head) {
+               if (tmp->start == offset) {
+                       size = tmp->size;
+                       list_del(&tmp->head);
+                       kfree(tmp);
+                       break;
+               }
+       }
+       gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size);
+       spin_unlock_irqrestore(&cpm_muram_lock, flags);
+       return size;
+}
+EXPORT_SYMBOL(cpm_muram_free);
+
+/*
+ * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
+ * @offset: offset of allocation start address
+ * @size: number of bytes to allocate
+ * This function returns an offset into the muram area
+ * Use cpm_dpram_addr() to get the virtual address of the area.
+ * Use cpm_muram_free() to free the allocation.
+ */
+unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
+{
+       unsigned long start;
+       unsigned long flags;
+       struct genpool_data_fixed muram_pool_data_fixed;
+
+       spin_lock_irqsave(&cpm_muram_lock, flags);
+       muram_pool_data_fixed.offset = offset + GENPOOL_OFFSET;
+       start = cpm_muram_alloc_common(size, gen_pool_fixed_alloc,
+                                      &muram_pool_data_fixed);
+       spin_unlock_irqrestore(&cpm_muram_lock, flags);
+       return start;
+}
+EXPORT_SYMBOL(cpm_muram_alloc_fixed);
+
+/*
+ * cpm_muram_alloc_common - cpm_muram_alloc common code
+ * @size: number of bytes to allocate
+ * @algo: algorithm for alloc.
+ * @data: data for genalloc's algorithm.
+ *
+ * This function returns an offset into the muram area.
+ */
+unsigned long cpm_muram_alloc_common(unsigned long size, genpool_algo_t algo,
+                                    void *data)
+{
+       struct muram_block *entry;
+       unsigned long start;
+
+       start = gen_pool_alloc_algo(muram_pool, size, algo, data);
+       if (!start)
+               goto out2;
+       start = start - GENPOOL_OFFSET;
+       memset_io(cpm_muram_addr(start), 0, size);
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               goto out1;
+       entry->start = start;
+       entry->size = size;
+       list_add(&entry->head, &muram_block_list);
+
+       return start;
+out1:
+       gen_pool_free(muram_pool, start, size);
+out2:
+       return (unsigned long)-ENOMEM;
+}
+
+/**
+ * cpm_muram_addr - turn a muram offset into a virtual address
+ * @offset: muram offset to convert
+ */
+void __iomem *cpm_muram_addr(unsigned long offset)
+{
+       return muram_vbase + offset;
+}
+EXPORT_SYMBOL(cpm_muram_addr);
+
+unsigned long cpm_muram_offset(void __iomem *addr)
+{
+       return addr - (void __iomem *)muram_vbase;
+}
+EXPORT_SYMBOL(cpm_muram_offset);
+
+/**
+ * cpm_muram_dma - turn a muram virtual address into a DMA address
+ * @offset: virtual address from cpm_muram_addr() to convert
+ */
+dma_addr_t cpm_muram_dma(void __iomem *addr)
+{
+       return muram_pbase + ((u8 __iomem *)addr - muram_vbase);
+}
+EXPORT_SYMBOL(cpm_muram_dma);
similarity index 99%
rename from arch/powerpc/sysdev/qe_lib/qe_ic.c
rename to drivers/soc/fsl/qe/qe_ic.c
index ef36f16f9f6fbc9bdfd0c02e6df29e77fc83817c..b77d01ff8330dea346f50e8bc758f3a971d2cbb0 100644 (file)
@@ -14,6 +14,8 @@
  * option) any later version.
  */
 
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/errno.h>
@@ -26,8 +28,7 @@
 #include <linux/spinlock.h>
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe_ic.h>
 
 #include "qe_ic.h"
 
similarity index 97%
rename from arch/powerpc/sysdev/qe_lib/qe_ic.h
rename to drivers/soc/fsl/qe/qe_ic.h
index efef7ab9b753eba14ba858c50583c6e3a844415b..926a2ed423193a65e9af404dac7f81a7f7755172 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * arch/powerpc/sysdev/qe_lib/qe_ic.h
+ * drivers/soc/fsl/qe/qe_ic.h
  *
  * QUICC ENGINE Interrupt Controller Header
  *
@@ -16,7 +16,7 @@
 #ifndef _POWERPC_SYSDEV_QE_IC_H
 #define _POWERPC_SYSDEV_QE_IC_H
 
-#include <asm/qe_ic.h>
+#include <soc/fsl/qe/qe_ic.h>
 
 #define NR_QE_IC_INTS          64
 
similarity index 99%
rename from arch/powerpc/sysdev/qe_lib/qe_io.c
rename to drivers/soc/fsl/qe/qe_io.c
index 7ea0174f6d3d7e9f805d7c2c092838032ab78e4d..7ae59abc78637edc0da2018c3f22241bbbf2db75 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/ioport.h>
 
 #include <asm/io.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/qe.h>
 #include <asm/prom.h>
 #include <sysdev/fsl_soc.h>
 
similarity index 98%
rename from arch/powerpc/sysdev/qe_lib/ucc.c
rename to drivers/soc/fsl/qe/ucc.c
index 621575b7e84aa3b2c5932b1768f909b56d593587..b59d3358f9bd3f4337ea3230ea43f9225f757820 100644 (file)
@@ -21,9 +21,9 @@
 
 #include <asm/irq.h>
 #include <asm/io.h>
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
-#include <asm/ucc.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/ucc.h>
 
 int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
 {
similarity index 98%
rename from arch/powerpc/sysdev/qe_lib/ucc_fast.c
rename to drivers/soc/fsl/qe/ucc_fast.c
index 65aaf15032aee221f313e844248749cb8029baee..a7689310fe409d9db764b6d7a60d601e3aeb6246 100644 (file)
 #include <linux/export.h>
 
 #include <asm/io.h>
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
 
-#include <asm/ucc.h>
-#include <asm/ucc_fast.h>
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_fast.h>
 
 void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
 {
similarity index 98%
rename from arch/powerpc/sysdev/qe_lib/ucc_slow.c
rename to drivers/soc/fsl/qe/ucc_slow.c
index 5f91628209eb5cc72563382fd699e46bf49a0845..9334bdbd9b309a014133c7ff14f7194d3a51d4f4 100644 (file)
 #include <linux/export.h>
 
 #include <asm/io.h>
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
 
-#include <asm/ucc.h>
-#include <asm/ucc_slow.h>
+#include <soc/fsl/qe/ucc.h>
+#include <soc/fsl/qe/ucc_slow.h>
 
 u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
 {
similarity index 96%
rename from arch/powerpc/sysdev/qe_lib/usb.c
rename to drivers/soc/fsl/qe/usb.c
index 27f23bd15eb65b75729cc7443a683219df74609e..111f7ab80f04802b603beffb3918df680e1d6957 100644 (file)
@@ -17,8 +17,8 @@
 #include <linux/errno.h>
 #include <linux/export.h>
 #include <linux/io.h>
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
 
 int qe_usb_clock_set(enum qe_clock clk, int rate)
 {
index 896add8cfd3b6c6bba311a335072e63a4060a3a4..8f7b26ec181e2782c2e8fd0cbb4de7e4b3c89f89 100644 (file)
@@ -16,7 +16,7 @@
  * option) any later version.
  */
 #include <asm/cpm.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/qe.h>
 #include <linux/dma-mapping.h>
 #include <linux/fsl_devices.h>
 #include <linux/kernel.h>
index 73190f5d28327ad5a039132726f656258603cd26..1a7dc3c590b1991deef64c330cf7cdf662ced894 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/dma-mapping.h>
 
 #include <linux/fs_uart_pd.h>
-#include <asm/ucc_slow.h>
+#include <soc/fsl/qe/ucc_slow.h>
 
 #include <linux/firmware.h>
 #include <asm/reg.h>
index 5fb6f8b4f0b4889f51d1686610ab07b40cc9bdc0..53c0692f1b096eef837624bda521a11ad3181fc4 100644 (file)
@@ -38,7 +38,7 @@
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
 #include <linux/usb/otg.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/qe.h>
 #include <asm/cpm.h>
 #include <asm/dma.h>
 #include <asm/reg.h>
index c6cebb96fd2157aea6c13318330c8162db4d9f8e..0960f41f945aba959da6efd566754b3b6553b124 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/of_platform.h>
 #include <linux/of_gpio.h>
 #include <linux/slab.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/qe.h>
 #include <asm/fsl_gtm.h>
 #include "fhci.h"
 
index 3bacdd7befe9b0c55dafa1e1d799ecb1f8df6535..60d55eb3de0dcd479b376de99727bd2ca2f27c13 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
 #include <linux/gpio.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/qe.h>
 #include "fhci.h"
 
 /* virtual root hub specific descriptor */
index 95ca5986e672dc90cfa97e685e2bdb6fce04a3a6..a9609a336efef1f3b8c0d108121bc793a5717fee 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/io.h>
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/qe.h>
 #include <asm/fsl_gtm.h>
 #include "fhci.h"
 
index 154e6a007727159498a3a5278aa51de69d9e34f0..3fc82c1c3c73e46403aa827b22630f1534837bcd 100644 (file)
@@ -27,8 +27,8 @@
 #include <linux/io.h>
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
-#include <asm/qe.h>
-#include <asm/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
+#include <soc/fsl/qe/immap_qe.h>
 
 #define USB_CLOCK      48000000
 
index 7ff168d06967c9e2544faa50e4e0d995c27bf4ff..29d4385903d456c274e56d258741fa51002881ce 100644 (file)
 #ifndef __GENALLOC_H__
 #define __GENALLOC_H__
 
+#include <linux/types.h>
 #include <linux/spinlock_types.h>
 
 struct device;
 struct device_node;
+struct gen_pool;
 
 /**
  * Allocation callback function type definition
@@ -47,7 +49,7 @@ typedef unsigned long (*genpool_algo_t)(unsigned long *map,
                        unsigned long size,
                        unsigned long start,
                        unsigned int nr,
-                       void *data);
+                       void *data, struct gen_pool *pool);
 
 /*
  *  General purpose special memory pool descriptor.
@@ -75,6 +77,20 @@ struct gen_pool_chunk {
        unsigned long bits[0];          /* bitmap for allocating memory chunk */
 };
 
+/*
+ *  gen_pool data descriptor for gen_pool_first_fit_align.
+ */
+struct genpool_data_align {
+       int align;              /* alignment by bytes for starting address */
+};
+
+/*
+ *  gen_pool data descriptor for gen_pool_fixed_alloc.
+ */
+struct genpool_data_fixed {
+       unsigned long offset;           /* The offset of the specific region */
+};
+
 extern struct gen_pool *gen_pool_create(int, int);
 extern phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long);
 extern int gen_pool_add_virt(struct gen_pool *, unsigned long, phys_addr_t,
@@ -98,6 +114,8 @@ static inline int gen_pool_add(struct gen_pool *pool, unsigned long addr,
 }
 extern void gen_pool_destroy(struct gen_pool *);
 extern unsigned long gen_pool_alloc(struct gen_pool *, size_t);
+extern unsigned long gen_pool_alloc_algo(struct gen_pool *, size_t,
+               genpool_algo_t algo, void *data);
 extern void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size,
                dma_addr_t *dma);
 extern void gen_pool_free(struct gen_pool *, unsigned long, size_t);
@@ -110,14 +128,26 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
                void *data);
 
 extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
-               unsigned long start, unsigned int nr, void *data);
+               unsigned long start, unsigned int nr, void *data,
+               struct gen_pool *pool);
+
+extern unsigned long gen_pool_fixed_alloc(unsigned long *map,
+               unsigned long size, unsigned long start, unsigned int nr,
+               void *data, struct gen_pool *pool);
+
+extern unsigned long gen_pool_first_fit_align(unsigned long *map,
+               unsigned long size, unsigned long start, unsigned int nr,
+               void *data, struct gen_pool *pool);
+
 
 extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
                unsigned long size, unsigned long start, unsigned int nr,
-               void *data);
+               void *data, struct gen_pool *pool);
 
 extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
-               unsigned long start, unsigned int nr, void *data);
+               unsigned long start, unsigned int nr, void *data,
+               struct gen_pool *pool);
+
 
 extern struct gen_pool *devm_gen_pool_create(struct device *dev,
                int min_alloc_order, int nid, const char *name);
similarity index 95%
rename from arch/powerpc/include/asm/qe.h
rename to include/soc/fsl/qe/qe.h
index 32b9bfa0c9bd39a3b17083afe4eda86dfbd34057..c7fa36c335c9746446593f35f5051a974703ee35 100644 (file)
 #define _ASM_POWERPC_QE_H
 #ifdef __KERNEL__
 
+#include <linux/compiler.h>
+#include <linux/genalloc.h>
 #include <linux/spinlock.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <asm/cpm.h>
-#include <asm/immap_qe.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/types.h>
 
 #define QE_NUM_OF_SNUM 256     /* There are 256 serial number in QE */
 #define QE_NUM_OF_BRGS 16
@@ -92,6 +97,51 @@ extern void qe_reset(void);
 static inline void qe_reset(void) {}
 #endif
 
+int cpm_muram_init(void);
+
+#if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE)
+unsigned long cpm_muram_alloc(unsigned long size, unsigned long align);
+int cpm_muram_free(unsigned long offset);
+unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
+unsigned long cpm_muram_alloc_common(unsigned long size, genpool_algo_t algo,
+                                    void *data);
+void __iomem *cpm_muram_addr(unsigned long offset);
+unsigned long cpm_muram_offset(void __iomem *addr);
+dma_addr_t cpm_muram_dma(void __iomem *addr);
+#else
+static inline unsigned long cpm_muram_alloc(unsigned long size,
+                                           unsigned long align)
+{
+       return -ENOSYS;
+}
+
+static inline int cpm_muram_free(unsigned long offset)
+{
+       return -ENOSYS;
+}
+
+static inline unsigned long cpm_muram_alloc_fixed(unsigned long offset,
+                                                 unsigned long size)
+{
+       return -ENOSYS;
+}
+
+static inline void __iomem *cpm_muram_addr(unsigned long offset)
+{
+       return NULL;
+}
+
+static inline unsigned long cpm_muram_offset(void __iomem *addr)
+{
+       return -ENOSYS;
+}
+
+static inline dma_addr_t cpm_muram_dma(void __iomem *addr)
+{
+       return 0;
+}
+#endif /* defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE) */
+
 /* QE PIO */
 #define QE_PIO_PINS 32
 
similarity index 96%
rename from arch/powerpc/include/asm/ucc.h
rename to include/soc/fsl/qe/ucc.h
index 6927ac26516ea7ef698c0552ef0f67cec27e6a65..894f14cbb044506150acc7a5b63fa39ab46a3fc1 100644 (file)
@@ -15,8 +15,8 @@
 #ifndef __UCC_H__
 #define __UCC_H__
 
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
 
 #define STATISTICS
 
similarity index 98%
rename from arch/powerpc/include/asm/ucc_fast.h
rename to include/soc/fsl/qe/ucc_fast.h
index 72ea9bab07df4c1e63bd7022fe340cc7c16c0846..df8ea7958c6375c3f50b2c2694732cc18d35f148 100644 (file)
 
 #include <linux/kernel.h>
 
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
 
-#include <asm/ucc.h>
+#include <soc/fsl/qe/ucc.h>
 
 /* Receive BD's status */
 #define R_E    0x80000000      /* buffer empty */
similarity index 99%
rename from arch/powerpc/include/asm/ucc_slow.h
rename to include/soc/fsl/qe/ucc_slow.h
index 233ef5fe5fde8326c5815b21fbe03b2272e32449..6c0573a0825c9cb5410491cd0448a2f4150a5057 100644 (file)
 
 #include <linux/kernel.h>
 
-#include <asm/immap_qe.h>
-#include <asm/qe.h>
+#include <soc/fsl/qe/immap_qe.h>
+#include <soc/fsl/qe/qe.h>
 
-#include <asm/ucc.h>
+#include <soc/fsl/qe/ucc.h>
 
 /* transmit BD's status */
 #define T_R    0x80000000      /* ready bit */
index 116a166b096f06eb64ed288e4d8b694648f854b8..0a1139644d328a92ae346ee6fa723d7e18085c75 100644 (file)
@@ -269,6 +269,25 @@ EXPORT_SYMBOL(gen_pool_destroy);
  * NMI-safe cmpxchg implementation.
  */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	/* Delegate to the generic path using the pool's configured algorithm. */
	return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_alloc);
+
+/**
+ * gen_pool_alloc_algo - allocate special memory from the pool
+ * @pool: pool to allocate from
+ * @size: number of bytes to allocate from the pool
+ * @algo: algorithm passed from caller
+ * @data: data passed to algorithm
+ *
+ * Allocate the requested number of bytes from the specified pool.
+ * Uses the pool allocation function (with first-fit algorithm by default).
+ * Can not be used in NMI handler on architectures without
+ * NMI-safe cmpxchg implementation.
+ */
+unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
+               genpool_algo_t algo, void *data)
 {
        struct gen_pool_chunk *chunk;
        unsigned long addr = 0;
@@ -290,8 +309,8 @@ unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
 
                end_bit = chunk_size(chunk) >> order;
 retry:
-               start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
-                               pool->data);
+               start_bit = algo(chunk->bits, end_bit, start_bit,
+                                nbits, data, pool);
                if (start_bit >= end_bit)
                        continue;
                remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
@@ -310,7 +329,7 @@ retry:
        rcu_read_unlock();
        return addr;
 }
-EXPORT_SYMBOL(gen_pool_alloc);
+EXPORT_SYMBOL(gen_pool_alloc_algo);
 
 /**
  * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
@@ -501,14 +520,73 @@ EXPORT_SYMBOL(gen_pool_set_algo);
  * @start: The bitnumber to start searching at
  * @nr: The number of zeroed bits we're looking for
  * @data: additional data - unused
+ * @pool: pool to find the fit region memory from
  */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool)
{
	/* Plain first-fit search; @data and @pool are unused here. */
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);
 
+/**
+ * gen_pool_first_fit_align - find the first available region
+ * of memory matching the size requirement (alignment constraint)
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: data for alignment
+ * @pool: pool to get order from
+ */
+unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
+               unsigned long start, unsigned int nr, void *data,
+               struct gen_pool *pool)
+{
+       struct genpool_data_align *alignment;
+       unsigned long align_mask;
+       int order;
+
+       alignment = data;
+       order = pool->min_alloc_order;
+       align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
+       return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+}
+EXPORT_SYMBOL(gen_pool_first_fit_align);
+
+/**
+ * gen_pool_fixed_alloc - reserve a specific region
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: data for alignment
+ * @pool: pool to get order from
+ */
+unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
+               unsigned long start, unsigned int nr, void *data,
+               struct gen_pool *pool)
+{
+       struct genpool_data_fixed *fixed_data;
+       int order;
+       unsigned long offset_bit;
+       unsigned long start_bit;
+
+       fixed_data = data;
+       order = pool->min_alloc_order;
+       offset_bit = fixed_data->offset >> order;
+       if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
+               return size;
+
+       start_bit = bitmap_find_next_zero_area(map, size,
+                       start + offset_bit, nr, 0);
+       if (start_bit != offset_bit)
+               start_bit = size;
+       return start_bit;
+}
+EXPORT_SYMBOL(gen_pool_fixed_alloc);
+
 /**
  * gen_pool_first_fit_order_align - find the first available region
  * of memory matching the size requirement. The region will be aligned
@@ -518,10 +596,11 @@ EXPORT_SYMBOL(gen_pool_first_fit);
  * @start: The bitnumber to start searching at
  * @nr: The number of zeroed bits we're looking for
  * @data: additional data - unused
+ * @pool: pool to find the fit region memory from
  */
 unsigned long gen_pool_first_fit_order_align(unsigned long *map,
                unsigned long size, unsigned long start,
-               unsigned int nr, void *data)
+               unsigned int nr, void *data, struct gen_pool *pool)
 {
        unsigned long align_mask = roundup_pow_of_two(nr) - 1;
 
@@ -537,12 +616,14 @@ EXPORT_SYMBOL(gen_pool_first_fit_order_align);
  * @start: The bitnumber to start searching at
  * @nr: The number of zeroed bits we're looking for
  * @data: additional data - unused
+ * @pool: pool to find the fit region memory from
  *
  * Iterate over the bitmap to find the smallest free region
  * which we can allocate the memory.
  */
 unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
-               unsigned long start, unsigned int nr, void *data)
+               unsigned long start, unsigned int nr, void *data,
+               struct gen_pool *pool)
 {
        unsigned long start_bit = size;
        unsigned long len = size + 1;
index bec27fce7501702a7da2a16c621c1b91adb938f5..682aae8a1fef2d78ba289bb63cdf697b84cb1648 100644 (file)
@@ -101,6 +101,7 @@ static void raid6_altivec$#_gen_syndrome(int disks, size_t bytes, void **ptrs)
 
        raid6_altivec$#_gen_syndrome_real(disks, bytes, ptrs);
 
+       disable_kernel_altivec();
        preempt_enable();
 }
 
index 826470d7f00077278875a68c577bccbc7a82d5ea..96e2486a6fc479559eba03662317c82f802a0f43 100755 (executable)
@@ -263,7 +263,8 @@ if ($arch eq "x86_64") {
 
 } elsif ($arch eq "powerpc") {
     $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
-    $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?.*?)>:";
+    # See comment in the sparc64 section for why we use '\w'.
+    $function_regex = "^([0-9a-fA-F]+)\\s+<(\\.?\\w*?)>:";
     $mcount_regex = "^\\s*([0-9a-fA-F]+):.*\\s\\.?_mcount\$";
 
     if ($bits == 64) {
index b4709ea588c1bc608ee4240a512bc86883f7a92d..6fa673316ac2caf6367459a4604446ea1cf8bee6 100644 (file)
@@ -1 +1,2 @@
 gettimeofday
+context_switch
index 5fa48702070dd3d0e05ba33cae0808f4d427f6fb..912445ff7ce76637619d4a8e47beae59b3678001 100644 (file)
@@ -1,4 +1,4 @@
-TEST_PROGS := gettimeofday
+TEST_PROGS := gettimeofday context_switch
 
 CFLAGS += -O2
 
@@ -6,6 +6,9 @@ all: $(TEST_PROGS)
 
 $(TEST_PROGS): ../harness.c
 
+context_switch: ../utils.c
+context_switch: LDLIBS += -lpthread
+
 include ../../lib.mk
 
 clean:
diff --git a/tools/testing/selftests/powerpc/benchmarks/context_switch.c b/tools/testing/selftests/powerpc/benchmarks/context_switch.c
new file mode 100644 (file)
index 0000000..7b78594
--- /dev/null
@@ -0,0 +1,466 @@
+/*
+ * Context switch microbenchmark.
+ *
+ * Copyright (C) 2015 Anton Blanchard <anton@au.ibm.com>, IBM
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define _GNU_SOURCE
+#include <sched.h>
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <getopt.h>
+#include <signal.h>
+#include <assert.h>
+#include <pthread.h>
+#include <limits.h>
+#include <sys/time.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/shm.h>
+#include <linux/futex.h>
+
+#include "../utils.h"
+
+static unsigned int timeout = 30;
+
+static int touch_vdso;
+struct timeval tv;
+
+static int touch_fp = 1;
+double fp;
+
+static int touch_vector = 1;
+typedef int v4si __attribute__ ((vector_size (16)));
+v4si a, b, c;
+
+#ifdef __powerpc__
+static int touch_altivec = 1;
+
+static void __attribute__((__target__("no-vsx"))) altivec_touch_fn(void)
+{
+       c = a + b;
+}
+#endif
+
+static void touch(void)
+{
+       if (touch_vdso)
+               gettimeofday(&tv, NULL);
+
+       if (touch_fp)
+               fp += 0.1;
+
+#ifdef __powerpc__
+       if (touch_altivec)
+               altivec_touch_fn();
+#endif
+
+       if (touch_vector)
+               c = a + b;
+
+       asm volatile("# %0 %1 %2": : "r"(&tv), "r"(&fp), "r"(&c));
+}
+
/* Spawn a pthread running fn(arg), pinned to the given CPU. */
static void start_thread_on(void *(*fn)(void *), void *arg, unsigned long cpu)
{
	pthread_t tid;
	cpu_set_t cpuset;
	pthread_attr_t attr;
	int rc;

	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);

	pthread_attr_init(&attr);

	/*
	 * pthread_* functions return the error code and do NOT set errno,
	 * so perror() here would print a stale/unrelated message; report
	 * the returned code explicitly instead.
	 */
	rc = pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
	if (rc) {
		fprintf(stderr, "pthread_attr_setaffinity_np: %s\n",
			strerror(rc));
		exit(1);
	}

	rc = pthread_create(&tid, &attr, fn, arg);
	if (rc) {
		fprintf(stderr, "pthread_create: %s\n", strerror(rc));
		exit(1);
	}
}
+
/* Fork a child pinned to the given CPU; the child runs fn(arg) and exits. */
static void start_process_on(void *(*fn)(void *), void *arg, unsigned long cpu)
{
	cpu_set_t cpuset;
	int child;

	child = fork();
	if (child == -1) {
		perror("fork");
		exit(1);
	}

	if (child)
		return;		/* parent: nothing more to do */

	/* Child: pin ourselves to the requested CPU, then run the worker. */
	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);

	if (sched_setaffinity(0, sizeof(cpuset), &cpuset)) {
		perror("sched_setaffinity");
		exit(1);
	}

	fn(arg);

	exit(0);
}
+
+static unsigned long iterations;
+static unsigned long iterations_prev;
+
+static void sigalrm_handler(int junk)
+{
+       unsigned long i = iterations;
+
+       printf("%ld\n", i - iterations_prev);
+       iterations_prev = i;
+
+       if (--timeout == 0)
+               kill(0, SIGUSR1);
+
+       alarm(1);
+}
+
/* Benchmark-over signal: terminate this process cleanly. */
static void sigusr1_handler(int junk)
{
	exit(0);
}
+
+struct actions {
+       void (*setup)(int, int);
+       void *(*thread1)(void *);
+       void *(*thread2)(void *);
+};
+
+#define READ 0
+#define WRITE 1
+
+static int pipe_fd1[2];
+static int pipe_fd2[2];
+
+static void pipe_setup(int cpu1, int cpu2)
+{
+       if (pipe(pipe_fd1) || pipe(pipe_fd2))
+               exit(1);
+}
+
+static void *pipe_thread1(void *arg)
+{
+       signal(SIGALRM, sigalrm_handler);
+       alarm(1);
+
+       while (1) {
+               assert(read(pipe_fd1[READ], &c, 1) == 1);
+               touch();
+
+               assert(write(pipe_fd2[WRITE], &c, 1) == 1);
+               touch();
+
+               iterations += 2;
+       }
+
+       return NULL;
+}
+
+static void *pipe_thread2(void *arg)
+{
+       while (1) {
+               assert(write(pipe_fd1[WRITE], &c, 1) == 1);
+               touch();
+
+               assert(read(pipe_fd2[READ], &c, 1) == 1);
+               touch();
+       }
+
+       return NULL;
+}
+
+static struct actions pipe_actions = {
+       .setup = pipe_setup,
+       .thread1 = pipe_thread1,
+       .thread2 = pipe_thread2,
+};
+
/* The yield benchmark only makes sense with both threads on one CPU. */
static void yield_setup(int cpu1, int cpu2)
{
	if (cpu1 != cpu2) {
		fprintf(stderr, "Both threads must be on the same CPU for yield test\n");
		exit(1);
	}
}
+
+static void *yield_thread1(void *arg)
+{
+       signal(SIGALRM, sigalrm_handler);
+       alarm(1);
+
+       while (1) {
+               sched_yield();
+               touch();
+
+               iterations += 2;
+       }
+
+       return NULL;
+}
+
/* Yield benchmark thread B: just keep yielding back to thread A. */
static void *yield_thread2(void *arg)
{
	for (;;) {
		sched_yield();
		touch();
	}

	return NULL;
}
+
+static struct actions yield_actions = {
+       .setup = yield_setup,
+       .thread1 = yield_thread1,
+       .thread2 = yield_thread2,
+};
+
+/* Raw futex(2) syscall wrapper (glibc exposes no futex() function). */
+static long sys_futex(void *addr1, int op, int val1, struct timespec *timeout,
+                     void *addr2, int val3)
+{
+       return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
+}
+
+/*
+ * Compare-and-swap: if *p == expected, store desired.  Returns the
+ * value *p held before the operation (== expected on success).
+ */
+static unsigned long cmpxchg(unsigned long *p, unsigned long expected,
+                            unsigned long desired)
+{
+       unsigned long exp = expected;
+
+       __atomic_compare_exchange_n(p, &exp, desired, 0,
+                                   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
+       return exp;
+}
+
+/* Atomically store val into *p, returning the previous value. */
+static unsigned long xchg(unsigned long *p, unsigned long val)
+{
+       return __atomic_exchange_n(p, val, __ATOMIC_SEQ_CST);
+}
+
+/*
+ * Minimal three-state futex mutex (cf. Drepper, "Futexes Are Tricky"):
+ *   0 = unlocked, 1 = locked/no waiters, 2 = locked/maybe waiters.
+ * The uncontended fast path is a single cmpxchg with no syscall.
+ */
+static int mutex_lock(unsigned long *m)
+{
+       int c;
+
+       c = cmpxchg(m, 0, 1);
+       if (!c)
+               return 0;
+
+       /* Contended: flag "waiters present" then sleep until woken. */
+       if (c == 1)
+               c = xchg(m, 2);
+
+       while (c) {
+               sys_futex(m, FUTEX_WAIT, 2, NULL, NULL, 0);
+               c = xchg(m, 2);
+       }
+
+       return 0;
+}
+
+/* Release the mutex; FUTEX_WAKE only when waiters may exist. */
+static int mutex_unlock(unsigned long *m)
+{
+       if (*m == 2)
+               *m = 0;
+       else if (xchg(m, 0) == 1)
+               return 0;
+
+       sys_futex(m, FUTEX_WAKE, 1, NULL, NULL, 0);
+
+       return 0;
+}
+
+/* The two mutexes live in SysV shared memory so --process mode works. */
+static unsigned long *m1, *m2;
+
+static void futex_setup(int cpu1, int cpu2)
+{
+       int shmid;
+       void *shmaddr;
+
+       shmid = shmget(IPC_PRIVATE, getpagesize(), SHM_R | SHM_W);
+       if (shmid < 0) {
+               perror("shmget");
+               exit(1);
+       }
+
+       shmaddr = shmat(shmid, NULL, 0);
+       if (shmaddr == (char *)-1) {
+               perror("shmat");
+               shmctl(shmid, IPC_RMID, NULL);
+               exit(1);
+       }
+
+       /* Mark for deletion now; the mapping stays valid until detach. */
+       shmctl(shmid, IPC_RMID, NULL);
+
+       m1 = shmaddr;
+       m2 = shmaddr + sizeof(*m1);
+
+       *m1 = 0;
+       *m2 = 0;
+
+       /* Start with both mutexes held so the workers hand off in lockstep. */
+       mutex_lock(m1);
+       mutex_lock(m2);
+}
+
+/* First futex worker: owns the stats; each handoff is two switches. */
+static void *futex_thread1(void *arg)
+{
+       signal(SIGALRM, sigalrm_handler);
+       alarm(1);
+
+       while (1) {
+               mutex_lock(m2);
+               mutex_unlock(m1);
+
+               iterations += 2;
+       }
+
+       return NULL;
+}
+
+/* Second futex worker: the other half of the m1/m2 handshake. */
+static void *futex_thread2(void *arg)
+{
+       while (1) {
+               mutex_unlock(m2);
+               mutex_lock(m1);
+       }
+
+       return NULL;
+}
+
+static struct actions futex_actions = {
+       .setup = futex_setup,
+       .thread1 = futex_thread1,
+       .thread2 = futex_thread2,
+};
+
+/* Non-zero when --process was given: fork() workers instead of threads. */
+static int processes;
+
+static struct option options[] = {
+       { "test", required_argument, 0, 't' },
+       { "process", no_argument, &processes, 1 },
+       { "timeout", required_argument, 0, 's' },
+       { "vdso", no_argument, &touch_vdso, 1 },
+       { "no-fp", no_argument, &touch_fp, 0 },
+#ifdef __powerpc__
+       { "no-altivec", no_argument, &touch_altivec, 0 },
+#endif
+       { "no-vector", no_argument, &touch_vector, 0 },
+       { 0, },
+};
+
+/*
+ * NOTE(review): the help text advertises --fp/--altivec/--vector, but
+ * the option table above defines --no-fp/--no-altivec/--no-vector (the
+ * touches default to on) -- these usage strings look stale; confirm
+ * against the intended interface.
+ */
+static void usage(void)
+{
+       fprintf(stderr, "Usage: context_switch2 <options> CPU1 CPU2\n\n");
+       fprintf(stderr, "\t\t--test=X\tpipe, futex or yield (default)\n");
+       fprintf(stderr, "\t\t--process\tUse processes (default threads)\n");
+       fprintf(stderr, "\t\t--timeout=X\tDuration in seconds to run (default 30)\n");
+       fprintf(stderr, "\t\t--vdso\t\ttouch VDSO\n");
+       fprintf(stderr, "\t\t--fp\t\ttouch FP\n");
+#ifdef __powerpc__
+       fprintf(stderr, "\t\t--altivec\ttouch altivec\n");
+#endif
+       fprintf(stderr, "\t\t--vector\ttouch vector\n");
+}
+
+/*
+ * Parse options, pick the benchmark variant (yield by default), resolve
+ * the two CPUs (from argv, or pick_online_cpu() when none are given),
+ * start one worker on each via threads or processes, then sleep while
+ * sigalrm_handler reports progress and eventually broadcasts SIGUSR1.
+ */
+int main(int argc, char *argv[])
+{
+       signed char c;
+       struct actions *actions = &yield_actions;
+       int cpu1;
+       int cpu2;
+       static void (*start_fn)(void *(*fn)(void *), void *arg, unsigned long cpu);
+
+       while (1) {
+               int option_index = 0;
+
+               c = getopt_long(argc, argv, "", options, &option_index);
+
+               if (c == -1)
+                       break;
+
+               switch (c) {
+               case 0:
+                       /* A flag-setting long option: nothing more to do. */
+                       if (options[option_index].flag != 0)
+                               break;
+
+                       usage();
+                       exit(1);
+                       break;
+
+               case 't':
+                       if (!strcmp(optarg, "pipe")) {
+                               actions = &pipe_actions;
+                       } else if (!strcmp(optarg, "yield")) {
+                               actions = &yield_actions;
+                       } else if (!strcmp(optarg, "futex")) {
+                               actions = &futex_actions;
+                       } else {
+                               usage();
+                               exit(1);
+                       }
+                       break;
+
+               case 's':
+                       timeout = atoi(optarg);
+                       break;
+
+               default:
+                       usage();
+                       exit(1);
+               }
+       }
+
+       if (processes)
+               start_fn = start_process_on;
+       else
+               start_fn = start_thread_on;
+
+       /* No CPUs on the command line: run both workers on one auto-picked CPU. */
+       if (((argc - optind) != 2)) {
+               cpu1 = cpu2 = pick_online_cpu();
+       } else {
+               cpu1 = atoi(argv[optind++]);
+               cpu2 = atoi(argv[optind++]);
+       }
+
+       printf("Using %s with ", processes ? "processes" : "threads");
+
+       if (actions == &pipe_actions)
+               printf("pipe");
+       else if (actions == &yield_actions)
+               printf("yield");
+       else
+               printf("futex");
+
+       printf(" on cpus %d/%d touching FP:%s altivec:%s vector:%s vdso:%s\n",
+              cpu1, cpu2, touch_fp ?  "yes" : "no", touch_altivec ? "yes" : "no",
+              touch_vector ? "yes" : "no", touch_vdso ? "yes" : "no");
+
+       /* Create a new process group so we can signal everyone for exit */
+       setpgid(getpid(), getpid());
+
+       signal(SIGUSR1, sigusr1_handler);
+
+       actions->setup(cpu1, cpu2);
+
+       start_fn(actions->thread1, NULL, cpu1);
+       start_fn(actions->thread2, NULL, cpu2);
+
+       /* Workers run forever; the SIGUSR1 broadcast ends the run. */
+       while (1)
+               sleep(3600);
+
+       return 0;
+}
index 8265504de5717b345336c3c2bc11e444e934b729..08a8b95e3bc185830bc8956c94a659d678f1cc18 100644 (file)
@@ -60,14 +60,6 @@ int dscr_inherit_exec(void)
                else
                        set_dscr(dscr);
 
-               /*
-                * XXX: Force a context switch out so that DSCR
-                * current value is copied into the thread struct
-                * which is required for the child to inherit the
-                * changed value.
-                */
-               sleep(1);
-
                pid = fork();
                if (pid == -1) {
                        perror("fork() failed");
index 4e414caf7f4016ba9f624f34da5ab1faf38b4719..3e5a6d195e9ab04ca54d22bfcaeb775ddd5e2892 100644 (file)
@@ -40,14 +40,6 @@ int dscr_inherit(void)
                else
                        set_dscr(dscr);
 
-               /*
-                * XXX: Force a context switch out so that DSCR
-                * current value is copied into the thread struct
-                * which is required for the child to inherit the
-                * changed value.
-                */
-               sleep(1);
-
                pid = fork();
                if (pid == -1) {
                        perror("fork() failed");
index f7997affd14364e349503ffb443b7ddb8d26284b..52f9be7f61f028fd930d793f88146cbf911f8e9e 100644 (file)
@@ -116,46 +116,3 @@ int test_harness(int (test_function)(void), char *name)
 
        return rc;
 }
-
-static char auxv[4096];
-
-void *get_auxv_entry(int type)
-{
-       ElfW(auxv_t) *p;
-       void *result;
-       ssize_t num;
-       int fd;
-
-       fd = open("/proc/self/auxv", O_RDONLY);
-       if (fd == -1) {
-               perror("open");
-               return NULL;
-       }
-
-       result = NULL;
-
-       num = read(fd, auxv, sizeof(auxv));
-       if (num < 0) {
-               perror("read");
-               goto out;
-       }
-
-       if (num > sizeof(auxv)) {
-               printf("Overflowed auxv buffer\n");
-               goto out;
-       }
-
-       p = (ElfW(auxv_t) *)auxv;
-
-       while (p->a_type != AT_NULL) {
-               if (p->a_type == type) {
-                       result = (void *)p->a_un.a_val;
-                       break;
-               }
-
-               p++;
-       }
-out:
-       close(fd);
-       return result;
-}
index a9099d9f8f39ea8a38a6345b15e2d24ab2e00eb7..ac41a7177f2e2cd659ca89c9a56c8473a502dde3 100644 (file)
@@ -2,7 +2,7 @@ noarg:
        $(MAKE) -C ../
 
 TEST_PROGS := count_instructions l3_bank_test per_event_excludes
-EXTRA_SOURCES := ../harness.c event.c lib.c
+EXTRA_SOURCES := ../harness.c event.c lib.c ../utils.c
 
 all: $(TEST_PROGS) ebb
 
@@ -12,6 +12,8 @@ $(TEST_PROGS): $(EXTRA_SOURCES)
 count_instructions: loop.S count_instructions.c $(EXTRA_SOURCES)
        $(CC) $(CFLAGS) -m64 -o $@ $^
 
+per_event_excludes: ../utils.c
+
 include ../../lib.mk
 
 DEFAULT_RUN_TESTS := $(RUN_TESTS)
index 5cdc9dbf2b279c95cd3f9603759b63bc5c0cfee5..8d2279c4bb4b6a81cb5713f6ba3bf92b72f2e3e7 100644 (file)
@@ -18,7 +18,8 @@ TEST_PROGS := reg_access_test event_attributes_test cycles_test       \
 
 all: $(TEST_PROGS)
 
-$(TEST_PROGS): ../../harness.c ../event.c ../lib.c ebb.c ebb_handler.S trace.c busy_loop.S
+$(TEST_PROGS): ../../harness.c ../../utils.c ../event.c ../lib.c \
+              ebb.c ebb_handler.S trace.c busy_loop.S
 
 instruction_count_test: ../loop.S
 
index 9729d9f9021874842e8a18998ba91e3eeae63c74..e67452f1bcffd47141adf60187b052cdf3ae575e 100644 (file)
@@ -13,7 +13,6 @@
 #include <stdlib.h>
 #include <string.h>
 #include <sys/ioctl.h>
-#include <linux/auxvec.h>
 
 #include "trace.h"
 #include "reg.h"
@@ -324,7 +323,7 @@ bool ebb_is_supported(void)
 {
 #ifdef PPC_FEATURE2_EBB
        /* EBB requires at least POWER8 */
-       return ((long)get_auxv_entry(AT_HWCAP2) & PPC_FEATURE2_EBB);
+       return have_hwcap2(PPC_FEATURE2_EBB);
 #else
        return false;
 #endif
index a07104c2afe69229fb74577c6af48c76ff40cadb..a361ad3334ce36cee93b2801ede22776efb6acb4 100644 (file)
 #include "lib.h"
 
 
-int pick_online_cpu(void)
-{
-       cpu_set_t mask;
-       int cpu;
-
-       CPU_ZERO(&mask);
-
-       if (sched_getaffinity(0, sizeof(mask), &mask)) {
-               perror("sched_getaffinity");
-               return -1;
-       }
-
-       /* We prefer a primary thread, but skip 0 */
-       for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8)
-               if (CPU_ISSET(cpu, &mask))
-                       return cpu;
-
-       /* Search for anything, but in reverse */
-       for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--)
-               if (CPU_ISSET(cpu, &mask))
-                       return cpu;
-
-       printf("No cpus in affinity mask?!\n");
-       return -1;
-}
-
 int bind_to_cpu(int cpu)
 {
        cpu_set_t mask;
index ca5d72ae3be6b0cc8aed1e1caabb2f7b0d062317..0213af4ff332d0c020f73cdc2c780a9c01f0a143 100644 (file)
@@ -19,7 +19,6 @@ union pipe {
        int fds[2];
 };
 
-extern int pick_online_cpu(void);
 extern int bind_to_cpu(int cpu);
 extern int kill_child_and_wait(pid_t child_pid);
 extern int wait_for_child(pid_t child_pid);
diff --git a/tools/testing/selftests/powerpc/scripts/hmi.sh b/tools/testing/selftests/powerpc/scripts/hmi.sh
new file mode 100755 (executable)
index 0000000..83fb253
--- /dev/null
@@ -0,0 +1,89 @@
+#!/bin/sh
+#
+# Copyright 2015, Daniel Axtens, IBM Corporation
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+#  the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+
+# do we have ./getscom, ./putscom?
+if [ -x ./getscom ] && [ -x ./putscom ]; then
+       GETSCOM=./getscom
+       PUTSCOM=./putscom
+elif which getscom > /dev/null; then
+       GETSCOM=$(which getscom)
+       PUTSCOM=$(which putscom)
+else
+       cat <<EOF
+Can't find getscom/putscom in . or \$PATH.
+See https://github.com/open-power/skiboot.
+The tool is in external/xscom-utils
+EOF
+       exit 1
+fi
+
+# We will get 8 HMI events per injection
+# todo: deal with things being offline
+expected_hmis=8
+COUNT_HMIS() {
+    dmesg | grep -c 'Harmless Hypervisor Maintenance interrupt'
+}
+
+# massively expand snooze delay, allowing injection on all cores
+ppc64_cpu --smt-snooze-delay=1000000000
+
+# when we exit, restore it
+trap "ppc64_cpu --smt-snooze-delay=100" 0 1
+
+# for each chip+core combination
+# todo - less fragile parsing
+egrep -o 'OCC: Chip [0-9a-f]+ Core [0-9a-f]' < /sys/firmware/opal/msglog |
+while read chipcore; do
+       chip=$(echo "$chipcore"|awk '{print $3}')
+       core=$(echo "$chipcore"|awk '{print $5}')
+       fir="0x1${core}013100"
+
+       # verify that Core FIR is zero as expected
+       if [ "$($GETSCOM -c 0x${chip} $fir)" != 0 ]; then
+               echo "FIR was not zero before injection for chip $chip, core $core. Aborting!"
+               echo "Result of $GETSCOM -c 0x${chip} $fir:"
+               $GETSCOM -c 0x${chip} $fir
+               echo "If you get a -5 error, the core may be in idle state. Try stress-ng."
+               echo "Otherwise, try $PUTSCOM -c 0x${chip} $fir 0"
+               exit 1
+       fi
+
+       # keep track of the number of HMIs handled
+       old_hmis=$(COUNT_HMIS)
+
+       # do injection, adding a marker to dmesg for clarity
+       echo "Injecting HMI on core $core, chip $chip" | tee /dev/kmsg
+       # inject a RegFile recoverable error
+       if ! $PUTSCOM -c 0x${chip} $fir 2000000000000000 > /dev/null; then
+               echo "Error injecting. Aborting!"
+               exit 1
+       fi
+
+       # now we want to wait for all the HMIs to be processed
+       # we expect one per thread on the core
+       i=0;
+       new_hmis=$(COUNT_HMIS)
+       while [ $new_hmis -lt $((old_hmis + expected_hmis)) ] && [ $i -lt 12 ]; do
+           echo "Seen $((new_hmis - old_hmis)) HMI(s) out of $expected_hmis expected, sleeping"
+           sleep 5;
+           i=$((i + 1))
+           new_hmis=$(COUNT_HMIS)
+       done
+       if [ $i = 12 ]; then
+           echo "Haven't seen expected $expected_hmis recoveries after 1 min. Aborting."
+           exit 1
+       fi
+       echo "Processed $expected_hmis events; presumed success. Check dmesg."
+       echo ""
+done
index 2699635d2cd93dd721f83fc54f7f2d55cb7c5a00..7d0f14b8cb2e465657e509e6e7875627859d348b 100644 (file)
@@ -1,2 +1,5 @@
 tm-resched-dscr
 tm-syscall
+tm-signal-msr-resv
+tm-signal-stack
+tm-vmxcopy
index 4bea62a319dcaf96ee85f58aaa3cf6160d0428d5..737f72c964e65c22ea2923dfb6843f522074e5bc 100644 (file)
@@ -1,8 +1,8 @@
-TEST_PROGS := tm-resched-dscr tm-syscall
+TEST_PROGS := tm-resched-dscr tm-syscall tm-signal-msr-resv tm-signal-stack tm-vmxcopy
 
 all: $(TEST_PROGS)
 
-$(TEST_PROGS): ../harness.c
+$(TEST_PROGS): ../harness.c ../utils.c
 
 tm-syscall: tm-syscall-asm.S
 tm-syscall: CFLAGS += -mhtm -I../../../../../usr/include
index 42d4c8caad813f19143b4237c497d9f64015fbb7..8fde93d6021f67986a931a30e0409f3bb95f5b70 100644 (file)
@@ -29,6 +29,7 @@
 #include <asm/tm.h>
 
 #include "utils.h"
+#include "tm.h"
 
 #define TBEGIN          ".long 0x7C00051D ;"
 #define TEND            ".long 0x7C00055D ;"
@@ -42,6 +43,8 @@ int test_body(void)
 {
        uint64_t rv, dscr1 = 1, dscr2, texasr;
 
+       SKIP_IF(!have_htm());
+
        printf("Check DSCR TM context switch: ");
        fflush(stdout);
        for (;;) {
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-msr-resv.c b/tools/testing/selftests/powerpc/tm/tm-signal-msr-resv.c
new file mode 100644 (file)
index 0000000..d86653f
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2015, Michael Neuling, IBM Corp.
+ * Licensed under GPLv2.
+ *
+ * Test the kernel's signal return code to ensure that it doesn't
+ * crash when both the transactional and suspend MSR bits are set in
+ * the signal context.
+ *
+ * For this test, we send ourselves a SIGUSR1.  In the SIGUSR1 handler
+ * we modify the signal context to set both MSR TM S and T bits (which
+ * is "reserved" by the PowerISA). When we return from the signal
+ * handler (implicit sigreturn), the kernel should detect reserved MSR
+ * value and send us a SIGSEGV.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <signal.h>
+#include <unistd.h>
+
+#include "utils.h"
+#include "tm.h"
+
+/* Set once the signal context has been corrupted; the following
+ * sigreturn is then expected to fault. */
+int segv_expected = 0;
+
+/* SIGSEGV handler: success iff the fault is the one we provoked. */
+void signal_segv(int signum)
+{
+       if (segv_expected && (signum == SIGSEGV))
+               _exit(0);
+       _exit(1);
+}
+
+/* SIGUSR1 handler: corrupt the saved MSR so sigreturn must reject it. */
+void signal_usr1(int signum, siginfo_t *info, void *uc)
+{
+       ucontext_t *ucp = uc;
+
+       /* Link tm checkpointed context to normal context */
+       ucp->uc_link = ucp;
+       /* Set all TM bits so that the context is now invalid */
+#ifdef __powerpc64__
+       ucp->uc_mcontext.gp_regs[PT_MSR] |= (7ULL << 32);
+#else
+       ucp->uc_mcontext.regs->gpr[PT_MSR] |= (7ULL);
+#endif
+       /* Should segv on return because of invalid context */
+       segv_expected = 1;
+}
+
+/* Test body: install handlers, raise SIGUSR1 and rely on the corrupted
+ * sigreturn to deliver the expected SIGSEGV (which exits 0). */
+int tm_signal_msr_resv()
+{
+       struct sigaction act;
+
+       SKIP_IF(!have_htm());
+
+       act.sa_sigaction = signal_usr1;
+       sigemptyset(&act.sa_mask);
+       act.sa_flags = SA_SIGINFO;
+       if (sigaction(SIGUSR1, &act, NULL) < 0) {
+               perror("sigaction sigusr1");
+               exit(1);
+       }
+       if (signal(SIGSEGV, signal_segv) == SIG_ERR)
+               exit(1);
+
+       raise(SIGUSR1);
+
+       /* We shouldn't get here as we exit in the segv handler */
+       return 1;
+}
+
+int main(void)
+{
+       return test_harness(tm_signal_msr_resv, "tm_signal_msr_resv");
+}
diff --git a/tools/testing/selftests/powerpc/tm/tm-signal-stack.c b/tools/testing/selftests/powerpc/tm/tm-signal-stack.c
new file mode 100644 (file)
index 0000000..e44a238
--- /dev/null
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2015, Michael Neuling, IBM Corp.
+ * Licensed under GPLv2.
+ *
+ * Test the kernel's signal delivery code to ensure that we don't
+ * treclaim twice in the kernel signal delivery code.  This can happen
+ * if we trigger a signal when in a transaction and the stack pointer
+ * is bogus.
+ *
+ * This test case registers a SEGV handler, sets the stack pointer
+ * (r1) to NULL, starts a transaction and then generates a SEGV.  The
+ * SEGV should be handled but we exit here as the stack pointer is
+ * invalid and hence we can't sigreturn.  We only need to check that
+ * this flow doesn't crash the kernel.
+ */
+
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <signal.h>
+
+#include "utils.h"
+#include "tm.h"
+
+/* SIGSEGV handler: should be unreachable since r1 is bogus at delivery. */
+void signal_segv(int signum)
+{
+       /* This should never actually run since stack is foobar */
+       exit(1);
+}
+
+/* Fork a child that takes a SEGV inside a transaction with r1 == NULL;
+ * the test passes as long as the child (and kernel) survive. */
+int tm_signal_stack()
+{
+       int pid;
+
+       SKIP_IF(!have_htm());
+
+       pid = fork();
+       if (pid < 0)
+               exit(1);
+
+       if (pid) { /* Parent */
+               /*
+                * It's likely the whole machine will crash here so if
+                * the child ever exits, we are good.
+                */
+               wait(NULL);
+               return 0;
+       }
+
+       /*
+        * The flow here is:
+        * 1) register a signal handler (so signal delivery occurs)
+        * 2) make stack pointer (r1) = NULL
+        * 3) start transaction
+        * 4) cause segv
+        */
+       if (signal(SIGSEGV, signal_segv) == SIG_ERR)
+               exit(1);
+       asm volatile("li 1, 0 ;"                /* stack ptr == NULL */
+                    "1:"
+                    ".long 0x7C00051D ;"       /* tbegin */
+                    "beq 1b ;"                 /* retry forever */
+                    ".long 0x7C0005DD ; ;"     /* tsuspend */
+                    "ld 2, 0(1) ;"             /* trigger segv */
+                    : : : "memory");
+
+       /* This should never get here due to above segv */
+       return 1;
+}
+
+int main(void)
+{
+       return test_harness(tm_signal_stack, "tm_signal_stack");
+}
index e835bf7ec7aedd786a72c192b176e899d202e2c7..60560cb20e380701fcd58aa5d19f749e479c9eb6 100644 (file)
 #include <unistd.h>
 #include <sys/syscall.h>
 #include <asm/tm.h>
-#include <asm/cputable.h>
-#include <linux/auxvec.h>
 #include <sys/time.h>
 #include <stdlib.h>
 
 #include "utils.h"
+#include "tm.h"
 
 extern int getppid_tm_active(void);
 extern int getppid_tm_suspended(void);
@@ -77,16 +76,6 @@ pid_t getppid_tm(bool suspend)
        exit(-1);
 }
 
-static inline bool have_htm_nosc(void)
-{
-#ifdef PPC_FEATURE2_HTM_NOSC
-       return ((long)get_auxv_entry(AT_HWCAP2) & PPC_FEATURE2_HTM_NOSC);
-#else
-       printf("PPC_FEATURE2_HTM_NOSC not defined, can't check AT_HWCAP2\n");
-       return false;
-#endif
-}
-
 int tm_syscall(void)
 {
        unsigned count = 0;
diff --git a/tools/testing/selftests/powerpc/tm/tm-vmxcopy.c b/tools/testing/selftests/powerpc/tm/tm-vmxcopy.c
new file mode 100644 (file)
index 0000000..0274de7
--- /dev/null
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2015, Michael Neuling, IBM Corp.
+ * Licensed under GPLv2.
+ *
+ * Original: Michael Neuling 4/12/2013
+ * Edited: Rashmica Gupta 4/12/2015
+ *
+ * See if the altivec state is leaked out of an aborted transaction due to
+ * kernel vmx copy loops.
+ *
+ * When the transaction aborts, VSR values should rollback to the values
+ * they held before the transaction commenced. Using VSRs while transaction
+ * is suspended should not affect the checkpointed values.
+ *
+ * (1) write A to a VSR
+ * (2) start transaction
+ * (3) suspend transaction
+ * (4) change the VSR to B
+ * (5) trigger kernel vmx copy loop
+ * (6) abort transaction
+ * (7) check that the VSR value is A
+ */
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <string.h>
+#include <assert.h>
+
+#include "tm.h"
+#include "utils.h"
+
+/*
+ * Write VSR40 before a transaction, zero it while suspended, force a
+ * kernel vmx copy (store to a fresh file-backed page), abort, and check
+ * the checkpointed VSR40 value was restored on rollback.
+ */
+int test_vmxcopy()
+{
+       long double vecin = 1.3;
+       long double vecout;
+       unsigned long pgsize = getpagesize();
+       int i;
+       int fd;
+       int size = pgsize*16;
+       char tmpfile[] = "/tmp/page_faultXXXXXX";
+       char buf[pgsize];
+       char *a;
+       uint64_t aborted = 0;
+
+       SKIP_IF(!have_htm());
+
+       /* Back a private mapping with a throw-away temp file. */
+       fd = mkstemp(tmpfile);
+       assert(fd >= 0);
+
+       memset(buf, 0, pgsize);
+       for (i = 0; i < size; i += pgsize)
+               assert(write(fd, buf, pgsize) == pgsize);
+
+       unlink(tmpfile);
+
+       a = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
+       assert(a != MAP_FAILED);
+
+       asm __volatile__(
+               "lxvd2x 40,0,%[vecinptr];"      /* set 40 to initial value */
+               "tbegin.;"
+               "beq    3f;"
+               "tsuspend.;"
+               "xxlxor 40,40,40;"              /* set 40 to 0 */
+               "std    5, 0(%[map]);"          /* cause kernel vmx copy page */
+               "tabort. 0;"
+               "tresume.;"
+               "tend.;"
+               "li     %[res], 0;"
+               "b      5f;"
+
+               /* Abort handler */
+               "3:;"
+               "li     %[res], 1;"
+
+               "5:;"
+               "stxvd2x 40,0,%[vecoutptr];"
+               : [res]"=r"(aborted)
+               : [vecinptr]"r"(&vecin),
+                 [vecoutptr]"r"(&vecout),
+                 [map]"r"(a)
+               : "memory", "r0", "r3", "r4", "r5", "r6", "r7");
+
+       /* Only meaningful if the transaction actually aborted. */
+       if (aborted && (vecin != vecout)){
+               printf("FAILED: vector state leaked on abort %f != %f\n",
+                      (double)vecin, (double)vecout);
+               return 1;
+       }
+
+       munmap(a, size);
+
+       close(fd);
+
+       return 0;
+}
+
+int main(void)
+{
+       return test_harness(test_vmxcopy, "tm_vmxcopy");
+}
diff --git a/tools/testing/selftests/powerpc/tm/tm.h b/tools/testing/selftests/powerpc/tm/tm.h
new file mode 100644 (file)
index 0000000..24144b2
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2015, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#ifndef _SELFTESTS_POWERPC_TM_TM_H
+#define _SELFTESTS_POWERPC_TM_TM_H
+
+#include <stdbool.h>
+#include <asm/cputable.h>
+
+#include "../utils.h"
+
+/* Runtime check: does AT_HWCAP2 report hardware transactional memory? */
+static inline bool have_htm(void)
+{
+#ifdef PPC_FEATURE2_HTM
+       return have_hwcap2(PPC_FEATURE2_HTM);
+#else
+       printf("PPC_FEATURE2_HTM not defined, can't check AT_HWCAP2\n");
+       return false;
+#endif
+}
+
+/* As above, for the PPC_FEATURE2_HTM_NOSC feature bit. */
+static inline bool have_htm_nosc(void)
+{
+#ifdef PPC_FEATURE2_HTM_NOSC
+       return have_hwcap2(PPC_FEATURE2_HTM_NOSC);
+#else
+       printf("PPC_FEATURE2_HTM_NOSC not defined, can't check AT_HWCAP2\n");
+       return false;
+#endif
+}
+
+#endif /* _SELFTESTS_POWERPC_TM_TM_H */
diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c
new file mode 100644 (file)
index 0000000..dcf7418
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2013-2015, Michael Ellerman, IBM Corp.
+ * Licensed under GPLv2.
+ */
+
+#define _GNU_SOURCE    /* For CPU_ZERO etc. */
+
+#include <elf.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <link.h>
+#include <sched.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+/* Scratch buffer for the contents of /proc/self/auxv. */
+static char auxv[4096];
+
+/*
+ * Look up an entry in this process's ELF auxiliary vector.
+ * Returns the a_val of the first entry of the given type, or NULL if
+ * the type is absent or /proc/self/auxv can't be read.  Note NULL is
+ * also returned for an entry whose value is genuinely 0.
+ */
+void *get_auxv_entry(int type)
+{
+       ElfW(auxv_t) *p;
+       void *result;
+       ssize_t num;
+       int fd;
+
+       fd = open("/proc/self/auxv", O_RDONLY);
+       if (fd == -1) {
+               perror("open");
+               return NULL;
+       }
+
+       result = NULL;
+
+       num = read(fd, auxv, sizeof(auxv));
+       if (num < 0) {
+               perror("read");
+               goto out;
+       }
+
+       if (num > sizeof(auxv)) {
+               printf("Overflowed auxv buffer\n");
+               goto out;
+       }
+
+       p = (ElfW(auxv_t) *)auxv;
+
+       while (p->a_type != AT_NULL) {
+               if (p->a_type == type) {
+                       result = (void *)p->a_un.a_val;
+                       break;
+               }
+
+               p++;
+       }
+out:
+       close(fd);
+       return result;
+}
+
+/*
+ * Choose a CPU (from our affinity mask) for tests to bind to.  Prefers
+ * a primary thread (every 8th CPU) while skipping CPU 0, then falls
+ * back to the highest-numbered CPU in the mask.  Returns -1 on error
+ * or if the mask is empty.
+ */
+int pick_online_cpu(void)
+{
+       cpu_set_t mask;
+       int cpu;
+
+       CPU_ZERO(&mask);
+
+       if (sched_getaffinity(0, sizeof(mask), &mask)) {
+               perror("sched_getaffinity");
+               return -1;
+       }
+
+       /* We prefer a primary thread, but skip 0 */
+       for (cpu = 8; cpu < CPU_SETSIZE; cpu += 8)
+               if (CPU_ISSET(cpu, &mask))
+                       return cpu;
+
+       /* Search for anything, but in reverse */
+       for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--)
+               if (CPU_ISSET(cpu, &mask))
+                       return cpu;
+
+       printf("No cpus in affinity mask?!\n");
+       return -1;
+}
index b7d41086bb0a72978263da62440e04c600478bda..175ac6ad10dde4cc8b753a063d456fe3b8be1269 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <stdint.h>
 #include <stdbool.h>
+#include <linux/auxvec.h>
 
 /* Avoid headaches with PRI?64 - just use %ll? always */
 typedef unsigned long long u64;
@@ -21,6 +22,12 @@ typedef uint8_t u8;
 
 int test_harness(int (test_function)(void), char *name);
 extern void *get_auxv_entry(int type);
+int pick_online_cpu(void);
+
+/* True iff every bit in ftr2 is set in the AT_HWCAP2 auxv entry. */
+static inline bool have_hwcap2(unsigned long ftr2)
+{
+       return ((unsigned long)get_auxv_entry(AT_HWCAP2) & ftr2) == ftr2;
+}
 
 /* Yes, this is evil */
 #define FAIL_IF(x)                                             \