[IA64] Speed up lfetch.fault [NULL]
author     David Mosberger-Tang <davidm@hpl.hp.com>
           Wed, 4 May 2005 20:25:00 +0000 (13:25 -0700)
committer  Tony Luck <tony.luck@intel.com>
           Tue, 28 Jun 2005 16:28:16 +0000 (09:28 -0700)
This patch greatly speeds up the handling of lfetch.fault instructions
which result in NaT consumption. Due to the NaT-page mapped at address
0, this is guaranteed to happen when lfetch.fault'ing a NULL pointer.
With this patch in place, we can even define prefetch()/prefetchw() as
lfetch.fault without significant performance degradation.  More
importantly, it allows compilers to be more aggressive with using
lfetch.fault on pointers that might be NULL.

Signed-off-by: David Mosberger-Tang <davidm@hpl.hp.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
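
As an illustration of the last point, here is a minimal sketch (not the kernel's actual header definitions, which use the ia64_lfetch* intrinsics) of how prefetch()/prefetchw() could be expressed directly as lfetch.fault / lfetch.fault.excl with GNU C inline assembly; with the fast path below, a NULL argument simply hits the NaT page at address 0 and is handled cheaply:

    /* Sketch only, assuming GNU C inline asm as used in the ia64 headers. */
    static inline void prefetch(const void *x)
    {
            asm volatile ("lfetch.fault [%0]" : : "r" (x));      /* read hint  */
    }

    static inline void prefetchw(const void *x)
    {
            asm volatile ("lfetch.fault.excl [%0]" : : "r" (x)); /* write hint */
    }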
arch/ia64/kernel/ivt.S

index b28d2212a77939d6b0bce3b956d4abf98489790d..3bb3a13c4047566d70057db9342c93da06fefd8e 100644 (file)
@@ -1243,6 +1243,25 @@ END(disabled_fp_reg)
 // 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
 ENTRY(nat_consumption)
        DBG_FAULT(26)
+
+       mov r16=cr.ipsr
+       mov r17=cr.isr
+       mov r31=pr                              // save PR
+       ;;
+       and r18=0xf,r17                         // r18 = cr.isr.code{3:0}
+       tbit.z p6,p0=r17,IA64_ISR_NA_BIT        // p6 <- (cr.isr.na == 0)
+       ;;
+       cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18 // p6 |= (cr.isr.code{3:0} != LFETCH)
+       dep r16=-1,r16,IA64_PSR_ED_BIT,1        // set psr.ed in the saved cr.ipsr value
+(p6)   br.cond.spnt 1f         // branch if (cr.isr.na == 0 || cr.isr.code{3:0} != LFETCH)
+       ;;
+       mov cr.ipsr=r16         // set cr.ipsr.ed so the rfi lets the faulting lfetch retire as a no-op
+       mov pr=r31,-1
+       ;;
+       rfi
+
+1:     mov pr=r31,-1           // restore PR and fall through to the normal fault path
+       ;;
        FAULT(26)
 END(nat_consumption)
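
In C-like terms, the fast path added above does roughly the following; this is a sketch only, and the bit/code values are illustrative placeholders standing in for the kernel's IA64_ISR_NA_BIT, IA64_ISR_CODE_LFETCH and IA64_PSR_ED_BIT definitions:

    #include <stdint.h>
    #include <stdbool.h>

    /* Illustrative placeholders for the kernel's register-field constants. */
    #define ISR_NA_BIT       35
    #define ISR_CODE_LFETCH   4
    #define PSR_ED_BIT       43

    /* Returns true if the NaT consumption was caused by an lfetch.fault and
     * was handled by setting psr.ed, so that the rfi lets the lfetch retire
     * as a no-op; returns false if the generic FAULT(26) path must run. */
    static bool lfetch_fault_fastpath(uint64_t isr, uint64_t *ipsr)
    {
            if (!(isr & (1ULL << ISR_NA_BIT)) || (isr & 0xf) != ISR_CODE_LFETCH)
                    return false;                /* a real NaT consumption */

            *ipsr |= 1ULL << PSR_ED_BIT;         /* defer the exception on return */
            return true;
    }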