parisc: fix bugs in pa_memcpy
Author:     Mikulas Patocka <mpatocka@redhat.com>
AuthorDate: Fri, 14 Apr 2017 18:15:20 +0000 (14:15 -0400)
Commit:     Helge Deller <deller@gmx.de>
CommitDate: Sat, 15 Apr 2017 15:24:05 +0000 (17:24 +0200)
The patch 554bfeceb8a22d448cd986fc9efce25e833278a1 ("parisc: Fix access
fault handling in pa_memcpy()") reimplements the pa_memcpy function.
Unfortunately, it makes the kernel unbootable. The crash happens in the
function ide_complete_cmd, where memcpy is called with the same source
and destination address.
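
The following is a minimal, hypothetical C sketch of the triggering
pattern (the struct and function names are stand-ins, not the actual
drivers/ide code): a memcpy whose source and destination are the same
address must complete without faulting or corrupting data.

	#include <string.h>

	struct taskfile { unsigned char data[16]; };	/* stand-in type */

	static void complete_cmd(struct taskfile *tf)
	{
		/* src == dst: ISO C leaves overlapping copies undefined,
		 * but the kernel's memcpy is expected to tolerate this
		 * exact aliasing, and the broken pa_memcpy crashed on it. */
		memcpy(tf, tf, sizeof(*tf));
	}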

This patch fixes a few bugs in pa_memcpy:

* When jumping to .Lcopy_loop_16 for the first time, don't skip the
  instruction "ldi 31,t0" (this bug made the kernel unbootable)
* Use the COND macro when comparing length, so that the comparison is
  64-bit (a theoretical issue, in case the length is greater than
  0xffffffff; see the C sketch after this list)
* Don't use the COND macro after the "extru" instruction (the PA-RISC
  specification says that the upper 32 bits of the extru result are
  undefined, although they are set to zero in practice)
* Fix exception addresses in .Lcopy16_fault and .Lcopy8_fault
* Rename .Lcopy_loop_4 to .Lcopy_loop_8 (so that it is consistent with
  .Lcopy8_fault)
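
Both comparison issues have a straightforward C analogue. The sketch
below is a hypothetical illustration, not kernel code; it assumes a
64-bit machine where, as on a wide PA-RISC kernel, registers hold 64
bits but some instructions only define the low 32 bits of their result.

	#include <stdio.h>

	int main(void)
	{
		unsigned long len = 0x100000000UL;	/* > 0xffffffff */

		/* Bug: a 32-bit compare (cmpib without COND) sees only the
		 * low word of len, which is 0 here, so the short-copy path
		 * would be taken for a 4 GiB length. */
		if ((unsigned int)len <= 15)
			printf("short-copy path taken by mistake\n");

		/* Fix: COND() widens the compare to the full 64 bits. */
		if (len <= 15)
			printf("not printed: len is really 4 GiB\n");

		/* The converse case: extru deposits a bit field into the
		 * low bits and leaves the upper 32 bits undefined, so its
		 * result must be compared with a 32-bit compare, i.e.
		 * without COND(). The mask models the defined bits. */
		unsigned int t1 = (unsigned int)(len & 7);
		if (t1 == 0)
			printf("dst is 8-byte aligned\n");
		return 0;
	}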

Cc: <stable@vger.kernel.org> # v4.9+
Fixes: 554bfeceb8a2 ("parisc: Fix access fault handling in pa_memcpy()")
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Helge Deller <deller@gmx.de>
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index f01188c044ee83e41ba52162544464781056f262..85c28bb80fb7433dfcfac2fb10f6cc121448119f 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -201,7 +201,7 @@ ENTRY_CFI(pa_memcpy)
        add     dst,len,end
 
        /* short copy with less than 16 bytes? */
-       cmpib,>>=,n 15,len,.Lbyte_loop
+       cmpib,COND(>>=),n 15,len,.Lbyte_loop
 
        /* same alignment? */
        xor     src,dst,t0
@@ -216,7 +216,7 @@ ENTRY_CFI(pa_memcpy)
        /* loop until we are 64-bit aligned */
 .Lalign_loop64:
        extru   dst,31,3,t1
-       cmpib,=,n       0,t1,.Lcopy_loop_16
+       cmpib,=,n       0,t1,.Lcopy_loop_16_start
 20:    ldb,ma  1(srcspc,src),t1
 21:    stb,ma  t1,1(dstspc,dst)
        b       .Lalign_loop64
@@ -225,6 +225,7 @@ ENTRY_CFI(pa_memcpy)
        ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
 
+.Lcopy_loop_16_start:
        ldi     31,t0
 .Lcopy_loop_16:
        cmpb,COND(>>=),n t0,len,.Lword_loop
@@ -267,7 +268,7 @@ ENTRY_CFI(pa_memcpy)
        /* loop until we are 32-bit aligned */
 .Lalign_loop32:
        extru   dst,31,2,t1
-       cmpib,=,n       0,t1,.Lcopy_loop_4
+       cmpib,=,n       0,t1,.Lcopy_loop_8
 20:    ldb,ma  1(srcspc,src),t1
 21:    stb,ma  t1,1(dstspc,dst)
        b       .Lalign_loop32
@@ -277,7 +278,7 @@ ENTRY_CFI(pa_memcpy)
        ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
 
 
-.Lcopy_loop_4:
+.Lcopy_loop_8:
        cmpib,COND(>>=),n 15,len,.Lbyte_loop
 
 10:    ldw     0(srcspc,src),t1
@@ -299,7 +300,7 @@ ENTRY_CFI(pa_memcpy)
        ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
        ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
 
-       b       .Lcopy_loop_4
+       b       .Lcopy_loop_8
        ldo     -16(len),len
 
 .Lbyte_loop:
@@ -324,7 +325,7 @@ ENTRY_CFI(pa_memcpy)
 .Lunaligned_copy:
        /* align until dst is 32bit-word-aligned */
        extru   dst,31,2,t1
-       cmpib,COND(=),n 0,t1,.Lcopy_dstaligned
+       cmpib,=,n       0,t1,.Lcopy_dstaligned
 20:    ldb     0(srcspc,src),t1
        ldo     1(src),src
 21:    stb,ma  t1,1(dstspc,dst)
@@ -362,7 +363,7 @@ ENTRY_CFI(pa_memcpy)
        cmpiclr,<> 1,t0,%r0
        b,n .Lcase1
 .Lcase0:
-       cmpb,= %r0,len,.Lcda_finish
+       cmpb,COND(=) %r0,len,.Lcda_finish
        nop
 
 1:     ldw,ma 4(srcspc,src), a3
@@ -376,7 +377,7 @@ ENTRY_CFI(pa_memcpy)
 1:     ldw,ma 4(srcspc,src), a3
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
        ldo -1(len),len
-       cmpb,=,n %r0,len,.Ldo0
+       cmpb,COND(=),n %r0,len,.Ldo0
 .Ldo4:
 1:     ldw,ma 4(srcspc,src), a0
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
@@ -402,7 +403,7 @@ ENTRY_CFI(pa_memcpy)
 1:     stw,ma t0, 4(dstspc,dst)
        ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
        ldo -4(len),len
-       cmpb,<> %r0,len,.Ldo4
+       cmpb,COND(<>) %r0,len,.Ldo4
        nop
 .Ldo0:
        shrpw a2, a3, %sar, t0
@@ -436,14 +437,14 @@ ENTRY_CFI(pa_memcpy)
        /* fault exception fixup handlers: */
 #ifdef CONFIG_64BIT
 .Lcopy16_fault:
-10:    b       .Lcopy_done
-       std,ma  t1,8(dstspc,dst)
+       b       .Lcopy_done
+10:    std,ma  t1,8(dstspc,dst)
        ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
 #endif
 
 .Lcopy8_fault:
-10:    b       .Lcopy_done
-       stw,ma  t1,4(dstspc,dst)
+       b       .Lcopy_done
+10:    stw,ma  t1,4(dstspc,dst)
        ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
 
        .exit