git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
[S390] maccess: arch specific probe_kernel_write() implementation
author: Heiko Carstens <heiko.carstens@de.ibm.com>
Fri, 12 Jun 2009 08:26:42 +0000 (10:26 +0200)
committer: Martin Schwidefsky <schwidefsky@de.ibm.com>
Fri, 12 Jun 2009 08:27:37 +0000 (10:27 +0200)
Add an s390 specific probe_kernel_write() function which allows to
write to the kernel text segment even if write protection is enabled.
This is implemented using the lra (load real address) and stura (store
using real address) instructions.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
arch/s390/mm/Makefile
arch/s390/mm/maccess.c [new file with mode: 0644]

index 2a745813454410cb35ff525e837efc9f29c0dd95..db05661ac8954269a36d796b548e7bb0a40f126c 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for the linux s390-specific parts of the memory manager.
 #
 
-obj-y   := init.o fault.o extmem.o mmap.o vmem.o pgtable.o
+obj-y   := init.o fault.o extmem.o mmap.o vmem.o pgtable.o maccess.o
 obj-$(CONFIG_CMM) += cmm.o
 obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
 obj-$(CONFIG_PAGE_STATES) += page-states.o
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
new file mode 100644 (file)
index 0000000..8175627
--- /dev/null
@@ -0,0 +1,61 @@
+/*
+ * Access kernel memory without faulting -- s390 specific implementation.
+ *
+ * Copyright IBM Corp. 2009
+ *
+ *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
+ *
+ */
+
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <asm/system.h>
+
+/*
+ * This function writes to kernel memory bypassing DAT and possible
+ * write protection. It copies one to four bytes from src to dst,
+ * never crossing a four byte (word) boundary, using the stura
+ * (store using real address) instruction.
+ * Returns the number of bytes copied or -EFAULT on a fault.
+ */
+static long probe_kernel_write_odd(void *dst, void *src, size_t size)
+{
+       unsigned long count, aligned;
+       int offset, mask;
+       int rc = -EFAULT;
+
+       /* Word-aligned address containing dst, and dst's byte offset in it. */
+       aligned = (unsigned long) dst & ~3UL;
+       offset = (unsigned long) dst & 3;
+       /* Copy at most up to the next word boundary. */
+       count = min_t(unsigned long, 4 - offset, size);
+       /*
+        * Build an icm-style byte mask (bit value 8 = leftmost byte of
+        * the word): select 'count' bytes starting at byte 'offset'.
+        * E.g. offset=1, count=3 -> mask=0x7 (the three rightmost bytes).
+        */
+       mask = (0xf << (4 - count)) & 0xf;
+       mask >>= offset;
+       /*
+        * bras  1,0f    : save the address of the icm template in r1 and
+        *                 branch over it (the template is never executed
+        *                 in line)
+        * icm   0,0,... : template with a zero mask field; "ex" below ORs
+        *                 'mask' into it, so the executed instruction
+        *                 inserts 'count' bytes from src into register 0
+        * l     0,0(%1) : preload the target word so the bytes NOT
+        *                 selected by the mask keep their old contents
+        * lra   %1,0(%1): translate the aligned virtual address into a
+        *                 real address (faults if unmapped)
+        * ex    %2,0(1) : execute the patched icm
+        * stura 0,%1    : store register 0 using the real address -- this
+        *                 bypasses DAT and thus any write protection
+        * la    %0,0    : success, set rc to 0
+        * A fault in any EX_TABLE-tagged instruction branches to label 3
+        * and leaves rc at -EFAULT.
+        */
+       asm volatile(
+               "       bras    1,0f\n"
+               "       icm     0,0,0(%3)\n"
+               "0:     l       0,0(%1)\n"
+               "       lra     %1,0(%1)\n"
+               "1:     ex      %2,0(1)\n"
+               "2:     stura   0,%1\n"
+               "       la      %0,0\n"
+               "3:\n"
+               EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
+               : "+d" (rc), "+a" (aligned)
+               : "a" (mask), "a" (src) : "cc", "memory", "0", "1");
+       return rc ? rc : count;
+}
+
+/*
+ * Write to kernel memory bypassing DAT and possible write protection.
+ * probe_kernel_write_odd() handles at most one word-bounded piece at a
+ * time, so advance piece by piece until everything is copied or a
+ * fault occurs.
+ * Returns 0 on success or -EFAULT.
+ */
+long probe_kernel_write(void *dst, void *src, size_t size)
+{
+       long ret = 0;
+
+       for (; size; dst += ret, src += ret, size -= ret) {
+               ret = probe_kernel_write_odd(dst, src, size);
+               if (ret < 0)
+                       return -EFAULT;
+       }
+       return 0;
+}