byteorder: allow arch to opt to use GCC intrinsics for byteswapping
Author:     David Woodhouse <David.Woodhouse@intel.com>
AuthorDate: Mon, 3 Dec 2012 16:25:40 +0000 (16:25 +0000)
Commit:     David Woodhouse <David.Woodhouse@intel.com>
CommitDate: Thu, 6 Dec 2012 01:22:31 +0000 (01:22 +0000)
Since GCC 4.4, there have been __builtin_bswap32() and __builtin_bswap64()
intrinsics. A __builtin_bswap16() came a little later (GCC 4.6 for PowerPC,
4.8 for other platforms).
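
For reference, a minimal standalone illustration of the three intrinsics
(not part of this patch; __builtin_bswap16() needs GCC 4.8, or 4.6 on
PowerPC):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t h = __builtin_bswap16(0x1234);      /* 0x3412 */
		uint32_t w = __builtin_bswap32(0x12345678);  /* 0x78563412 */
		uint64_t d = __builtin_bswap64(0x0102030405060708ULL);
		printf("%04x %08x %016llx\n", h, w, (unsigned long long)d);
		return 0;
	}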

By using these instead of the inline assembler that most architectures
have in their __arch_swabXX() macros, we let the compiler see what's
actually happening. The resulting code should be at least as good, and
much *better* in the cases where it can be combined with a nearby load
or store, using a load-and-byteswap or store-and-byteswap instruction
(e.g. lwbrx/stwbrx on PowerPC, movbe on Atom).
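
As a sketch of the kind of code that benefits (hypothetical helper, not
part of this patch): on a little-endian CPU the function below converts
a big-endian on-the-wire field to host order, and because the swap is a
builtin the compiler may fold it into the load itself.

	#include <stdint.h>
	#include <string.h>

	uint32_t read_be32(const void *p)
	{
		uint32_t v;

		memcpy(&v, p, sizeof(v));	/* plain native-endian load */
		return __builtin_bswap32(v);	/* swap the compiler can see */
	}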

When GCC is sufficiently recent *and* the architecture opts in to using
the intrinsics by setting CONFIG_ARCH_USE_BUILTIN_BSWAP, they will be
used in preference to the __arch_swabXX() macros. An architecture which
does not set ARCH_USE_BUILTIN_BSWAP will continue to use its own
hand-crafted macros.
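
An architecture opts in with a one-line select in its Kconfig; a sketch
for a hypothetical arch (illustrative, not part of this patch):

	config FOO
		def_bool y
		select ARCH_USE_BUILTIN_BSWAP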

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
arch/Kconfig
include/linux/compiler-gcc4.h
include/linux/compiler-intel.h
include/uapi/linux/swab.h

diff --git a/arch/Kconfig b/arch/Kconfig
index 366ec06a5185f0a59a7e5069357dbdb3da5a08c8..c31416b10586f5431c49b2971903299be0efc282 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -112,6 +112,25 @@ config HAVE_EFFICIENT_UNALIGNED_ACCESS
          See Documentation/unaligned-memory-access.txt for more
          information on the topic of unaligned memory accesses.
 
+config ARCH_USE_BUILTIN_BSWAP
+       bool
+       help
+        Modern versions of GCC (since 4.4) have builtin functions
+        for handling byte-swapping. Using these, instead of the old
+        inline assembler that the architecture code provides in the
+        __arch_bswapXX() macros, allows the compiler to see what's
+        happening and offers more opportunity for optimisation. In
+        particular, the compiler will be able to combine the byteswap
+        with a nearby load or store and use load-and-swap or
+        store-and-swap instructions if the architecture has them. It
+        should almost *never* result in code which is worse than the
+        hand-coded assembler in <asm/swab.h>.  But just in case it
+        does, the use of the builtins is optional.
+
+        Any architecture with load-and-swap or store-and-swap
+        instructions should set this. And it shouldn't hurt to set it
+        on architectures that don't have such instructions.
+
 config HAVE_SYSCALL_WRAPPERS
        bool
 
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 412bc6c2b02358bd48f87cc0d029bd753dbae613..dc16a858e77c7870ad0a103a1e50a58b636a0b64 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
 #define __compiletime_warning(message) __attribute__((warning(message)))
 #define __compiletime_error(message) __attribute__((error(message)))
 #endif
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#if __GNUC_MINOR__ >= 4
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#endif
+#if __GNUC_MINOR__ >= 8 || (defined(__powerpc__) && __GNUC_MINOR__ >= 6)
+#define __HAVE_BUILTIN_BSWAP16__
+#endif
+#endif
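
(Note that compiler-gcc4.h is only included for GCC 4.x, so testing
__GNUC_MINOR__ alone is enough here. A standalone sketch of the same
gating follows; in the kernel these macros are of course only defined
under CONFIG_ARCH_USE_BUILTIN_BSWAP.)

	#include <stdio.h>

	int main(void)
	{
	#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ >= 4
		puts("__builtin_bswap32/64 available");
	#endif
	#if defined(__GNUC__) && __GNUC__ == 4 && \
		(__GNUC_MINOR__ >= 8 || (defined(__powerpc__) && __GNUC_MINOR__ >= 6))
		puts("__builtin_bswap16 available");
	#endif
		return 0;
	}
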
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index d8e636e5607d78c40f75043b7dca312fc3c0748a..973ce10c40b651faa5d9e06f41eb49fefa492eab 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
 #endif
 
 #define uninitialized_var(x) x
+
+#ifndef __HAVE_BUILTIN_BSWAP16__
+/* icc has this, but it's called _bswap16 */
+#define __HAVE_BUILTIN_BSWAP16__
+#define __builtin_bswap16 _bswap16
+#endif
+
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index e811474724c210cd658bcf1efb110032ddd7d4b7..0e011eb91b5d7523c1774c1e786dab9fe0809720 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -45,7 +45,9 @@
 
 static inline __attribute_const__ __u16 __fswab16(__u16 val)
 {
-#ifdef __arch_swab16
+#ifdef __HAVE_BUILTIN_BSWAP16__
+       return __builtin_bswap16(val);
+#elif defined (__arch_swab16)
        return __arch_swab16(val);
 #else
        return ___constant_swab16(val);
@@ -54,7 +56,9 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
 
 static inline __attribute_const__ __u32 __fswab32(__u32 val)
 {
-#ifdef __arch_swab32
+#ifdef __HAVE_BUILTIN_BSWAP32__
+       return __builtin_bswap32(val);
+#elif defined(__arch_swab32)
        return __arch_swab32(val);
 #else
        return ___constant_swab32(val);
@@ -63,7 +67,9 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
 
 static inline __attribute_const__ __u64 __fswab64(__u64 val)
 {
-#ifdef __arch_swab64
+#ifdef __HAVE_BUILTIN_BSWAP64__
+       return __builtin_bswap64(val);
+#elif defined (__arch_swab64)
        return __arch_swab64(val);
 #elif defined(__SWAB_64_THRU_32__)
        __u32 h = val >> 32;