crypto: vmx - Reindent to kernel style
author Herbert Xu <herbert@gondor.apana.org.au>
Mon, 15 Jun 2015 08:55:46 +0000 (16:55 +0800)
committer Herbert Xu <herbert@gondor.apana.org.au>
Tue, 16 Jun 2015 06:35:02 +0000 (14:35 +0800)
This patch reindents the vmx code base to the kernel coding style.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
drivers/crypto/vmx/aes.c
drivers/crypto/vmx/aes_cbc.c
drivers/crypto/vmx/aes_ctr.c
drivers/crypto/vmx/aesp8-ppc.h
drivers/crypto/vmx/ghash.c
drivers/crypto/vmx/vmx.c

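For context, the target is the kernel coding style (Documentation/CodingStyle): indentation with hard tabs rendered eight columns wide, and wrapped argument lists aligned under the opening parenthesis, replacing the four-space indentation the driver used before. Below is a minimal sketch of the change pattern using a hypothetical function, not one taken from this patch:

#include <stddef.h>

/* Hypothetical example, for illustration only -- not part of the patch.
 * Before the reindent, the code looked roughly like this:
 *
 *     static void example_xor(unsigned char *dst, const unsigned char *src,
 *         size_t len)
 *     {
 *         size_t i;
 *
 *         for (i = 0; i < len; i++)
 *             dst[i] ^= src[i];
 *     }
 *
 * After the reindent, the same code in kernel style uses tab indentation
 * and aligns the continuation line under the opening parenthesis:
 */
static void example_xor(unsigned char *dst, const unsigned char *src,
			size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)	/* tab-indented body */
		dst[i] ^= src[i];
}
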
drivers/crypto/vmx/aes.c
index ab300ea19434e3f93d9b08475f27ee59cf445390..023e5f01478324884af2df0206d7e90ff135134a 100644
 #include "aesp8-ppc.h"
 
 struct p8_aes_ctx {
-    struct crypto_cipher *fallback;
-    struct aes_key enc_key;
-    struct aes_key dec_key;
+       struct crypto_cipher *fallback;
+       struct aes_key enc_key;
+       struct aes_key dec_key;
 };
 
 static int p8_aes_init(struct crypto_tfm *tfm)
 {
-    const char *alg;
-    struct crypto_cipher *fallback;
-    struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-
-    if (!(alg = crypto_tfm_alg_name(tfm))) {
-        printk(KERN_ERR "Failed to get algorithm name.\n");
-        return -ENOENT;
-    }
-
-    fallback = crypto_alloc_cipher(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK);
-    if (IS_ERR(fallback)) {
-        printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n",
-                alg, PTR_ERR(fallback));
-        return PTR_ERR(fallback);
-    }
-    printk(KERN_INFO "Using '%s' as fallback implementation.\n",
-            crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
-
-    crypto_cipher_set_flags(fallback,
-            crypto_cipher_get_flags((struct crypto_cipher *) tfm));
-    ctx->fallback = fallback;
-
-    return 0;
+       const char *alg;
+       struct crypto_cipher *fallback;
+       struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       if (!(alg = crypto_tfm_alg_name(tfm))) {
+               printk(KERN_ERR "Failed to get algorithm name.\n");
+               return -ENOENT;
+       }
+
+       fallback = crypto_alloc_cipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(fallback)) {
+               printk(KERN_ERR
+                      "Failed to allocate transformation for '%s': %ld\n",
+                      alg, PTR_ERR(fallback));
+               return PTR_ERR(fallback);
+       }
+       printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+              crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
+
+       crypto_cipher_set_flags(fallback,
+                               crypto_cipher_get_flags((struct
+                                                        crypto_cipher *)
+                                                       tfm));
+       ctx->fallback = fallback;
+
+       return 0;
 }
 
 static void p8_aes_exit(struct crypto_tfm *tfm)
 {
-    struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
-    if (ctx->fallback) {
-        crypto_free_cipher(ctx->fallback);
-        ctx->fallback = NULL;
-    }
+       if (ctx->fallback) {
+               crypto_free_cipher(ctx->fallback);
+               ctx->fallback = NULL;
+       }
 }
 
 static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
-    unsigned int keylen)
+                        unsigned int keylen)
 {
-    int ret;
-    struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-
-    pagefault_disable();
-    enable_kernel_altivec();
-    ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
-    ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
-    pagefault_enable();
-    
-    ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
-    return ret;
+       int ret;
+       struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       pagefault_disable();
+       enable_kernel_altivec();
+       ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+       ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+       pagefault_enable();
+
+       ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
+       return ret;
 }
 
 static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
-    struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-
-    if (in_interrupt()) {
-        crypto_cipher_encrypt_one(ctx->fallback, dst, src);
-    } else {
-        pagefault_disable();
-        enable_kernel_altivec();
-        aes_p8_encrypt(src, dst, &ctx->enc_key);
-        pagefault_enable();
-    }
+       struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       if (in_interrupt()) {
+               crypto_cipher_encrypt_one(ctx->fallback, dst, src);
+       } else {
+               pagefault_disable();
+               enable_kernel_altivec();
+               aes_p8_encrypt(src, dst, &ctx->enc_key);
+               pagefault_enable();
+       }
 }
 
 static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
-    struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
-
-    if (in_interrupt()) {
-        crypto_cipher_decrypt_one(ctx->fallback, dst, src);
-    } else {
-        pagefault_disable();
-        enable_kernel_altivec();
-        aes_p8_decrypt(src, dst, &ctx->dec_key);
-        pagefault_enable();
-    }
+       struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       if (in_interrupt()) {
+               crypto_cipher_decrypt_one(ctx->fallback, dst, src);
+       } else {
+               pagefault_disable();
+               enable_kernel_altivec();
+               aes_p8_decrypt(src, dst, &ctx->dec_key);
+               pagefault_enable();
+       }
 }
 
 struct crypto_alg p8_aes_alg = {
-    .cra_name = "aes",
-    .cra_driver_name = "p8_aes",
-    .cra_module = THIS_MODULE,
-    .cra_priority = 1000,
-    .cra_type = NULL,
-    .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_NEED_FALLBACK,
-    .cra_alignmask = 0,
-    .cra_blocksize = AES_BLOCK_SIZE,
-    .cra_ctxsize = sizeof(struct p8_aes_ctx),
-    .cra_init = p8_aes_init,
-    .cra_exit = p8_aes_exit,
-    .cra_cipher = {
-        .cia_min_keysize = AES_MIN_KEY_SIZE,
-        .cia_max_keysize = AES_MAX_KEY_SIZE,
-        .cia_setkey = p8_aes_setkey,
-        .cia_encrypt = p8_aes_encrypt,
-        .cia_decrypt = p8_aes_decrypt,
-    },
+       .cra_name = "aes",
+       .cra_driver_name = "p8_aes",
+       .cra_module = THIS_MODULE,
+       .cra_priority = 1000,
+       .cra_type = NULL,
+       .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_NEED_FALLBACK,
+       .cra_alignmask = 0,
+       .cra_blocksize = AES_BLOCK_SIZE,
+       .cra_ctxsize = sizeof(struct p8_aes_ctx),
+       .cra_init = p8_aes_init,
+       .cra_exit = p8_aes_exit,
+       .cra_cipher = {
+                      .cia_min_keysize = AES_MIN_KEY_SIZE,
+                      .cia_max_keysize = AES_MAX_KEY_SIZE,
+                      .cia_setkey = p8_aes_setkey,
+                      .cia_encrypt = p8_aes_encrypt,
+                      .cia_decrypt = p8_aes_decrypt,
+       },
 };
-
drivers/crypto/vmx/aes_cbc.c
index 1a559b7dddb5f2f5ff3184e0d50c275bf498aa3c..7120ab24d8c6e05ad8b872a2b28ea29e084fc193 100644
 #include "aesp8-ppc.h"
 
 struct p8_aes_cbc_ctx {
-    struct crypto_blkcipher *fallback;
-    struct aes_key enc_key;
-    struct aes_key dec_key;
+       struct crypto_blkcipher *fallback;
+       struct aes_key enc_key;
+       struct aes_key dec_key;
 };
 
 static int p8_aes_cbc_init(struct crypto_tfm *tfm)
 {
-    const char *alg;
-    struct crypto_blkcipher *fallback;
-    struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
-
-    if (!(alg = crypto_tfm_alg_name(tfm))) {
-        printk(KERN_ERR "Failed to get algorithm name.\n");
-        return -ENOENT;
-    }
-
-    fallback = crypto_alloc_blkcipher(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK);
-    if (IS_ERR(fallback)) {
-        printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n",
-                alg, PTR_ERR(fallback));
-        return PTR_ERR(fallback);
-    }
-    printk(KERN_INFO "Using '%s' as fallback implementation.\n",
-            crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
-
-    crypto_blkcipher_set_flags(fallback,
-            crypto_blkcipher_get_flags((struct crypto_blkcipher *) tfm));
-    ctx->fallback = fallback;
-
-    return 0;
+       const char *alg;
+       struct crypto_blkcipher *fallback;
+       struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       if (!(alg = crypto_tfm_alg_name(tfm))) {
+               printk(KERN_ERR "Failed to get algorithm name.\n");
+               return -ENOENT;
+       }
+
+       fallback =
+           crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(fallback)) {
+               printk(KERN_ERR
+                      "Failed to allocate transformation for '%s': %ld\n",
+                      alg, PTR_ERR(fallback));
+               return PTR_ERR(fallback);
+       }
+       printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+              crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
+
+       crypto_blkcipher_set_flags(
+               fallback,
+               crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm));
+       ctx->fallback = fallback;
+
+       return 0;
 }
 
 static void p8_aes_cbc_exit(struct crypto_tfm *tfm)
 {
-    struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
 
-    if (ctx->fallback) {
-        crypto_free_blkcipher(ctx->fallback);
-        ctx->fallback = NULL;
-    }
+       if (ctx->fallback) {
+               crypto_free_blkcipher(ctx->fallback);
+               ctx->fallback = NULL;
+       }
 }
 
 static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
-    unsigned int keylen)
+                            unsigned int keylen)
 {
-    int ret;
-    struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+       int ret;
+       struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
 
-    pagefault_disable();
-    enable_kernel_altivec();
-    ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
-    ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
-    pagefault_enable();
+       pagefault_disable();
+       enable_kernel_altivec();
+       ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+       ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
+       pagefault_enable();
 
-    ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
-    return ret;
+       ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
+       return ret;
 }
 
 static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
-    struct scatterlist *dst, struct scatterlist *src,
-    unsigned int nbytes)
+                             struct scatterlist *dst,
+                             struct scatterlist *src, unsigned int nbytes)
 {
-    int ret;
-    struct blkcipher_walk walk;
-    struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(
-            crypto_blkcipher_tfm(desc->tfm));
-    struct blkcipher_desc fallback_desc = {
-        .tfm = ctx->fallback,
-        .info = desc->info,
-        .flags = desc->flags
-    };
-
-    if (in_interrupt()) {
-        ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes);
-    } else {
-        pagefault_disable();
-        enable_kernel_altivec();
-
-       blkcipher_walk_init(&walk, dst, src, nbytes);
-        ret = blkcipher_walk_virt(desc, &walk);
-        while ((nbytes = walk.nbytes)) {
-                       aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
-                               nbytes & AES_BLOCK_MASK, &ctx->enc_key, walk.iv, 1);
+       int ret;
+       struct blkcipher_walk walk;
+       struct p8_aes_cbc_ctx *ctx =
+               crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+       struct blkcipher_desc fallback_desc = {
+               .tfm = ctx->fallback,
+               .info = desc->info,
+               .flags = desc->flags
+       };
+
+       if (in_interrupt()) {
+               ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
+                                              nbytes);
+       } else {
+               pagefault_disable();
+               enable_kernel_altivec();
+
+               blkcipher_walk_init(&walk, dst, src, nbytes);
+               ret = blkcipher_walk_virt(desc, &walk);
+               while ((nbytes = walk.nbytes)) {
+                       aes_p8_cbc_encrypt(walk.src.virt.addr,
+                                          walk.dst.virt.addr,
+                                          nbytes & AES_BLOCK_MASK,
+                                          &ctx->enc_key, walk.iv, 1);
                        nbytes &= AES_BLOCK_SIZE - 1;
                        ret = blkcipher_walk_done(desc, &walk, nbytes);
-       }
+               }
 
-        pagefault_enable();
-    }
+               pagefault_enable();
+       }
 
-    return ret;
+       return ret;
 }
 
 static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
-    struct scatterlist *dst, struct scatterlist *src,
-    unsigned int nbytes)
+                             struct scatterlist *dst,
+                             struct scatterlist *src, unsigned int nbytes)
 {
-    int ret;
-    struct blkcipher_walk walk;
-    struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(
-            crypto_blkcipher_tfm(desc->tfm));
-    struct blkcipher_desc fallback_desc = {
-        .tfm = ctx->fallback,
-        .info = desc->info,
-        .flags = desc->flags
-    };
-
-    if (in_interrupt()) {
-        ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes);
-    } else {
-        pagefault_disable();
-        enable_kernel_altivec();
-
-       blkcipher_walk_init(&walk, dst, src, nbytes);
-        ret = blkcipher_walk_virt(desc, &walk);
-        while ((nbytes = walk.nbytes)) {
-                       aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
-                               nbytes & AES_BLOCK_MASK, &ctx->dec_key, walk.iv, 0);
+       int ret;
+       struct blkcipher_walk walk;
+       struct p8_aes_cbc_ctx *ctx =
+               crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+       struct blkcipher_desc fallback_desc = {
+               .tfm = ctx->fallback,
+               .info = desc->info,
+               .flags = desc->flags
+       };
+
+       if (in_interrupt()) {
+               ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src,
+                                              nbytes);
+       } else {
+               pagefault_disable();
+               enable_kernel_altivec();
+
+               blkcipher_walk_init(&walk, dst, src, nbytes);
+               ret = blkcipher_walk_virt(desc, &walk);
+               while ((nbytes = walk.nbytes)) {
+                       aes_p8_cbc_encrypt(walk.src.virt.addr,
+                                          walk.dst.virt.addr,
+                                          nbytes & AES_BLOCK_MASK,
+                                          &ctx->dec_key, walk.iv, 0);
                        nbytes &= AES_BLOCK_SIZE - 1;
                        ret = blkcipher_walk_done(desc, &walk, nbytes);
                }
 
-        pagefault_enable();
-    }
+               pagefault_enable();
+       }
 
-    return ret;
+       return ret;
 }
 
 
 struct crypto_alg p8_aes_cbc_alg = {
-    .cra_name = "cbc(aes)",
-    .cra_driver_name = "p8_aes_cbc",
-    .cra_module = THIS_MODULE,
-    .cra_priority = 1000,
-    .cra_type = &crypto_blkcipher_type,
-    .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
-    .cra_alignmask = 0,
-    .cra_blocksize = AES_BLOCK_SIZE,
-    .cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
-    .cra_init = p8_aes_cbc_init,
-    .cra_exit = p8_aes_cbc_exit,
-    .cra_blkcipher = {
-        .ivsize = 0,
-        .min_keysize = AES_MIN_KEY_SIZE,
-        .max_keysize = AES_MAX_KEY_SIZE,
-        .setkey = p8_aes_cbc_setkey,
-        .encrypt = p8_aes_cbc_encrypt,
-        .decrypt = p8_aes_cbc_decrypt,
-    },
+       .cra_name = "cbc(aes)",
+       .cra_driver_name = "p8_aes_cbc",
+       .cra_module = THIS_MODULE,
+       .cra_priority = 1000,
+       .cra_type = &crypto_blkcipher_type,
+       .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
+       .cra_alignmask = 0,
+       .cra_blocksize = AES_BLOCK_SIZE,
+       .cra_ctxsize = sizeof(struct p8_aes_cbc_ctx),
+       .cra_init = p8_aes_cbc_init,
+       .cra_exit = p8_aes_cbc_exit,
+       .cra_blkcipher = {
+                         .ivsize = 0,
+                         .min_keysize = AES_MIN_KEY_SIZE,
+                         .max_keysize = AES_MAX_KEY_SIZE,
+                         .setkey = p8_aes_cbc_setkey,
+                         .encrypt = p8_aes_cbc_encrypt,
+                         .decrypt = p8_aes_cbc_decrypt,
+       },
 };
-
drivers/crypto/vmx/aes_ctr.c
index 96dbee4bf4a6ddab87ea3156e354c0510deacd6c..7adae42a7b79ea81a5bc35ae2db9db9b6a2437e2 100644
 #include "aesp8-ppc.h"
 
 struct p8_aes_ctr_ctx {
-    struct crypto_blkcipher *fallback;
-    struct aes_key enc_key;
+       struct crypto_blkcipher *fallback;
+       struct aes_key enc_key;
 };
 
 static int p8_aes_ctr_init(struct crypto_tfm *tfm)
 {
-    const char *alg;
-    struct crypto_blkcipher *fallback;
-    struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
-
-    if (!(alg = crypto_tfm_alg_name(tfm))) {
-        printk(KERN_ERR "Failed to get algorithm name.\n");
-        return -ENOENT;
-    }
-
-    fallback = crypto_alloc_blkcipher(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK);
-    if (IS_ERR(fallback)) {
-        printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n",
-                alg, PTR_ERR(fallback));
-        return PTR_ERR(fallback);
-    }
-    printk(KERN_INFO "Using '%s' as fallback implementation.\n",
-            crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
-
-    crypto_blkcipher_set_flags(fallback,
-            crypto_blkcipher_get_flags((struct crypto_blkcipher *) tfm));
-    ctx->fallback = fallback;
-
-    return 0;
+       const char *alg;
+       struct crypto_blkcipher *fallback;
+       struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       if (!(alg = crypto_tfm_alg_name(tfm))) {
+               printk(KERN_ERR "Failed to get algorithm name.\n");
+               return -ENOENT;
+       }
+
+       fallback =
+           crypto_alloc_blkcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(fallback)) {
+               printk(KERN_ERR
+                      "Failed to allocate transformation for '%s': %ld\n",
+                      alg, PTR_ERR(fallback));
+               return PTR_ERR(fallback);
+       }
+       printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+              crypto_tfm_alg_driver_name((struct crypto_tfm *) fallback));
+
+       crypto_blkcipher_set_flags(
+               fallback,
+               crypto_blkcipher_get_flags((struct crypto_blkcipher *)tfm));
+       ctx->fallback = fallback;
+
+       return 0;
 }
 
 static void p8_aes_ctr_exit(struct crypto_tfm *tfm)
 {
-    struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
 
-    if (ctx->fallback) {
-        crypto_free_blkcipher(ctx->fallback);
-        ctx->fallback = NULL;
-    }
+       if (ctx->fallback) {
+               crypto_free_blkcipher(ctx->fallback);
+               ctx->fallback = NULL;
+       }
 }
 
 static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
-    unsigned int keylen)
+                            unsigned int keylen)
 {
-    int ret;
-    struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+       int ret;
+       struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
 
-    pagefault_disable();
-    enable_kernel_altivec();
-    ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
-    pagefault_enable();
+       pagefault_disable();
+       enable_kernel_altivec();
+       ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+       pagefault_enable();
 
-    ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
-    return ret;
+       ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
+       return ret;
 }
 
 static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
-                struct blkcipher_walk *walk)
+                            struct blkcipher_walk *walk)
 {
-    u8 *ctrblk = walk->iv;
-    u8 keystream[AES_BLOCK_SIZE];
-    u8 *src = walk->src.virt.addr;
-    u8 *dst = walk->dst.virt.addr;
-    unsigned int nbytes = walk->nbytes;
-
-    pagefault_disable();
-    enable_kernel_altivec();
-    aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
-    pagefault_enable();
-
-    crypto_xor(keystream, src, nbytes);
-    memcpy(dst, keystream, nbytes);
-    crypto_inc(ctrblk, AES_BLOCK_SIZE);
+       u8 *ctrblk = walk->iv;
+       u8 keystream[AES_BLOCK_SIZE];
+       u8 *src = walk->src.virt.addr;
+       u8 *dst = walk->dst.virt.addr;
+       unsigned int nbytes = walk->nbytes;
+
+       pagefault_disable();
+       enable_kernel_altivec();
+       aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
+       pagefault_enable();
+
+       crypto_xor(keystream, src, nbytes);
+       memcpy(dst, keystream, nbytes);
+       crypto_inc(ctrblk, AES_BLOCK_SIZE);
 }
 
 static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
-    struct scatterlist *dst, struct scatterlist *src,
-    unsigned int nbytes)
+                           struct scatterlist *dst,
+                           struct scatterlist *src, unsigned int nbytes)
 {
-    int ret;
-    struct blkcipher_walk walk;
-    struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(
-            crypto_blkcipher_tfm(desc->tfm));
-    struct blkcipher_desc fallback_desc = {
-        .tfm = ctx->fallback,
-        .info = desc->info,
-        .flags = desc->flags
-    };
-
-    if (in_interrupt()) {
-        ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes);
-    } else {
-        blkcipher_walk_init(&walk, dst, src, nbytes);
-        ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
-        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
-            pagefault_disable();
-            enable_kernel_altivec();
-            aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, walk.dst.virt.addr,
-                (nbytes & AES_BLOCK_MASK)/AES_BLOCK_SIZE, &ctx->enc_key, walk.iv);
-            pagefault_enable();
-
-            crypto_inc(walk.iv, AES_BLOCK_SIZE);
-            nbytes &= AES_BLOCK_SIZE - 1;
-            ret = blkcipher_walk_done(desc, &walk, nbytes);
-        }
-        if (walk.nbytes) {
-            p8_aes_ctr_final(ctx, &walk);
-            ret = blkcipher_walk_done(desc, &walk, 0);
-        }
-    }
-
-    return ret;
+       int ret;
+       struct blkcipher_walk walk;
+       struct p8_aes_ctr_ctx *ctx =
+               crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
+       struct blkcipher_desc fallback_desc = {
+               .tfm = ctx->fallback,
+               .info = desc->info,
+               .flags = desc->flags
+       };
+
+       if (in_interrupt()) {
+               ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src,
+                                              nbytes);
+       } else {
+               blkcipher_walk_init(&walk, dst, src, nbytes);
+               ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+               while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+                       pagefault_disable();
+                       enable_kernel_altivec();
+                       aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
+                                                   walk.dst.virt.addr,
+                                                   (nbytes &
+                                                    AES_BLOCK_MASK) /
+                                                   AES_BLOCK_SIZE,
+                                                   &ctx->enc_key,
+                                                   walk.iv);
+                       pagefault_enable();
+
+                       crypto_inc(walk.iv, AES_BLOCK_SIZE);
+                       nbytes &= AES_BLOCK_SIZE - 1;
+                       ret = blkcipher_walk_done(desc, &walk, nbytes);
+               }
+               if (walk.nbytes) {
+                       p8_aes_ctr_final(ctx, &walk);
+                       ret = blkcipher_walk_done(desc, &walk, 0);
+               }
+       }
+
+       return ret;
 }
 
 struct crypto_alg p8_aes_ctr_alg = {
-    .cra_name = "ctr(aes)",
-    .cra_driver_name = "p8_aes_ctr",
-    .cra_module = THIS_MODULE,
-    .cra_priority = 1000,
-    .cra_type = &crypto_blkcipher_type,
-    .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
-    .cra_alignmask = 0,
-    .cra_blocksize = 1,
-    .cra_ctxsize = sizeof(struct p8_aes_ctr_ctx),
-    .cra_init = p8_aes_ctr_init,
-    .cra_exit = p8_aes_ctr_exit,
-    .cra_blkcipher = {
-        .ivsize = 0,
-        .min_keysize = AES_MIN_KEY_SIZE,
-        .max_keysize = AES_MAX_KEY_SIZE,
-        .setkey = p8_aes_ctr_setkey,
-        .encrypt = p8_aes_ctr_crypt,
-        .decrypt = p8_aes_ctr_crypt,
-    },
+       .cra_name = "ctr(aes)",
+       .cra_driver_name = "p8_aes_ctr",
+       .cra_module = THIS_MODULE,
+       .cra_priority = 1000,
+       .cra_type = &crypto_blkcipher_type,
+       .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_NEED_FALLBACK,
+       .cra_alignmask = 0,
+       .cra_blocksize = 1,
+       .cra_ctxsize = sizeof(struct p8_aes_ctr_ctx),
+       .cra_init = p8_aes_ctr_init,
+       .cra_exit = p8_aes_ctr_exit,
+       .cra_blkcipher = {
+                         .ivsize = 0,
+                         .min_keysize = AES_MIN_KEY_SIZE,
+                         .max_keysize = AES_MAX_KEY_SIZE,
+                         .setkey = p8_aes_ctr_setkey,
+                         .encrypt = p8_aes_ctr_crypt,
+                         .decrypt = p8_aes_ctr_crypt,
+       },
 };
drivers/crypto/vmx/aesp8-ppc.h
index e963945a83e1639ce99cabd7387e436d519f84d2..4cd34ee54a94da0ac2725b9c3da82776272234dc 100644
@@ -4,17 +4,18 @@
 #define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE-1))
 
 struct aes_key {
-    u8 key[AES_MAX_KEYLENGTH];
-    int rounds;
+       u8 key[AES_MAX_KEYLENGTH];
+       int rounds;
 };
 
 int aes_p8_set_encrypt_key(const u8 *userKey, const int bits,
-        struct aes_key *key);
+                          struct aes_key *key);
 int aes_p8_set_decrypt_key(const u8 *userKey, const int bits,
-        struct aes_key *key);
+                          struct aes_key *key);
 void aes_p8_encrypt(const u8 *in, u8 *out, const struct aes_key *key);
-void aes_p8_decrypt(const u8 *in, u8 *out,const struct aes_key *key);
+void aes_p8_decrypt(const u8 *in, u8 *out, const struct aes_key *key);
 void aes_p8_cbc_encrypt(const u8 *in, u8 *out, size_t len,
-               const struct aes_key *key, u8 *iv, const int enc);
+                       const struct aes_key *key, u8 *iv, const int enc);
 void aes_p8_ctr32_encrypt_blocks(const u8 *in, u8 *out,
-        size_t len, const struct aes_key *key, const u8 *iv);
+                                size_t len, const struct aes_key *key,
+                                const u8 *iv);
drivers/crypto/vmx/ghash.c
index d0ffe277af5ca583157afbf881d5df2fbdec5bf5..4c3a8f7e5059978a8ec62ac127460fb184cd44cc 100644
 void gcm_init_p8(u128 htable[16], const u64 Xi[2]);
 void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]);
 void gcm_ghash_p8(u64 Xi[2], const u128 htable[16],
-        const u8 *in,size_t len);
+                 const u8 *in, size_t len);
 
 struct p8_ghash_ctx {
-    u128 htable[16];
-    struct crypto_shash *fallback;
+       u128 htable[16];
+       struct crypto_shash *fallback;
 };
 
 struct p8_ghash_desc_ctx {
-    u64 shash[2];
-    u8 buffer[GHASH_DIGEST_SIZE];
-    int bytes;
-    struct shash_desc fallback_desc;
+       u64 shash[2];
+       u8 buffer[GHASH_DIGEST_SIZE];
+       int bytes;
+       struct shash_desc fallback_desc;
 };
 
 static int p8_ghash_init_tfm(struct crypto_tfm *tfm)
 {
-    const char *alg;
-    struct crypto_shash *fallback;
-    struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
-    struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
-
-    if (!(alg = crypto_tfm_alg_name(tfm))) {
-        printk(KERN_ERR "Failed to get algorithm name.\n");
-        return -ENOENT;
-    }
-
-    fallback = crypto_alloc_shash(alg, 0 ,CRYPTO_ALG_NEED_FALLBACK);
-    if (IS_ERR(fallback)) {
-        printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n",
-                alg, PTR_ERR(fallback));
-        return PTR_ERR(fallback);
-    }
-    printk(KERN_INFO "Using '%s' as fallback implementation.\n",
-            crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback)));
-
-    crypto_shash_set_flags(fallback,
-            crypto_shash_get_flags((struct crypto_shash *) tfm));
-    ctx->fallback = fallback;
-
-    shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx)
-        + crypto_shash_descsize(fallback);
-
-    return 0;
+       const char *alg;
+       struct crypto_shash *fallback;
+       struct crypto_shash *shash_tfm = __crypto_shash_cast(tfm);
+       struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       if (!(alg = crypto_tfm_alg_name(tfm))) {
+               printk(KERN_ERR "Failed to get algorithm name.\n");
+               return -ENOENT;
+       }
+
+       fallback = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(fallback)) {
+               printk(KERN_ERR
+                      "Failed to allocate transformation for '%s': %ld\n",
+                      alg, PTR_ERR(fallback));
+               return PTR_ERR(fallback);
+       }
+       printk(KERN_INFO "Using '%s' as fallback implementation.\n",
+              crypto_tfm_alg_driver_name(crypto_shash_tfm(fallback)));
+
+       crypto_shash_set_flags(fallback,
+                              crypto_shash_get_flags((struct crypto_shash
+                                                      *) tfm));
+       ctx->fallback = fallback;
+
+       shash_tfm->descsize = sizeof(struct p8_ghash_desc_ctx)
+           + crypto_shash_descsize(fallback);
+
+       return 0;
 }
 
 static void p8_ghash_exit_tfm(struct crypto_tfm *tfm)
 {
-    struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm);
 
-    if (ctx->fallback) {
-        crypto_free_shash(ctx->fallback);
-        ctx->fallback = NULL;
-    }
+       if (ctx->fallback) {
+               crypto_free_shash(ctx->fallback);
+               ctx->fallback = NULL;
+       }
 }
 
 static int p8_ghash_init(struct shash_desc *desc)
 {
-    struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
-    struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-
-    dctx->bytes = 0;
-    memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
-    dctx->fallback_desc.tfm = ctx->fallback;
-    dctx->fallback_desc.flags = desc->flags;
-    return crypto_shash_init(&dctx->fallback_desc);
+       struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+       struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+       dctx->bytes = 0;
+       memset(dctx->shash, 0, GHASH_DIGEST_SIZE);
+       dctx->fallback_desc.tfm = ctx->fallback;
+       dctx->fallback_desc.flags = desc->flags;
+       return crypto_shash_init(&dctx->fallback_desc);
 }
 
 static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
-    unsigned int keylen)
+                          unsigned int keylen)
 {
-    struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm));
+       struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm));
 
-    if (keylen != GHASH_KEY_LEN)
-        return -EINVAL;
+       if (keylen != GHASH_KEY_LEN)
+               return -EINVAL;
 
-    pagefault_disable();
-    enable_kernel_altivec();
-    enable_kernel_fp();
-    gcm_init_p8(ctx->htable, (const u64 *) key);
-    pagefault_enable();
-    return crypto_shash_setkey(ctx->fallback, key, keylen);
+       pagefault_disable();
+       enable_kernel_altivec();
+       enable_kernel_fp();
+       gcm_init_p8(ctx->htable, (const u64 *) key);
+       pagefault_enable();
+       return crypto_shash_setkey(ctx->fallback, key, keylen);
 }
 
 static int p8_ghash_update(struct shash_desc *desc,
-        const u8 *src, unsigned int srclen)
+                          const u8 *src, unsigned int srclen)
 {
-    unsigned int len;
-    struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
-    struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-
-    if (IN_INTERRUPT) {
-        return crypto_shash_update(&dctx->fallback_desc, src, srclen);
-    } else {
-        if (dctx->bytes) {
-            if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
-                memcpy(dctx->buffer + dctx->bytes, src, srclen);
-                dctx->bytes += srclen;
-                return 0;
-            }
-            memcpy(dctx->buffer + dctx->bytes, src,
-                    GHASH_DIGEST_SIZE - dctx->bytes);
-            pagefault_disable();
-            enable_kernel_altivec();
-            enable_kernel_fp();
-            gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
-                    GHASH_DIGEST_SIZE);
-            pagefault_enable();
-            src += GHASH_DIGEST_SIZE - dctx->bytes;
-            srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
-            dctx->bytes = 0;
-        }
-        len = srclen & ~(GHASH_DIGEST_SIZE - 1);
-        if (len) {
-            pagefault_disable();
-            enable_kernel_altivec();
-            enable_kernel_fp();
-            gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
-            pagefault_enable();
-            src += len;
-            srclen -= len;
-        }
-        if (srclen) {
-            memcpy(dctx->buffer, src, srclen);
-            dctx->bytes = srclen;
-        }
-        return 0;
-    }
+       unsigned int len;
+       struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+       struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+       if (IN_INTERRUPT) {
+               return crypto_shash_update(&dctx->fallback_desc, src,
+                                          srclen);
+       } else {
+               if (dctx->bytes) {
+                       if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) {
+                               memcpy(dctx->buffer + dctx->bytes, src,
+                                      srclen);
+                               dctx->bytes += srclen;
+                               return 0;
+                       }
+                       memcpy(dctx->buffer + dctx->bytes, src,
+                              GHASH_DIGEST_SIZE - dctx->bytes);
+                       pagefault_disable();
+                       enable_kernel_altivec();
+                       enable_kernel_fp();
+                       gcm_ghash_p8(dctx->shash, ctx->htable,
+                                    dctx->buffer, GHASH_DIGEST_SIZE);
+                       pagefault_enable();
+                       src += GHASH_DIGEST_SIZE - dctx->bytes;
+                       srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
+                       dctx->bytes = 0;
+               }
+               len = srclen & ~(GHASH_DIGEST_SIZE - 1);
+               if (len) {
+                       pagefault_disable();
+                       enable_kernel_altivec();
+                       enable_kernel_fp();
+                       gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
+                       pagefault_enable();
+                       src += len;
+                       srclen -= len;
+               }
+               if (srclen) {
+                       memcpy(dctx->buffer, src, srclen);
+                       dctx->bytes = srclen;
+               }
+               return 0;
+       }
 }
 
 static int p8_ghash_final(struct shash_desc *desc, u8 *out)
 {
-    int i;
-    struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
-    struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-
-    if (IN_INTERRUPT) {
-        return crypto_shash_final(&dctx->fallback_desc, out);
-    } else {
-        if (dctx->bytes) {
-            for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
-                dctx->buffer[i] = 0;
-            pagefault_disable();
-            enable_kernel_altivec();
-            enable_kernel_fp();
-            gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
-                    GHASH_DIGEST_SIZE);
-            pagefault_enable();
-            dctx->bytes = 0;
-        }
-        memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
-        return 0;
-    }
+       int i;
+       struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm));
+       struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+       if (IN_INTERRUPT) {
+               return crypto_shash_final(&dctx->fallback_desc, out);
+       } else {
+               if (dctx->bytes) {
+                       for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
+                               dctx->buffer[i] = 0;
+                       pagefault_disable();
+                       enable_kernel_altivec();
+                       enable_kernel_fp();
+                       gcm_ghash_p8(dctx->shash, ctx->htable,
+                                    dctx->buffer, GHASH_DIGEST_SIZE);
+                       pagefault_enable();
+                       dctx->bytes = 0;
+               }
+               memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
+               return 0;
+       }
 }
 
 struct shash_alg p8_ghash_alg = {
-    .digestsize = GHASH_DIGEST_SIZE,
-    .init       = p8_ghash_init,
-    .update     = p8_ghash_update,
-    .final      = p8_ghash_final,
-    .setkey     = p8_ghash_setkey,
-    .descsize   = sizeof(struct p8_ghash_desc_ctx),
-    .base       = {
-        .cra_name = "ghash",
-        .cra_driver_name = "p8_ghash",
-        .cra_priority = 1000,
-        .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_NEED_FALLBACK,
-        .cra_blocksize = GHASH_BLOCK_SIZE,
-        .cra_ctxsize = sizeof(struct p8_ghash_ctx),
-        .cra_module = THIS_MODULE,
-        .cra_init = p8_ghash_init_tfm,
-        .cra_exit = p8_ghash_exit_tfm,
-    },
+       .digestsize = GHASH_DIGEST_SIZE,
+       .init = p8_ghash_init,
+       .update = p8_ghash_update,
+       .final = p8_ghash_final,
+       .setkey = p8_ghash_setkey,
+       .descsize = sizeof(struct p8_ghash_desc_ctx),
+       .base = {
+                .cra_name = "ghash",
+                .cra_driver_name = "p8_ghash",
+                .cra_priority = 1000,
+                .cra_flags = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_NEED_FALLBACK,
+                .cra_blocksize = GHASH_BLOCK_SIZE,
+                .cra_ctxsize = sizeof(struct p8_ghash_ctx),
+                .cra_module = THIS_MODULE,
+                .cra_init = p8_ghash_init_tfm,
+                .cra_exit = p8_ghash_exit_tfm,
+       },
 };
drivers/crypto/vmx/vmx.c
index 4c398ddd8c10de5ac4f87e106aba6551f4d6b117..e163d5770438aa63f89cef0a74497b921423d42f 100644
@@ -32,57 +32,57 @@ extern struct crypto_alg p8_aes_alg;
 extern struct crypto_alg p8_aes_cbc_alg;
 extern struct crypto_alg p8_aes_ctr_alg;
 static struct crypto_alg *algs[] = {
-    &p8_aes_alg,
-    &p8_aes_cbc_alg,
-    &p8_aes_ctr_alg,
-    NULL,
+       &p8_aes_alg,
+       &p8_aes_cbc_alg,
+       &p8_aes_ctr_alg,
+       NULL,
 };
 
 int __init p8_init(void)
 {
-    int ret = 0;
-    struct crypto_alg **alg_it;
+       int ret = 0;
+       struct crypto_alg **alg_it;
 
-    if (!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO))
-        return -ENODEV;
+       if (!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_VEC_CRYPTO))
+               return -ENODEV;
 
-    for (alg_it = algs; *alg_it; alg_it++) {
-        ret = crypto_register_alg(*alg_it);
-        printk(KERN_INFO "crypto_register_alg '%s' = %d\n",
-                (*alg_it)->cra_name, ret);
-        if (ret) {
-            for (alg_it--; alg_it >= algs; alg_it--)
-                crypto_unregister_alg(*alg_it);
-            break;
-        }
-    }
-    if (ret)
-        return ret;
+       for (alg_it = algs; *alg_it; alg_it++) {
+               ret = crypto_register_alg(*alg_it);
+               printk(KERN_INFO "crypto_register_alg '%s' = %d\n",
+                      (*alg_it)->cra_name, ret);
+               if (ret) {
+                       for (alg_it--; alg_it >= algs; alg_it--)
+                               crypto_unregister_alg(*alg_it);
+                       break;
+               }
+       }
+       if (ret)
+               return ret;
 
-    ret = crypto_register_shash(&p8_ghash_alg);
-    if (ret) {
-        for (alg_it = algs; *alg_it; alg_it++)
-            crypto_unregister_alg(*alg_it);
-    }
-    return ret;
+       ret = crypto_register_shash(&p8_ghash_alg);
+       if (ret) {
+               for (alg_it = algs; *alg_it; alg_it++)
+                       crypto_unregister_alg(*alg_it);
+       }
+       return ret;
 }
 
 void __exit p8_exit(void)
 {
-    struct crypto_alg **alg_it;
+       struct crypto_alg **alg_it;
 
-    for (alg_it = algs; *alg_it; alg_it++) {
-        printk(KERN_INFO "Removing '%s'\n", (*alg_it)->cra_name);
-        crypto_unregister_alg(*alg_it);
-    }
-    crypto_unregister_shash(&p8_ghash_alg);
+       for (alg_it = algs; *alg_it; alg_it++) {
+               printk(KERN_INFO "Removing '%s'\n", (*alg_it)->cra_name);
+               crypto_unregister_alg(*alg_it);
+       }
+       crypto_unregister_shash(&p8_ghash_alg);
 }
 
 module_init(p8_init);
 module_exit(p8_exit);
 
 MODULE_AUTHOR("Marcelo Cerri<mhcerri@br.ibm.com>");
-MODULE_DESCRIPTION("IBM VMX cryptographic acceleration instructions support on Power 8");
+MODULE_DESCRIPTION("IBM VMX cryptographic acceleration instructions "
+                  "support on Power 8");
 MODULE_LICENSE("GPL");
 MODULE_VERSION("1.0.0");
-