From 4e062fb62f334d6541698dc46775a95025e1a61d Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Mon, 14 Nov 2011 18:19:27 +0100
Subject: [PATCH 273/352] x86: crypto: Reduce preempt disabled regions

Restrict the preempt disabled regions to the actual floating point
operations and enable preemption for the administrative actions.

This is necessary on RT to avoid that kfree and other operations are
called with preemption disabled.
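
For illustration, each converted walk loop goes from the first shape
below to the second (a sketch distilled from the ecb_encrypt() hunk;
the other modes follow the same pattern):

	/* before: preemption stays off across blkcipher_walk_done(),
	 * which can kfree() -- not allowed on RT */
	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

	/* after: preemption is disabled only around the FPU work */
	while ((nbytes = walk.nbytes)) {
		kernel_fpu_begin();
		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
				nbytes & AES_BLOCK_MASK);
		kernel_fpu_end();
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}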

Reported-and-tested-by: Carsten Emde <cbe@osadl.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 arch/x86/crypto/aesni-intel_glue.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)

diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index d9ae404..0f1d62d 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -372,14 +372,14 @@ static int ecb_encrypt(struct blkcipher_desc *desc,
 	err = blkcipher_walk_virt(desc, &walk);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-			      nbytes & AES_BLOCK_MASK);
+				nbytes & AES_BLOCK_MASK);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
@@ -396,14 +396,14 @@ static int ecb_decrypt(struct blkcipher_desc *desc,
 	err = blkcipher_walk_virt(desc, &walk);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
@@ -420,14 +420,14 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
 	err = blkcipher_walk_virt(desc, &walk);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK, walk.iv);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
@@ -444,14 +444,14 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
 	err = blkcipher_walk_virt(desc, &walk);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK, walk.iv);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
@@ -503,18 +503,20 @@ static int ctr_crypt(struct blkcipher_desc *desc,
 	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+		kernel_fpu_begin();
 		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK, walk.iv);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
 	if (walk.nbytes) {
+		kernel_fpu_begin();
 		ctr_crypt_final(ctx, &walk);
+		kernel_fpu_end();
 		err = blkcipher_walk_done(desc, &walk, 0);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
-- 
2.7.4