Switch to the new code style
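
The new style, as applied throughout this diff, drops the spaces just inside parentheses, removes the parentheses around return values, puts the opening brace of a control statement on the same line, and requires braces around single-statement bodies. A rough before/after sketch (a hypothetical helper for illustration, not code from aesni.c):

    /* Old style:
     *
     * int is_positive( int x )
     * {
     *     if( x > 0 )
     *         return( 1 );
     *
     *     return( 0 );
     * }
     */

    /* New style: no space inside parentheses, cuddled braces on
     * control statements, braces around single statements, and
     * return without parentheses. Function braces stay on their
     * own line. */
    int is_positive(int x)
    {
        if (x > 0) {
            return 1;
        }

        return 0;
    }
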
Signed-off-by: Gilles Peskine <Gilles.Peskine@arm.com>
diff --git a/library/aesni.c b/library/aesni.c
index 12125c9..d4abb4d 100644
--- a/library/aesni.c
+++ b/library/aesni.c
@@ -28,7 +28,8 @@
#if defined(__has_feature)
#if __has_feature(memory_sanitizer)
-#warning "MBEDTLS_AESNI_C is known to cause spurious error reports with some memory sanitizers as they do not understand the assembly code."
+#warning \
+ "MBEDTLS_AESNI_C is known to cause spurious error reports with some memory sanitizers as they do not understand the assembly code."
#endif
#endif
@@ -47,22 +48,21 @@
/*
* AES-NI support detection routine
*/
-int mbedtls_aesni_has_support( unsigned int what )
+int mbedtls_aesni_has_support(unsigned int what)
{
static int done = 0;
static unsigned int c = 0;
- if( ! done )
- {
- asm( "movl $1, %%eax \n\t"
+ if (!done) {
+ asm ("movl $1, %%eax \n\t"
"cpuid \n\t"
: "=c" (c)
:
- : "eax", "ebx", "edx" );
+ : "eax", "ebx", "edx");
done = 1;
}
- return( ( c & what ) != 0 );
+ return (c & what) != 0;
}
/*
@@ -94,12 +94,12 @@
/*
* AES-NI AES-ECB block en(de)cryption
*/
-int mbedtls_aesni_crypt_ecb( mbedtls_aes_context *ctx,
- int mode,
- const unsigned char input[16],
- unsigned char output[16] )
+int mbedtls_aesni_crypt_ecb(mbedtls_aes_context *ctx,
+ int mode,
+ const unsigned char input[16],
+ unsigned char output[16])
{
- asm( "movdqu (%3), %%xmm0 \n\t" // load input
+ asm ("movdqu (%3), %%xmm0 \n\t" // load input
"movdqu (%1), %%xmm1 \n\t" // load round key 0
"pxor %%xmm1, %%xmm0 \n\t" // round 0
"add $16, %1 \n\t" // point to next round key
@@ -110,51 +110,50 @@
"1: \n\t" // encryption loop
"movdqu (%1), %%xmm1 \n\t" // load round key
AESENC xmm1_xmm0 "\n\t" // do round
- "add $16, %1 \n\t" // point to next round key
- "subl $1, %0 \n\t" // loop
- "jnz 1b \n\t"
- "movdqu (%1), %%xmm1 \n\t" // load round key
+ "add $16, %1 \n\t" // point to next round key
+ "subl $1, %0 \n\t" // loop
+ "jnz 1b \n\t"
+ "movdqu (%1), %%xmm1 \n\t" // load round key
AESENCLAST xmm1_xmm0 "\n\t" // last round
- "jmp 3f \n\t"
+ "jmp 3f \n\t"
- "2: \n\t" // decryption loop
- "movdqu (%1), %%xmm1 \n\t"
+ "2: \n\t" // decryption loop
+ "movdqu (%1), %%xmm1 \n\t"
AESDEC xmm1_xmm0 "\n\t" // do round
- "add $16, %1 \n\t"
- "subl $1, %0 \n\t"
- "jnz 2b \n\t"
- "movdqu (%1), %%xmm1 \n\t" // load round key
+ "add $16, %1 \n\t"
+ "subl $1, %0 \n\t"
+ "jnz 2b \n\t"
+ "movdqu (%1), %%xmm1 \n\t" // load round key
AESDECLAST xmm1_xmm0 "\n\t" // last round
- "3: \n\t"
- "movdqu %%xmm0, (%4) \n\t" // export output
+ "3: \n\t"
+ "movdqu %%xmm0, (%4) \n\t" // export output
:
: "r" (ctx->nr), "r" (ctx->buf + ctx->rk_offset), "r" (mode), "r" (input), "r" (output)
- : "memory", "cc", "xmm0", "xmm1" );
+ : "memory", "cc", "xmm0", "xmm1");
- return( 0 );
+ return 0;
}
/*
* GCM multiplication: c = a times b in GF(2^128)
* Based on [CLMUL-WP] algorithms 1 (with equation 27) and 5.
*/
-void mbedtls_aesni_gcm_mult( unsigned char c[16],
- const unsigned char a[16],
- const unsigned char b[16] )
+void mbedtls_aesni_gcm_mult(unsigned char c[16],
+ const unsigned char a[16],
+ const unsigned char b[16])
{
unsigned char aa[16], bb[16], cc[16];
size_t i;
/* The inputs are in big-endian order, so byte-reverse them */
- for( i = 0; i < 16; i++ )
- {
+ for (i = 0; i < 16; i++) {
aa[i] = a[15 - i];
bb[i] = b[15 - i];
}
- asm( "movdqu (%0), %%xmm0 \n\t" // a1:a0
+ asm ("movdqu (%0), %%xmm0 \n\t" // a1:a0
"movdqu (%1), %%xmm1 \n\t" // b1:b0
/*
@@ -168,30 +167,30 @@
PCLMULQDQ xmm0_xmm2 ",0x11 \n\t" // a1*b1 = d1:d0
PCLMULQDQ xmm0_xmm3 ",0x10 \n\t" // a0*b1 = e1:e0
PCLMULQDQ xmm0_xmm4 ",0x01 \n\t" // a1*b0 = f1:f0
- "pxor %%xmm3, %%xmm4 \n\t" // e1+f1:e0+f0
- "movdqa %%xmm4, %%xmm3 \n\t" // same
- "psrldq $8, %%xmm4 \n\t" // 0:e1+f1
- "pslldq $8, %%xmm3 \n\t" // e0+f0:0
- "pxor %%xmm4, %%xmm2 \n\t" // d1:d0+e1+f1
- "pxor %%xmm3, %%xmm1 \n\t" // c1+e0+f1:c0
+ "pxor %%xmm3, %%xmm4 \n\t" // e1+f1:e0+f0
+ "movdqa %%xmm4, %%xmm3 \n\t" // same
+ "psrldq $8, %%xmm4 \n\t" // 0:e1+f1
+ "pslldq $8, %%xmm3 \n\t" // e0+f0:0
+ "pxor %%xmm4, %%xmm2 \n\t" // d1:d0+e1+f1
+ "pxor %%xmm3, %%xmm1 \n\t" // c1+e0+f1:c0
/*
* Now shift the result one bit to the left,
* taking advantage of [CLMUL-WP] eq 27 (p. 20)
*/
- "movdqa %%xmm1, %%xmm3 \n\t" // r1:r0
- "movdqa %%xmm2, %%xmm4 \n\t" // r3:r2
- "psllq $1, %%xmm1 \n\t" // r1<<1:r0<<1
- "psllq $1, %%xmm2 \n\t" // r3<<1:r2<<1
- "psrlq $63, %%xmm3 \n\t" // r1>>63:r0>>63
- "psrlq $63, %%xmm4 \n\t" // r3>>63:r2>>63
- "movdqa %%xmm3, %%xmm5 \n\t" // r1>>63:r0>>63
- "pslldq $8, %%xmm3 \n\t" // r0>>63:0
- "pslldq $8, %%xmm4 \n\t" // r2>>63:0
- "psrldq $8, %%xmm5 \n\t" // 0:r1>>63
- "por %%xmm3, %%xmm1 \n\t" // r1<<1|r0>>63:r0<<1
- "por %%xmm4, %%xmm2 \n\t" // r3<<1|r2>>62:r2<<1
- "por %%xmm5, %%xmm2 \n\t" // r3<<1|r2>>62:r2<<1|r1>>63
+ "movdqa %%xmm1, %%xmm3 \n\t" // r1:r0
+ "movdqa %%xmm2, %%xmm4 \n\t" // r3:r2
+ "psllq $1, %%xmm1 \n\t" // r1<<1:r0<<1
+ "psllq $1, %%xmm2 \n\t" // r3<<1:r2<<1
+ "psrlq $63, %%xmm3 \n\t" // r1>>63:r0>>63
+ "psrlq $63, %%xmm4 \n\t" // r3>>63:r2>>63
+ "movdqa %%xmm3, %%xmm5 \n\t" // r1>>63:r0>>63
+ "pslldq $8, %%xmm3 \n\t" // r0>>63:0
+ "pslldq $8, %%xmm4 \n\t" // r2>>63:0
+ "psrldq $8, %%xmm5 \n\t" // 0:r1>>63
+ "por %%xmm3, %%xmm1 \n\t" // r1<<1|r0>>63:r0<<1
+ "por %%xmm4, %%xmm2 \n\t" // r3<<1|r2>>62:r2<<1
+ "por %%xmm5, %%xmm2 \n\t" // r3<<1|r2>>62:r2<<1|r1>>63
/*
* Now reduce modulo the GCM polynomial x^128 + x^7 + x^2 + x + 1
@@ -199,51 +198,52 @@
* Currently xmm2:xmm1 holds x3:x2:x1:x0 (already shifted).
*/
/* Step 2 (1) */
- "movdqa %%xmm1, %%xmm3 \n\t" // x1:x0
- "movdqa %%xmm1, %%xmm4 \n\t" // same
- "movdqa %%xmm1, %%xmm5 \n\t" // same
- "psllq $63, %%xmm3 \n\t" // x1<<63:x0<<63 = stuff:a
- "psllq $62, %%xmm4 \n\t" // x1<<62:x0<<62 = stuff:b
- "psllq $57, %%xmm5 \n\t" // x1<<57:x0<<57 = stuff:c
+ "movdqa %%xmm1, %%xmm3 \n\t" // x1:x0
+ "movdqa %%xmm1, %%xmm4 \n\t" // same
+ "movdqa %%xmm1, %%xmm5 \n\t" // same
+ "psllq $63, %%xmm3 \n\t" // x1<<63:x0<<63 = stuff:a
+ "psllq $62, %%xmm4 \n\t" // x1<<62:x0<<62 = stuff:b
+ "psllq $57, %%xmm5 \n\t" // x1<<57:x0<<57 = stuff:c
/* Step 2 (2) */
- "pxor %%xmm4, %%xmm3 \n\t" // stuff:a+b
- "pxor %%xmm5, %%xmm3 \n\t" // stuff:a+b+c
- "pslldq $8, %%xmm3 \n\t" // a+b+c:0
- "pxor %%xmm3, %%xmm1 \n\t" // x1+a+b+c:x0 = d:x0
+ "pxor %%xmm4, %%xmm3 \n\t" // stuff:a+b
+ "pxor %%xmm5, %%xmm3 \n\t" // stuff:a+b+c
+ "pslldq $8, %%xmm3 \n\t" // a+b+c:0
+ "pxor %%xmm3, %%xmm1 \n\t" // x1+a+b+c:x0 = d:x0
/* Steps 3 and 4 */
- "movdqa %%xmm1,%%xmm0 \n\t" // d:x0
- "movdqa %%xmm1,%%xmm4 \n\t" // same
- "movdqa %%xmm1,%%xmm5 \n\t" // same
- "psrlq $1, %%xmm0 \n\t" // e1:x0>>1 = e1:e0'
- "psrlq $2, %%xmm4 \n\t" // f1:x0>>2 = f1:f0'
- "psrlq $7, %%xmm5 \n\t" // g1:x0>>7 = g1:g0'
- "pxor %%xmm4, %%xmm0 \n\t" // e1+f1:e0'+f0'
- "pxor %%xmm5, %%xmm0 \n\t" // e1+f1+g1:e0'+f0'+g0'
+ "movdqa %%xmm1,%%xmm0 \n\t" // d:x0
+ "movdqa %%xmm1,%%xmm4 \n\t" // same
+ "movdqa %%xmm1,%%xmm5 \n\t" // same
+ "psrlq $1, %%xmm0 \n\t" // e1:x0>>1 = e1:e0'
+ "psrlq $2, %%xmm4 \n\t" // f1:x0>>2 = f1:f0'
+ "psrlq $7, %%xmm5 \n\t" // g1:x0>>7 = g1:g0'
+ "pxor %%xmm4, %%xmm0 \n\t" // e1+f1:e0'+f0'
+ "pxor %%xmm5, %%xmm0 \n\t" // e1+f1+g1:e0'+f0'+g0'
// e0'+f0'+g0' is almost e0+f0+g0, except for some missing
// bits carried from d. Now get those bits back in.
- "movdqa %%xmm1,%%xmm3 \n\t" // d:x0
- "movdqa %%xmm1,%%xmm4 \n\t" // same
- "movdqa %%xmm1,%%xmm5 \n\t" // same
- "psllq $63, %%xmm3 \n\t" // d<<63:stuff
- "psllq $62, %%xmm4 \n\t" // d<<62:stuff
- "psllq $57, %%xmm5 \n\t" // d<<57:stuff
- "pxor %%xmm4, %%xmm3 \n\t" // d<<63+d<<62:stuff
- "pxor %%xmm5, %%xmm3 \n\t" // missing bits of d:stuff
- "psrldq $8, %%xmm3 \n\t" // 0:missing bits of d
- "pxor %%xmm3, %%xmm0 \n\t" // e1+f1+g1:e0+f0+g0
- "pxor %%xmm1, %%xmm0 \n\t" // h1:h0
- "pxor %%xmm2, %%xmm0 \n\t" // x3+h1:x2+h0
+ "movdqa %%xmm1,%%xmm3 \n\t" // d:x0
+ "movdqa %%xmm1,%%xmm4 \n\t" // same
+ "movdqa %%xmm1,%%xmm5 \n\t" // same
+ "psllq $63, %%xmm3 \n\t" // d<<63:stuff
+ "psllq $62, %%xmm4 \n\t" // d<<62:stuff
+ "psllq $57, %%xmm5 \n\t" // d<<57:stuff
+ "pxor %%xmm4, %%xmm3 \n\t" // d<<63+d<<62:stuff
+ "pxor %%xmm5, %%xmm3 \n\t" // missing bits of d:stuff
+ "psrldq $8, %%xmm3 \n\t" // 0:missing bits of d
+ "pxor %%xmm3, %%xmm0 \n\t" // e1+f1+g1:e0+f0+g0
+ "pxor %%xmm1, %%xmm0 \n\t" // h1:h0
+ "pxor %%xmm2, %%xmm0 \n\t" // x3+h1:x2+h0
- "movdqu %%xmm0, (%2) \n\t" // done
+ "movdqu %%xmm0, (%2) \n\t" // done
:
: "r" (aa), "r" (bb), "r" (cc)
- : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" );
+ : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5");
/* Now byte-reverse the outputs */
- for( i = 0; i < 16; i++ )
+ for (i = 0; i < 16; i++) {
c[i] = cc[15 - i];
+ }
return;
}
@@ -251,32 +251,33 @@
/*
* Compute decryption round keys from encryption round keys
*/
-void mbedtls_aesni_inverse_key( unsigned char *invkey,
- const unsigned char *fwdkey, int nr )
+void mbedtls_aesni_inverse_key(unsigned char *invkey,
+ const unsigned char *fwdkey, int nr)
{
unsigned char *ik = invkey;
const unsigned char *fk = fwdkey + 16 * nr;
- memcpy( ik, fk, 16 );
+ memcpy(ik, fk, 16);
- for( fk -= 16, ik += 16; fk > fwdkey; fk -= 16, ik += 16 )
- asm( "movdqu (%0), %%xmm0 \n\t"
+ for (fk -= 16, ik += 16; fk > fwdkey; fk -= 16, ik += 16) {
+ asm ("movdqu (%0), %%xmm0 \n\t"
AESIMC xmm0_xmm0 "\n\t"
- "movdqu %%xmm0, (%1) \n\t"
+ "movdqu %%xmm0, (%1) \n\t"
:
: "r" (fk), "r" (ik)
- : "memory", "xmm0" );
+ : "memory", "xmm0");
+ }
- memcpy( ik, fk, 16 );
+ memcpy(ik, fk, 16);
}
/*
* Key expansion, 128-bit case
*/
-static void aesni_setkey_enc_128( unsigned char *rk,
- const unsigned char *key )
+static void aesni_setkey_enc_128(unsigned char *rk,
+ const unsigned char *key)
{
- asm( "movdqu (%1), %%xmm0 \n\t" // copy the original key
+ asm ("movdqu (%1), %%xmm0 \n\t" // copy the original key
"movdqu %%xmm0, (%0) \n\t" // as round key 0
"jmp 2f \n\t" // skip auxiliary routine
@@ -317,16 +318,16 @@
AESKEYGENA xmm0_xmm1 ",0x36 \n\tcall 1b \n\t"
:
: "r" (rk), "r" (key)
- : "memory", "cc", "0" );
+ : "memory", "cc", "0");
}
/*
* Key expansion, 192-bit case
*/
-static void aesni_setkey_enc_192( unsigned char *rk,
- const unsigned char *key )
+static void aesni_setkey_enc_192(unsigned char *rk,
+ const unsigned char *key)
{
- asm( "movdqu (%1), %%xmm0 \n\t" // copy original round key
+ asm ("movdqu (%1), %%xmm0 \n\t" // copy original round key
"movdqu %%xmm0, (%0) \n\t"
"add $16, %0 \n\t"
"movq 16(%1), %%xmm1 \n\t"
@@ -374,16 +375,16 @@
:
: "r" (rk), "r" (key)
- : "memory", "cc", "0" );
+ : "memory", "cc", "0");
}
/*
* Key expansion, 256-bit case
*/
-static void aesni_setkey_enc_256( unsigned char *rk,
- const unsigned char *key )
+static void aesni_setkey_enc_256(unsigned char *rk,
+ const unsigned char *key)
{
- asm( "movdqu (%1), %%xmm0 \n\t"
+ asm ("movdqu (%1), %%xmm0 \n\t"
"movdqu %%xmm0, (%0) \n\t"
"add $16, %0 \n\t"
"movdqu 16(%1), %%xmm1 \n\t"
@@ -414,23 +415,23 @@
/* Set xmm2 to stuff:Y:stuff:stuff with Y = subword( r11 )
* and proceed to generate next round key from there */
AESKEYGENA xmm0_xmm2 ",0x00 \n\t"
- "pshufd $0xaa, %%xmm2, %%xmm2 \n\t"
- "pxor %%xmm1, %%xmm2 \n\t"
- "pslldq $4, %%xmm1 \n\t"
- "pxor %%xmm1, %%xmm2 \n\t"
- "pslldq $4, %%xmm1 \n\t"
- "pxor %%xmm1, %%xmm2 \n\t"
- "pslldq $4, %%xmm1 \n\t"
- "pxor %%xmm2, %%xmm1 \n\t"
- "add $16, %0 \n\t"
- "movdqu %%xmm1, (%0) \n\t"
- "ret \n\t"
+ "pshufd $0xaa, %%xmm2, %%xmm2 \n\t"
+ "pxor %%xmm1, %%xmm2 \n\t"
+ "pslldq $4, %%xmm1 \n\t"
+ "pxor %%xmm1, %%xmm2 \n\t"
+ "pslldq $4, %%xmm1 \n\t"
+ "pxor %%xmm1, %%xmm2 \n\t"
+ "pslldq $4, %%xmm1 \n\t"
+ "pxor %%xmm2, %%xmm1 \n\t"
+ "add $16, %0 \n\t"
+ "movdqu %%xmm1, (%0) \n\t"
+ "ret \n\t"
/*
* Main "loop" - Generating one more key than necessary,
* see definition of mbedtls_aes_context.buf
*/
- "2: \n\t"
+ "2: \n\t"
AESKEYGENA xmm1_xmm2 ",0x01 \n\tcall 1b \n\t"
AESKEYGENA xmm1_xmm2 ",0x02 \n\tcall 1b \n\t"
AESKEYGENA xmm1_xmm2 ",0x04 \n\tcall 1b \n\t"
@@ -440,25 +441,24 @@
AESKEYGENA xmm1_xmm2 ",0x40 \n\tcall 1b \n\t"
:
: "r" (rk), "r" (key)
- : "memory", "cc", "0" );
+ : "memory", "cc", "0");
}
/*
* Key expansion, wrapper
*/
-int mbedtls_aesni_setkey_enc( unsigned char *rk,
- const unsigned char *key,
- size_t bits )
+int mbedtls_aesni_setkey_enc(unsigned char *rk,
+ const unsigned char *key,
+ size_t bits)
{
- switch( bits )
- {
- case 128: aesni_setkey_enc_128( rk, key ); break;
- case 192: aesni_setkey_enc_192( rk, key ); break;
- case 256: aesni_setkey_enc_256( rk, key ); break;
- default : return( MBEDTLS_ERR_AES_INVALID_KEY_LENGTH );
+ switch (bits) {
+ case 128: aesni_setkey_enc_128(rk, key); break;
+ case 192: aesni_setkey_enc_192(rk, key); break;
+ case 256: aesni_setkey_enc_256(rk, key); break;
+ default: return MBEDTLS_ERR_AES_INVALID_KEY_LENGTH;
}
- return( 0 );
+ return 0;
}
#endif /* MBEDTLS_HAVE_X86_64 */