Fix coding style in library/armv8ce_aes.c

Signed-off-by: Hanno Becker <hanno.becker@arm.com>

diff --git a/library/armv8ce_aes.c b/library/armv8ce_aes.c
index df686b4..37b9fab 100644
--- a/library/armv8ce_aes.c
+++ b/library/armv8ce_aes.c
@@ -49,12 +49,12 @@
const uint8_t *rk;
uint8x16_t x, k;
- x = vld1q_u8( input ); // input block
- rk = (const uint8_t *) ctx->rk; // round keys
+ x = vld1q_u8( input ); /* input block */
+ rk = (const uint8_t *) ctx->rk; /* round keys */
if( mode == MBEDTLS_AES_ENCRYPT )
{
- for( i = ctx->nr - 1; i ; i-- ) // encryption loop
+ for( i = ctx->nr - 1; i != 0; i-- ) /* encryption loop */
{
k = vld1q_u8( rk );
rk += 16;
@@ -67,7 +67,7 @@
}
else
{
- for( i = ctx->nr - 1; i ; i-- ) // decryption loop
+ for( i = ctx->nr - 1; i != 0; i-- ) /* decryption loop */
{
k = vld1q_u8( rk );
rk += 16;
@@ -79,9 +79,9 @@
x = vaesdq_u8( x, k );
}
- k = vld1q_u8( rk ); // final key just XORed
+ k = vld1q_u8( rk ); /* final key just XORed */
x = veorq_u8( x, k );
- vst1q_u8( output, x ); // write out
+ vst1q_u8( output, x ); /* write out */
return ( 0 );
}
@@ -99,42 +99,42 @@
const unsigned char a[16],
const unsigned char b[16] )
{
- // GCM's GF(2^128) polynomial basis is x^128 + x^7 + x^2 + x + 1
- const uint64x2_t base = { 0, 0x86 }; // note missing LS bit
+ /* GCM's GF(2^128) polynomial basis is x^128 + x^7 + x^2 + x + 1 */
+ const uint64x2_t base = { 0, 0x86 }; /* note missing LS bit */
- register uint8x16_t vc asm( "v0" ); // named registers
- register uint8x16_t va asm( "v1" ); // (to avoid conflict)
+ register uint8x16_t vc asm( "v0" ); /* named registers */
+ register uint8x16_t va asm( "v1" ); /* (to avoid conflict) */
register uint8x16_t vb asm( "v2" );
register uint64x2_t vp asm( "v3" );
- va = vld1q_u8( a ); // load inputs
+ va = vld1q_u8( a ); /* load inputs */
vb = vld1q_u8( b );
vp = base;
asm (
- "rbit %1.16b, %1.16b \n\t" // reverse bit order
+ "rbit %1.16b, %1.16b \n\t" /* reverse bit order */
"rbit %2.16b, %2.16b \n\t"
- "pmull2 %0.1q, %1.2d, %2.2d \n\t" // v0 = a.hi * b.hi
- "pmull2 v4.1q, %0.2d, %3.2d \n\t" // mul v0 by x^64, reduce
+ "pmull2 %0.1q, %1.2d, %2.2d \n\t" /* v0 = a.hi * b.hi */
+ "pmull2 v4.1q, %0.2d, %3.2d \n\t" /* mul v0 by x^64, reduce */
"ext %0.16b, %0.16b, %0.16b, #8 \n\t"
"eor %0.16b, %0.16b, v4.16b \n\t"
- "ext v5.16b, %2.16b, %2.16b, #8 \n\t" // (swap hi and lo in b)
- "pmull v4.1q, %1.1d, v5.1d \n\t" // v0 ^= a.lo * b.hi
+ "ext v5.16b, %2.16b, %2.16b, #8 \n\t" /* (swap hi and lo in b) */
+ "pmull v4.1q, %1.1d, v5.1d \n\t" /* v0 ^= a.lo * b.hi */
"eor %0.16b, %0.16b, v4.16b \n\t"
- "pmull2 v4.1q, %1.2d, v5.2d \n\t" // v0 ^= a.hi * b.lo
+ "pmull2 v4.1q, %1.2d, v5.2d \n\t" /* v0 ^= a.hi * b.lo */
"eor %0.16b, %0.16b, v4.16b \n\t"
- "pmull2 v4.1q, %0.2d, %3.2d \n\t" // mul v0 by x^64, reduce
+ "pmull2 v4.1q, %0.2d, %3.2d \n\t" /* mul v0 by x^64, reduce */
"ext %0.16b, %0.16b, %0.16b, #8 \n\t"
"eor %0.16b, %0.16b, v4.16b \n\t"
- "pmull v4.1q, %1.1d, %2.1d \n\t" // v0 ^= a.lo * b.lo
+ "pmull v4.1q, %1.1d, %2.1d \n\t" /* v0 ^= a.lo * b.lo */
"eor %0.16b, %0.16b, v4.16b \n\t"
- "rbit %0.16b, %0.16b \n\t" // reverse bits for output
- : "=w" (vc) // q0: output
- : "w" (va), "w" (vb), "w" (vp) // q1, q2: input
- : "v4", "v5" // q4, q5: clobbered
+ "rbit %0.16b, %0.16b \n\t" /* reverse bits for output */
+ : "=w" (vc) /* q0: output */
+ : "w" (va), "w" (vb), "w" (vp) /* q1, q2: input */
+ : "v4", "v5" /* q4, q5: clobbered */
);
- vst1q_u8( c, vc ); // write out
+ vst1q_u8( c, vc ); /* write out */
}
#endif /* MBEDTLS_GCM_C */
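
Note on the GHASH hunk: the inline asm multiplies two elements of GF(2^128)
in GCM's bit-reflected representation (hence the rbit on input and output),
reducing modulo x^128 + x^7 + x^2 + x + 1. For cross-checking the PMULL path
on a host machine, a minimal portable sketch of the same multiplication
follows, using the shift-and-reduce algorithm from NIST SP 800-38D; the
helper name gf128_mul_ref is an assumption for illustration, not part of
this patch.

#include <string.h>

/* Reference multiplication in GF(2^128), r = x * y, in GCM's bit-reflected
 * representation (MSB-first bytes, as consumed by GHASH). */
static void gf128_mul_ref( unsigned char r[16],
                           const unsigned char x[16],
                           const unsigned char y[16] )
{
    unsigned char z[16] = { 0 };
    unsigned char v[16];
    int i, j, k;

    memcpy( v, y, 16 );

    for( i = 0; i < 16; i++ )
        for( j = 7; j >= 0; j-- )       /* walk the bits of x, MSB first */
        {
            int carry;

            if( ( x[i] >> j ) & 1 )     /* z ^= v whenever the bit is set */
                for( k = 0; k < 16; k++ )
                    z[k] ^= v[k];

            /* v *= x: one-bit right shift in the reflected representation */
            carry = v[15] & 1;
            for( k = 15; k > 0; k-- )
                v[k] = (unsigned char)( ( v[k] >> 1 ) | ( v[k - 1] << 7 ) );
            v[0] >>= 1;

            if( carry )                 /* reduce: x^128 = x^7 + x^2 + x + 1 */
                v[0] ^= 0xE1;
        }

    memcpy( r, z, 16 );
}

Comparing its output against the accelerated routine above for random inputs
gives a quick sanity check of the reduction steps.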