Changed every memcpy to the SCA-resistant equivalent mbedtls_platform_memcpy

This makes physical attacks, such as side-channel analysis of the copy operation, more difficult.
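
The actual behaviour of mbedtls_platform_memcpy is defined by the platform
module and is not shown in this patch. As a rough illustration only, a copy
routine hardened against side-channel analysis might randomise the order in
which bytes are copied, so that the access pattern observed on a power or EM
trace does not line up trivially with buffer offsets. The sketch below is
hypothetical: the name sca_memcpy_sketch and the use of rand() are
placeholders, not the Mbed TLS implementation.

    #include <stddef.h>
    #include <stdlib.h>

    /*
     * Illustrative sketch only: copy len bytes from src to dst, starting at
     * a random offset and wrapping around, so every byte is copied exactly
     * once but in a less predictable order.
     */
    static void *sca_memcpy_sketch( void *dst, const void *src, size_t len )
    {
        unsigned char *d = (unsigned char *) dst;
        const unsigned char *s = (const unsigned char *) src;
        size_t start, i;

        if( len == 0 )
            return( dst );

        /* rand() stands in for a proper RNG such as a CTR-DRBG instance. */
        start = (size_t) rand() % len;

        for( i = 0; i < len; i++ )
        {
            size_t idx = ( start + i ) % len;
            d[idx] = s[idx];
        }

        return( dst );
    }
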
diff --git a/library/cmac.c b/library/cmac.c
index 027072c..5503b91 100644
--- a/library/cmac.c
+++ b/library/cmac.c
@@ -260,7 +260,7 @@
     if( cmac_ctx->unprocessed_len > 0 &&
         ilen > block_size - cmac_ctx->unprocessed_len )
     {
-        memcpy( &cmac_ctx->unprocessed_block[cmac_ctx->unprocessed_len],
+        mbedtls_platform_memcpy( &cmac_ctx->unprocessed_block[cmac_ctx->unprocessed_len],
                 input,
                 block_size - cmac_ctx->unprocessed_len );
 
@@ -297,7 +297,7 @@
     /* If there is data left over that wasn't aligned to a block */
     if( ilen > 0 )
     {
-        memcpy( &cmac_ctx->unprocessed_block[cmac_ctx->unprocessed_len],
+        mbedtls_platform_memcpy( &cmac_ctx->unprocessed_block[cmac_ctx->unprocessed_len],
                 input,
                 ilen );
         cmac_ctx->unprocessed_len += ilen;
@@ -352,7 +352,7 @@
         goto exit;
     }
 
-    memcpy( output, state, block_size );
+    mbedtls_platform_memcpy( output, state, block_size );
 
 exit:
     /* Wipe the generated keys on the stack, and any other transients to avoid
@@ -446,7 +446,7 @@
     if( key_length == MBEDTLS_AES_BLOCK_SIZE )
     {
         /* Use key as is */
-        memcpy( int_key, key, MBEDTLS_AES_BLOCK_SIZE );
+        mbedtls_platform_memcpy( int_key, key, MBEDTLS_AES_BLOCK_SIZE );
     }
     else
     {