continue secure channel

Michele Balistreri 2022-10-03 17:39:00 +02:00
parent 429a5699a6
commit 8559bd0691
21 changed files with 167 additions and 3459 deletions

Core/Inc/crypto/aes.h Normal file

@@ -0,0 +1,16 @@
#ifndef __AES_H__
#define __AES_H__
#include "main.h"
#include <stdint.h>
#define AES_256_KEY_SIZE 32
#define AES_IV_SIZE 16
void aes_init(CRYP_HandleTypeDef* aes);
uint8_t aes_encrypt(const uint8_t* key, const uint8_t* iv, const uint8_t* data, uint32_t len, uint8_t* out);
uint8_t aes_decrypt(const uint8_t* key, const uint8_t* iv, const uint8_t* data, uint32_t len, uint8_t* out);
uint8_t aes_cmac(const uint8_t* key, const uint8_t* data, uint32_t len, uint8_t* out);
#endif
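The new header trims the AES surface to four calls around the STM32 HAL CRYP driver. A minimal usage sketch follows (hypothetical names: hcryp, key, iv, plain, cipher; it assumes the handle has already been configured for AES-256 CBC by the generated init code, and note that whether len counts bytes or 32-bit words depends on how the handle's data width is configured):

#include "crypto/aes.h"

/* Word-aligned buffers, since the CRYP peripheral reads 32-bit words. */
static uint8_t key[AES_256_KEY_SIZE] __attribute__((aligned(4)));  /* filled elsewhere */
static uint8_t iv[AES_IV_SIZE] __attribute__((aligned(4)));        /* filled elsewhere */
static uint8_t plain[AES_IV_SIZE] __attribute__((aligned(4)));     /* one 16-byte block */
static uint8_t cipher[AES_IV_SIZE] __attribute__((aligned(4)));

uint8_t example(CRYP_HandleTypeDef* hcryp) {
  aes_init(hcryp);  /* store the HAL handle used by all later calls */
  return aes_encrypt(key, iv, plain, sizeof(plain), cipher);  /* 1 on HAL_OK */
}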


@@ -1,226 +0,0 @@
/*
---------------------------------------------------------------------------
Copyright (c) 1998-2013, Brian Gladman, Worcester, UK. All rights reserved.
The redistribution and use of this software (with or without changes)
is allowed without the payment of fees or royalties provided that:
source code distributions include the above copyright notice, this
list of conditions and the following disclaimer;
binary distributions include the above copyright notice, this list
of conditions and the following disclaimer in their documentation.
This software is provided 'as is' with no explicit or implied warranties
in respect of its operation, including, but not limited to, correctness
and fitness for purpose.
---------------------------------------------------------------------------
Issue Date: 02/08/2018
This file contains the definitions required to use AES in C. See aesopt.h
for optimisation details.
*/
#ifndef _AES_H
#define _AES_H
#include <stdlib.h>
#include <stdint.h>
#define VOID_RETURN void
#define INT_RETURN int
#define ALIGN_OFFSET(x,n) (((intptr_t)(x)) & ((n) - 1))
#define ALIGN_FLOOR(x,n) ((uint8_t*)(x) - ( ((intptr_t)(x)) & ((n) - 1)))
#define ALIGN_CEIL(x,n) ((uint8_t*)(x) + (-((intptr_t)(x)) & ((n) - 1)))
#if defined(__cplusplus)
extern "C"
{
#endif
// #define AES_128 /* if a fast 128 bit key scheduler is needed */
// #define AES_192 /* if a fast 192 bit key scheduler is needed */
#define AES_256 /* if a fast 256 bit key scheduler is needed */
// #define AES_VAR /* if variable key size scheduler is needed */
#if 1
# define AES_MODES /* if support is needed for modes in the C code */
#endif /* (these will use AES_NI if it is present) */
#if 0 /* add this to make direct calls to the AES_NI */
# /* implemented CBC and CTR modes available */
# define ADD_AESNI_MODE_CALLS
#endif
/* The following must also be set in assembler files if being used */
#define AES_ENCRYPT /* if support for encryption is needed */
#define AES_DECRYPT /* if support for decryption is needed */
#define AES_BLOCK_SIZE_P2 4 /* AES block size as a power of 2 */
#define AES_BLOCK_SIZE (1 << AES_BLOCK_SIZE_P2) /* AES block size */
#define N_COLS 4 /* the number of columns in the state */
/* The key schedule length is 11, 13 or 15 16-byte blocks for 128, */
/* 192 or 256-bit keys respectively. That is 176, 208 or 240 bytes */
/* or 44, 52 or 60 32-bit words. */
#if defined( AES_VAR ) || defined( AES_256 )
#define KS_LENGTH 60
#elif defined( AES_192 )
#define KS_LENGTH 52
#else
#define KS_LENGTH 44
#endif
#define AES_RETURN INT_RETURN
/* the character array 'inf' in the following structures is used */
/* to hold AES context information. This AES code uses cx->inf.b[0] */
/* to hold the number of rounds multiplied by 16. The other three */
/* elements can be used by code that implements additional modes */
typedef union
{ uint32_t l;
uint8_t b[4];
} aes_inf;
#ifdef _MSC_VER
# pragma warning( disable : 4324 )
#endif
#if defined(_MSC_VER) && defined(_WIN64)
#define ALIGNED_(x) __declspec(align(x))
#elif defined(__GNUC__) && defined(__x86_64__)
#define ALIGNED_(x) __attribute__ ((aligned(x)))
#else
#define ALIGNED_(x)
#endif
typedef struct ALIGNED_(16)
{ uint32_t ks[KS_LENGTH];
aes_inf inf;
} aes_encrypt_ctx;
typedef struct ALIGNED_(16)
{ uint32_t ks[KS_LENGTH];
aes_inf inf;
} aes_decrypt_ctx;
#ifdef _MSC_VER
# pragma warning( default : 4324 )
#endif
/* This routine must be called before first use if non-static */
/* tables are being used */
AES_RETURN aes_init(void);
/* Key lengths in the range 16 <= key_len <= 32 are given in bytes, */
/* those in the range 128 <= key_len <= 256 are given in bits */
#if defined( AES_ENCRYPT )
#if defined( AES_128 ) || defined( AES_VAR)
AES_RETURN aes_encrypt_key128(const unsigned char *key, aes_encrypt_ctx cx[1]);
#endif
#if defined( AES_192 ) || defined( AES_VAR)
AES_RETURN aes_encrypt_key192(const unsigned char *key, aes_encrypt_ctx cx[1]);
#endif
#if defined( AES_256 ) || defined( AES_VAR)
AES_RETURN aes_encrypt_key256(const unsigned char *key, aes_encrypt_ctx cx[1]);
#endif
#if defined( AES_VAR )
AES_RETURN aes_encrypt_key(const unsigned char *key, int key_len, aes_encrypt_ctx cx[1]);
#endif
AES_RETURN aes_encrypt(const unsigned char *in, unsigned char *out, const aes_encrypt_ctx cx[1]);
#endif
#if defined( AES_DECRYPT )
#if defined( AES_128 ) || defined( AES_VAR)
AES_RETURN aes_decrypt_key128(const unsigned char *key, aes_decrypt_ctx cx[1]);
#endif
#if defined( AES_192 ) || defined( AES_VAR)
AES_RETURN aes_decrypt_key192(const unsigned char *key, aes_decrypt_ctx cx[1]);
#endif
#if defined( AES_256 ) || defined( AES_VAR)
AES_RETURN aes_decrypt_key256(const unsigned char *key, aes_decrypt_ctx cx[1]);
#endif
#if defined( AES_VAR )
AES_RETURN aes_decrypt_key(const unsigned char *key, int key_len, aes_decrypt_ctx cx[1]);
#endif
AES_RETURN aes_decrypt(const unsigned char *in, unsigned char *out, const aes_decrypt_ctx cx[1]);
#endif
#if defined( AES_MODES )
/* Multiple calls to the following subroutines for multiple block */
/* ECB, CBC, CFB, OFB and CTR mode encryption can be used to handle */
/* long messages incrementally provided that the context AND the iv */
/* are preserved between all such calls. For the ECB and CBC modes */
/* each individual call within a series of incremental calls must */
/* process only full blocks (i.e. len must be a multiple of 16) but */
/* the CFB, OFB and CTR mode calls can handle multiple incremental */
/* calls of any length. Each mode is reset when a new AES key is */
/* set but ECB needs no reset and CBC can be reset without setting */
/* a new key by setting a new IV value. To reset CFB, OFB and CTR */
/* without setting the key, aes_mode_reset() must be called and the */
/* IV must be set. NOTE: All these calls update the IV on exit so */
/* this has to be reset if a new operation with the same IV as the */
/* previous one is required (or decryption follows encryption with */
/* the same IV array). */
AES_RETURN aes_test_alignment_detection(unsigned int n);
AES_RETURN aes_ecb_encrypt(const unsigned char *ibuf, unsigned char *obuf,
int len, const aes_encrypt_ctx cx[1]);
AES_RETURN aes_ecb_decrypt(const unsigned char *ibuf, unsigned char *obuf,
int len, const aes_decrypt_ctx cx[1]);
AES_RETURN aes_cbc_encrypt(const unsigned char *ibuf, unsigned char *obuf,
int len, unsigned char *iv, const aes_encrypt_ctx cx[1]);
AES_RETURN aes_cbc_decrypt(const unsigned char *ibuf, unsigned char *obuf,
int len, unsigned char *iv, const aes_decrypt_ctx cx[1]);
AES_RETURN aes_mode_reset(aes_encrypt_ctx cx[1]);
AES_RETURN aes_cfb_encrypt(const unsigned char *ibuf, unsigned char *obuf,
int len, unsigned char *iv, aes_encrypt_ctx cx[1]);
AES_RETURN aes_cfb_decrypt(const unsigned char *ibuf, unsigned char *obuf,
int len, unsigned char *iv, aes_encrypt_ctx cx[1]);
#define aes_ofb_encrypt aes_ofb_crypt
#define aes_ofb_decrypt aes_ofb_crypt
AES_RETURN aes_ofb_crypt(const unsigned char *ibuf, unsigned char *obuf,
int len, unsigned char *iv, aes_encrypt_ctx cx[1]);
typedef void cbuf_inc(unsigned char *cbuf);
#define aes_ctr_encrypt aes_ctr_crypt
#define aes_ctr_decrypt aes_ctr_crypt
AES_RETURN aes_ctr_crypt(const unsigned char *ibuf, unsigned char *obuf,
int len, unsigned char *cbuf, cbuf_inc ctr_inc, aes_encrypt_ctx cx[1]);
void aes_ctr_cbuf_inc(unsigned char *cbuf);
#endif
#if defined(__cplusplus)
}
#endif
#endif


@@ -1,784 +0,0 @@
/*
---------------------------------------------------------------------------
Copyright (c) 1998-2013, Brian Gladman, Worcester, UK. All rights reserved.
The redistribution and use of this software (with or without changes)
is allowed without the payment of fees or royalties provided that:
source code distributions include the above copyright notice, this
list of conditions and the following disclaimer;
binary distributions include the above copyright notice, this list
of conditions and the following disclaimer in their documentation.
This software is provided 'as is' with no explicit or implied warranties
in respect of its operation, including, but not limited to, correctness
and fitness for purpose.
---------------------------------------------------------------------------
Issue Date: 20/12/2007
This file contains the compilation options for AES (Rijndael) and code
that is common across encryption, key scheduling and table generation.
OPERATION
These source code files implement the AES algorithm Rijndael designed by
Joan Daemen and Vincent Rijmen. This version is designed for the standard
block size of 16 bytes and for key sizes of 128, 192 and 256 bits (16, 24
and 32 bytes).
This version is designed for flexibility and speed using operations on
32-bit words rather than operations on bytes. It can be compiled with
either big or little endian internal byte order but is faster when the
native byte order for the processor is used.
THE CIPHER INTERFACE
The cipher interface is implemented as an array of bytes in which lower
AES bit sequence indexes map to higher numeric significance within bytes.
uint8_t (an unsigned 8-bit type)
uint32_t (an unsigned 32-bit type)
struct aes_encrypt_ctx (structure for the cipher encryption context)
struct aes_decrypt_ctx (structure for the cipher decryption context)
AES_RETURN the function return type
C subroutine calls:
AES_RETURN aes_encrypt_key128(const unsigned char *key, aes_encrypt_ctx cx[1]);
AES_RETURN aes_encrypt_key192(const unsigned char *key, aes_encrypt_ctx cx[1]);
AES_RETURN aes_encrypt_key256(const unsigned char *key, aes_encrypt_ctx cx[1]);
AES_RETURN aes_encrypt(const unsigned char *in, unsigned char *out,
const aes_encrypt_ctx cx[1]);
AES_RETURN aes_decrypt_key128(const unsigned char *key, aes_decrypt_ctx cx[1]);
AES_RETURN aes_decrypt_key192(const unsigned char *key, aes_decrypt_ctx cx[1]);
AES_RETURN aes_decrypt_key256(const unsigned char *key, aes_decrypt_ctx cx[1]);
AES_RETURN aes_decrypt(const unsigned char *in, unsigned char *out,
const aes_decrypt_ctx cx[1]);
IMPORTANT NOTE: If you are using this C interface with dynamic tables make sure that
you call aes_init() before AES is used so that the tables are initialised.
C++ aes class subroutines:
Class AESencrypt for encryption
Constructors:
AESencrypt(void)
AESencrypt(const unsigned char *key) - 128 bit key
Members:
AES_RETURN key128(const unsigned char *key)
AES_RETURN key192(const unsigned char *key)
AES_RETURN key256(const unsigned char *key)
AES_RETURN encrypt(const unsigned char *in, unsigned char *out) const
Class AESdecrypt for decryption
Constructors:
AESdecrypt(void)
AESdecrypt(const unsigned char *key) - 128 bit key
Members:
AES_RETURN key128(const unsigned char *key)
AES_RETURN key192(const unsigned char *key)
AES_RETURN key256(const unsigned char *key)
AES_RETURN decrypt(const unsigned char *in, unsigned char *out) const
*/
#if !defined( _AESOPT_H )
#define _AESOPT_H
#if defined( __cplusplus )
#include "aescpp.h"
#else
#include "aes.h"
#endif
/* PLATFORM SPECIFIC INCLUDES */
#define IS_BIG_ENDIAN 4321
#define IS_LITTLE_ENDIAN 1234
#define PLATFORM_BYTE_ORDER IS_LITTLE_ENDIAN
/* CONFIGURATION - THE USE OF DEFINES
Later in this section there are a number of defines that control the
operation of the code. In each section, the purpose of each define is
explained so that the relevant form can be included or excluded by
setting either 1's or 0's respectively on the branches of the related
#if clauses. The following local defines should not be changed.
*/
#define ENCRYPTION_IN_C 1
#define DECRYPTION_IN_C 2
#define ENC_KEYING_IN_C 4
#define DEC_KEYING_IN_C 8
#define NO_TABLES 0
#define ONE_TABLE 1
#define FOUR_TABLES 4
#define NONE 0
#define PARTIAL 1
#define FULL 2
/* --- START OF USER CONFIGURED OPTIONS --- */
/* 1. BYTE ORDER WITHIN 32 BIT WORDS
The fundamental data processing units in Rijndael are 8-bit bytes. The
input, output and key input are all enumerated arrays of bytes in which
bytes are numbered starting at zero and increasing to one less than the
number of bytes in the array in question. This enumeration is only used
for naming bytes and does not imply any adjacency or order relationship
from one byte to another. When these inputs and outputs are considered
as bit sequences, bits 8*n to 8*n+7 of the bit sequence are mapped to
byte[n] with bit 8n+i in the sequence mapped to bit 7-i within the byte.
In this implementation bits are numbered from 0 to 7 starting at the
numerically least significant end of each byte (bit n represents 2^n).
However, Rijndael can be implemented more efficiently using 32-bit
words by packing bytes into words so that bytes 4*n to 4*n+3 are placed
into word[n]. While in principle these bytes can be assembled into words
in any positions, this implementation only supports the two formats in
which bytes in adjacent positions within words also have adjacent byte
numbers. This order is called big-endian if the lowest numbered bytes
in words have the highest numeric significance and little-endian if the
opposite applies.
This code can work in either order irrespective of the order used by the
machine on which it runs. Normally the internal byte order will be set
to the order of the processor on which the code is to be run but this
define can be used to reverse this in special situations
WARNING: Assembler code versions rely on PLATFORM_BYTE_ORDER being set.
This define will hence be redefined later (in section 4) if necessary
*/
#if 1
# define ALGORITHM_BYTE_ORDER PLATFORM_BYTE_ORDER
#elif 0
# define ALGORITHM_BYTE_ORDER IS_LITTLE_ENDIAN
#elif 0
# define ALGORITHM_BYTE_ORDER IS_BIG_ENDIAN
#else
# error The algorithm byte order is not defined
#endif
/* 2. Intel AES AND VIA ACE SUPPORT */
#if defined( __GNUC__ ) && defined( __i386__ ) && !defined(__BEOS__) \
|| defined( _WIN32 ) && defined( _M_IX86 ) && !(defined( _WIN64 ) \
|| defined( _WIN32_WCE ) || defined( _MSC_VER ) && ( _MSC_VER <= 800 ))
# define VIA_ACE_POSSIBLE
#endif
/* AESNI is supported by all Windows x64 compilers, but for Linux/GCC
we have to test for SSE 2, SSE 3 and AES before enabling it */
#if !defined( INTEL_AES_POSSIBLE )
# if defined( _WIN64 ) && defined( _MSC_VER ) \
|| defined( __GNUC__ ) && defined( __x86_64__ ) && \
defined( __SSE2__ ) && defined( __SSE3__ ) && \
defined( __AES__ )
# define INTEL_AES_POSSIBLE
# endif
#endif
/* Define this option if support for the Intel AESNI is required
If USE_INTEL_AES_IF_PRESENT is defined then AESNI will be used
if it is detected (both present and enabled).
AESNI uses a decryption key schedule with the first decryption
round key at the high end of the key schedule with the following
round keys at lower positions in memory. So AES_REV_DKS must NOT
be defined when AESNI will be used. Although it is unlikely that
assembler code will be used with an AESNI build, if it is then
AES_REV_DKS must NOT be defined when the assembler files are
built (the definition of USE_INTEL_AES_IF_PRESENT in the assembler
code files must match that here if they are used).
*/
#if 0 && defined( INTEL_AES_POSSIBLE ) && !defined( USE_INTEL_AES_IF_PRESENT )
# define USE_INTEL_AES_IF_PRESENT
#endif
/* Define this option if support for the VIA ACE is required. This uses
inline assembler instructions and is only implemented for the Microsoft,
Intel and GCC compilers. If VIA ACE is known to be present, then defining
ASSUME_VIA_ACE_PRESENT will remove the ordinary encryption/decryption
code. If USE_VIA_ACE_IF_PRESENT is defined then VIA ACE will be used if
it is detected (both present and enabled) but the normal AES code will
also be present.
When VIA ACE is to be used, all AES encryption contexts MUST be 16 byte
aligned; other input/output buffers do not need to be 16 byte aligned
but there are very large performance gains if this can be arranged.
VIA ACE also requires the decryption key schedule to be in reverse
order (which later checks below ensure).
AES_REV_DKS must be set for assembler code used with a VIA ACE build
*/
#if 0 && defined( VIA_ACE_POSSIBLE ) && !defined( USE_VIA_ACE_IF_PRESENT )
# define USE_VIA_ACE_IF_PRESENT
#endif
#if 0 && defined( VIA_ACE_POSSIBLE ) && !defined( ASSUME_VIA_ACE_PRESENT )
# define ASSUME_VIA_ACE_PRESENT
# endif
/* 3. ASSEMBLER SUPPORT
This define (which can be on the command line) enables the use of the
assembler code routines for encryption, decryption and key scheduling
as follows:
ASM_X86_V1C uses the assembler (aes_x86_v1.asm) with large tables for
encryption and decryption and but with key scheduling in C
ASM_X86_V2 uses assembler (aes_x86_v2.asm) with compressed tables for
encryption, decryption and key scheduling
ASM_X86_V2C uses assembler (aes_x86_v2.asm) with compressed tables for
encryption and decryption and but with key scheduling in C
ASM_AMD64_C uses assembler (aes_amd64.asm) with compressed tables for
encryption and decryption and but with key scheduling in C
Change one 'if 0' below to 'if 1' to select the version or define
as a compilation option.
*/
#if 0 && !defined( ASM_X86_V1C )
# define ASM_X86_V1C
#elif 0 && !defined( ASM_X86_V2 )
# define ASM_X86_V2
#elif 0 && !defined( ASM_X86_V2C )
# define ASM_X86_V2C
#elif 0 && !defined( ASM_AMD64_C )
# define ASM_AMD64_C
#endif
#if defined( __i386 ) || defined( _M_IX86 )
# define A32_
#elif defined( __x86_64__ ) || defined( _M_X64 )
# define A64_
#endif
#if (defined ( ASM_X86_V1C ) || defined( ASM_X86_V2 ) || defined( ASM_X86_V2C )) \
&& !defined( A32_ ) || defined( ASM_AMD64_C ) && !defined( A64_ )
# error Assembler code is only available for x86 and AMD64 systems
#endif
/* 4. FAST INPUT/OUTPUT OPERATIONS.
On some machines it is possible to improve speed by transferring the
bytes in the input and output arrays to and from the internal 32-bit
variables by addressing these arrays as if they are arrays of 32-bit
words. On some machines this will always be possible but there may
be a large performance penalty if the byte arrays are not aligned on
the normal word boundaries. On other machines this technique will
lead to memory access errors when such 32-bit word accesses are not
properly aligned. The option SAFE_IO avoids such problems but will
often be slower on those machines that support misaligned access
(especially so if care is taken to align the input and output byte
arrays on 32-bit word boundaries). If SAFE_IO is not defined it is
assumed that access to byte arrays as if they are arrays of 32-bit
words will not cause problems when such accesses are misaligned.
*/
#if 1 && !defined( _MSC_VER )
# define SAFE_IO
#endif
/* 5. LOOP UNROLLING
The code for encryption and decryption cycles through a number of rounds
that can be implemented either in a loop or by expanding the code into a
long sequence of instructions, the latter producing a larger program but
one that will often be much faster. The latter is called loop unrolling.
There are also potential speed advantages in expanding two iterations in
a loop with half the number of iterations, which is called partial loop
unrolling. The following options allow partial or full loop unrolling
to be set independently for encryption and decryption
*/
#if 1
# define ENC_UNROLL FULL
#elif 0
# define ENC_UNROLL PARTIAL
#else
# define ENC_UNROLL NONE
#endif
#if 1
# define DEC_UNROLL FULL
#elif 0
# define DEC_UNROLL PARTIAL
#else
# define DEC_UNROLL NONE
#endif
#if 1
# define ENC_KS_UNROLL
#endif
#if 1
# define DEC_KS_UNROLL
#endif
/* 6. FAST FINITE FIELD OPERATIONS
If this section is included, tables are used to provide faster finite
field arithmetic (this has no effect if STATIC_TABLES is defined).
*/
#if 1
# define FF_TABLES
#endif
/* 7. INTERNAL STATE VARIABLE FORMAT
The internal state of Rijndael is stored in a number of local 32-bit
word variables which can be defined either as an array or as individually
named variables. Include this section if you want to store these local
variables in arrays. Otherwise individual local variables will be used.
*/
#if 1
# define ARRAYS
#endif
/* 8. FIXED OR DYNAMIC TABLES
When this section is included the tables used by the code are compiled
statically into the binary file. Otherwise the subroutine aes_init()
must be called to compute them before the code is first used.
*/
#if 1 && !(defined( _MSC_VER ) && ( _MSC_VER <= 800 ))
# define STATIC_TABLES
#endif
/* 9. MASKING OR CASTING FROM LONGER VALUES TO BYTES
In some systems it is better to mask longer values to extract bytes
rather than using a cast. This option allows this choice.
*/
#if 0
# define to_byte(x) ((uint8_t)(x))
#else
# define to_byte(x) ((x) & 0xff)
#endif
/* 10. TABLE ALIGNMENT
On some systems speed will be improved by aligning the AES large lookup
tables on particular boundaries. This define should be set to a power of
two giving the desired alignment. It can be left undefined if alignment
is not needed. This option is specific to the Microsoft VC++ compiler -
it seems to sometimes cause trouble for the VC++ version 6 compiler.
*/
#if 1 && defined( _MSC_VER ) && ( _MSC_VER >= 1300 )
# define TABLE_ALIGN 32
#endif
/* 11. REDUCE CODE AND TABLE SIZE
This replaces some expanded macros with function calls if AES_ASM_V2 or
AES_ASM_V2C are defined
*/
#if 1 && (defined( ASM_X86_V2 ) || defined( ASM_X86_V2C ))
# define REDUCE_CODE_SIZE
#endif
/* 12. TABLE OPTIONS
This cipher proceeds by repeating in a number of cycles known as 'rounds'
which are implemented by a round function which can optionally be speeded
up using tables. The basic tables are each 256 32-bit words, with either
one or four tables being required for each round function depending on
how much speed is required. The encryption and decryption round functions
are different and the last encryption and decryption round functions are
different again making four different round functions in all.
This means that:
1. Normal encryption and decryption rounds can each use either 0, 1
or 4 tables and table spaces of 0, 1024 or 4096 bytes each.
2. The last encryption and decryption rounds can also use either 0, 1
or 4 tables and table spaces of 0, 1024 or 4096 bytes each.
Include or exclude the appropriate definitions below to set the number
of tables used by this implementation.
*/
#if 1 /* set tables for the normal encryption round */
# define ENC_ROUND FOUR_TABLES
#elif 0
# define ENC_ROUND ONE_TABLE
#else
# define ENC_ROUND NO_TABLES
#endif
#if 1 /* set tables for the last encryption round */
# define LAST_ENC_ROUND FOUR_TABLES
#elif 0
# define LAST_ENC_ROUND ONE_TABLE
#else
# define LAST_ENC_ROUND NO_TABLES
#endif
#if 1 /* set tables for the normal decryption round */
# define DEC_ROUND FOUR_TABLES
#elif 0
# define DEC_ROUND ONE_TABLE
#else
# define DEC_ROUND NO_TABLES
#endif
#if 1 /* set tables for the last decryption round */
# define LAST_DEC_ROUND FOUR_TABLES
#elif 0
# define LAST_DEC_ROUND ONE_TABLE
#else
# define LAST_DEC_ROUND NO_TABLES
#endif
/* The decryption key schedule can be speeded up with tables in the same
way that the round functions can. Include or exclude the following
defines to set this requirement.
*/
#if 1
# define KEY_SCHED FOUR_TABLES
#elif 0
# define KEY_SCHED ONE_TABLE
#else
# define KEY_SCHED NO_TABLES
#endif
/* ---- END OF USER CONFIGURED OPTIONS ---- */
/* VIA ACE support is only available for VC++ and GCC */
#if !defined( _MSC_VER ) && !defined( __GNUC__ )
# if defined( ASSUME_VIA_ACE_PRESENT )
# undef ASSUME_VIA_ACE_PRESENT
# endif
# if defined( USE_VIA_ACE_IF_PRESENT )
# undef USE_VIA_ACE_IF_PRESENT
# endif
#endif
#if defined( ASSUME_VIA_ACE_PRESENT ) && !defined( USE_VIA_ACE_IF_PRESENT )
# define USE_VIA_ACE_IF_PRESENT
#endif
/* define to reverse decryption key schedule */
#if 1 || defined( USE_VIA_ACE_IF_PRESENT ) && !defined ( AES_REV_DKS )
# define AES_REV_DKS
#endif
/* Intel AESNI uses a decryption key schedule in the encryption order */
#if defined( USE_INTEL_AES_IF_PRESENT ) && defined ( AES_REV_DKS )
# undef AES_REV_DKS
#endif
/* Assembler support requires the use of platform byte order */
#if ( defined( ASM_X86_V1C ) || defined( ASM_X86_V2C ) || defined( ASM_AMD64_C ) ) \
&& (ALGORITHM_BYTE_ORDER != PLATFORM_BYTE_ORDER)
# undef ALGORITHM_BYTE_ORDER
# define ALGORITHM_BYTE_ORDER PLATFORM_BYTE_ORDER
#endif
/* In this implementation the columns of the state array are each held in
32-bit words. The state array can be held in various ways: in an array
of words, in a number of individual word variables or in a number of
processor registers. The following define maps a variable name x and
a column number c to the way the state array variable is to be held.
The first define below maps the state into an array x[c] whereas the
second form maps the state into a number of individual variables x0,
x1, etc. Another form could map individual state columns to machine
register names.
*/
#if defined( ARRAYS )
# define s(x,c) x[c]
#else
# define s(x,c) x##c
#endif
/* This implementation provides subroutines for encryption, decryption
and for setting the three key lengths (separately) for encryption
and decryption. Since not all functions are needed, masks are set
up here to determine which will be implemented in C
*/
#if !defined( AES_ENCRYPT )
# define EFUNCS_IN_C 0
#elif defined( ASSUME_VIA_ACE_PRESENT ) || defined( ASM_X86_V1C ) \
|| defined( ASM_X86_V2C ) || defined( ASM_AMD64_C )
# define EFUNCS_IN_C ENC_KEYING_IN_C
#elif !defined( ASM_X86_V2 )
# define EFUNCS_IN_C ( ENCRYPTION_IN_C | ENC_KEYING_IN_C )
#else
# define EFUNCS_IN_C 0
#endif
#if !defined( AES_DECRYPT )
# define DFUNCS_IN_C 0
#elif defined( ASSUME_VIA_ACE_PRESENT ) || defined( ASM_X86_V1C ) \
|| defined( ASM_X86_V2C ) || defined( ASM_AMD64_C )
# define DFUNCS_IN_C DEC_KEYING_IN_C
#elif !defined( ASM_X86_V2 )
# define DFUNCS_IN_C ( DECRYPTION_IN_C | DEC_KEYING_IN_C )
#else
# define DFUNCS_IN_C 0
#endif
#define FUNCS_IN_C ( EFUNCS_IN_C | DFUNCS_IN_C )
/* END OF CONFIGURATION OPTIONS */
#define RC_LENGTH (5 * (AES_BLOCK_SIZE / 4 - 2))
/* Disable or report errors on some combinations of options */
#if ENC_ROUND == NO_TABLES && LAST_ENC_ROUND != NO_TABLES
# undef LAST_ENC_ROUND
# define LAST_ENC_ROUND NO_TABLES
#elif ENC_ROUND == ONE_TABLE && LAST_ENC_ROUND == FOUR_TABLES
# undef LAST_ENC_ROUND
# define LAST_ENC_ROUND ONE_TABLE
#endif
#if ENC_ROUND == NO_TABLES && ENC_UNROLL != NONE
# undef ENC_UNROLL
# define ENC_UNROLL NONE
#endif
#if DEC_ROUND == NO_TABLES && LAST_DEC_ROUND != NO_TABLES
# undef LAST_DEC_ROUND
# define LAST_DEC_ROUND NO_TABLES
#elif DEC_ROUND == ONE_TABLE && LAST_DEC_ROUND == FOUR_TABLES
# undef LAST_DEC_ROUND
# define LAST_DEC_ROUND ONE_TABLE
#endif
#if DEC_ROUND == NO_TABLES && DEC_UNROLL != NONE
# undef DEC_UNROLL
# define DEC_UNROLL NONE
#endif
#if defined( bswap32 )
# define aes_sw32 bswap32
#elif defined( bswap_32 )
# define aes_sw32 bswap_32
#else
# define brot(x,n) (((uint32_t)(x) << n) | ((uint32_t)(x) >> (32 - n)))
# define aes_sw32(x) ((brot((x),8) & 0x00ff00ff) | (brot((x),24) & 0xff00ff00))
#endif
/* upr(x,n): rotates bytes within words by n positions, moving bytes to
higher index positions with wrap around into low positions
ups(x,n): moves bytes by n positions to higher index positions in
words but without wrap around
bval(x,n): extracts a byte from a word
WARNING: The definitions given here are intended only for use with
unsigned variables and with shift counts that are compile
time constants
*/
#if ( ALGORITHM_BYTE_ORDER == IS_LITTLE_ENDIAN )
# define upr(x,n) (((uint32_t)(x) << (8 * (n))) | ((uint32_t)(x) >> (32 - 8 * (n))))
# define ups(x,n) ((uint32_t) (x) << (8 * (n)))
# define bval(x,n) to_byte((x) >> (8 * (n)))
# define bytes2word(b0, b1, b2, b3) \
(((uint32_t)(b3) << 24) | ((uint32_t)(b2) << 16) | ((uint32_t)(b1) << 8) | (b0))
#endif
#if ( ALGORITHM_BYTE_ORDER == IS_BIG_ENDIAN )
# define upr(x,n) (((uint32_t)(x) >> (8 * (n))) | ((uint32_t)(x) << (32 - 8 * (n))))
# define ups(x,n) ((uint32_t) (x) >> (8 * (n)))
# define bval(x,n) to_byte((x) >> (24 - 8 * (n)))
# define bytes2word(b0, b1, b2, b3) \
(((uint32_t)(b0) << 24) | ((uint32_t)(b1) << 16) | ((uint32_t)(b2) << 8) | (b3))
#endif
#if defined( SAFE_IO )
# define word_in(x,c) bytes2word(((const uint8_t*)(x)+4*c)[0], ((const uint8_t*)(x)+4*c)[1], \
((const uint8_t*)(x)+4*c)[2], ((const uint8_t*)(x)+4*c)[3])
# define word_out(x,c,v) { ((uint8_t*)(x)+4*c)[0] = bval(v,0); ((uint8_t*)(x)+4*c)[1] = bval(v,1); \
((uint8_t*)(x)+4*c)[2] = bval(v,2); ((uint8_t*)(x)+4*c)[3] = bval(v,3); }
#elif ( ALGORITHM_BYTE_ORDER == PLATFORM_BYTE_ORDER )
# define word_in(x,c) (*((uint32_t*)(x)+(c)))
# define word_out(x,c,v) (*((uint32_t*)(x)+(c)) = (v))
#else
# define word_in(x,c) aes_sw32(*((uint32_t*)(x)+(c)))
# define word_out(x,c,v) (*((uint32_t*)(x)+(c)) = aes_sw32(v))
#endif
/* the finite field modular polynomial and elements */
#define WPOLY 0x011b
#define BPOLY 0x1b
/* multiply four bytes in GF(2^8) by 'x' {02} in parallel */
#define gf_c1 0x80808080
#define gf_c2 0x7f7f7f7f
#define gf_mulx(x) ((((x) & gf_c2) << 1) ^ ((((x) & gf_c1) >> 7) * BPOLY))
/* The following defines provide alternative definitions of gf_mulx that might
give improved performance if a fast 32-bit multiply is not available. Note
that a temporary variable u needs to be defined where gf_mulx is used.
#define gf_mulx(x) (u = (x) & gf_c1, u |= (u >> 1), ((x) & gf_c2) << 1) ^ ((u >> 3) | (u >> 6))
#define gf_c4 (0x01010101 * BPOLY)
#define gf_mulx(x) (u = (x) & gf_c1, ((x) & gf_c2) << 1) ^ ((u - (u >> 7)) & gf_c4)
*/
/* Work out which tables are needed for the different options */
#if defined( ASM_X86_V1C )
# if defined( ENC_ROUND )
# undef ENC_ROUND
# endif
# define ENC_ROUND FOUR_TABLES
# if defined( LAST_ENC_ROUND )
# undef LAST_ENC_ROUND
# endif
# define LAST_ENC_ROUND FOUR_TABLES
# if defined( DEC_ROUND )
# undef DEC_ROUND
# endif
# define DEC_ROUND FOUR_TABLES
# if defined( LAST_DEC_ROUND )
# undef LAST_DEC_ROUND
# endif
# define LAST_DEC_ROUND FOUR_TABLES
# if defined( KEY_SCHED )
# undef KEY_SCHED
# define KEY_SCHED FOUR_TABLES
# endif
#endif
#if ( FUNCS_IN_C & ENCRYPTION_IN_C ) || defined( ASM_X86_V1C )
# if ENC_ROUND == ONE_TABLE
# define FT1_SET
# elif ENC_ROUND == FOUR_TABLES
# define FT4_SET
# else
# define SBX_SET
# endif
# if LAST_ENC_ROUND == ONE_TABLE
# define FL1_SET
# elif LAST_ENC_ROUND == FOUR_TABLES
# define FL4_SET
# elif !defined( SBX_SET )
# define SBX_SET
# endif
#endif
#if ( FUNCS_IN_C & DECRYPTION_IN_C ) || defined( ASM_X86_V1C )
# if DEC_ROUND == ONE_TABLE
# define IT1_SET
# elif DEC_ROUND == FOUR_TABLES
# define IT4_SET
# else
# define ISB_SET
# endif
# if LAST_DEC_ROUND == ONE_TABLE
# define IL1_SET
# elif LAST_DEC_ROUND == FOUR_TABLES
# define IL4_SET
# elif !defined(ISB_SET)
# define ISB_SET
# endif
#endif
#if !(defined( REDUCE_CODE_SIZE ) && (defined( ASM_X86_V2 ) || defined( ASM_X86_V2C )))
# if ((FUNCS_IN_C & ENC_KEYING_IN_C) || (FUNCS_IN_C & DEC_KEYING_IN_C))
# if KEY_SCHED == ONE_TABLE
# if !defined( FL1_SET ) && !defined( FL4_SET )
# define LS1_SET
# endif
# elif KEY_SCHED == FOUR_TABLES
# if !defined( FL4_SET )
# define LS4_SET
# endif
# elif !defined( SBX_SET )
# define SBX_SET
# endif
# endif
# if (FUNCS_IN_C & DEC_KEYING_IN_C)
# if KEY_SCHED == ONE_TABLE
# define IM1_SET
# elif KEY_SCHED == FOUR_TABLES
# define IM4_SET
# elif !defined( SBX_SET )
# define SBX_SET
# endif
# endif
#endif
/* generic definitions of Rijndael macros that use tables */
#define no_table(x,box,vf,rf,c) bytes2word( \
box[bval(vf(x,0,c),rf(0,c))], \
box[bval(vf(x,1,c),rf(1,c))], \
box[bval(vf(x,2,c),rf(2,c))], \
box[bval(vf(x,3,c),rf(3,c))])
#define one_table(x,op,tab,vf,rf,c) \
( tab[bval(vf(x,0,c),rf(0,c))] \
^ op(tab[bval(vf(x,1,c),rf(1,c))],1) \
^ op(tab[bval(vf(x,2,c),rf(2,c))],2) \
^ op(tab[bval(vf(x,3,c),rf(3,c))],3))
#define four_tables(x,tab,vf,rf,c) \
( tab[0][bval(vf(x,0,c),rf(0,c))] \
^ tab[1][bval(vf(x,1,c),rf(1,c))] \
^ tab[2][bval(vf(x,2,c),rf(2,c))] \
^ tab[3][bval(vf(x,3,c),rf(3,c))])
#define vf1(x,r,c) (x)
#define rf1(r,c) (r)
#define rf2(r,c) ((8+r-c)&3)
/* perform forward and inverse column mix operation on four bytes in long word x in */
/* parallel. NOTE: x must be a simple variable, NOT an expression in these macros. */
#if !(defined( REDUCE_CODE_SIZE ) && (defined( ASM_X86_V2 ) || defined( ASM_X86_V2C )))
#if defined( FM4_SET ) /* not currently used */
# define fwd_mcol(x) four_tables(x,t_use(f,m),vf1,rf1,0)
#elif defined( FM1_SET ) /* not currently used */
# define fwd_mcol(x) one_table(x,upr,t_use(f,m),vf1,rf1,0)
#else
# define dec_fmvars uint32_t g2
# define fwd_mcol(x) (g2 = gf_mulx(x), g2 ^ upr((x) ^ g2, 3) ^ upr((x), 2) ^ upr((x), 1))
#endif
#if defined( IM4_SET )
# define inv_mcol(x) four_tables(x,t_use(i,m),vf1,rf1,0)
#elif defined( IM1_SET )
# define inv_mcol(x) one_table(x,upr,t_use(i,m),vf1,rf1,0)
#else
# define dec_imvars uint32_t g2, g4, g9
# define inv_mcol(x) (g2 = gf_mulx(x), g4 = gf_mulx(g2), g9 = (x) ^ gf_mulx(g4), g4 ^= g9, \
(x) ^ g2 ^ g4 ^ upr(g2 ^ g9, 3) ^ upr(g4, 2) ^ upr(g9, 1))
#endif
#if defined( FL4_SET )
# define ls_box(x,c) four_tables(x,t_use(f,l),vf1,rf2,c)
#elif defined( LS4_SET )
# define ls_box(x,c) four_tables(x,t_use(l,s),vf1,rf2,c)
#elif defined( FL1_SET )
# define ls_box(x,c) one_table(x,upr,t_use(f,l),vf1,rf2,c)
#elif defined( LS1_SET )
# define ls_box(x,c) one_table(x,upr,t_use(l,s),vf1,rf2,c)
#else
# define ls_box(x,c) no_table(x,t_use(s,box),vf1,rf2,c)
#endif
#endif
#if defined( ASM_X86_V1C ) && defined( AES_DECRYPT ) && !defined( ISB_SET )
# define ISB_SET
#endif
#endif


@@ -1,173 +0,0 @@
/*
---------------------------------------------------------------------------
Copyright (c) 1998-2013, Brian Gladman, Worcester, UK. All rights reserved.
The redistribution and use of this software (with or without changes)
is allowed without the payment of fees or royalties provided that:
source code distributions include the above copyright notice, this
list of conditions and the following disclaimer;
binary distributions include the above copyright notice, this list
of conditions and the following disclaimer in their documentation.
This software is provided 'as is' with no explicit or implied warranties
in respect of its operation, including, but not limited to, correctness
and fitness for purpose.
---------------------------------------------------------------------------
Issue Date: 20/12/2007
This file contains the code for declaring the tables needed to implement
AES. The file aesopt.h is assumed to be included before this header file.
If there are no global variables, the definitions here can be used to put
the AES tables in a structure so that a pointer can then be added to the
AES context to pass them to the AES routines that need them. If this
facility is used, the calling program has to ensure that this pointer is
managed appropriately. In particular, the value of the t_dec(in,it) item
in the table structure must be set to zero in order to ensure that the
tables are initialised. In practice the three code sequences in aeskey.c
that control the calls to aes_init() and the aes_init() routine itself will
have to be changed for a specific implementation. If global variables are
available it will generally be preferable to use them with the precomputed
STATIC_TABLES option that uses static global tables.
The following defines can be used to control the way the tables
are defined, initialised and used in embedded environments that
require special features for these purposes
the 't_dec' construction is used to declare fixed table arrays
the 't_set' construction is used to set fixed table values
the 't_use' construction is used to access fixed table values
256 byte tables:
t_xxx(s,box) => forward S box
t_xxx(i,box) => inverse S box
256 32-bit word OR 4 x 256 32-bit word tables:
t_xxx(f,n) => forward normal round
t_xxx(f,l) => forward last round
t_xxx(i,n) => inverse normal round
t_xxx(i,l) => inverse last round
t_xxx(l,s) => key schedule table
t_xxx(i,m) => key schedule table
Other variables and tables:
t_xxx(r,c) => the rcon table
*/
#if !defined( _AESTAB_H )
#define _AESTAB_H
#if defined(__cplusplus)
extern "C" {
#endif
#define t_dec(m,n) t_##m##n
#define t_set(m,n) t_##m##n
#define t_use(m,n) t_##m##n
#if defined(STATIC_TABLES)
# if !defined( __GNUC__ ) && (defined( __MSDOS__ ) || defined( __WIN16__ ))
/* make tables far data to avoid using too much DGROUP space (PG) */
# define CONST const far
# else
# define CONST const
# endif
#else
# define CONST
#endif
#if defined(DO_TABLES)
# define EXTERN
#else
# define EXTERN extern
#endif
#if defined(_MSC_VER) && defined(TABLE_ALIGN)
#define ALIGN __declspec(align(TABLE_ALIGN))
#else
#define ALIGN
#endif
#if defined( __WATCOMC__ ) && ( __WATCOMC__ >= 1100 )
# define XP_DIR __cdecl
#else
# define XP_DIR
#endif
#if defined(DO_TABLES) && defined(STATIC_TABLES)
#define d_1(t,n,b,e) EXTERN ALIGN CONST XP_DIR t n[256] = b(e)
#define d_4(t,n,b,e,f,g,h) EXTERN ALIGN CONST XP_DIR t n[4][256] = { b(e), b(f), b(g), b(h) }
EXTERN ALIGN CONST uint32_t t_dec(r,c)[RC_LENGTH] = rc_data(w0);
#else
#define d_1(t,n,b,e) EXTERN ALIGN CONST XP_DIR t n[256]
#define d_4(t,n,b,e,f,g,h) EXTERN ALIGN CONST XP_DIR t n[4][256]
EXTERN ALIGN CONST uint32_t t_dec(r,c)[RC_LENGTH];
#endif
#if defined( SBX_SET )
d_1(uint8_t, t_dec(s,box), sb_data, h0);
#endif
#if defined( ISB_SET )
d_1(uint8_t, t_dec(i,box), isb_data, h0);
#endif
#if defined( FT1_SET )
d_1(uint32_t, t_dec(f,n), sb_data, u0);
#endif
#if defined( FT4_SET )
d_4(uint32_t, t_dec(f,n), sb_data, u0, u1, u2, u3);
#endif
#if defined( FL1_SET )
d_1(uint32_t, t_dec(f,l), sb_data, w0);
#endif
#if defined( FL4_SET )
d_4(uint32_t, t_dec(f,l), sb_data, w0, w1, w2, w3);
#endif
#if defined( IT1_SET )
d_1(uint32_t, t_dec(i,n), isb_data, v0);
#endif
#if defined( IT4_SET )
d_4(uint32_t, t_dec(i,n), isb_data, v0, v1, v2, v3);
#endif
#if defined( IL1_SET )
d_1(uint32_t, t_dec(i,l), isb_data, w0);
#endif
#if defined( IL4_SET )
d_4(uint32_t, t_dec(i,l), isb_data, w0, w1, w2, w3);
#endif
#if defined( LS1_SET )
#if defined( FL1_SET )
#undef LS1_SET
#else
d_1(uint32_t, t_dec(l,s), sb_data, w0);
#endif
#endif
#if defined( LS4_SET )
#if defined( FL4_SET )
#undef LS4_SET
#else
d_4(uint32_t, t_dec(l,s), sb_data, w0, w1, w2, w3);
#endif
#endif
#if defined( IM1_SET )
d_1(uint32_t, t_dec(i,m), mm_data, v0);
#endif
#if defined( IM4_SET )
d_4(uint32_t, t_dec(i,m), mm_data, v0, v1, v2, v3);
#endif
#if defined(__cplusplus)
}
#endif
#endif

Core/Inc/crypto/util.h Normal file

@@ -0,0 +1,20 @@
#ifndef __PAD_H__
#define __PAD_H__
#include <string.h>
#include <stdint.h>
uint32_t pad_iso9797_m1(uint8_t* data, uint8_t plen, uint32_t size);
uint32_t unpad_iso9797_m1(uint8_t* data, uint32_t size);
static inline int memcmp_ct(const uint8_t* a, const uint8_t* b, size_t length) {
  int compareSum = 0;

  for (size_t i = 0; i < length; i++) {
    compareSum |= a[i] ^ b[i];
  }

  return compareSum;
}
#endif
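Besides the ISO 9797-1 Method 1 padding helpers (Method 1, per the standard, pads with zero bytes up to a block boundary), the header adds a constant-time comparison for MAC checks: unlike memcmp, the loop touches every byte no matter where the first mismatch occurs, so timing does not reveal its position. A usage sketch (hypothetical function name):

/* Sketch: verify a 16-byte MAC in constant time; returns 1 on match. */
static inline uint8_t mac_is_valid(const uint8_t expected[16], const uint8_t received[16]) {
  return memcmp_ct(expected, received, 16) == 0;
}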


@@ -10,14 +10,14 @@
#define APDU_P2(__APDU__) ((__APDU__)->data[3])
#define APDU_LC(__APDU__) ((__APDU__)->has_lc ? (__APDU__)->data[4] : -1)
#define APDU_DATA(__APDU__)(&((__APDU__)->data[5]))
#define APDU_RESP(__APDU__)((__APDU__)->data)
#define APDU_RESP(__APDU__)(&((__APDU__)->data[1]))
#define APDU_LE(__APDU__) ((__APDU__)->has_le ? (__APDU__)->data[(__APDU__)->has_lc ? 5 : 4] : -1)
#define APDU_SET_LC(__APDU__, __val__) {(__APDU__)->data[4] = __val__; (__APDU__)->has_lc = 1;}
#define APDU_SET_LE(__APDU__, __val__) {(__APDU__)->data[5 + APDU_LC(__APDU__)] = __val__; (__APDU__)->has_le = 1;}
#define APDU_RESET(__APDU__) {(__APDU__)->has_lc = 0; (__APDU__)->has_le = 0; (__APDU__)->lr = 0;}
#define APDU_LEN(__APDU__) (4 + (__APDU__)->has_le + ((__APDU__)->has_lc ? (1 + (__APDU__)->data[4]) : 0))
#define APDU_SW1(__APDU__) ((__APDU__)->data[(__APDU__)->lr - 2])
#define APDU_SW2(__APDU__) ((__APDU__)->data[(__APDU__)->lr - 1])
#define APDU_SW1(__APDU__) ((__APDU__)->data[(__APDU__)->lr - 1])
#define APDU_SW2(__APDU__) ((__APDU__)->data[(__APDU__)->lr])
#define APDU_SW(__APDU__) ((APDU_SW1(__APDU__) << 8) | APDU_SW2(__APDU__))
#define APDU_BUF_LEN (255 + 6)
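The response-side macros shift by one byte in this hunk: response data now starts at data[1], and the status word indices move with it. For reference, APDU_SW assembles the two trailing status bytes into one 16-bit word (hypothetical apdu variable):

/* Illustration: a response ending in SW1 = 0x90, SW2 = 0x00 yields
   APDU_SW(&apdu) == 0x9000, the ISO 7816-4 success status. */
uint16_t sw = APDU_SW(&apdu);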
@@ -49,7 +49,7 @@ typedef struct {
uint32_t etu_10ns;
} SmartCard;
typedef struct {
typedef struct __attribute__((packed,aligned(4))) {
uint8_t has_lc;
uint8_t has_le;
uint8_t lr;
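Packing the struct removes compiler padding between its byte-sized header fields and the data buffer, so the structure can be overlaid on the raw wire bytes, while aligned(4) keeps the object itself on a word boundary for 32-bit access. An illustrative compile-time check (assuming the typedef is named APDU, as the accessor macros suggest):

_Static_assert(_Alignof(APDU) == 4, "APDU must stay word-aligned");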


@@ -3,11 +3,13 @@
#include "pairing.h"
#include "smartcard.h"
#include "crypto/aes.h"
#define AES_256_KEY_SIZE 32
#define AES_IV_SIZE 16
#define SC_PAD AES_IV_SIZE
typedef struct __attribute__((packed)) {
#define SC_BUF(__NAME__, __LEN__) uint8_t __NAME__[__LEN__+SC_PAD] __attribute__((aligned(4)))
typedef struct __attribute__((packed, aligned(4))) {
uint8_t encKey[AES_256_KEY_SIZE];
uint8_t macKey[AES_256_KEY_SIZE];
uint8_t iv[AES_IV_SIZE];
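SC_BUF declares secure-channel buffers with one extra block (SC_PAD) of headroom for ISO 9797-1 padding, word-aligned for the CRYP peripheral. For example (illustrative name):

SC_BUF(sc_buf, 64);  /* expands to: uint8_t sc_buf[64 + SC_PAD] __attribute__((aligned(4))); */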

Core/Src/crypto/aes.c Normal file

@@ -0,0 +1,44 @@
#include <string.h>
#include "aes.h"
#define AES_TIMEOUT 1000
static const uint8_t cmac_iv[AES_IV_SIZE] __attribute__((aligned(4))) = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
static CRYP_HandleTypeDef* _aes;
void aes_init(CRYP_HandleTypeDef* aes) {
  _aes = aes;
}

uint8_t aes_encrypt(const uint8_t* key, const uint8_t* iv, const uint8_t* data, uint32_t len, uint8_t* out) {
  _aes->Init.pKey = (uint32_t*) key;
  _aes->Init.pInitVect = (uint32_t*) iv;

  return HAL_CRYP_Encrypt(_aes, (uint32_t*)data, len, (uint32_t*)out, AES_TIMEOUT) == HAL_OK;
}

uint8_t aes_decrypt(const uint8_t* key, const uint8_t* iv, const uint8_t* data, uint32_t len, uint8_t* out) {
  _aes->Init.pKey = (uint32_t*) key;
  _aes->Init.pInitVect = (uint32_t*) iv;

  return HAL_CRYP_Decrypt(_aes, (uint32_t*)data, len, (uint32_t*)out, AES_TIMEOUT) == HAL_OK;
}
// TODO: placeholder, must be reimplemented as a real AES-CMAC (see the sketch below)
static uint8_t cmac_tmp[256] __attribute__((aligned(4)));
uint8_t aes_cmac(const uint8_t* key, const uint8_t* data, uint32_t len, uint8_t* out) {
  if ((len < AES_IV_SIZE) || (len >= sizeof(cmac_tmp))) {
    return 0;
  }

  if (!aes_encrypt(key, cmac_iv, data, len, cmac_tmp)) {
    return 0;
  }

  /* CBC-MAC style placeholder: take the last ciphertext block as the MAC */
  memcpy(out, &cmac_tmp[len - AES_IV_SIZE], AES_IV_SIZE);
  return 1;
}
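For reference, a real AES-CMAC (RFC 4493 / NIST SP 800-38B) derives two subkeys by doubling E_K(0) in GF(2^128) and folds the proper subkey into the last block before the final CBC step; the placeholder above skips all of that. A minimal sketch of the doubling primitive (hypothetical helper, not part of this commit):

/* RFC 4493 subkey doubling: shift the 16-byte block left by one bit and
   XOR 0x87 into the last byte if the bit shifted out was set. */
static void cmac_dbl(uint8_t block[16]) {
  uint8_t carry = block[0] & 0x80;

  for (int i = 0; i < 15; i++) {
    block[i] = (uint8_t)((block[i] << 1) | (block[i + 1] >> 7));
  }

  block[15] = (uint8_t)(block[15] << 1);

  if (carry) {
    block[15] ^= 0x87;  /* the GF(2^128) reduction constant */
  }
}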


@@ -1,957 +0,0 @@
/*
---------------------------------------------------------------------------
Copyright (c) 1998-2013, Brian Gladman, Worcester, UK. All rights reserved.
The redistribution and use of this software (with or without changes)
is allowed without the payment of fees or royalties provided that:
source code distributions include the above copyright notice, this
list of conditions and the following disclaimer;
binary distributions include the above copyright notice, this list
of conditions and the following disclaimer in their documentation.
This software is provided 'as is' with no explicit or implied warranties
in respect of its operation, including, but not limited to, correctness
and fitness for purpose.
---------------------------------------------------------------------------
Issue Date: 20/12/2007
These subroutines implement multiple block AES modes for ECB, CBC, CFB,
OFB and CTR encryption. The code provides support for the VIA Advanced
Cryptography Engine (ACE).
NOTE: In the following subroutines, the AES contexts (ctx) must be
16 byte aligned if VIA ACE is being used
*/
#include <string.h>
#include <assert.h>
#include <stdint.h>
#include "aesopt.h"
#if defined( AES_MODES )
#if defined(__cplusplus)
extern "C"
{
#endif
#if defined( _MSC_VER ) && ( _MSC_VER > 800 )
#pragma intrinsic(memcpy)
#endif
#define BFR_BLOCKS 8
/* These values are used to detect long word alignment in order to */
/* speed up some buffer operations. This facility may not work on */
/* some machines so this define can be commented out if necessary */
#define FAST_BUFFER_OPERATIONS
#define lp32(x) ((uint32_t*)(x))
#if defined( USE_VIA_ACE_IF_PRESENT )
#include "aes_via_ace.h"
#pragma pack(16)
aligned_array(unsigned long, enc_gen_table, 12, 16) = NEH_ENC_GEN_DATA;
aligned_array(unsigned long, enc_load_table, 12, 16) = NEH_ENC_LOAD_DATA;
aligned_array(unsigned long, enc_hybrid_table, 12, 16) = NEH_ENC_HYBRID_DATA;
aligned_array(unsigned long, dec_gen_table, 12, 16) = NEH_DEC_GEN_DATA;
aligned_array(unsigned long, dec_load_table, 12, 16) = NEH_DEC_LOAD_DATA;
aligned_array(unsigned long, dec_hybrid_table, 12, 16) = NEH_DEC_HYBRID_DATA;
/* NOTE: These control word macros must only be used after */
/* a key has been set up because they depend on key size */
/* See the VIA ACE documentation for key type information */
/* and aes_via_ace.h for non-default NEH_KEY_TYPE values */
#ifndef NEH_KEY_TYPE
# define NEH_KEY_TYPE NEH_HYBRID
#endif
#if NEH_KEY_TYPE == NEH_LOAD
#define kd_adr(c) ((uint8_t*)(c)->ks)
#elif NEH_KEY_TYPE == NEH_GENERATE
#define kd_adr(c) ((uint8_t*)(c)->ks + (c)->inf.b[0])
#elif NEH_KEY_TYPE == NEH_HYBRID
#define kd_adr(c) ((uint8_t*)(c)->ks + ((c)->inf.b[0] == 160 ? 160 : 0))
#else
#error no key type defined for VIA ACE
#endif
#else
#define aligned_array(type, name, no, stride) type name[no]
#define aligned_auto(type, name, no, stride) type name[no]
#endif
#if defined( _MSC_VER ) && _MSC_VER > 1200
#define via_cwd(cwd, ty, dir, len) \
unsigned long* cwd = (dir##_##ty##_table + ((len - 128) >> 4))
#else
#define via_cwd(cwd, ty, dir, len) \
aligned_auto(unsigned long, cwd, 4, 16); \
cwd[1] = cwd[2] = cwd[3] = 0; \
cwd[0] = neh_##dir##_##ty##_key(len)
#endif
/* test the code for detecting and setting pointer alignment */
AES_RETURN aes_test_alignment_detection(unsigned int n) /* 4 <= n <= 16 */
{ uint8_t p[16];
uint32_t i = 0, count_eq = 0, count_neq = 0;
if(n < 4 || n > 16)
return EXIT_FAILURE;
for(i = 0; i < n; ++i)
{
uint8_t *qf = ALIGN_FLOOR(p + i, n),
*qh = ALIGN_CEIL(p + i, n);
if(qh == qf)
++count_eq;
else if(qh == qf + n)
++count_neq;
else
return EXIT_FAILURE;
}
return (count_eq != 1 || count_neq != n - 1 ? EXIT_FAILURE : EXIT_SUCCESS);
}
AES_RETURN aes_mode_reset(aes_encrypt_ctx ctx[1])
{
ctx->inf.b[2] = 0;
return EXIT_SUCCESS;
}
AES_RETURN aes_ecb_encrypt(const unsigned char *ibuf, unsigned char *obuf,
int len, const aes_encrypt_ctx ctx[1])
{ int nb = len >> AES_BLOCK_SIZE_P2;
if(len & (AES_BLOCK_SIZE - 1))
return EXIT_FAILURE;
#if defined( USE_VIA_ACE_IF_PRESENT )
if(ctx->inf.b[1] == 0xff)
{ uint8_t *ksp = (uint8_t*)(ctx->ks);
via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192);
if(ALIGN_OFFSET( ctx, 16 ))
return EXIT_FAILURE;
if(!ALIGN_OFFSET( ibuf, 16 ) && !ALIGN_OFFSET( obuf, 16 ))
{
via_ecb_op5(ksp, cwd, ibuf, obuf, nb);
}
else
{ aligned_auto(uint8_t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
uint8_t *ip = NULL, *op = NULL;
while(nb)
{
int m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb);
ip = (ALIGN_OFFSET( ibuf, 16 ) ? buf : ibuf);
op = (ALIGN_OFFSET( obuf, 16 ) ? buf : obuf);
if(ip != ibuf)
memcpy(buf, ibuf, m * AES_BLOCK_SIZE);
via_ecb_op5(ksp, cwd, ip, op, m);
if(op != obuf)
memcpy(obuf, buf, m * AES_BLOCK_SIZE);
ibuf += m * AES_BLOCK_SIZE;
obuf += m * AES_BLOCK_SIZE;
nb -= m;
}
}
return EXIT_SUCCESS;
}
#endif
#if !defined( ASSUME_VIA_ACE_PRESENT )
while(nb--)
{
if(aes_encrypt(ibuf, obuf, ctx) != EXIT_SUCCESS)
return EXIT_FAILURE;
ibuf += AES_BLOCK_SIZE;
obuf += AES_BLOCK_SIZE;
}
#endif
return EXIT_SUCCESS;
}
AES_RETURN aes_ecb_decrypt(const unsigned char *ibuf, unsigned char *obuf,
int len, const aes_decrypt_ctx ctx[1])
{ int nb = len >> AES_BLOCK_SIZE_P2;
if(len & (AES_BLOCK_SIZE - 1))
return EXIT_FAILURE;
#if defined( USE_VIA_ACE_IF_PRESENT )
if(ctx->inf.b[1] == 0xff)
{ uint8_t *ksp = kd_adr(ctx);
via_cwd(cwd, hybrid, dec, 2 * ctx->inf.b[0] - 192);
if(ALIGN_OFFSET( ctx, 16 ))
return EXIT_FAILURE;
if(!ALIGN_OFFSET( ibuf, 16 ) && !ALIGN_OFFSET( obuf, 16 ))
{
via_ecb_op5(ksp, cwd, ibuf, obuf, nb);
}
else
{ aligned_auto(uint8_t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
uint8_t *ip = NULL, *op = NULL;
while(nb)
{
int m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb);
ip = (ALIGN_OFFSET( ibuf, 16 ) ? buf : ibuf);
op = (ALIGN_OFFSET( obuf, 16 ) ? buf : obuf);
if(ip != ibuf)
memcpy(buf, ibuf, m * AES_BLOCK_SIZE);
via_ecb_op5(ksp, cwd, ip, op, m);
if(op != obuf)
memcpy(obuf, buf, m * AES_BLOCK_SIZE);
ibuf += m * AES_BLOCK_SIZE;
obuf += m * AES_BLOCK_SIZE;
nb -= m;
}
}
return EXIT_SUCCESS;
}
#endif
#if !defined( ASSUME_VIA_ACE_PRESENT )
while(nb--)
{
if(aes_decrypt(ibuf, obuf, ctx) != EXIT_SUCCESS)
return EXIT_FAILURE;
ibuf += AES_BLOCK_SIZE;
obuf += AES_BLOCK_SIZE;
}
#endif
return EXIT_SUCCESS;
}
AES_RETURN aes_cbc_encrypt(const unsigned char *ibuf, unsigned char *obuf,
int len, unsigned char *iv, const aes_encrypt_ctx ctx[1])
{ int nb = len >> AES_BLOCK_SIZE_P2;
if(len & (AES_BLOCK_SIZE - 1))
return EXIT_FAILURE;
#if defined( USE_VIA_ACE_IF_PRESENT )
if(ctx->inf.b[1] == 0xff)
{ uint8_t *ksp = (uint8_t*)(ctx->ks), *ivp = iv;
aligned_auto(uint8_t, liv, AES_BLOCK_SIZE, 16);
via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192);
if(ALIGN_OFFSET( ctx, 16 ))
return EXIT_FAILURE;
if(ALIGN_OFFSET( iv, 16 )) /* ensure an aligned iv */
{
ivp = liv;
memcpy(liv, iv, AES_BLOCK_SIZE);
}
if(!ALIGN_OFFSET( ibuf, 16 ) && !ALIGN_OFFSET( obuf, 16 ) && !ALIGN_OFFSET( iv, 16 ))
{
via_cbc_op7(ksp, cwd, ibuf, obuf, nb, ivp, ivp);
}
else
{ aligned_auto(uint8_t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
uint8_t *ip = NULL, *op = NULL;
while(nb)
{
int m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb);
ip = (ALIGN_OFFSET( ibuf, 16 ) ? buf : ibuf);
op = (ALIGN_OFFSET( obuf, 16 ) ? buf : obuf);
if(ip != ibuf)
memcpy(buf, ibuf, m * AES_BLOCK_SIZE);
via_cbc_op7(ksp, cwd, ip, op, m, ivp, ivp);
if(op != obuf)
memcpy(obuf, buf, m * AES_BLOCK_SIZE);
ibuf += m * AES_BLOCK_SIZE;
obuf += m * AES_BLOCK_SIZE;
nb -= m;
}
}
if(iv != ivp)
memcpy(iv, ivp, AES_BLOCK_SIZE);
return EXIT_SUCCESS;
}
#endif
#if !defined( ASSUME_VIA_ACE_PRESENT )
# ifdef FAST_BUFFER_OPERATIONS
if(!ALIGN_OFFSET( ibuf, 4 ) && !ALIGN_OFFSET( iv, 4 ))
while(nb--)
{
lp32(iv)[0] ^= lp32(ibuf)[0];
lp32(iv)[1] ^= lp32(ibuf)[1];
lp32(iv)[2] ^= lp32(ibuf)[2];
lp32(iv)[3] ^= lp32(ibuf)[3];
if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS)
return EXIT_FAILURE;
memcpy(obuf, iv, AES_BLOCK_SIZE);
ibuf += AES_BLOCK_SIZE;
obuf += AES_BLOCK_SIZE;
}
else
# endif
while(nb--)
{
iv[ 0] ^= ibuf[ 0]; iv[ 1] ^= ibuf[ 1];
iv[ 2] ^= ibuf[ 2]; iv[ 3] ^= ibuf[ 3];
iv[ 4] ^= ibuf[ 4]; iv[ 5] ^= ibuf[ 5];
iv[ 6] ^= ibuf[ 6]; iv[ 7] ^= ibuf[ 7];
iv[ 8] ^= ibuf[ 8]; iv[ 9] ^= ibuf[ 9];
iv[10] ^= ibuf[10]; iv[11] ^= ibuf[11];
iv[12] ^= ibuf[12]; iv[13] ^= ibuf[13];
iv[14] ^= ibuf[14]; iv[15] ^= ibuf[15];
if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS)
return EXIT_FAILURE;
memcpy(obuf, iv, AES_BLOCK_SIZE);
ibuf += AES_BLOCK_SIZE;
obuf += AES_BLOCK_SIZE;
}
#endif
return EXIT_SUCCESS;
}
AES_RETURN aes_cbc_decrypt(const unsigned char *ibuf, unsigned char *obuf,
int len, unsigned char *iv, const aes_decrypt_ctx ctx[1])
{ unsigned char tmp[AES_BLOCK_SIZE];
int nb = len >> AES_BLOCK_SIZE_P2;
if(len & (AES_BLOCK_SIZE - 1))
return EXIT_FAILURE;
#if defined( USE_VIA_ACE_IF_PRESENT )
if(ctx->inf.b[1] == 0xff)
{ uint8_t *ksp = kd_adr(ctx), *ivp = iv;
aligned_auto(uint8_t, liv, AES_BLOCK_SIZE, 16);
via_cwd(cwd, hybrid, dec, 2 * ctx->inf.b[0] - 192);
if(ALIGN_OFFSET( ctx, 16 ))
return EXIT_FAILURE;
if(ALIGN_OFFSET( iv, 16 )) /* ensure an aligned iv */
{
ivp = liv;
memcpy(liv, iv, AES_BLOCK_SIZE);
}
if(!ALIGN_OFFSET( ibuf, 16 ) && !ALIGN_OFFSET( obuf, 16 ) && !ALIGN_OFFSET( iv, 16 ))
{
via_cbc_op6(ksp, cwd, ibuf, obuf, nb, ivp);
}
else
{ aligned_auto(uint8_t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
uint8_t *ip = NULL, *op = NULL;
while(nb)
{
int m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb);
ip = (ALIGN_OFFSET( ibuf, 16 ) ? buf : ibuf);
op = (ALIGN_OFFSET( obuf, 16 ) ? buf : obuf);
if(ip != ibuf)
memcpy(buf, ibuf, m * AES_BLOCK_SIZE);
via_cbc_op6(ksp, cwd, ip, op, m, ivp);
if(op != obuf)
memcpy(obuf, buf, m * AES_BLOCK_SIZE);
ibuf += m * AES_BLOCK_SIZE;
obuf += m * AES_BLOCK_SIZE;
nb -= m;
}
}
if(iv != ivp)
memcpy(iv, ivp, AES_BLOCK_SIZE);
return EXIT_SUCCESS;
}
#endif
#if !defined( ASSUME_VIA_ACE_PRESENT )
# ifdef FAST_BUFFER_OPERATIONS
if(!ALIGN_OFFSET( obuf, 4 ) && !ALIGN_OFFSET( iv, 4 ))
while(nb--)
{
memcpy(tmp, ibuf, AES_BLOCK_SIZE);
if(aes_decrypt(ibuf, obuf, ctx) != EXIT_SUCCESS)
return EXIT_FAILURE;
lp32(obuf)[0] ^= lp32(iv)[0];
lp32(obuf)[1] ^= lp32(iv)[1];
lp32(obuf)[2] ^= lp32(iv)[2];
lp32(obuf)[3] ^= lp32(iv)[3];
memcpy(iv, tmp, AES_BLOCK_SIZE);
ibuf += AES_BLOCK_SIZE;
obuf += AES_BLOCK_SIZE;
}
else
# endif
while(nb--)
{
memcpy(tmp, ibuf, AES_BLOCK_SIZE);
if(aes_decrypt(ibuf, obuf, ctx) != EXIT_SUCCESS)
return EXIT_FAILURE;
obuf[ 0] ^= iv[ 0]; obuf[ 1] ^= iv[ 1];
obuf[ 2] ^= iv[ 2]; obuf[ 3] ^= iv[ 3];
obuf[ 4] ^= iv[ 4]; obuf[ 5] ^= iv[ 5];
obuf[ 6] ^= iv[ 6]; obuf[ 7] ^= iv[ 7];
obuf[ 8] ^= iv[ 8]; obuf[ 9] ^= iv[ 9];
obuf[10] ^= iv[10]; obuf[11] ^= iv[11];
obuf[12] ^= iv[12]; obuf[13] ^= iv[13];
obuf[14] ^= iv[14]; obuf[15] ^= iv[15];
memcpy(iv, tmp, AES_BLOCK_SIZE);
ibuf += AES_BLOCK_SIZE;
obuf += AES_BLOCK_SIZE;
}
#endif
return EXIT_SUCCESS;
}
AES_RETURN aes_cfb_encrypt(const unsigned char *ibuf, unsigned char *obuf,
int len, unsigned char *iv, aes_encrypt_ctx ctx[1])
{ int cnt = 0, b_pos = (int)ctx->inf.b[2], nb;
if(b_pos) /* complete any partial block */
{
while(b_pos < AES_BLOCK_SIZE && cnt < len)
{
*obuf++ = (iv[b_pos++] ^= *ibuf++);
cnt++;
}
b_pos = (b_pos == AES_BLOCK_SIZE ? 0 : b_pos);
}
if((nb = (len - cnt) >> AES_BLOCK_SIZE_P2) != 0) /* process whole blocks */
{
#if defined( USE_VIA_ACE_IF_PRESENT )
if(ctx->inf.b[1] == 0xff)
{ int m;
uint8_t *ksp = (uint8_t*)(ctx->ks), *ivp = iv;
aligned_auto(uint8_t, liv, AES_BLOCK_SIZE, 16);
via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192);
if(ALIGN_OFFSET( ctx, 16 ))
return EXIT_FAILURE;
if(ALIGN_OFFSET( iv, 16 )) /* ensure an aligned iv */
{
ivp = liv;
memcpy(liv, iv, AES_BLOCK_SIZE);
}
if(!ALIGN_OFFSET( ibuf, 16 ) && !ALIGN_OFFSET( obuf, 16 ))
{
via_cfb_op7(ksp, cwd, ibuf, obuf, nb, ivp, ivp);
ibuf += nb * AES_BLOCK_SIZE;
obuf += nb * AES_BLOCK_SIZE;
cnt += nb * AES_BLOCK_SIZE;
}
else /* input, output or both are unaligned */
{ aligned_auto(uint8_t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
uint8_t *ip = NULL, *op = NULL;
while(nb)
{
m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb), nb -= m;
ip = (ALIGN_OFFSET( ibuf, 16 ) ? buf : ibuf);
op = (ALIGN_OFFSET( obuf, 16 ) ? buf : obuf);
if(ip != ibuf)
memcpy(buf, ibuf, m * AES_BLOCK_SIZE);
via_cfb_op7(ksp, cwd, ip, op, m, ivp, ivp);
if(op != obuf)
memcpy(obuf, buf, m * AES_BLOCK_SIZE);
ibuf += m * AES_BLOCK_SIZE;
obuf += m * AES_BLOCK_SIZE;
cnt += m * AES_BLOCK_SIZE;
}
}
if(ivp != iv)
memcpy(iv, ivp, AES_BLOCK_SIZE);
}
#else
# ifdef FAST_BUFFER_OPERATIONS
if(!ALIGN_OFFSET( ibuf, 4 ) && !ALIGN_OFFSET( obuf, 4 ) && !ALIGN_OFFSET( iv, 4 ))
while(cnt + AES_BLOCK_SIZE <= len)
{
assert(b_pos == 0);
if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS)
return EXIT_FAILURE;
lp32(obuf)[0] = lp32(iv)[0] ^= lp32(ibuf)[0];
lp32(obuf)[1] = lp32(iv)[1] ^= lp32(ibuf)[1];
lp32(obuf)[2] = lp32(iv)[2] ^= lp32(ibuf)[2];
lp32(obuf)[3] = lp32(iv)[3] ^= lp32(ibuf)[3];
ibuf += AES_BLOCK_SIZE;
obuf += AES_BLOCK_SIZE;
cnt += AES_BLOCK_SIZE;
}
else
# endif
while(cnt + AES_BLOCK_SIZE <= len)
{
assert(b_pos == 0);
if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS)
return EXIT_FAILURE;
obuf[ 0] = iv[ 0] ^= ibuf[ 0]; obuf[ 1] = iv[ 1] ^= ibuf[ 1];
obuf[ 2] = iv[ 2] ^= ibuf[ 2]; obuf[ 3] = iv[ 3] ^= ibuf[ 3];
obuf[ 4] = iv[ 4] ^= ibuf[ 4]; obuf[ 5] = iv[ 5] ^= ibuf[ 5];
obuf[ 6] = iv[ 6] ^= ibuf[ 6]; obuf[ 7] = iv[ 7] ^= ibuf[ 7];
obuf[ 8] = iv[ 8] ^= ibuf[ 8]; obuf[ 9] = iv[ 9] ^= ibuf[ 9];
obuf[10] = iv[10] ^= ibuf[10]; obuf[11] = iv[11] ^= ibuf[11];
obuf[12] = iv[12] ^= ibuf[12]; obuf[13] = iv[13] ^= ibuf[13];
obuf[14] = iv[14] ^= ibuf[14]; obuf[15] = iv[15] ^= ibuf[15];
ibuf += AES_BLOCK_SIZE;
obuf += AES_BLOCK_SIZE;
cnt += AES_BLOCK_SIZE;
}
#endif
}
while(cnt < len)
{
if(!b_pos && aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS)
return EXIT_FAILURE;
while(cnt < len && b_pos < AES_BLOCK_SIZE)
{
*obuf++ = (iv[b_pos++] ^= *ibuf++);
cnt++;
}
b_pos = (b_pos == AES_BLOCK_SIZE ? 0 : b_pos);
}
ctx->inf.b[2] = (uint8_t)b_pos;
return EXIT_SUCCESS;
}
AES_RETURN aes_cfb_decrypt(const unsigned char *ibuf, unsigned char *obuf,
int len, unsigned char *iv, aes_encrypt_ctx ctx[1])
{ int cnt = 0, b_pos = (int)ctx->inf.b[2], nb;
if(b_pos) /* complete any partial block */
{ uint8_t t;
while(b_pos < AES_BLOCK_SIZE && cnt < len)
{
t = *ibuf++;
*obuf++ = t ^ iv[b_pos];
iv[b_pos++] = t;
cnt++;
}
b_pos = (b_pos == AES_BLOCK_SIZE ? 0 : b_pos);
}
if((nb = (len - cnt) >> AES_BLOCK_SIZE_P2) != 0) /* process whole blocks */
{
#if defined( USE_VIA_ACE_IF_PRESENT )
if(ctx->inf.b[1] == 0xff)
{ int m;
uint8_t *ksp = (uint8_t*)(ctx->ks), *ivp = iv;
aligned_auto(uint8_t, liv, AES_BLOCK_SIZE, 16);
via_cwd(cwd, hybrid, dec, 2 * ctx->inf.b[0] - 192);
if(ALIGN_OFFSET( ctx, 16 ))
return EXIT_FAILURE;
if(ALIGN_OFFSET( iv, 16 )) /* ensure an aligned iv */
{
ivp = liv;
memcpy(liv, iv, AES_BLOCK_SIZE);
}
if(!ALIGN_OFFSET( ibuf, 16 ) && !ALIGN_OFFSET( obuf, 16 ))
{
via_cfb_op6(ksp, cwd, ibuf, obuf, nb, ivp);
ibuf += nb * AES_BLOCK_SIZE;
obuf += nb * AES_BLOCK_SIZE;
cnt += nb * AES_BLOCK_SIZE;
}
else /* input, output or both are unaligned */
{ aligned_auto(uint8_t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
uint8_t *ip = NULL, *op = NULL;
while(nb)
{
m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb), nb -= m;
ip = (ALIGN_OFFSET( ibuf, 16 ) ? buf : ibuf);
op = (ALIGN_OFFSET( obuf, 16 ) ? buf : obuf);
if(ip != ibuf) /* input buffer is not aligned */
memcpy(buf, ibuf, m * AES_BLOCK_SIZE);
via_cfb_op6(ksp, cwd, ip, op, m, ivp);
if(op != obuf) /* output buffer is not aligned */
memcpy(obuf, buf, m * AES_BLOCK_SIZE);
ibuf += m * AES_BLOCK_SIZE;
obuf += m * AES_BLOCK_SIZE;
cnt += m * AES_BLOCK_SIZE;
}
}
if(ivp != iv)
memcpy(iv, ivp, AES_BLOCK_SIZE);
}
#else
# ifdef FAST_BUFFER_OPERATIONS
if(!ALIGN_OFFSET( ibuf, 4 ) && !ALIGN_OFFSET( obuf, 4 ) &&!ALIGN_OFFSET( iv, 4 ))
while(cnt + AES_BLOCK_SIZE <= len)
{ uint32_t t;
assert(b_pos == 0);
if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS)
return EXIT_FAILURE;
t = lp32(ibuf)[0], lp32(obuf)[0] = t ^ lp32(iv)[0], lp32(iv)[0] = t;
t = lp32(ibuf)[1], lp32(obuf)[1] = t ^ lp32(iv)[1], lp32(iv)[1] = t;
t = lp32(ibuf)[2], lp32(obuf)[2] = t ^ lp32(iv)[2], lp32(iv)[2] = t;
t = lp32(ibuf)[3], lp32(obuf)[3] = t ^ lp32(iv)[3], lp32(iv)[3] = t;
ibuf += AES_BLOCK_SIZE;
obuf += AES_BLOCK_SIZE;
cnt += AES_BLOCK_SIZE;
}
else
# endif
while(cnt + AES_BLOCK_SIZE <= len)
{ uint8_t t;
assert(b_pos == 0);
if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS)
return EXIT_FAILURE;
t = ibuf[ 0], obuf[ 0] = t ^ iv[ 0], iv[ 0] = t;
t = ibuf[ 1], obuf[ 1] = t ^ iv[ 1], iv[ 1] = t;
t = ibuf[ 2], obuf[ 2] = t ^ iv[ 2], iv[ 2] = t;
t = ibuf[ 3], obuf[ 3] = t ^ iv[ 3], iv[ 3] = t;
t = ibuf[ 4], obuf[ 4] = t ^ iv[ 4], iv[ 4] = t;
t = ibuf[ 5], obuf[ 5] = t ^ iv[ 5], iv[ 5] = t;
t = ibuf[ 6], obuf[ 6] = t ^ iv[ 6], iv[ 6] = t;
t = ibuf[ 7], obuf[ 7] = t ^ iv[ 7], iv[ 7] = t;
t = ibuf[ 8], obuf[ 8] = t ^ iv[ 8], iv[ 8] = t;
t = ibuf[ 9], obuf[ 9] = t ^ iv[ 9], iv[ 9] = t;
t = ibuf[10], obuf[10] = t ^ iv[10], iv[10] = t;
t = ibuf[11], obuf[11] = t ^ iv[11], iv[11] = t;
t = ibuf[12], obuf[12] = t ^ iv[12], iv[12] = t;
t = ibuf[13], obuf[13] = t ^ iv[13], iv[13] = t;
t = ibuf[14], obuf[14] = t ^ iv[14], iv[14] = t;
t = ibuf[15], obuf[15] = t ^ iv[15], iv[15] = t;
ibuf += AES_BLOCK_SIZE;
obuf += AES_BLOCK_SIZE;
cnt += AES_BLOCK_SIZE;
}
#endif
}
while(cnt < len)
{ uint8_t t;
if(!b_pos && aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS)
return EXIT_FAILURE;
while(cnt < len && b_pos < AES_BLOCK_SIZE)
{
t = *ibuf++;
*obuf++ = t ^ iv[b_pos];
iv[b_pos++] = t;
cnt++;
}
b_pos = (b_pos == AES_BLOCK_SIZE ? 0 : b_pos);
}
ctx->inf.b[2] = (uint8_t)b_pos;
return EXIT_SUCCESS;
}
AES_RETURN aes_ofb_crypt(const unsigned char *ibuf, unsigned char *obuf,
int len, unsigned char *iv, aes_encrypt_ctx ctx[1])
{ int cnt = 0, b_pos = (int)ctx->inf.b[2], nb;
if(b_pos) /* complete any partial block */
{
while(b_pos < AES_BLOCK_SIZE && cnt < len)
{
*obuf++ = iv[b_pos++] ^ *ibuf++;
cnt++;
}
b_pos = (b_pos == AES_BLOCK_SIZE ? 0 : b_pos);
}
if((nb = (len - cnt) >> AES_BLOCK_SIZE_P2) != 0) /* process whole blocks */
{
#if defined( USE_VIA_ACE_IF_PRESENT )
if(ctx->inf.b[1] == 0xff)
{ int m;
uint8_t *ksp = (uint8_t*)(ctx->ks), *ivp = iv;
aligned_auto(uint8_t, liv, AES_BLOCK_SIZE, 16);
via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192);
if(ALIGN_OFFSET( ctx, 16 ))
return EXIT_FAILURE;
if(ALIGN_OFFSET( iv, 16 )) /* ensure an aligned iv */
{
ivp = liv;
memcpy(liv, iv, AES_BLOCK_SIZE);
}
if(!ALIGN_OFFSET( ibuf, 16 ) && !ALIGN_OFFSET( obuf, 16 ))
{
via_ofb_op6(ksp, cwd, ibuf, obuf, nb, ivp);
ibuf += nb * AES_BLOCK_SIZE;
obuf += nb * AES_BLOCK_SIZE;
cnt += nb * AES_BLOCK_SIZE;
}
else /* input, output or both are unaligned */
{ aligned_auto(uint8_t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
uint8_t *ip = NULL, *op = NULL;
while(nb)
{
m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb), nb -= m;
ip = (ALIGN_OFFSET( ibuf, 16 ) ? buf : ibuf);
op = (ALIGN_OFFSET( obuf, 16 ) ? buf : obuf);
if(ip != ibuf)
memcpy(buf, ibuf, m * AES_BLOCK_SIZE);
via_ofb_op6(ksp, cwd, ip, op, m, ivp);
if(op != obuf)
memcpy(obuf, buf, m * AES_BLOCK_SIZE);
ibuf += m * AES_BLOCK_SIZE;
obuf += m * AES_BLOCK_SIZE;
cnt += m * AES_BLOCK_SIZE;
}
}
if(ivp != iv)
memcpy(iv, ivp, AES_BLOCK_SIZE);
}
#else
# ifdef FAST_BUFFER_OPERATIONS
if(!ALIGN_OFFSET( ibuf, 4 ) && !ALIGN_OFFSET( obuf, 4 ) && !ALIGN_OFFSET( iv, 4 ))
while(cnt + AES_BLOCK_SIZE <= len)
{
assert(b_pos == 0);
if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS)
return EXIT_FAILURE;
lp32(obuf)[0] = lp32(iv)[0] ^ lp32(ibuf)[0];
lp32(obuf)[1] = lp32(iv)[1] ^ lp32(ibuf)[1];
lp32(obuf)[2] = lp32(iv)[2] ^ lp32(ibuf)[2];
lp32(obuf)[3] = lp32(iv)[3] ^ lp32(ibuf)[3];
ibuf += AES_BLOCK_SIZE;
obuf += AES_BLOCK_SIZE;
cnt += AES_BLOCK_SIZE;
}
else
# endif
while(cnt + AES_BLOCK_SIZE <= len)
{
assert(b_pos == 0);
if(aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS)
return EXIT_FAILURE;
obuf[ 0] = iv[ 0] ^ ibuf[ 0]; obuf[ 1] = iv[ 1] ^ ibuf[ 1];
obuf[ 2] = iv[ 2] ^ ibuf[ 2]; obuf[ 3] = iv[ 3] ^ ibuf[ 3];
obuf[ 4] = iv[ 4] ^ ibuf[ 4]; obuf[ 5] = iv[ 5] ^ ibuf[ 5];
obuf[ 6] = iv[ 6] ^ ibuf[ 6]; obuf[ 7] = iv[ 7] ^ ibuf[ 7];
obuf[ 8] = iv[ 8] ^ ibuf[ 8]; obuf[ 9] = iv[ 9] ^ ibuf[ 9];
obuf[10] = iv[10] ^ ibuf[10]; obuf[11] = iv[11] ^ ibuf[11];
obuf[12] = iv[12] ^ ibuf[12]; obuf[13] = iv[13] ^ ibuf[13];
obuf[14] = iv[14] ^ ibuf[14]; obuf[15] = iv[15] ^ ibuf[15];
ibuf += AES_BLOCK_SIZE;
obuf += AES_BLOCK_SIZE;
cnt += AES_BLOCK_SIZE;
}
#endif
}
while(cnt < len)
{
if(!b_pos && aes_encrypt(iv, iv, ctx) != EXIT_SUCCESS)
return EXIT_FAILURE;
while(cnt < len && b_pos < AES_BLOCK_SIZE)
{
*obuf++ = iv[b_pos++] ^ *ibuf++;
cnt++;
}
b_pos = (b_pos == AES_BLOCK_SIZE ? 0 : b_pos);
}
ctx->inf.b[2] = (uint8_t)b_pos;
return EXIT_SUCCESS;
}
#define BFR_LENGTH (BFR_BLOCKS * AES_BLOCK_SIZE)
AES_RETURN aes_ctr_crypt(const unsigned char *ibuf, unsigned char *obuf,
int len, unsigned char *cbuf, cbuf_inc ctr_inc, aes_encrypt_ctx ctx[1])
{ unsigned char *ip;
int i = 0, blen = 0, b_pos = (int)(ctx->inf.b[2]);
#if defined( USE_VIA_ACE_IF_PRESENT )
aligned_auto(uint8_t, buf, BFR_LENGTH, 16);
if(ctx->inf.b[1] == 0xff && ALIGN_OFFSET( ctx, 16 ))
return EXIT_FAILURE;
#else
uint8_t buf[BFR_LENGTH] = {0};
#endif
if(b_pos)
{
memcpy(buf, cbuf, AES_BLOCK_SIZE);
if(aes_ecb_encrypt(buf, buf, AES_BLOCK_SIZE, ctx) != EXIT_SUCCESS)
return EXIT_FAILURE;
while(b_pos < AES_BLOCK_SIZE && len)
{
*obuf++ = *ibuf++ ^ buf[b_pos++];
--len;
}
if(len)
ctr_inc(cbuf), b_pos = 0;
}
while(len)
{
blen = (len > BFR_LENGTH ? BFR_LENGTH : len), len -= blen;
for(i = 0, ip = buf; i < (blen >> AES_BLOCK_SIZE_P2); ++i)
{
memcpy(ip, cbuf, AES_BLOCK_SIZE);
ctr_inc(cbuf);
ip += AES_BLOCK_SIZE;
}
if(blen & (AES_BLOCK_SIZE - 1))
memcpy(ip, cbuf, AES_BLOCK_SIZE), i++;
#if defined( USE_VIA_ACE_IF_PRESENT )
if(ctx->inf.b[1] == 0xff)
{
via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192);
via_ecb_op5((ctx->ks), cwd, buf, buf, i);
}
else
#endif
if(aes_ecb_encrypt(buf, buf, i * AES_BLOCK_SIZE, ctx) != EXIT_SUCCESS)
return EXIT_FAILURE;
i = 0; ip = buf;
# ifdef FAST_BUFFER_OPERATIONS
if(!ALIGN_OFFSET( ibuf, 4 ) && !ALIGN_OFFSET( obuf, 4 ) && !ALIGN_OFFSET( ip, 4 ))
while(i + AES_BLOCK_SIZE <= blen)
{
lp32(obuf)[0] = lp32(ibuf)[0] ^ lp32(ip)[0];
lp32(obuf)[1] = lp32(ibuf)[1] ^ lp32(ip)[1];
lp32(obuf)[2] = lp32(ibuf)[2] ^ lp32(ip)[2];
lp32(obuf)[3] = lp32(ibuf)[3] ^ lp32(ip)[3];
i += AES_BLOCK_SIZE;
ip += AES_BLOCK_SIZE;
ibuf += AES_BLOCK_SIZE;
obuf += AES_BLOCK_SIZE;
}
else
#endif
while(i + AES_BLOCK_SIZE <= blen)
{
obuf[ 0] = ibuf[ 0] ^ ip[ 0]; obuf[ 1] = ibuf[ 1] ^ ip[ 1];
obuf[ 2] = ibuf[ 2] ^ ip[ 2]; obuf[ 3] = ibuf[ 3] ^ ip[ 3];
obuf[ 4] = ibuf[ 4] ^ ip[ 4]; obuf[ 5] = ibuf[ 5] ^ ip[ 5];
obuf[ 6] = ibuf[ 6] ^ ip[ 6]; obuf[ 7] = ibuf[ 7] ^ ip[ 7];
obuf[ 8] = ibuf[ 8] ^ ip[ 8]; obuf[ 9] = ibuf[ 9] ^ ip[ 9];
obuf[10] = ibuf[10] ^ ip[10]; obuf[11] = ibuf[11] ^ ip[11];
obuf[12] = ibuf[12] ^ ip[12]; obuf[13] = ibuf[13] ^ ip[13];
obuf[14] = ibuf[14] ^ ip[14]; obuf[15] = ibuf[15] ^ ip[15];
i += AES_BLOCK_SIZE;
ip += AES_BLOCK_SIZE;
ibuf += AES_BLOCK_SIZE;
obuf += AES_BLOCK_SIZE;
}
while(i++ < blen)
*obuf++ = *ibuf++ ^ ip[b_pos++];
}
ctx->inf.b[2] = (uint8_t)b_pos;
return EXIT_SUCCESS;
}
void aes_ctr_cbuf_inc(unsigned char *cbuf)
{
int i = AES_BLOCK_SIZE - 1;
while (i >= 0) {
cbuf[i]++;
if (cbuf[i]) return; // if there was no overflow
i--;
}
}
#if defined(__cplusplus)
}
#endif
#endif
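
/* A minimal usage sketch for the CTR interface above (not part of this
   commit; the function name and buffers are illustrative). CTR is symmetric,
   so the same call performs both encryption and decryption. */
#include <string.h>
#include "aes.h"

int ctr_crypt_example(const unsigned char key[32], const unsigned char ctr0[16],
                      const unsigned char *in, unsigned char *out, int len)
{
    aes_encrypt_ctx ctx[1];
    unsigned char cbuf[AES_BLOCK_SIZE];

    if(aes_encrypt_key256(key, ctx) != EXIT_SUCCESS)
        return EXIT_FAILURE;

    memcpy(cbuf, ctr0, AES_BLOCK_SIZE);  /* aes_ctr_crypt advances cbuf in place */
    return aes_ctr_crypt(in, out, len, cbuf, aes_ctr_cbuf_inc, ctx);
}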


@ -1,307 +0,0 @@
/*
---------------------------------------------------------------------------
Copyright (c) 1998-2013, Brian Gladman, Worcester, UK. All rights reserved.
The redistribution and use of this software (with or without changes)
is allowed without the payment of fees or royalties provided that:
source code distributions include the above copyright notice, this
list of conditions and the following disclaimer;
binary distributions include the above copyright notice, this list
of conditions and the following disclaimer in their documentation.
This software is provided 'as is' with no explicit or implied warranties
in respect of its operation, including, but not limited to, correctness
and fitness for purpose.
---------------------------------------------------------------------------
Issue Date: 20/12/2007
*/
#include "aesopt.h"
#include "aestab.h"
#if defined( USE_INTEL_AES_IF_PRESENT )
# include "aes_ni.h"
#else
/* map names here to provide the external API ('name' -> 'aes_name') */
# define aes_xi(x) aes_ ## x
#endif
#if defined(__cplusplus)
extern "C"
{
#endif
#define si(y,x,k,c) (s(y,c) = word_in(x, c) ^ (k)[c])
#define so(y,x,c) word_out(y, c, s(x,c))
#if defined(ARRAYS)
#define locals(y,x) x[4],y[4]
#else
#define locals(y,x) x##0,x##1,x##2,x##3,y##0,y##1,y##2,y##3
#endif
#define l_copy(y, x) s(y,0) = s(x,0); s(y,1) = s(x,1); \
s(y,2) = s(x,2); s(y,3) = s(x,3);
#define state_in(y,x,k) si(y,x,k,0); si(y,x,k,1); si(y,x,k,2); si(y,x,k,3)
#define state_out(y,x) so(y,x,0); so(y,x,1); so(y,x,2); so(y,x,3)
#define round(rm,y,x,k) rm(y,x,k,0); rm(y,x,k,1); rm(y,x,k,2); rm(y,x,k,3)
#if ( FUNCS_IN_C & ENCRYPTION_IN_C )
/* Visual C++ .Net v7.1 provides the fastest encryption code when using
Pentium optimisation with small code but this is poor for decryption
so we need to control this with the following VC++ pragmas
*/
#if defined( _MSC_VER ) && !defined( _WIN64 ) && !defined( __clang__ )
#pragma optimize( "s", on )
#endif
/* Given the column (c) of the output state variable, the following
macros give the input state variables which are needed in its
computation for each row (r) of the state. All the alternative
macros give the same end values but expand into different ways
of calculating these values. In particular the complex macro
used for dynamically variable block sizes is designed to expand
to a compile time constant whenever possible but will expand to
conditional clauses on some branches (I am grateful to Frank
Yellin for this construction)
*/
#define fwd_var(x,r,c)\
( r == 0 ? ( c == 0 ? s(x,0) : c == 1 ? s(x,1) : c == 2 ? s(x,2) : s(x,3))\
: r == 1 ? ( c == 0 ? s(x,1) : c == 1 ? s(x,2) : c == 2 ? s(x,3) : s(x,0))\
: r == 2 ? ( c == 0 ? s(x,2) : c == 1 ? s(x,3) : c == 2 ? s(x,0) : s(x,1))\
: ( c == 0 ? s(x,3) : c == 1 ? s(x,0) : c == 2 ? s(x,1) : s(x,2)))
#if defined(FT4_SET)
#undef dec_fmvars
#define fwd_rnd(y,x,k,c) (s(y,c) = (k)[c] ^ four_tables(x,t_use(f,n),fwd_var,rf1,c))
#elif defined(FT1_SET)
#undef dec_fmvars
#define fwd_rnd(y,x,k,c) (s(y,c) = (k)[c] ^ one_table(x,upr,t_use(f,n),fwd_var,rf1,c))
#else
#define fwd_rnd(y,x,k,c) (s(y,c) = (k)[c] ^ fwd_mcol(no_table(x,t_use(s,box),fwd_var,rf1,c)))
#endif
#if defined(FL4_SET)
#define fwd_lrnd(y,x,k,c) (s(y,c) = (k)[c] ^ four_tables(x,t_use(f,l),fwd_var,rf1,c))
#elif defined(FL1_SET)
#define fwd_lrnd(y,x,k,c) (s(y,c) = (k)[c] ^ one_table(x,ups,t_use(f,l),fwd_var,rf1,c))
#else
#define fwd_lrnd(y,x,k,c) (s(y,c) = (k)[c] ^ no_table(x,t_use(s,box),fwd_var,rf1,c))
#endif
AES_RETURN aes_xi(encrypt)(const unsigned char *in, unsigned char *out, const aes_encrypt_ctx cx[1])
{ uint32_t locals(b0, b1);
const uint32_t *kp = NULL;
#if defined( dec_fmvars )
dec_fmvars; /* declare variables for fwd_mcol() if needed */
#endif
if(cx->inf.b[0] != 10 * AES_BLOCK_SIZE && cx->inf.b[0] != 12 * AES_BLOCK_SIZE && cx->inf.b[0] != 14 * AES_BLOCK_SIZE)
return EXIT_FAILURE;
kp = cx->ks;
state_in(b0, in, kp);
#if (ENC_UNROLL == FULL)
switch(cx->inf.b[0])
{
case 14 * AES_BLOCK_SIZE:
round(fwd_rnd, b1, b0, kp + 1 * N_COLS);
round(fwd_rnd, b0, b1, kp + 2 * N_COLS);
kp += 2 * N_COLS;
//-fallthrough
case 12 * AES_BLOCK_SIZE:
round(fwd_rnd, b1, b0, kp + 1 * N_COLS);
round(fwd_rnd, b0, b1, kp + 2 * N_COLS);
kp += 2 * N_COLS;
//-fallthrough
case 10 * AES_BLOCK_SIZE:
round(fwd_rnd, b1, b0, kp + 1 * N_COLS);
round(fwd_rnd, b0, b1, kp + 2 * N_COLS);
round(fwd_rnd, b1, b0, kp + 3 * N_COLS);
round(fwd_rnd, b0, b1, kp + 4 * N_COLS);
round(fwd_rnd, b1, b0, kp + 5 * N_COLS);
round(fwd_rnd, b0, b1, kp + 6 * N_COLS);
round(fwd_rnd, b1, b0, kp + 7 * N_COLS);
round(fwd_rnd, b0, b1, kp + 8 * N_COLS);
round(fwd_rnd, b1, b0, kp + 9 * N_COLS);
round(fwd_lrnd, b0, b1, kp +10 * N_COLS);
//-fallthrough
}
#else
#if (ENC_UNROLL == PARTIAL)
{ uint32_t rnd;
for(rnd = 0; rnd < (cx->inf.b[0] >> 5) - 1; ++rnd)
{
kp += N_COLS;
round(fwd_rnd, b1, b0, kp);
kp += N_COLS;
round(fwd_rnd, b0, b1, kp);
}
kp += N_COLS;
round(fwd_rnd, b1, b0, kp);
#else
{ uint32_t rnd;
for(rnd = 0; rnd < (cx->inf.b[0] >> 4) - 1; ++rnd)
{
kp += N_COLS;
round(fwd_rnd, b1, b0, kp);
l_copy(b0, b1);
}
#endif
kp += N_COLS;
round(fwd_lrnd, b0, b1, kp);
}
#endif
state_out(out, b0);
return EXIT_SUCCESS;
}
#endif
#if ( FUNCS_IN_C & DECRYPTION_IN_C)
/* Visual C++ .Net v7.1 provides the fastest encryption code when using
Pentium optimisation with small code but this is poor for decryption
so we need to control this with the following VC++ pragmas
*/
#if defined( _MSC_VER ) && !defined( _WIN64 ) && !defined( __clang__ )
#pragma optimize( "t", on )
#endif
/* Given the column (c) of the output state variable, the following
macros give the input state variables which are needed in its
computation for each row (r) of the state. All the alternative
macros give the same end values but expand into different ways
of calculating these values. In particular the complex macro
used for dynamically variable block sizes is designed to expand
to a compile time constant whenever possible but will expand to
conditional clauses on some branches (I am grateful to Frank
Yellin for this construction)
*/
#define inv_var(x,r,c)\
( r == 0 ? ( c == 0 ? s(x,0) : c == 1 ? s(x,1) : c == 2 ? s(x,2) : s(x,3))\
: r == 1 ? ( c == 0 ? s(x,3) : c == 1 ? s(x,0) : c == 2 ? s(x,1) : s(x,2))\
: r == 2 ? ( c == 0 ? s(x,2) : c == 1 ? s(x,3) : c == 2 ? s(x,0) : s(x,1))\
: ( c == 0 ? s(x,1) : c == 1 ? s(x,2) : c == 2 ? s(x,3) : s(x,0)))
#if defined(IT4_SET)
#undef dec_imvars
#define inv_rnd(y,x,k,c) (s(y,c) = (k)[c] ^ four_tables(x,t_use(i,n),inv_var,rf1,c))
#elif defined(IT1_SET)
#undef dec_imvars
#define inv_rnd(y,x,k,c) (s(y,c) = (k)[c] ^ one_table(x,upr,t_use(i,n),inv_var,rf1,c))
#else
#define inv_rnd(y,x,k,c) (s(y,c) = inv_mcol((k)[c] ^ no_table(x,t_use(i,box),inv_var,rf1,c)))
#endif
#if defined(IL4_SET)
#define inv_lrnd(y,x,k,c) (s(y,c) = (k)[c] ^ four_tables(x,t_use(i,l),inv_var,rf1,c))
#elif defined(IL1_SET)
#define inv_lrnd(y,x,k,c) (s(y,c) = (k)[c] ^ one_table(x,ups,t_use(i,l),inv_var,rf1,c))
#else
#define inv_lrnd(y,x,k,c) (s(y,c) = (k)[c] ^ no_table(x,t_use(i,box),inv_var,rf1,c))
#endif
/* This code can work with the decryption key schedule in the */
/* order that is used for encryption (where the 1st decryption */
/* round key is at the high end of the schedule) or with a key */
/* schedule that has been reversed to put the 1st decryption */
/* round key at the low end of the schedule in memory (when */
/* AES_REV_DKS is defined) */
#ifdef AES_REV_DKS
#define key_ofs 0
#define rnd_key(n) (kp + n * N_COLS)
#else
#define key_ofs 1
#define rnd_key(n) (kp - n * N_COLS)
#endif
AES_RETURN aes_xi(decrypt)(const unsigned char *in, unsigned char *out, const aes_decrypt_ctx cx[1])
{ uint32_t locals(b0, b1);
#if defined( dec_imvars )
dec_imvars; /* declare variables for inv_mcol() if needed */
#endif
const uint32_t *kp = NULL;
if(cx->inf.b[0] != 10 * AES_BLOCK_SIZE && cx->inf.b[0] != 12 * AES_BLOCK_SIZE && cx->inf.b[0] != 14 * AES_BLOCK_SIZE)
return EXIT_FAILURE;
kp = cx->ks + (key_ofs ? (cx->inf.b[0] >> 2) : 0);
state_in(b0, in, kp);
#if (DEC_UNROLL == FULL)
kp = cx->ks + (key_ofs ? 0 : (cx->inf.b[0] >> 2));
switch(cx->inf.b[0])
{
case 14 * AES_BLOCK_SIZE:
round(inv_rnd, b1, b0, rnd_key(-13));
round(inv_rnd, b0, b1, rnd_key(-12));
//-fallthrough
case 12 * AES_BLOCK_SIZE:
round(inv_rnd, b1, b0, rnd_key(-11));
round(inv_rnd, b0, b1, rnd_key(-10));
//-fallthrough
case 10 * AES_BLOCK_SIZE:
round(inv_rnd, b1, b0, rnd_key(-9));
round(inv_rnd, b0, b1, rnd_key(-8));
round(inv_rnd, b1, b0, rnd_key(-7));
round(inv_rnd, b0, b1, rnd_key(-6));
round(inv_rnd, b1, b0, rnd_key(-5));
round(inv_rnd, b0, b1, rnd_key(-4));
round(inv_rnd, b1, b0, rnd_key(-3));
round(inv_rnd, b0, b1, rnd_key(-2));
round(inv_rnd, b1, b0, rnd_key(-1));
round(inv_lrnd, b0, b1, rnd_key( 0));
//-fallthrough
}
#else
#if (DEC_UNROLL == PARTIAL)
{ uint32_t rnd;
for(rnd = 0; rnd < (cx->inf.b[0] >> 5) - 1; ++rnd)
{
kp = rnd_key(1);
round(inv_rnd, b1, b0, kp);
kp = rnd_key(1);
round(inv_rnd, b0, b1, kp);
}
kp = rnd_key(1);
round(inv_rnd, b1, b0, kp);
#else
{ uint32_t rnd;
for(rnd = 0; rnd < (cx->inf.b[0] >> 4) - 1; ++rnd)
{
kp = rnd_key(1);
round(inv_rnd, b1, b0, kp);
l_copy(b0, b1);
}
#endif
kp = rnd_key(1);
round(inv_lrnd, b0, b1, kp);
}
#endif
state_out(out, b0);
return EXIT_SUCCESS;
}
#endif
#if defined(__cplusplus)
}
#endif


@ -1,560 +0,0 @@
/*
---------------------------------------------------------------------------
Copyright (c) 1998-2013, Brian Gladman, Worcester, UK. All rights reserved.
The redistribution and use of this software (with or without changes)
is allowed without the payment of fees or royalties provided that:
source code distributions include the above copyright notice, this
list of conditions and the following disclaimer;
binary distributions include the above copyright notice, this list
of conditions and the following disclaimer in their documentation.
This software is provided 'as is' with no explicit or implied warranties
in respect of its operation, including, but not limited to, correctness
and fitness for purpose.
---------------------------------------------------------------------------
Issue Date: 20/12/2007
*/
#include "aesopt.h"
#include "aestab.h"
#if defined( USE_INTEL_AES_IF_PRESENT )
# include "aes_ni.h"
#else
/* map names here to provide the external API ('name' -> 'aes_name') */
# define aes_xi(x) aes_ ## x
#endif
#ifdef USE_VIA_ACE_IF_PRESENT
# include "aes_via_ace.h"
#endif
#if defined(__cplusplus)
extern "C"
{
#endif
/* Initialise the key schedule from the user supplied key. The key
length can be specified in bytes, with legal values of 16, 24
and 32, or in bits, with legal values of 128, 192 and 256. These
values correspond with Nk values of 4, 6 and 8 respectively.
The following macros implement a single cycle in the key
schedule generation process. The number of cycles needed
for each cx->n_col and nk value is:
nk = 4 5 6 7 8
------------------------------
cx->n_col = 4 10 9 8 7 7
cx->n_col = 5 14 11 10 9 9
cx->n_col = 6 19 15 12 11 11
cx->n_col = 7 21 19 16 13 14
cx->n_col = 8 29 23 19 17 14
*/
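/* e.g. a 256-bit key (nk = 8) with the standard four-column block
   (cx->n_col = 4) needs the 7 cycles shown above: six full ke8 rounds
   plus the final kef8 in aes_encrypt_key256 below */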
#if defined( REDUCE_CODE_SIZE )
# define ls_box ls_sub
uint32_t ls_sub(const uint32_t t, const uint32_t n);
# define inv_mcol im_sub
uint32_t im_sub(const uint32_t x);
# ifdef ENC_KS_UNROLL
# undef ENC_KS_UNROLL
# endif
# ifdef DEC_KS_UNROLL
# undef DEC_KS_UNROLL
# endif
#endif
#if (FUNCS_IN_C & ENC_KEYING_IN_C)
#if defined(AES_128) || defined( AES_VAR )
#define ke4(k,i) \
{ k[4*(i)+4] = ss[0] ^= ls_box(ss[3],3) ^ t_use(r,c)[i]; \
k[4*(i)+5] = ss[1] ^= ss[0]; \
k[4*(i)+6] = ss[2] ^= ss[1]; \
k[4*(i)+7] = ss[3] ^= ss[2]; \
}
AES_RETURN aes_xi(encrypt_key128)(const unsigned char *key, aes_encrypt_ctx cx[1])
{ uint32_t ss[4];
cx->ks[0] = ss[0] = word_in(key, 0);
cx->ks[1] = ss[1] = word_in(key, 1);
cx->ks[2] = ss[2] = word_in(key, 2);
cx->ks[3] = ss[3] = word_in(key, 3);
#ifdef ENC_KS_UNROLL
ke4(cx->ks, 0); ke4(cx->ks, 1);
ke4(cx->ks, 2); ke4(cx->ks, 3);
ke4(cx->ks, 4); ke4(cx->ks, 5);
ke4(cx->ks, 6); ke4(cx->ks, 7);
ke4(cx->ks, 8);
#else
{ uint32_t i;
for(i = 0; i < 9; ++i)
ke4(cx->ks, i);
}
#endif
ke4(cx->ks, 9);
cx->inf.l = 0;
cx->inf.b[0] = 10 * AES_BLOCK_SIZE;
#ifdef USE_VIA_ACE_IF_PRESENT
if(VIA_ACE_AVAILABLE)
cx->inf.b[1] = 0xff;
#endif
return EXIT_SUCCESS;
}
#endif
#if defined(AES_192) || defined( AES_VAR )
#define kef6(k,i) \
{ k[6*(i)+ 6] = ss[0] ^= ls_box(ss[5],3) ^ t_use(r,c)[i]; \
k[6*(i)+ 7] = ss[1] ^= ss[0]; \
k[6*(i)+ 8] = ss[2] ^= ss[1]; \
k[6*(i)+ 9] = ss[3] ^= ss[2]; \
}
#define ke6(k,i) \
{ kef6(k,i); \
k[6*(i)+10] = ss[4] ^= ss[3]; \
k[6*(i)+11] = ss[5] ^= ss[4]; \
}
AES_RETURN aes_xi(encrypt_key192)(const unsigned char *key, aes_encrypt_ctx cx[1])
{ uint32_t ss[6];
cx->ks[0] = ss[0] = word_in(key, 0);
cx->ks[1] = ss[1] = word_in(key, 1);
cx->ks[2] = ss[2] = word_in(key, 2);
cx->ks[3] = ss[3] = word_in(key, 3);
cx->ks[4] = ss[4] = word_in(key, 4);
cx->ks[5] = ss[5] = word_in(key, 5);
#ifdef ENC_KS_UNROLL
ke6(cx->ks, 0); ke6(cx->ks, 1);
ke6(cx->ks, 2); ke6(cx->ks, 3);
ke6(cx->ks, 4); ke6(cx->ks, 5);
ke6(cx->ks, 6);
#else
{ uint32_t i;
for(i = 0; i < 7; ++i)
ke6(cx->ks, i);
}
#endif
kef6(cx->ks, 7);
cx->inf.l = 0;
cx->inf.b[0] = 12 * AES_BLOCK_SIZE;
#ifdef USE_VIA_ACE_IF_PRESENT
if(VIA_ACE_AVAILABLE)
cx->inf.b[1] = 0xff;
#endif
return EXIT_SUCCESS;
}
#endif
#if defined(AES_256) || defined( AES_VAR )
#define kef8(k,i) \
{ k[8*(i)+ 8] = ss[0] ^= ls_box(ss[7],3) ^ t_use(r,c)[i]; \
k[8*(i)+ 9] = ss[1] ^= ss[0]; \
k[8*(i)+10] = ss[2] ^= ss[1]; \
k[8*(i)+11] = ss[3] ^= ss[2]; \
}
#define ke8(k,i) \
{ kef8(k,i); \
k[8*(i)+12] = ss[4] ^= ls_box(ss[3],0); \
k[8*(i)+13] = ss[5] ^= ss[4]; \
k[8*(i)+14] = ss[6] ^= ss[5]; \
k[8*(i)+15] = ss[7] ^= ss[6]; \
}
AES_RETURN aes_xi(encrypt_key256)(const unsigned char *key, aes_encrypt_ctx cx[1])
{ uint32_t ss[8];
cx->ks[0] = ss[0] = word_in(key, 0);
cx->ks[1] = ss[1] = word_in(key, 1);
cx->ks[2] = ss[2] = word_in(key, 2);
cx->ks[3] = ss[3] = word_in(key, 3);
cx->ks[4] = ss[4] = word_in(key, 4);
cx->ks[5] = ss[5] = word_in(key, 5);
cx->ks[6] = ss[6] = word_in(key, 6);
cx->ks[7] = ss[7] = word_in(key, 7);
#ifdef ENC_KS_UNROLL
ke8(cx->ks, 0); ke8(cx->ks, 1);
ke8(cx->ks, 2); ke8(cx->ks, 3);
ke8(cx->ks, 4); ke8(cx->ks, 5);
#else
{ uint32_t i;
for(i = 0; i < 6; ++i)
ke8(cx->ks, i);
}
#endif
kef8(cx->ks, 6);
cx->inf.l = 0;
cx->inf.b[0] = 14 * AES_BLOCK_SIZE;
#ifdef USE_VIA_ACE_IF_PRESENT
if(VIA_ACE_AVAILABLE)
cx->inf.b[1] = 0xff;
#endif
return EXIT_SUCCESS;
}
#endif
#endif
#if (FUNCS_IN_C & DEC_KEYING_IN_C)
/* this is used to store the decryption round keys */
/* in forward or reverse order */
#ifdef AES_REV_DKS
#define v(n,i) ((n) - (i) + 2 * ((i) & 3))
#else
#define v(n,i) (i)
#endif
#if DEC_ROUND == NO_TABLES
#define ff(x) (x)
#else
#define ff(x) inv_mcol(x)
#if defined( dec_imvars )
#define d_vars dec_imvars
#endif
#endif
#if defined(AES_128) || defined( AES_VAR )
#define k4e(k,i) \
{ k[v(40,(4*(i))+4)] = ss[0] ^= ls_box(ss[3],3) ^ t_use(r,c)[i]; \
k[v(40,(4*(i))+5)] = ss[1] ^= ss[0]; \
k[v(40,(4*(i))+6)] = ss[2] ^= ss[1]; \
k[v(40,(4*(i))+7)] = ss[3] ^= ss[2]; \
}
#if 1
#define kdf4(k,i) \
{ ss[0] = ss[0] ^ ss[2] ^ ss[1] ^ ss[3]; \
ss[1] = ss[1] ^ ss[3]; \
ss[2] = ss[2] ^ ss[3]; \
ss[4] = ls_box(ss[(i+3) % 4], 3) ^ t_use(r,c)[i]; \
ss[i % 4] ^= ss[4]; \
ss[4] ^= k[v(40,(4*(i)))]; k[v(40,(4*(i))+4)] = ff(ss[4]); \
ss[4] ^= k[v(40,(4*(i))+1)]; k[v(40,(4*(i))+5)] = ff(ss[4]); \
ss[4] ^= k[v(40,(4*(i))+2)]; k[v(40,(4*(i))+6)] = ff(ss[4]); \
ss[4] ^= k[v(40,(4*(i))+3)]; k[v(40,(4*(i))+7)] = ff(ss[4]); \
}
#define kd4(k,i) \
{ ss[4] = ls_box(ss[(i+3) % 4], 3) ^ t_use(r,c)[i]; \
ss[i % 4] ^= ss[4]; ss[4] = ff(ss[4]); \
k[v(40,(4*(i))+4)] = ss[4] ^= k[v(40,(4*(i)))]; \
k[v(40,(4*(i))+5)] = ss[4] ^= k[v(40,(4*(i))+1)]; \
k[v(40,(4*(i))+6)] = ss[4] ^= k[v(40,(4*(i))+2)]; \
k[v(40,(4*(i))+7)] = ss[4] ^= k[v(40,(4*(i))+3)]; \
}
#define kdl4(k,i) \
{ ss[4] = ls_box(ss[(i+3) % 4], 3) ^ t_use(r,c)[i]; ss[i % 4] ^= ss[4]; \
k[v(40,(4*(i))+4)] = (ss[0] ^= ss[1]) ^ ss[2] ^ ss[3]; \
k[v(40,(4*(i))+5)] = ss[1] ^ ss[3]; \
k[v(40,(4*(i))+6)] = ss[0]; \
k[v(40,(4*(i))+7)] = ss[1]; \
}
#else
#define kdf4(k,i) \
{ ss[0] ^= ls_box(ss[3],3) ^ t_use(r,c)[i]; k[v(40,(4*(i))+ 4)] = ff(ss[0]); \
ss[1] ^= ss[0]; k[v(40,(4*(i))+ 5)] = ff(ss[1]); \
ss[2] ^= ss[1]; k[v(40,(4*(i))+ 6)] = ff(ss[2]); \
ss[3] ^= ss[2]; k[v(40,(4*(i))+ 7)] = ff(ss[3]); \
}
#define kd4(k,i) \
{ ss[4] = ls_box(ss[3],3) ^ t_use(r,c)[i]; \
ss[0] ^= ss[4]; ss[4] = ff(ss[4]); k[v(40,(4*(i))+ 4)] = ss[4] ^= k[v(40,(4*(i)))]; \
ss[1] ^= ss[0]; k[v(40,(4*(i))+ 5)] = ss[4] ^= k[v(40,(4*(i))+ 1)]; \
ss[2] ^= ss[1]; k[v(40,(4*(i))+ 6)] = ss[4] ^= k[v(40,(4*(i))+ 2)]; \
ss[3] ^= ss[2]; k[v(40,(4*(i))+ 7)] = ss[4] ^= k[v(40,(4*(i))+ 3)]; \
}
#define kdl4(k,i) \
{ ss[0] ^= ls_box(ss[3],3) ^ t_use(r,c)[i]; k[v(40,(4*(i))+ 4)] = ss[0]; \
ss[1] ^= ss[0]; k[v(40,(4*(i))+ 5)] = ss[1]; \
ss[2] ^= ss[1]; k[v(40,(4*(i))+ 6)] = ss[2]; \
ss[3] ^= ss[2]; k[v(40,(4*(i))+ 7)] = ss[3]; \
}
#endif
AES_RETURN aes_xi(decrypt_key128)(const unsigned char *key, aes_decrypt_ctx cx[1])
{ uint32_t ss[5];
#if defined( d_vars )
d_vars;
#endif
cx->ks[v(40,(0))] = ss[0] = word_in(key, 0);
cx->ks[v(40,(1))] = ss[1] = word_in(key, 1);
cx->ks[v(40,(2))] = ss[2] = word_in(key, 2);
cx->ks[v(40,(3))] = ss[3] = word_in(key, 3);
#ifdef DEC_KS_UNROLL
kdf4(cx->ks, 0); kd4(cx->ks, 1);
kd4(cx->ks, 2); kd4(cx->ks, 3);
kd4(cx->ks, 4); kd4(cx->ks, 5);
kd4(cx->ks, 6); kd4(cx->ks, 7);
kd4(cx->ks, 8); kdl4(cx->ks, 9);
#else
{ uint32_t i;
for(i = 0; i < 10; ++i)
k4e(cx->ks, i);
#if !(DEC_ROUND == NO_TABLES)
for(i = N_COLS; i < 10 * N_COLS; ++i)
cx->ks[i] = inv_mcol(cx->ks[i]);
#endif
}
#endif
cx->inf.l = 0;
cx->inf.b[0] = 10 * AES_BLOCK_SIZE;
#ifdef USE_VIA_ACE_IF_PRESENT
if(VIA_ACE_AVAILABLE)
cx->inf.b[1] = 0xff;
#endif
return EXIT_SUCCESS;
}
#endif
#if defined(AES_192) || defined( AES_VAR )
#define k6ef(k,i) \
{ k[v(48,(6*(i))+ 6)] = ss[0] ^= ls_box(ss[5],3) ^ t_use(r,c)[i]; \
k[v(48,(6*(i))+ 7)] = ss[1] ^= ss[0]; \
k[v(48,(6*(i))+ 8)] = ss[2] ^= ss[1]; \
k[v(48,(6*(i))+ 9)] = ss[3] ^= ss[2]; \
}
#define k6e(k,i) \
{ k6ef(k,i); \
k[v(48,(6*(i))+10)] = ss[4] ^= ss[3]; \
k[v(48,(6*(i))+11)] = ss[5] ^= ss[4]; \
}
#define kdf6(k,i) \
{ ss[0] ^= ls_box(ss[5],3) ^ t_use(r,c)[i]; k[v(48,(6*(i))+ 6)] = ff(ss[0]); \
ss[1] ^= ss[0]; k[v(48,(6*(i))+ 7)] = ff(ss[1]); \
ss[2] ^= ss[1]; k[v(48,(6*(i))+ 8)] = ff(ss[2]); \
ss[3] ^= ss[2]; k[v(48,(6*(i))+ 9)] = ff(ss[3]); \
ss[4] ^= ss[3]; k[v(48,(6*(i))+10)] = ff(ss[4]); \
ss[5] ^= ss[4]; k[v(48,(6*(i))+11)] = ff(ss[5]); \
}
#define kd6(k,i) \
{ ss[6] = ls_box(ss[5],3) ^ t_use(r,c)[i]; \
ss[0] ^= ss[6]; ss[6] = ff(ss[6]); k[v(48,(6*(i))+ 6)] = ss[6] ^= k[v(48,(6*(i)))]; \
ss[1] ^= ss[0]; k[v(48,(6*(i))+ 7)] = ss[6] ^= k[v(48,(6*(i))+ 1)]; \
ss[2] ^= ss[1]; k[v(48,(6*(i))+ 8)] = ss[6] ^= k[v(48,(6*(i))+ 2)]; \
ss[3] ^= ss[2]; k[v(48,(6*(i))+ 9)] = ss[6] ^= k[v(48,(6*(i))+ 3)]; \
ss[4] ^= ss[3]; k[v(48,(6*(i))+10)] = ss[6] ^= k[v(48,(6*(i))+ 4)]; \
ss[5] ^= ss[4]; k[v(48,(6*(i))+11)] = ss[6] ^= k[v(48,(6*(i))+ 5)]; \
}
#define kdl6(k,i) \
{ ss[0] ^= ls_box(ss[5],3) ^ t_use(r,c)[i]; k[v(48,(6*(i))+ 6)] = ss[0]; \
ss[1] ^= ss[0]; k[v(48,(6*(i))+ 7)] = ss[1]; \
ss[2] ^= ss[1]; k[v(48,(6*(i))+ 8)] = ss[2]; \
ss[3] ^= ss[2]; k[v(48,(6*(i))+ 9)] = ss[3]; \
}
AES_RETURN aes_xi(decrypt_key192)(const unsigned char *key, aes_decrypt_ctx cx[1])
{ uint32_t ss[7];
#if defined( d_vars )
d_vars;
#endif
cx->ks[v(48,(0))] = ss[0] = word_in(key, 0);
cx->ks[v(48,(1))] = ss[1] = word_in(key, 1);
cx->ks[v(48,(2))] = ss[2] = word_in(key, 2);
cx->ks[v(48,(3))] = ss[3] = word_in(key, 3);
#ifdef DEC_KS_UNROLL
ss[4] = word_in(key, 4);
ss[5] = word_in(key, 5);
cx->ks[v(48,(4))] = ff(ss[4]);
cx->ks[v(48,(5))] = ff(ss[5]);
kdf6(cx->ks, 0); kd6(cx->ks, 1);
kd6(cx->ks, 2); kd6(cx->ks, 3);
kd6(cx->ks, 4); kd6(cx->ks, 5);
kd6(cx->ks, 6); kdl6(cx->ks, 7);
#else
cx->ks[v(48,(4))] = ss[4] = word_in(key, 4);
cx->ks[v(48,(5))] = ss[5] = word_in(key, 5);
{ uint32_t i;
for(i = 0; i < 7; ++i)
k6e(cx->ks, i);
k6ef(cx->ks, 7);
#if !(DEC_ROUND == NO_TABLES)
for(i = N_COLS; i < 12 * N_COLS; ++i)
cx->ks[i] = inv_mcol(cx->ks[i]);
#endif
}
#endif
cx->inf.l = 0;
cx->inf.b[0] = 12 * AES_BLOCK_SIZE;
#ifdef USE_VIA_ACE_IF_PRESENT
if(VIA_ACE_AVAILABLE)
cx->inf.b[1] = 0xff;
#endif
return EXIT_SUCCESS;
}
#endif
#if defined(AES_256) || defined( AES_VAR )
#define k8ef(k,i) \
{ k[v(56,(8*(i))+ 8)] = ss[0] ^= ls_box(ss[7],3) ^ t_use(r,c)[i]; \
k[v(56,(8*(i))+ 9)] = ss[1] ^= ss[0]; \
k[v(56,(8*(i))+10)] = ss[2] ^= ss[1]; \
k[v(56,(8*(i))+11)] = ss[3] ^= ss[2]; \
}
#define k8e(k,i) \
{ k8ef(k,i); \
k[v(56,(8*(i))+12)] = ss[4] ^= ls_box(ss[3],0); \
k[v(56,(8*(i))+13)] = ss[5] ^= ss[4]; \
k[v(56,(8*(i))+14)] = ss[6] ^= ss[5]; \
k[v(56,(8*(i))+15)] = ss[7] ^= ss[6]; \
}
#define kdf8(k,i) \
{ ss[0] ^= ls_box(ss[7],3) ^ t_use(r,c)[i]; k[v(56,(8*(i))+ 8)] = ff(ss[0]); \
ss[1] ^= ss[0]; k[v(56,(8*(i))+ 9)] = ff(ss[1]); \
ss[2] ^= ss[1]; k[v(56,(8*(i))+10)] = ff(ss[2]); \
ss[3] ^= ss[2]; k[v(56,(8*(i))+11)] = ff(ss[3]); \
ss[4] ^= ls_box(ss[3],0); k[v(56,(8*(i))+12)] = ff(ss[4]); \
ss[5] ^= ss[4]; k[v(56,(8*(i))+13)] = ff(ss[5]); \
ss[6] ^= ss[5]; k[v(56,(8*(i))+14)] = ff(ss[6]); \
ss[7] ^= ss[6]; k[v(56,(8*(i))+15)] = ff(ss[7]); \
}
#define kd8(k,i) \
{ ss[8] = ls_box(ss[7],3) ^ t_use(r,c)[i]; \
ss[0] ^= ss[8]; ss[8] = ff(ss[8]); k[v(56,(8*(i))+ 8)] = ss[8] ^= k[v(56,(8*(i)))]; \
ss[1] ^= ss[0]; k[v(56,(8*(i))+ 9)] = ss[8] ^= k[v(56,(8*(i))+ 1)]; \
ss[2] ^= ss[1]; k[v(56,(8*(i))+10)] = ss[8] ^= k[v(56,(8*(i))+ 2)]; \
ss[3] ^= ss[2]; k[v(56,(8*(i))+11)] = ss[8] ^= k[v(56,(8*(i))+ 3)]; \
ss[8] = ls_box(ss[3],0); \
ss[4] ^= ss[8]; ss[8] = ff(ss[8]); k[v(56,(8*(i))+12)] = ss[8] ^= k[v(56,(8*(i))+ 4)]; \
ss[5] ^= ss[4]; k[v(56,(8*(i))+13)] = ss[8] ^= k[v(56,(8*(i))+ 5)]; \
ss[6] ^= ss[5]; k[v(56,(8*(i))+14)] = ss[8] ^= k[v(56,(8*(i))+ 6)]; \
ss[7] ^= ss[6]; k[v(56,(8*(i))+15)] = ss[8] ^= k[v(56,(8*(i))+ 7)]; \
}
#define kdl8(k,i) \
{ ss[0] ^= ls_box(ss[7],3) ^ t_use(r,c)[i]; k[v(56,(8*(i))+ 8)] = ss[0]; \
ss[1] ^= ss[0]; k[v(56,(8*(i))+ 9)] = ss[1]; \
ss[2] ^= ss[1]; k[v(56,(8*(i))+10)] = ss[2]; \
ss[3] ^= ss[2]; k[v(56,(8*(i))+11)] = ss[3]; \
}
AES_RETURN aes_xi(decrypt_key256)(const unsigned char *key, aes_decrypt_ctx cx[1])
{ uint32_t ss[9];
#if defined( d_vars )
d_vars;
#endif
cx->ks[v(56,(0))] = ss[0] = word_in(key, 0);
cx->ks[v(56,(1))] = ss[1] = word_in(key, 1);
cx->ks[v(56,(2))] = ss[2] = word_in(key, 2);
cx->ks[v(56,(3))] = ss[3] = word_in(key, 3);
#ifdef DEC_KS_UNROLL
ss[4] = word_in(key, 4);
ss[5] = word_in(key, 5);
ss[6] = word_in(key, 6);
ss[7] = word_in(key, 7);
cx->ks[v(56,(4))] = ff(ss[4]);
cx->ks[v(56,(5))] = ff(ss[5]);
cx->ks[v(56,(6))] = ff(ss[6]);
cx->ks[v(56,(7))] = ff(ss[7]);
kdf8(cx->ks, 0); kd8(cx->ks, 1);
kd8(cx->ks, 2); kd8(cx->ks, 3);
kd8(cx->ks, 4); kd8(cx->ks, 5);
kdl8(cx->ks, 6);
#else
cx->ks[v(56,(4))] = ss[4] = word_in(key, 4);
cx->ks[v(56,(5))] = ss[5] = word_in(key, 5);
cx->ks[v(56,(6))] = ss[6] = word_in(key, 6);
cx->ks[v(56,(7))] = ss[7] = word_in(key, 7);
{ uint32_t i;
for(i = 0; i < 6; ++i)
k8e(cx->ks, i);
k8ef(cx->ks, 6);
#if !(DEC_ROUND == NO_TABLES)
for(i = N_COLS; i < 14 * N_COLS; ++i)
cx->ks[i] = inv_mcol(cx->ks[i]);
#endif
}
#endif
cx->inf.l = 0;
cx->inf.b[0] = 14 * AES_BLOCK_SIZE;
#ifdef USE_VIA_ACE_IF_PRESENT
if(VIA_ACE_AVAILABLE)
cx->inf.b[1] = 0xff;
#endif
return EXIT_SUCCESS;
}
#endif
#endif
#if defined( AES_VAR )
AES_RETURN aes_encrypt_key(const unsigned char *key, int key_len, aes_encrypt_ctx cx[1])
{
switch(key_len)
{
case 16: case 128: return aes_encrypt_key128(key, cx);
case 24: case 192: return aes_encrypt_key192(key, cx);
case 32: case 256: return aes_encrypt_key256(key, cx);
default: return EXIT_FAILURE;
}
}
AES_RETURN aes_decrypt_key(const unsigned char *key, int key_len, aes_decrypt_ctx cx[1])
{
switch(key_len)
{
case 16: case 128: return aes_decrypt_key128(key, cx);
case 24: case 192: return aes_decrypt_key192(key, cx);
case 32: case 256: return aes_decrypt_key256(key, cx);
default: return EXIT_FAILURE;
}
}
#endif
#if defined(__cplusplus)
}
#endif
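
/* A usage sketch tying the key schedule to the raw block cipher in
   aescrypt.c: expand a 256-bit key, then encrypt a single 16-byte block.
   Illustrative only, following the EXIT_SUCCESS/EXIT_FAILURE convention. */
#include "aes.h"

int encrypt_block_example(const unsigned char key[32],
                          const unsigned char in[AES_BLOCK_SIZE],
                          unsigned char out[AES_BLOCK_SIZE])
{
    aes_encrypt_ctx ctx[1];

    if(aes_encrypt_key256(key, ctx) != EXIT_SUCCESS)
        return EXIT_FAILURE;

    return aes_encrypt(in, out, ctx);   /* one raw block; the modes build on this */
}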


@ -1,418 +0,0 @@
/*
---------------------------------------------------------------------------
Copyright (c) 1998-2013, Brian Gladman, Worcester, UK. All rights reserved.
The redistribution and use of this software (with or without changes)
is allowed without the payment of fees or royalties provided that:
source code distributions include the above copyright notice, this
list of conditions and the following disclaimer;
binary distributions include the above copyright notice, this list
of conditions and the following disclaimer in their documentation.
This software is provided 'as is' with no explicit or implied warranties
in respect of its operation, including, but not limited to, correctness
and fitness for purpose.
---------------------------------------------------------------------------
Issue Date: 20/12/2007
*/
#define DO_TABLES
#include "aes.h"
#include "aesopt.h"
#if defined(STATIC_TABLES)
#define sb_data(w) {\
w(0x63), w(0x7c), w(0x77), w(0x7b), w(0xf2), w(0x6b), w(0x6f), w(0xc5),\
w(0x30), w(0x01), w(0x67), w(0x2b), w(0xfe), w(0xd7), w(0xab), w(0x76),\
w(0xca), w(0x82), w(0xc9), w(0x7d), w(0xfa), w(0x59), w(0x47), w(0xf0),\
w(0xad), w(0xd4), w(0xa2), w(0xaf), w(0x9c), w(0xa4), w(0x72), w(0xc0),\
w(0xb7), w(0xfd), w(0x93), w(0x26), w(0x36), w(0x3f), w(0xf7), w(0xcc),\
w(0x34), w(0xa5), w(0xe5), w(0xf1), w(0x71), w(0xd8), w(0x31), w(0x15),\
w(0x04), w(0xc7), w(0x23), w(0xc3), w(0x18), w(0x96), w(0x05), w(0x9a),\
w(0x07), w(0x12), w(0x80), w(0xe2), w(0xeb), w(0x27), w(0xb2), w(0x75),\
w(0x09), w(0x83), w(0x2c), w(0x1a), w(0x1b), w(0x6e), w(0x5a), w(0xa0),\
w(0x52), w(0x3b), w(0xd6), w(0xb3), w(0x29), w(0xe3), w(0x2f), w(0x84),\
w(0x53), w(0xd1), w(0x00), w(0xed), w(0x20), w(0xfc), w(0xb1), w(0x5b),\
w(0x6a), w(0xcb), w(0xbe), w(0x39), w(0x4a), w(0x4c), w(0x58), w(0xcf),\
w(0xd0), w(0xef), w(0xaa), w(0xfb), w(0x43), w(0x4d), w(0x33), w(0x85),\
w(0x45), w(0xf9), w(0x02), w(0x7f), w(0x50), w(0x3c), w(0x9f), w(0xa8),\
w(0x51), w(0xa3), w(0x40), w(0x8f), w(0x92), w(0x9d), w(0x38), w(0xf5),\
w(0xbc), w(0xb6), w(0xda), w(0x21), w(0x10), w(0xff), w(0xf3), w(0xd2),\
w(0xcd), w(0x0c), w(0x13), w(0xec), w(0x5f), w(0x97), w(0x44), w(0x17),\
w(0xc4), w(0xa7), w(0x7e), w(0x3d), w(0x64), w(0x5d), w(0x19), w(0x73),\
w(0x60), w(0x81), w(0x4f), w(0xdc), w(0x22), w(0x2a), w(0x90), w(0x88),\
w(0x46), w(0xee), w(0xb8), w(0x14), w(0xde), w(0x5e), w(0x0b), w(0xdb),\
w(0xe0), w(0x32), w(0x3a), w(0x0a), w(0x49), w(0x06), w(0x24), w(0x5c),\
w(0xc2), w(0xd3), w(0xac), w(0x62), w(0x91), w(0x95), w(0xe4), w(0x79),\
w(0xe7), w(0xc8), w(0x37), w(0x6d), w(0x8d), w(0xd5), w(0x4e), w(0xa9),\
w(0x6c), w(0x56), w(0xf4), w(0xea), w(0x65), w(0x7a), w(0xae), w(0x08),\
w(0xba), w(0x78), w(0x25), w(0x2e), w(0x1c), w(0xa6), w(0xb4), w(0xc6),\
w(0xe8), w(0xdd), w(0x74), w(0x1f), w(0x4b), w(0xbd), w(0x8b), w(0x8a),\
w(0x70), w(0x3e), w(0xb5), w(0x66), w(0x48), w(0x03), w(0xf6), w(0x0e),\
w(0x61), w(0x35), w(0x57), w(0xb9), w(0x86), w(0xc1), w(0x1d), w(0x9e),\
w(0xe1), w(0xf8), w(0x98), w(0x11), w(0x69), w(0xd9), w(0x8e), w(0x94),\
w(0x9b), w(0x1e), w(0x87), w(0xe9), w(0xce), w(0x55), w(0x28), w(0xdf),\
w(0x8c), w(0xa1), w(0x89), w(0x0d), w(0xbf), w(0xe6), w(0x42), w(0x68),\
w(0x41), w(0x99), w(0x2d), w(0x0f), w(0xb0), w(0x54), w(0xbb), w(0x16) }
#define isb_data(w) {\
w(0x52), w(0x09), w(0x6a), w(0xd5), w(0x30), w(0x36), w(0xa5), w(0x38),\
w(0xbf), w(0x40), w(0xa3), w(0x9e), w(0x81), w(0xf3), w(0xd7), w(0xfb),\
w(0x7c), w(0xe3), w(0x39), w(0x82), w(0x9b), w(0x2f), w(0xff), w(0x87),\
w(0x34), w(0x8e), w(0x43), w(0x44), w(0xc4), w(0xde), w(0xe9), w(0xcb),\
w(0x54), w(0x7b), w(0x94), w(0x32), w(0xa6), w(0xc2), w(0x23), w(0x3d),\
w(0xee), w(0x4c), w(0x95), w(0x0b), w(0x42), w(0xfa), w(0xc3), w(0x4e),\
w(0x08), w(0x2e), w(0xa1), w(0x66), w(0x28), w(0xd9), w(0x24), w(0xb2),\
w(0x76), w(0x5b), w(0xa2), w(0x49), w(0x6d), w(0x8b), w(0xd1), w(0x25),\
w(0x72), w(0xf8), w(0xf6), w(0x64), w(0x86), w(0x68), w(0x98), w(0x16),\
w(0xd4), w(0xa4), w(0x5c), w(0xcc), w(0x5d), w(0x65), w(0xb6), w(0x92),\
w(0x6c), w(0x70), w(0x48), w(0x50), w(0xfd), w(0xed), w(0xb9), w(0xda),\
w(0x5e), w(0x15), w(0x46), w(0x57), w(0xa7), w(0x8d), w(0x9d), w(0x84),\
w(0x90), w(0xd8), w(0xab), w(0x00), w(0x8c), w(0xbc), w(0xd3), w(0x0a),\
w(0xf7), w(0xe4), w(0x58), w(0x05), w(0xb8), w(0xb3), w(0x45), w(0x06),\
w(0xd0), w(0x2c), w(0x1e), w(0x8f), w(0xca), w(0x3f), w(0x0f), w(0x02),\
w(0xc1), w(0xaf), w(0xbd), w(0x03), w(0x01), w(0x13), w(0x8a), w(0x6b),\
w(0x3a), w(0x91), w(0x11), w(0x41), w(0x4f), w(0x67), w(0xdc), w(0xea),\
w(0x97), w(0xf2), w(0xcf), w(0xce), w(0xf0), w(0xb4), w(0xe6), w(0x73),\
w(0x96), w(0xac), w(0x74), w(0x22), w(0xe7), w(0xad), w(0x35), w(0x85),\
w(0xe2), w(0xf9), w(0x37), w(0xe8), w(0x1c), w(0x75), w(0xdf), w(0x6e),\
w(0x47), w(0xf1), w(0x1a), w(0x71), w(0x1d), w(0x29), w(0xc5), w(0x89),\
w(0x6f), w(0xb7), w(0x62), w(0x0e), w(0xaa), w(0x18), w(0xbe), w(0x1b),\
w(0xfc), w(0x56), w(0x3e), w(0x4b), w(0xc6), w(0xd2), w(0x79), w(0x20),\
w(0x9a), w(0xdb), w(0xc0), w(0xfe), w(0x78), w(0xcd), w(0x5a), w(0xf4),\
w(0x1f), w(0xdd), w(0xa8), w(0x33), w(0x88), w(0x07), w(0xc7), w(0x31),\
w(0xb1), w(0x12), w(0x10), w(0x59), w(0x27), w(0x80), w(0xec), w(0x5f),\
w(0x60), w(0x51), w(0x7f), w(0xa9), w(0x19), w(0xb5), w(0x4a), w(0x0d),\
w(0x2d), w(0xe5), w(0x7a), w(0x9f), w(0x93), w(0xc9), w(0x9c), w(0xef),\
w(0xa0), w(0xe0), w(0x3b), w(0x4d), w(0xae), w(0x2a), w(0xf5), w(0xb0),\
w(0xc8), w(0xeb), w(0xbb), w(0x3c), w(0x83), w(0x53), w(0x99), w(0x61),\
w(0x17), w(0x2b), w(0x04), w(0x7e), w(0xba), w(0x77), w(0xd6), w(0x26),\
w(0xe1), w(0x69), w(0x14), w(0x63), w(0x55), w(0x21), w(0x0c), w(0x7d) }
#define mm_data(w) {\
w(0x00), w(0x01), w(0x02), w(0x03), w(0x04), w(0x05), w(0x06), w(0x07),\
w(0x08), w(0x09), w(0x0a), w(0x0b), w(0x0c), w(0x0d), w(0x0e), w(0x0f),\
w(0x10), w(0x11), w(0x12), w(0x13), w(0x14), w(0x15), w(0x16), w(0x17),\
w(0x18), w(0x19), w(0x1a), w(0x1b), w(0x1c), w(0x1d), w(0x1e), w(0x1f),\
w(0x20), w(0x21), w(0x22), w(0x23), w(0x24), w(0x25), w(0x26), w(0x27),\
w(0x28), w(0x29), w(0x2a), w(0x2b), w(0x2c), w(0x2d), w(0x2e), w(0x2f),\
w(0x30), w(0x31), w(0x32), w(0x33), w(0x34), w(0x35), w(0x36), w(0x37),\
w(0x38), w(0x39), w(0x3a), w(0x3b), w(0x3c), w(0x3d), w(0x3e), w(0x3f),\
w(0x40), w(0x41), w(0x42), w(0x43), w(0x44), w(0x45), w(0x46), w(0x47),\
w(0x48), w(0x49), w(0x4a), w(0x4b), w(0x4c), w(0x4d), w(0x4e), w(0x4f),\
w(0x50), w(0x51), w(0x52), w(0x53), w(0x54), w(0x55), w(0x56), w(0x57),\
w(0x58), w(0x59), w(0x5a), w(0x5b), w(0x5c), w(0x5d), w(0x5e), w(0x5f),\
w(0x60), w(0x61), w(0x62), w(0x63), w(0x64), w(0x65), w(0x66), w(0x67),\
w(0x68), w(0x69), w(0x6a), w(0x6b), w(0x6c), w(0x6d), w(0x6e), w(0x6f),\
w(0x70), w(0x71), w(0x72), w(0x73), w(0x74), w(0x75), w(0x76), w(0x77),\
w(0x78), w(0x79), w(0x7a), w(0x7b), w(0x7c), w(0x7d), w(0x7e), w(0x7f),\
w(0x80), w(0x81), w(0x82), w(0x83), w(0x84), w(0x85), w(0x86), w(0x87),\
w(0x88), w(0x89), w(0x8a), w(0x8b), w(0x8c), w(0x8d), w(0x8e), w(0x8f),\
w(0x90), w(0x91), w(0x92), w(0x93), w(0x94), w(0x95), w(0x96), w(0x97),\
w(0x98), w(0x99), w(0x9a), w(0x9b), w(0x9c), w(0x9d), w(0x9e), w(0x9f),\
w(0xa0), w(0xa1), w(0xa2), w(0xa3), w(0xa4), w(0xa5), w(0xa6), w(0xa7),\
w(0xa8), w(0xa9), w(0xaa), w(0xab), w(0xac), w(0xad), w(0xae), w(0xaf),\
w(0xb0), w(0xb1), w(0xb2), w(0xb3), w(0xb4), w(0xb5), w(0xb6), w(0xb7),\
w(0xb8), w(0xb9), w(0xba), w(0xbb), w(0xbc), w(0xbd), w(0xbe), w(0xbf),\
w(0xc0), w(0xc1), w(0xc2), w(0xc3), w(0xc4), w(0xc5), w(0xc6), w(0xc7),\
w(0xc8), w(0xc9), w(0xca), w(0xcb), w(0xcc), w(0xcd), w(0xce), w(0xcf),\
w(0xd0), w(0xd1), w(0xd2), w(0xd3), w(0xd4), w(0xd5), w(0xd6), w(0xd7),\
w(0xd8), w(0xd9), w(0xda), w(0xdb), w(0xdc), w(0xdd), w(0xde), w(0xdf),\
w(0xe0), w(0xe1), w(0xe2), w(0xe3), w(0xe4), w(0xe5), w(0xe6), w(0xe7),\
w(0xe8), w(0xe9), w(0xea), w(0xeb), w(0xec), w(0xed), w(0xee), w(0xef),\
w(0xf0), w(0xf1), w(0xf2), w(0xf3), w(0xf4), w(0xf5), w(0xf6), w(0xf7),\
w(0xf8), w(0xf9), w(0xfa), w(0xfb), w(0xfc), w(0xfd), w(0xfe), w(0xff) }
#define rc_data(w) {\
w(0x01), w(0x02), w(0x04), w(0x08), w(0x10), w(0x20), w(0x40), w(0x80),\
w(0x1b), w(0x36) }
#define h0(x) (x)
#define w0(p) bytes2word(p, 0, 0, 0)
#define w1(p) bytes2word(0, p, 0, 0)
#define w2(p) bytes2word(0, 0, p, 0)
#define w3(p) bytes2word(0, 0, 0, p)
#define u0(p) bytes2word(f2(p), p, p, f3(p))
#define u1(p) bytes2word(f3(p), f2(p), p, p)
#define u2(p) bytes2word(p, f3(p), f2(p), p)
#define u3(p) bytes2word(p, p, f3(p), f2(p))
#define v0(p) bytes2word(fe(p), f9(p), fd(p), fb(p))
#define v1(p) bytes2word(fb(p), fe(p), f9(p), fd(p))
#define v2(p) bytes2word(fd(p), fb(p), fe(p), f9(p))
#define v3(p) bytes2word(f9(p), fd(p), fb(p), fe(p))
#endif
#if defined(STATIC_TABLES) || !defined(FF_TABLES)
#define f2(x) ((x<<1) ^ (((x>>7) & 1) * WPOLY))
#define f4(x) ((x<<2) ^ (((x>>6) & 1) * WPOLY) ^ (((x>>6) & 2) * WPOLY))
#define f8(x) ((x<<3) ^ (((x>>5) & 1) * WPOLY) ^ (((x>>5) & 2) * WPOLY) \
^ (((x>>5) & 4) * WPOLY))
#define f3(x) (f2(x) ^ x)
#define f9(x) (f8(x) ^ x)
#define fb(x) (f8(x) ^ f2(x) ^ x)
#define fd(x) (f8(x) ^ f4(x) ^ x)
#define fe(x) (f8(x) ^ f4(x) ^ f2(x))
#else
#define f2(x) ((x) ? pow[log[x] + 0x19] : 0)
#define f3(x) ((x) ? pow[log[x] + 0x01] : 0)
#define f9(x) ((x) ? pow[log[x] + 0xc7] : 0)
#define fb(x) ((x) ? pow[log[x] + 0x68] : 0)
#define fd(x) ((x) ? pow[log[x] + 0xee] : 0)
#define fe(x) ((x) ? pow[log[x] + 0xdf] : 0)
#endif
#include "aestab.h"
#if defined(__cplusplus)
extern "C"
{
#endif
#if defined(STATIC_TABLES)
/* provided so that a call to aes_init() is harmless when fixed tables are in use */
AES_RETURN aes_init(void)
{
return EXIT_SUCCESS;
}
#else /* Generate the tables for the dynamic table option */
#if defined(FF_TABLES)
#define gf_inv(x) ((x) ? pow[ 255 - log[x]] : 0)
#else
/* It will generally be sensible to use tables to compute finite
field multiplies and inverses but where memory is scarce this
code might sometimes be better. But it only has effect during
initialisation so it's pretty unimportant in overall terms.
*/
/* return 2 ^ (n - 1) where n is the bit number of the highest bit
set in x with x in the range 1 < x < 0x00000200. This form is
used so that locals within gf_inv can be bytes rather than words
*/
static uint8_t hibit(const uint32_t x)
{ uint8_t r = (uint8_t)((x >> 1) | (x >> 2));
r |= (r >> 2);
r |= (r >> 4);
return (r + 1) >> 1;
}
/* return the inverse of the finite field element x */
static uint8_t gf_inv(const uint8_t x)
{ uint8_t p1 = x, p2 = BPOLY, n1 = hibit(x), n2 = 0x80, v1 = 1, v2 = 0;
if(x < 2)
return x;
for( ; ; )
{
if(n1)
while(n2 >= n1) /* divide polynomial p2 by p1 */
{
n2 /= n1; /* shift smaller polynomial left */
p2 ^= (p1 * n2) & 0xff; /* and remove from larger one */
v2 ^= v1 * n2; /* shift accumulated value and */
n2 = hibit(p2); /* add into result */
}
else
return v1;
if(n2) /* repeat with values swapped */
while(n1 >= n2)
{
n1 /= n2;
p1 ^= p2 * n1;
v1 ^= v2 * n1;
n1 = hibit(p1);
}
else
return v2;
}
}
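/* e.g. gf_inv(0x53) == 0xca, since {53} * {ca} == {01} modulo the AES
   field polynomial x^8 + x^4 + x^3 + x + 1 */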
#endif
/* The forward and inverse affine transformations used in the S-box */
uint8_t fwd_affine(const uint8_t x)
{ uint32_t w = x;
w ^= (w << 1) ^ (w << 2) ^ (w << 3) ^ (w << 4);
return 0x63 ^ ((w ^ (w >> 8)) & 0xff);
}
uint8_t inv_affine(const uint8_t x)
{ uint32_t w = x;
w = (w << 1) ^ (w << 3) ^ (w << 6);
return 0x05 ^ ((w ^ (w >> 8)) & 0xff);
}
static int init = 0;
AES_RETURN aes_init(void)
{ uint32_t i, w;
#if defined(FF_TABLES)
uint8_t pow[512] = {0}, log[256] = {0};
if(init)
return EXIT_SUCCESS;
/* log and power tables for GF(2^8) finite field with
WPOLY as modular polynomial - the simplest primitive
root is 0x03, used here to generate the tables
*/
i = 0; w = 1;
do
{
pow[i] = (uint8_t)w;
pow[i + 255] = (uint8_t)w;
log[w] = (uint8_t)i++;
w ^= (w << 1) ^ (w & 0x80 ? WPOLY : 0);
}
while (w != 1);
#else
if(init)
return EXIT_SUCCESS;
#endif
for(i = 0, w = 1; i < RC_LENGTH; ++i)
{
t_set(r,c)[i] = bytes2word(w, 0, 0, 0);
w = f2(w);
}
for(i = 0; i < 256; ++i)
{ uint8_t b;
b = fwd_affine(gf_inv((uint8_t)i));
w = bytes2word(f2(b), b, b, f3(b));
#if defined( SBX_SET )
t_set(s,box)[i] = b;
#endif
#if defined( FT1_SET ) /* tables for a normal encryption round */
t_set(f,n)[i] = w;
#endif
#if defined( FT4_SET )
t_set(f,n)[0][i] = w;
t_set(f,n)[1][i] = upr(w,1);
t_set(f,n)[2][i] = upr(w,2);
t_set(f,n)[3][i] = upr(w,3);
#endif
w = bytes2word(b, 0, 0, 0);
#if defined( FL1_SET ) /* tables for last encryption round (may also */
t_set(f,l)[i] = w; /* be used in the key schedule) */
#endif
#if defined( FL4_SET )
t_set(f,l)[0][i] = w;
t_set(f,l)[1][i] = upr(w,1);
t_set(f,l)[2][i] = upr(w,2);
t_set(f,l)[3][i] = upr(w,3);
#endif
#if defined( LS1_SET ) /* table for key schedule if t_set(f,l) above is*/
t_set(l,s)[i] = w; /* not of the required form */
#endif
#if defined( LS4_SET )
t_set(l,s)[0][i] = w;
t_set(l,s)[1][i] = upr(w,1);
t_set(l,s)[2][i] = upr(w,2);
t_set(l,s)[3][i] = upr(w,3);
#endif
b = gf_inv(inv_affine((uint8_t)i));
w = bytes2word(fe(b), f9(b), fd(b), fb(b));
#if defined( IM1_SET ) /* tables for the inverse mix column operation */
t_set(i,m)[b] = w;
#endif
#if defined( IM4_SET )
t_set(i,m)[0][b] = w;
t_set(i,m)[1][b] = upr(w,1);
t_set(i,m)[2][b] = upr(w,2);
t_set(i,m)[3][b] = upr(w,3);
#endif
#if defined( ISB_SET )
t_set(i,box)[i] = b;
#endif
#if defined( IT1_SET ) /* tables for a normal decryption round */
t_set(i,n)[i] = w;
#endif
#if defined( IT4_SET )
t_set(i,n)[0][i] = w;
t_set(i,n)[1][i] = upr(w,1);
t_set(i,n)[2][i] = upr(w,2);
t_set(i,n)[3][i] = upr(w,3);
#endif
w = bytes2word(b, 0, 0, 0);
#if defined( IL1_SET ) /* tables for last decryption round */
t_set(i,l)[i] = w;
#endif
#if defined( IL4_SET )
t_set(i,l)[0][i] = w;
t_set(i,l)[1][i] = upr(w,1);
t_set(i,l)[2][i] = upr(w,2);
t_set(i,l)[3][i] = upr(w,3);
#endif
}
init = 1;
return EXIT_SUCCESS;
}
/*
Automatic code initialisation (suggested by Henrik S. Gaßmann)
based on code provided by Joe Lowe and placed in the public domain at:
http://stackoverflow.com/questions/1113409/attribute-constructor-equivalent-in-vc
*/
#ifdef _MSC_VER
#pragma section(".CRT$XCU", read)
__declspec(allocate(".CRT$XCU")) void (__cdecl *aes_startup)(void) = aes_init;
#elif defined(__GNUC__)
static void aes_startup(void) __attribute__((constructor));
static void aes_startup(void)
{
aes_init();
}
#else
#pragma message( "dynamic tables must be initialised manually on your system" )
#endif
#endif
#if defined(__cplusplus)
}
#endif
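
/* On a toolchain with neither the MSVC nor the GCC hook above, the dynamic
   tables must be built explicitly before any key is scheduled; a sketch: */
int main(void)
{
    aes_init();     /* must precede any aes_*_key*() call */
    /* ... */
    return 0;
}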


@ -26,7 +26,9 @@
#include <string.h>
#include "address.h"
#if USE_NEM
#include "aes/aes.h"
#endif
#include "base58.h"
#include "bignum.h"
#include "bip32.h"

13
Core/Src/crypto/util.c Normal file

@ -0,0 +1,13 @@
#include "util.h"
/* Pads data in place to a multiple of plen: a mandatory 0x80 marker byte
   followed by zeros. Returns the padded length; the buffer must have room
   for up to plen extra bytes. */
uint32_t pad_iso9797_m1(uint8_t* data, uint8_t plen, uint32_t size) {
  uint32_t padding = plen - (size % plen);
  data[size] = 0x80;
  memset(&data[size+1], 0, (padding - 1));
  return padding + size;
}

/* Strips the padding by scanning back to the 0x80 marker and returns the
   original length. Assumes the input is correctly padded; an input with no
   marker byte would underflow. */
uint32_t unpad_iso9797_m1(uint8_t* data, uint32_t size) {
  while(data[--size] != 0x80) {}
  return size;
}
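
/* A worked example of the two helpers above, with illustrative values:
   a 5-byte payload padded to one 16-byte block and recovered again. */
static void pad_example(void) {
  uint8_t buf[32] = {1, 2, 3, 4, 5};
  uint32_t padded = pad_iso9797_m1(buf, 16, 5);  /* buf becomes 01 02 03 04 05 80 00 .. 00; returns 16 */
  uint32_t orig = unpad_iso9797_m1(buf, padded); /* scans back to the 0x80 marker; returns 5 */
  (void)orig;
}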


@ -126,7 +126,7 @@ uint8_t T1_Handle_Resp(SmartCard* sc, APDU* apdu) {
uint8_t s;
if ((header[1] & T1_R_BLOCK) == 0) {
data = &apdu->data[apdu->lr];
data = &apdu->data[(apdu->lr+1)];
apdu->lr += blen;
} else if ((header[1] & T1_S_BLOCK) == T1_S_BLOCK) {
data = &s;


@ -7,6 +7,7 @@
#include "error.h"
#include "crypto/rand.h"
#include "crypto/sha2.h"
#include "crypto/util.h"
const uint8_t keycard_aid[] = {0xa0, 0x00, 0x00, 0x08, 0x04, 0x00, 0x01, 0x01, 0x01};
const uint8_t keycard_aid_len = 9;
@ -15,17 +16,6 @@ const uint8_t keycard_default_psk[] = {0x67, 0x5d, 0xea, 0xbb, 0x0d, 0x7c, 0x72,
static int tested = 0;
static inline int Constant_Compare(const uint8_t* a, const uint8_t* b, int length) {
int i;
int compareSum = 0;
for (i = 0; i < length; i++) {
compareSum |= a[i] ^ b[i];
}
return compareSum;
}
void Keycard_Activate(SmartCard* sc) {
SmartCard_Activate(sc);
}
@ -92,7 +82,7 @@ uint16_t Keycard_CMD_AutoPair(SmartCard* sc, APDU* apdu, const uint8_t* psk, Pai
sha256_Update(&sha256, buf, SHA256_DIGEST_LENGTH);
sha256_Final(&sha256, buf);
if (Constant_Compare(card_cryptogram, buf, SHA256_DIGEST_LENGTH) != 0) {
if (memcmp_ct(card_cryptogram, buf, SHA256_DIGEST_LENGTH) != 0) {
return ERR_CRYPTO;
}
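
/* memcmp_ct itself is not part of this hunk; a constant-time comparison in
   the spirit of the removed Constant_Compare would look like the sketch
   below (the real definition presumably lives in crypto/util.h and may
   differ): */
static inline int memcmp_ct(const uint8_t* a, const uint8_t* b, int length) {
  int diff = 0;
  for (int i = 0; i < length; i++) {
    diff |= a[i] ^ b[i];  /* accumulate differences without data-dependent branches */
  }
  return diff;            /* zero only when all bytes match */
}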


@ -22,6 +22,7 @@
/* Private includes ----------------------------------------------------------*/
/* USER CODE BEGIN Includes */
#include "keycard.h"
#include "crypto/aes.h"
#include "iso7816/smartcard.h"
/* USER CODE END Includes */
@ -43,8 +44,8 @@
ADC_HandleTypeDef hadc1;
CRYP_HandleTypeDef hcryp;
__ALIGN_BEGIN static const uint32_t pKeyAES[4] __ALIGN_END = {
0x00000000,0x00000000,0x00000000,0x00000000};
__ALIGN_BEGIN static const uint32_t pKeyAES[8] __ALIGN_END = {
0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000};
__ALIGN_BEGIN static const uint32_t pInitVectAES[4] __ALIGN_END = {
0x00000000,0x00000000,0x00000000,0x00000000};
@ -127,6 +128,7 @@ int main(void)
MX_SPI1_Init();
MX_TIM6_Init();
/* USER CODE BEGIN 2 */
aes_init(&hcryp);
Keycard_Init();
SmartCard_Init(&sc, &hsmartcard2, &htim6);
/* USER CODE END 2 */
@ -268,7 +270,7 @@ static void MX_AES_Init(void)
/* USER CODE END AES_Init 1 */
hcryp.Instance = AES;
hcryp.Init.DataType = CRYP_DATATYPE_32B;
hcryp.Init.KeySize = CRYP_KEYSIZE_128B;
hcryp.Init.KeySize = CRYP_KEYSIZE_256B;
hcryp.Init.pKey = (uint32_t *)pKeyAES;
hcryp.Init.pInitVect = (uint32_t *)pInitVectAES;
hcryp.Init.Algorithm = CRYP_AES_CBC;


@ -5,13 +5,14 @@
#include "crypto/sha2.h"
#include "crypto/rand.h"
#include "crypto/secp256k1.h"
#include "crypto/util.h"
#include "error.h"
#define SECP256K1_KEYLEN 32
#define SECP256K1_PUBLEN 65
uint16_t SecureChannel_Mutual_Authenticate(SecureChannel* sc, SmartCard* card, APDU* apdu) {
uint8_t data[32];
SC_BUF(data, 32);
random_buffer(data, 32);
APDU_RESET(apdu);
APDU_CLA(apdu) = 0x80;
@ -69,20 +70,65 @@ uint16_t SecureChannel_Open(SecureChannel* sc, SmartCard* card, APDU* apdu, Pair
sha512_Update(&sha512, &secret[1], SHA256_DIGEST_LENGTH);
sha512_Final(&sha512, sc->encKey);
memcpy(sc->iv, &APDU_RESP(apdu)[32], AES_IV_SIZE);
memcpy(sc->iv, &APDU_RESP(apdu)[SHA256_DIGEST_LENGTH], AES_IV_SIZE);
sc->open = 1;
return SecureChannel_Mutual_Authenticate(sc, card, apdu);
}
uint16_t SecureChannel_Protect_APDU(SecureChannel *sc, APDU* apdu, uint8_t* data, uint32_t len) {
return 0;
  /* pad the plaintext in place, then encrypt it after the space reserved for the MAC */
  len = pad_iso9797_m1(data, SC_PAD, len);

  uint8_t* apduData = APDU_DATA(apdu);

  if (!aes_encrypt(sc->encKey, sc->iv, data, len, &apduData[AES_IV_SIZE])) {
    return ERR_CRYPTO;
  }

  len += AES_IV_SIZE; /* account for the MAC field prepended to the ciphertext */

  APDU_SET_LC(apdu, len);
  APDU_SET_LE(apdu, 0);

  /* the MAC covers a zero-padded pseudo-header (CLA, INS, P1, P2, LC) and the ciphertext */
  memset(apduData, 0, AES_IV_SIZE);
  apduData[0] = APDU_CLA(apdu);
  apduData[1] = APDU_INS(apdu);
  apduData[2] = APDU_P1(apdu);
  apduData[3] = APDU_P2(apdu);
  apduData[4] = len;

  /* the command MAC doubles as the IV for the next message and is prepended to the payload */
  aes_cmac(sc->macKey, apduData, len, sc->iv);
  memcpy(apduData, sc->iv, AES_IV_SIZE);

  return ERR_OK;
}
uint16_t SecureChannel_Decrypt_APDU(SecureChannel *sc, APDU* apdu) {
return 0;
if (APDU_SW(apdu) == 0x6982) {
sc->open = 0;
return ERR_CRYPTO;
}
  SC_BUF(cmac, AES_IV_SIZE);
  SC_BUF(new_iv, AES_IV_SIZE);

  uint8_t* data = APDU_RESP(apdu);
  memcpy(cmac, data, AES_IV_SIZE);

  /* recompute the MAC over the zero-padded length byte and the ciphertext */
  memset(data, 0, AES_IV_SIZE);
  data[0] = apdu->lr;
  aes_cmac(sc->macKey, data, apdu->lr, new_iv);

  if (memcmp_ct(new_iv, cmac, AES_IV_SIZE) != 0) {
    sc->open = 0;
    return ERR_CRYPTO;
  }

  /* decrypt with the session encryption key; the MAC of the previous command is the IV */
  if (!aes_decrypt(sc->encKey, sc->iv, &data[AES_IV_SIZE], (apdu->lr - AES_IV_SIZE), data)) {
    return ERR_CRYPTO;
  }

  apdu->lr = unpad_iso9797_m1(data, (apdu->lr - AES_IV_SIZE));

  /* the response MAC becomes the IV for the next message */
  memcpy(sc->iv, cmac, AES_IV_SIZE);

  return ERR_OK;
}
void SecureChannel_Close(SecureChannel* sc) {
  /* wipe all session secrets */
  memset(sc->encKey, 0, AES_256_KEY_SIZE);
  memset(sc->macKey, 0, AES_256_KEY_SIZE);
  memset(sc->iv, 0, AES_IV_SIZE);
sc->open = 0;
}
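
/* A hedged sketch of a complete protected exchange built from the functions
   above; the transport call is hypothetical and stands in for whatever the
   ISO 7816 layer of this tree provides: */
uint16_t SecureChannel_Example_Exchange(SecureChannel* sc, SmartCard* card, APDU* apdu,
                                        uint8_t* data, uint32_t len) {
  uint16_t err = SecureChannel_Protect_APDU(sc, apdu, data, len);

  if (err != ERR_OK) {
    return err;
  }

  SmartCard_Transmit_APDU(card, apdu);  /* hypothetical transport primitive */

  return SecureChannel_Decrypt_APDU(sc, apdu);  /* verify MAC, then decrypt in place */
}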


@ -1,5 +1,5 @@
##########################################################################################################################
# File automatically-generated by tool: [projectgenerator] version: [3.17.1] date: [Fri Sep 16 14:08:17 CEST 2022]
# File automatically-generated by tool: [projectgenerator] version: [3.17.1] date: [Mon Oct 03 11:42:36 CEST 2022]
##########################################################################################################################
# ------------------------------------------------


@ -38,10 +38,7 @@ BUILD_DIR = build
C_SOURCES = \
Core/Src/application_info.c \
Core/Src/crypto/address.c \
Core/Src/crypto/aes/aes_modes.c \
Core/Src/crypto/aes/aescrypt.c \
Core/Src/crypto/aes/aeskey.c \
Core/Src/crypto/aes/aestab.c \
Core/Src/crypto/aes.c \
Core/Src/crypto/base32.c \
Core/Src/crypto/base58.c \
Core/Src/crypto/bignum.c \
@ -89,6 +86,7 @@ Core/Src/crypto/sha2.c \
Core/Src/crypto/sha3.c \
Core/Src/crypto/shamir.c \
Core/Src/crypto/slip39.c \
Core/Src/crypto/util.c \
Core/Src/iso7816/atr.c \
Core/Src/iso7816/pps.c \
Core/Src/iso7816/smartcard.c \
@ -216,7 +214,6 @@ AS_INCLUDES = \
C_INCLUDES = \
-ICore/Inc \
-ICore/Inc/crypto \
-ICore/Inc/crypto/aes \
-ICore/Inc/crypto/chacha20poly1305 \
-ICore/Inc/crypto/ed25519-donna \
-ICore/Inc/iso7816 \


@ -8,7 +8,8 @@ ADC1.SelectedChannel=ADC_CHANNEL_10
ADC1.master=1
AES.Algorithm=CRYP_AES_CBC
AES.Header=__NULL
AES.IPParameters=Algorithm,Header
AES.IPParameters=Algorithm,KeySize,Header
AES.KeySize=CRYP_KEYSIZE_256B
File.Version=6
GPIO.groupedBy=Group By Peripherals
KeepUserPlacement=false