From 0e35e6f120c606f2f54de745344f782a27a87164 Mon Sep 17 00:00:00 2001 From: "Jonathan M. Henson" Date: Wed, 12 Apr 2023 14:30:15 -0700 Subject: [PATCH] Added 256-bit AES with CBC, CTR, GCM, and Keywrap Modes (#136) AES CBC, CTR, GCM, and Keywrap modes for 256 bit keys. --- bin/run_x_platform_fuzz_corpus/main.c | 3 +- format-check.sh | 1 + include/aws/cal/cal.h | 2 +- .../cal/private/darwin/common_cryptor_spi.h | 458 ++++++ .../aws/cal/private/symmetric_cipher_priv.h | 59 + include/aws/cal/symmetric_cipher.h | 238 +++ source/cal.c | 5 +- source/darwin/commoncrypto_aes.c | 692 +++++++++ source/symmetric_cipher.c | 225 +++ source/unix/openssl_aes.c | 717 +++++++++ source/windows/bcrypt_aes.c | 1121 ++++++++++++++ source/windows/bcrypt_hash.c | 13 +- tests/CMakeLists.txt | 26 + tests/aes256_test.c | 1329 +++++++++++++++++ 14 files changed, 4880 insertions(+), 9 deletions(-) create mode 100644 include/aws/cal/private/darwin/common_cryptor_spi.h create mode 100644 include/aws/cal/private/symmetric_cipher_priv.h create mode 100644 include/aws/cal/symmetric_cipher.h create mode 100644 source/darwin/commoncrypto_aes.c create mode 100644 source/symmetric_cipher.c create mode 100644 source/unix/openssl_aes.c create mode 100644 source/windows/bcrypt_aes.c create mode 100644 tests/aes256_test.c diff --git a/bin/run_x_platform_fuzz_corpus/main.c b/bin/run_x_platform_fuzz_corpus/main.c index 776a1999..c32b48bb 100644 --- a/bin/run_x_platform_fuzz_corpus/main.c +++ b/bin/run_x_platform_fuzz_corpus/main.c @@ -221,7 +221,8 @@ int main(int argc, char *argv[]) { } fprintf( stdout, - "Corpus verification complete with %d failures out of %d signatures processed\n\n", + "Corpus %d verification complete with %d failures out of %d signatures processed\n\n", + (int)corpus_runs, (int)signatures_failed, (int)signatures_processed); diff --git a/format-check.sh b/format-check.sh index 48eb6e35..117dd52b 100755 --- a/format-check.sh +++ b/format-check.sh @@ -22,3 +22,4 @@ do done exit $FAIL + diff --git a/include/aws/cal/cal.h b/include/aws/cal/cal.h index 5456c919..01e1fdc7 100644 --- a/include/aws/cal/cal.h +++ b/include/aws/cal/cal.h @@ -22,7 +22,7 @@ enum aws_cal_errors { AWS_ERROR_CAL_MALFORMED_ASN1_ENCOUNTERED, AWS_ERROR_CAL_MISMATCHED_DER_TYPE, AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM, - + AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM, AWS_ERROR_CAL_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_CAL_PACKAGE_ID) }; diff --git a/include/aws/cal/private/darwin/common_cryptor_spi.h b/include/aws/cal/private/darwin/common_cryptor_spi.h new file mode 100644 index 00000000..efe62010 --- /dev/null +++ b/include/aws/cal/private/darwin/common_cryptor_spi.h @@ -0,0 +1,458 @@ +/* + * Copyright (c) 2010 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
+ * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* clang-format off */ +#ifndef _CC_CryptorSPI_H_ +#define _CC_CryptorSPI_H_ + +#include +#include + +#include +#include +#include + +#include + +#include +#include + +#ifdef __cplusplus + extern "C" { +#endif + +#if defined(_WIN32) + int timingsafe_bcmp(const void *b1, const void *b2, size_t n); +#endif + /* + This is an SPI header. It includes some work in progress implementation notes that + will be removed when this is promoted to an API set. + */ + + /* + Private Ciphers + */ + + /* Lion SPI name for no padding. Defining for compatibility. Is now + ccNoPadding in CommonCryptor.h + */ + + enum { + ccDefaultPadding = 0, + }; + + + enum { + kCCAlgorithmAES128NoHardware = 20, + kCCAlgorithmAES128WithHardware = 21 + }; + + /* + Private Modes + */ + enum { + kCCModeGCM = 11, + kCCModeCCM = 12, + }; + + /* + Private Paddings + */ + enum { + ccCBCCTS1 = 10, + ccCBCCTS2 = 11, + ccCBCCTS3 = 12, + }; + + /* + Private Cryptor direction (op) + */ + enum { + kCCBoth = 3, + }; + + + + + /* + Supports a mode call of + int mode_setup(int cipher, const unsigned char *IV, const unsigned char *key, int keylen, + const unsigned char *tweak, int tweaklen, int num_rounds, int options, mode_context *ctx); + */ + + /* User supplied space for the CryptorRef */ + + CCCryptorStatus CCCryptorCreateFromDataWithMode( + CCOperation op, /* kCCEncrypt, kCCEncrypt, kCCBoth (default for BlockMode) */ + CCMode mode, + CCAlgorithm alg, + CCPadding padding, + const void *iv, /* optional initialization vector */ + const void *key, /* raw key material */ + size_t keyLength, + const void *tweak, /* raw tweak material */ + size_t tweakLength, + int numRounds, + CCModeOptions options, + const void *data, /* caller-supplied memory */ + size_t dataLength, /* length of data in bytes */ + CCCryptorRef *cryptorRef, /* RETURNED */ + size_t *dataUsed) /* optional, RETURNED */ + API_AVAILABLE(macos(10.7), ios(5.0)); + + + /* + Assuming we can use existing CCCryptorCreateFromData for all modes serviced by these: + int mode_encrypt(const unsigned char *pt, unsigned char *ct, unsigned long len, mode_context *ctx); + int mode_decrypt(const unsigned char *ct, unsigned char *pt, unsigned long len, mode_context *ctx); + */ + + /* + Block mode encrypt and decrypt interfaces for IV tweaked blocks (XTS and CBC) + int mode_encrypt_tweaked(const unsigned char *pt, unsigned long len, unsigned char *ct, const unsigned char *tweak, mode_context *ctx); + int mode_decrypt_tweaked(const unsigned char *ct, unsigned long len, unsigned char *pt, const unsigned char *tweak, mode_context *ctx); + */ + + CCCryptorStatus CCCryptorEncryptDataBlock( + CCCryptorRef cryptorRef, + const void *iv, + const void *dataIn, + size_t dataInLength, + void *dataOut) + API_AVAILABLE(macos(10.7), ios(5.0)); + + + CCCryptorStatus CCCryptorDecryptDataBlock( + CCCryptorRef cryptorRef, + const void *iv, + const void *dataIn, + size_t dataInLength, + void *dataOut) + API_AVAILABLE(macos(10.7), ios(5.0)); + + + /*! + @function CCCryptorReset_binary_compatibility + @abstract Do not call this function. Reinitializes an existing CCCryptorRef with a (possibly) + new initialization vector. The CCCryptorRef's key is + unchanged. Preserves compatibility for Sdks prior to + macOS 10.13, iOS 11, watchOS 4 and tvOS 11. It is used + internally in CommonCrypto. See CCCryptorReset for more information. 
+ @result The only possible error is kCCParamError. + */ + CCCryptorStatus CCCryptorReset_binary_compatibility(CCCryptorRef cryptorRef, const void *iv) + API_DEPRECATED_WITH_REPLACEMENT("CCCryptorReset", macos(10.4, 10.13), ios(2.0, 11.0)); + + /* + Assuming we can use the existing CCCryptorRelease() interface for + int mode_done(mode_context *ctx); + */ + + /* + Not surfacing these other than with CCCryptorReset() + int mode_setIV(const unsigned char *IV, unsigned long len, mode_context *ctx); + int mode_getIV(const unsigned char *IV, unsigned long *len, mode_context *ctx); + */ + + /* + * returns a cipher blocksize length iv in the provided iv buffer. + */ + + CCCryptorStatus + CCCryptorGetIV(CCCryptorRef cryptorRef, void *iv) + API_AVAILABLE(macos(10.7), ios(5.0)); + + /* + GCM Support Interfaces + Use CCCryptorCreateWithMode() with the kCCModeGCM selector to initialize + a CryptoRef. Only kCCAlgorithmAES128 can be used with GCM and these + functions. IV Setting etc will be ignored from CCCryptorCreateWithMode(). + Use the CCCryptorGCMAddIV() routine below for IV setup. + */ + + /* + Deprecated. Use CCCryptorGCMSetIV() instead. + This adds the initial vector octets from iv of length ivLen to the GCM + CCCryptorRef. You can call this function as many times as required to + process the entire IV. + */ + + CCCryptorStatus + CCCryptorGCMAddIV(CCCryptorRef cryptorRef, + const void *iv, size_t ivLen) + API_DEPRECATED_WITH_REPLACEMENT("CCCryptorGCMSetIV", macos(10.8, 10.13), ios(5.0, 11.0)); + + + /* + This adds the initial vector octets from iv of length ivLen to the GCM + CCCryptorRef. The input iv cannot be NULL and ivLen must be between 12 + to 16 bytes inclusive. CCRandomGenerateBytes() can be used to generate random IVs + */ + + CCCryptorStatus + CCCryptorGCMSetIV(CCCryptorRef cryptorRef, + const void *iv, size_t ivLen) + API_AVAILABLE(macos(10.13), ios(11.0)); + /* + Additional Authentication Data + After the entire IV has been processed, the additional authentication + data can be processed. Unlike the IV, a packet/session does not require + additional authentication data (AAD) for security. The AAD is meant to + be used as side channel data you want to be authenticated with the packet. + Note: once you begin adding AAD to the GCM CCCryptorRef you cannot return + to adding IV data until the state has been reset. + */ + + CCCryptorStatus + CCCryptorGCMAddAAD(CCCryptorRef cryptorRef, + const void *aData, + size_t aDataLen) + API_AVAILABLE(macos(10.8), ios(6.0)); + + + // This is for old iOS5 clients + CCCryptorStatus + CCCryptorGCMAddADD(CCCryptorRef cryptorRef, + const void *aData, + size_t aDataLen) + API_AVAILABLE(macos(10.8), ios(5.0)); + + + CCCryptorStatus CCCryptorGCMEncrypt( + CCCryptorRef cryptorRef, + const void *dataIn, + size_t dataInLength, + void *dataOut) + API_AVAILABLE(macos(10.8), ios(5.0)); + + + CCCryptorStatus CCCryptorGCMDecrypt( + CCCryptorRef cryptorRef, + const void *dataIn, + size_t dataInLength, + void *dataOut) + API_AVAILABLE(macos(10.8), ios(5.0)); + + /* + This finalizes the GCM state gcm and stores the tag in tag of length + taglen octets. + The tag must be verified by comparing the computed and expected values + using timingsafe_bcmp. Other comparison functions (e.g. memcmp) + must not be used as they may be vulnerable to practical timing attacks, + leading to tag forgery. 
+ */ + + CCCryptorStatus CCCryptorGCMFinal( + CCCryptorRef cryptorRef, + void *tagOut, + size_t *tagLength) + API_DEPRECATED_WITH_REPLACEMENT("CCCryptorGCMFinalize", macos(10.8, 10.13), ios(5.0, 11.0)); + + /* + This finalizes the GCM state gcm. + On encryption, the computed tag is returned in tagOut. + On decryption, the provided tag is securely compared to the expected tag, and + error is returned if the tags do not match. The tag buffer content is not modified on decryption. + is not updated on decryption. + */ + CCCryptorStatus CCCryptorGCMFinalize( + CCCryptorRef cryptorRef, + void *tag, + size_t tagLength) + API_AVAILABLE(macos(10.13), ios(11.0)); + + /* + This will reset the GCM CCCryptorRef to the state that CCCryptorCreateWithMode() + left it. The user would then call CCCryptorGCMAddIV(), CCCryptorGCMAddAAD(), etc. + */ + + CCCryptorStatus CCCryptorGCMReset( + CCCryptorRef cryptorRef) + API_AVAILABLE(macos(10.8), ios(5.0)); + + /* + Deprecated. Use CCCryptorGCMOneshotEncrypt() or CCCryptorGCMOneshotDecrypt() instead. + This will initialize the GCM state with the given key, IV and AAD value + then proceed to encrypt or decrypt the message text and store the final + message tag. The definition of the variables is the same as it is for all + the manual functions. If you are processing many packets under the same + key you shouldn't use this function as it invokes the pre-computation + with each call. + The tag must be verified by comparing the computed and expected values + using timingsafe_bcmp. Other comparison functions (e.g. memcmp) + must not be used as they may be vulnerable to practical timing attacks, + leading to tag forgery. + */ + + CCCryptorStatus CCCryptorGCM( + CCOperation op, /* kCCEncrypt, kCCDecrypt */ + CCAlgorithm alg, + const void *key, /* raw key material */ + size_t keyLength, + const void *iv, + size_t ivLen, + const void *aData, + size_t aDataLen, + const void *dataIn, + size_t dataInLength, + void *dataOut, + void *tagOut, + size_t *tagLength) + API_DEPRECATED_WITH_REPLACEMENT("CCCryptorGCMOneshotEncrypt or CCCryptorGCMOneshotDecrypt", macos(10.8, 10.13), ios(6.0, 11.0)); + + /*! + @function CCCryptorGCMOneshotDecrypt + @abstract Encrypts using AES-GCM and outputs encrypted data and an authentication tag + @param alg It can only be kCCAlgorithmAES + @param key Key for the underlying AES blockcipher. It must be 16 bytes. ***** + @param keyLength Length of the key in bytes + @param iv Initialization vector, must be at least 12 bytes + @param ivLength Length of the IV in bytes + @param aData Additional data to authenticate. It can be NULL, if there is no additional data to be authenticated. + @param aDataLength Length of the additional data in bytes. It can be zero. + @param dataIn Input plaintext + @param dataInLength Length of the input plaintext data in bytes + @param cipherOut Output ciphertext + @param tagLength Length of the output authentication tag in bytes. It is minimum 8 bytes and maximum 16 bytes. + @param tagOut the output authentication tag + @result kccSuccess if successful. + @discussion It is a one-shot AESGCM encryption and in-place encryption is supported. + @warning The key-IV pair must be unique per encryption. The IV must be nonzero in length. + In stateful protocols, if each packet exposes a guaranteed-unique value, it is recommended to format this as a 12-byte value for use as the IV. + In stateless protocols, it is recommended to choose a 16-byte value using a cryptographically-secure pseudorandom number generator (e.g. @p ccrng). 
+ */ + + CCCryptorStatus CCCryptorGCMOneshotEncrypt(CCAlgorithm alg, const void *key, size_t keyLength, /* raw key material */ + const void *iv, size_t ivLength, + const void *aData, size_t aDataLength, + const void *dataIn, size_t dataInLength, + void *cipherOut, + void *tagOut, size_t tagLength) __attribute__((__warn_unused_result__)) + API_AVAILABLE(macos(10.13), ios(11.0)); + + /*! + @function CCCryptorGCMOneshotDecrypt + @abstract Decrypts using AES-GCM, compares the computed tag of the decrypted message to the input tag and returns error is authentication fails. + @discussion CCCryptorGCMOneshotDecrypt() works similar to the CCCryptorGCMOneshotEncrypt(). CCCryptorGCMOneshotDecrypt() does not return the tag of the decrypted message. It compated the computed tag with inout tag and outputs error if authentication of the decrypted message fails. + */ + + CCCryptorStatus CCCryptorGCMOneshotDecrypt(CCAlgorithm alg, const void *key, size_t keyLength, + const void *iv, size_t ivLen, + const void *aData, size_t aDataLen, + const void *dataIn, size_t dataInLength, + void *dataOut, + const void *tagIn, size_t tagLength) __attribute__((__warn_unused_result__)) + API_AVAILABLE(macos(10.13), ios(11.0)); + + void CC_RC4_set_key(void *ctx, int len, const unsigned char *data) + API_AVAILABLE(macos(10.4), ios(5.0)); + + void CC_RC4(void *ctx, unsigned long len, const unsigned char *indata, + unsigned char *outdata) + API_AVAILABLE(macos(10.4), ios(5.0)); + + /* + GCM interface can then be easily bolt on the rest of standard CCCryptor interface; typically following sequence can be used: + CCCryptorCreateWithMode(mode = kCCModeGCM) + 0..Nx: CCCryptorAddParameter(kCCParameterIV, iv) + 0..Nx: CCCryptorAddParameter(kCCParameterAuthData, data) + 0..Nx: CCCryptorUpdate(inData, outData) + 0..1: CCCryptorFinal(outData) + 0..1: CCCryptorGetParameter(kCCParameterAuthTag, tag) + CCCryptorRelease() + */ + + enum { + /* + Initialization vector - cryptor input parameter, typically + needs to have the same length as block size, but in some cases + (GCM) it can be arbitrarily long and even might be called + multiple times. + */ + kCCParameterIV, + + /* + Authentication data - cryptor input parameter, input for + authenticating encryption modes like GCM. If supported, can + be called multiple times before encryption starts. + */ + kCCParameterAuthData, + + /* + Mac Size - cryptor input parameter, input for + authenticating encryption modes like CCM. Specifies the size of + the AuthTag the algorithm is expected to produce. + */ + kCCMacSize, + + /* + Data Size - cryptor input parameter, input for + authenticating encryption modes like CCM. Specifies the amount of + data the algorithm is expected to process. + */ + kCCDataSize, + + /* + Authentication tag - cryptor output parameter, output from + authenticating encryption modes like GCM. If supported, + should be retrieved after the encryption finishes. + */ + kCCParameterAuthTag, + }; + typedef uint32_t CCParameter; + + /* + Sets or adds some other cryptor input parameter. According to the + cryptor type and state, parameter can be either accepted or + refused with kCCUnimplemented (when given parameter is not + supported for this type of cryptor at all) or kCCParamError (bad + data length or format). + */ + + CCCryptorStatus CCCryptorAddParameter( + CCCryptorRef cryptorRef, + CCParameter parameter, + const void *data, + size_t dataSize); + + + /* + Gets value of output cryptor parameter. 
According to the cryptor + type state, the request can be either accepted or refused with + kCCUnimplemented (when given parameter is not supported for this + type of cryptor) or kCCBufferTooSmall (in this case, *dataSize + argument is set to the requested size of data). + */ + + CCCryptorStatus CCCryptorGetParameter( + CCCryptorRef cryptorRef, + CCParameter parameter, + void *data, + size_t *dataSize); + + +#ifdef __cplusplus +} +#endif +#endif /* _CC_CryptorSPI_H_ */ +/* clang-format on */ diff --git a/include/aws/cal/private/symmetric_cipher_priv.h b/include/aws/cal/private/symmetric_cipher_priv.h new file mode 100644 index 00000000..e8226d73 --- /dev/null +++ b/include/aws/cal/private/symmetric_cipher_priv.h @@ -0,0 +1,59 @@ +#ifndef AWS_CAL_SYMMETRIC_CIPHER_PRIV_H +#define AWS_CAL_SYMMETRIC_CIPHER_PRIV_H +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ +#include + +struct aws_symmetric_cipher; + +struct aws_symmetric_cipher_vtable { + const char *alg_name; + const char *provider; + void (*destroy)(struct aws_symmetric_cipher *cipher); + /* reset the cipher to being able to start another encrypt or decrypt operation. + The original IV, Key, Tag etc... will be restored to the current cipher. */ + int (*reset)(struct aws_symmetric_cipher *cipher); + int (*encrypt)(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_encrypt, struct aws_byte_buf *out); + int (*decrypt)(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor to_decrypt, struct aws_byte_buf *out); + + int (*finalize_encryption)(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); + int (*finalize_decryption)(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); +}; + +struct aws_symmetric_cipher { + struct aws_allocator *allocator; + struct aws_symmetric_cipher_vtable *vtable; + struct aws_byte_buf iv; + struct aws_byte_buf key; + struct aws_byte_buf aad; + struct aws_byte_buf tag; + size_t block_size; + size_t key_length_bits; + bool good; + void *impl; +}; + +AWS_EXTERN_C_BEGIN + +/** + * Generates a secure random initialization vector of length len_bytes. If is_counter_mode is set, the final 4 bytes + * will be reserved as a counter and initialized to 1 in big-endian byte-order. + */ +AWS_CAL_API void aws_symmetric_cipher_generate_initialization_vector( + size_t len_bytes, + bool is_counter_mode, + struct aws_byte_buf *out); + +/** + * Generates a secure random symmetric key of length len_bytes. + */ +AWS_CAL_API void aws_symmetric_cipher_generate_key(size_t len_bytes, struct aws_byte_buf *out); + +AWS_EXTERN_C_END + +/* Don't let this one get exported as it should never be used outside of this library (including tests). */ +int aws_symmetric_cipher_try_ensure_sufficient_buffer_space(struct aws_byte_buf *buf, size_t size); + +#endif /* AWS_CAL_SYMMETRIC_CIPHER_PRIV_H */ diff --git a/include/aws/cal/symmetric_cipher.h b/include/aws/cal/symmetric_cipher.h new file mode 100644 index 00000000..59f44831 --- /dev/null +++ b/include/aws/cal/symmetric_cipher.h @@ -0,0 +1,238 @@ +#ifndef AWS_CAL_SYMMETRIC_CIPHER_H +#define AWS_CAL_SYMMETRIC_CIPHER_H +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include +#include + +#define AWS_AES_256_CIPHER_BLOCK_SIZE 16 +#define AWS_AES_256_KEY_BIT_LEN 256 +#define AWS_AES_256_KEY_BYTE_LEN (AWS_AES_256_KEY_BIT_LEN / 8) + +struct aws_symmetric_cipher; + +typedef struct aws_symmetric_cipher *(aws_aes_cbc_256_new_fn)( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv); + +typedef struct aws_symmetric_cipher *(aws_aes_ctr_256_new_fn)( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv); + +typedef struct aws_symmetric_cipher *(aws_aes_gcm_256_new_fn)( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv, + const struct aws_byte_cursor *aad, + const struct aws_byte_cursor *decryption_tag); + +typedef struct aws_symmetric_cipher *( + aws_aes_keywrap_256_new_fn)(struct aws_allocator *allocator, const struct aws_byte_cursor *key); + +AWS_EXTERN_C_BEGIN + +/** + * Creates an instance of AES CBC with 256-bit key. + * If key and iv are NULL, they will be generated internally. + * You can get the generated key and iv back by calling: + * + * aws_symmetric_cipher_get_key() and + * aws_symmetric_cipher_get_initialization_vector() + * + * respectively. + * + * If they are set, that key and iv will be copied internally and used by the cipher. + * + * Returns NULL on failure. You can check aws_last_error() to get the error code indicating the failure cause. + */ +AWS_CAL_API struct aws_symmetric_cipher *aws_aes_cbc_256_new( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv); + +/** + * Creates an instance of AES CTR with 256-bit key. + * If key and iv are NULL, they will be generated internally. + * You can get the generated key and iv back by calling: + * + * aws_symmetric_cipher_get_key() and + * aws_symmetric_cipher_get_initialization_vector() + * + * respectively. + * + * If they are set, that key and iv will be copied internally and used by the cipher. + * + * Returns NULL on failure. You can check aws_last_error() to get the error code indicating the failure cause. + */ +AWS_CAL_API struct aws_symmetric_cipher *aws_aes_ctr_256_new( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv); + +/** + * Creates an instance of AES GCM with 256-bit key. + * If key, iv are NULL, they will be generated internally. + * You can get the generated key and iv back by calling: + * + * aws_symmetric_cipher_get_key() and + * aws_symmetric_cipher_get_initialization_vector() + * + * respectively. + * + * If they are set, that key and iv will be copied internally and used by the cipher. + * + * If tag and aad are set they will be copied internally and used by the cipher. + * decryption_tag would most likely be used for a decrypt operation to detect tampering or corruption. + * The Tag for the most recent encrypt operation will be available in: + * + * aws_symmetric_cipher_get_tag() + * + * If aad is set it will be copied and applied to the cipher. + * + * Returns NULL on failure. You can check aws_last_error() to get the error code indicating the failure cause. + */ +AWS_CAL_API struct aws_symmetric_cipher *aws_aes_gcm_256_new( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv, + const struct aws_byte_cursor *aad, + const struct aws_byte_cursor *decryption_tag); + +/** + * Creates an instance of AES Keywrap with 256-bit key. 
+ * If key is NULL, it will be generated internally. + * You can get the generated key back by calling: + * + * aws_symmetric_cipher_get_key() + * + * If key is set, that key will be copied internally and used by the cipher. + * + * Returns NULL on failure. You can check aws_last_error() to get the error code indicating the failure cause. + */ +AWS_CAL_API struct aws_symmetric_cipher *aws_aes_keywrap_256_new( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key); + +/** + * Cleans up internal resources and state for cipher and then deallocates it. + */ +AWS_CAL_API void aws_symmetric_cipher_destroy(struct aws_symmetric_cipher *cipher); + +/** + * Encrypts the value in to_encrypt and writes the encrypted data into out. + * If out is dynamic it will be expanded. If it is not, and out is not large enough to handle + * the encrypted output, the call will fail. If you're trying to optimize to use a stack based array + * or something, make sure it's at least as large as the size of to_encrypt + an extra BLOCK to account for + * padding etc... + * + * returns AWS_OP_SUCCESS on success. Call aws_last_error() to determine the failure cause if it returns + * AWS_OP_ERR; + */ +AWS_CAL_API int aws_symmetric_cipher_encrypt( + struct aws_symmetric_cipher *cipher, + struct aws_byte_cursor to_encrypt, + struct aws_byte_buf *out); + +/** + * Decrypts the value in to_decrypt and writes the decrypted data into out. + * If out is dynamic it will be expanded. If it is not, and out is not large enough to handle + * the decrypted output, the call will fail. If you're trying to optimize to use a stack based array + * or something, make sure it's at least as large as the size of to_decrypt + an extra BLOCK to account for + * padding etc... + * + * returns AWS_OP_SUCCESS on success. Call aws_last_error() to determine the failure cause if it returns + * AWS_OP_ERR; + */ +AWS_CAL_API int aws_symmetric_cipher_decrypt( + struct aws_symmetric_cipher *cipher, + struct aws_byte_cursor to_decrypt, + struct aws_byte_buf *out); + +/** + * Encrypts any remaining data that was reserved for final padding, loads GMACs etc... and if there is any + * writes any remaining encrypted data to out. If out is dynamic it will be expanded. If it is not, and + * out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize + * to use a stack based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for + * padding etc... + * + * After invoking this function, you MUST call aws_symmetric_cipher_reset() before invoking any encrypt/decrypt + * operations on this cipher again. + * + * returns AWS_OP_SUCCESS on success. Call aws_last_error() to determine the failure cause if it returns + * AWS_OP_ERR; + */ +AWS_CAL_API int aws_symmetric_cipher_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); + +/** + * Decrypts any remaining data that was reserved for final padding, loads GMACs etc... and if there is any + * writes any remaining decrypted data to out. If out is dynamic it will be expanded. If it is not, and + * out is not large enough to handle the decrypted output, the call will fail. If you're trying to optimize + * to use a stack based array or something, make sure it's at least as large as the size of 2 BLOCKs to account for + * padding etc... + * + * After invoking this function, you MUST call aws_symmetric_cipher_reset() before invoking any encrypt/decrypt + * operations on this cipher again. 
+ * + * returns AWS_OP_SUCCESS on success. Call aws_last_error() to determine the failure cause if it returns + * AWS_OP_ERR; + */ +AWS_CAL_API int aws_symmetric_cipher_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out); + +/** + * Resets the cipher state for starting a new encrypt or decrypt operation. Note encrypt/decrypt cannot be mixed on the + * same cipher without a call to reset in between them. However, this leaves the key, iv etc... materials setup for + * immediate reuse. + * + * returns AWS_OP_SUCCESS on success. Call aws_last_error() to determine the failure cause if it returns + * AWS_OP_ERR; + */ +AWS_CAL_API int aws_symmetric_cipher_reset(struct aws_symmetric_cipher *cipher); + +/** + * Gets the current GMAC tag. If not AES GCM, this function will just return an empty cursor. + * The memory in this cursor is unsafe as it refers to the internal buffer. + * This was done because the use case doesn't require fetching these during an + * encryption or decryption operation and it dramatically simplifies the API. + * Only use this function between other calls to this API as any function call can alter the value of this tag. + * + * If you need to access it in a different pattern, copy the values to your own buffer first. + */ +AWS_CAL_API struct aws_byte_cursor aws_symmetric_cipher_get_tag(const struct aws_symmetric_cipher *cipher); + +/** + * Gets the original intialization vector as a cursor. + * The memory in this cursor is unsafe as it refers to the internal buffer. + * This was done because the use case doesn't require fetching these during an + * encryption or decryption operation and it dramatically simplifies the API. + * + * Unlike some other fields, this value does not change after the inital construction of the cipher. + * + * For some algorithms, such as AES Keywrap, this will return an empty cursor. + */ +AWS_CAL_API struct aws_byte_cursor aws_symmetric_cipher_get_initialization_vector( + const struct aws_symmetric_cipher *cipher); + +/** + * Gets the original key. + * + * The memory in this cursor is unsafe as it refers to the internal buffer. + * This was done because the use case doesn't require fetching these during an + * encryption or decryption operation and it dramatically simplifies the API. + * + * Unlike some other fields, this value does not change after the inital construction of the cipher. + */ +AWS_CAL_API struct aws_byte_cursor aws_symmetric_cipher_get_key(const struct aws_symmetric_cipher *cipher); + +/** + * Returns true if the state of the cipher is good, and otherwise returns false. + * Most operations, other than aws_symmetric_cipher_reset() will fail if this function is returning false. + * aws_symmetric_cipher_reset() will reset the state to a good state if possible. + */ +AWS_CAL_API bool aws_symmetric_cipher_is_good(const struct aws_symmetric_cipher *cipher); + +AWS_EXTERN_C_END +#endif /* AWS_CAL_SYMMETRIC_CIPHER_H */ diff --git a/source/cal.c b/source/cal.c index e793035c..d2d9d7ab 100644 --- a/source/cal.c +++ b/source/cal.c @@ -33,7 +33,10 @@ static struct aws_error_info s_errors[] = { AWS_DEFINE_ERROR_INFO_CAL( AWS_ERROR_CAL_UNSUPPORTED_ALGORITHM, "The specified algorithim is unsupported on this platform."), -}; + AWS_DEFINE_ERROR_INFO_CAL( + AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM, + "The input passed to a cipher algorithm was too large for that algorithm. 
Consider breaking the input into " + "smaller chunks.")}; static struct aws_error_info_list s_list = { .error_list = s_errors, diff --git a/source/darwin/commoncrypto_aes.c b/source/darwin/commoncrypto_aes.c new file mode 100644 index 00000000..2807cfff --- /dev/null +++ b/source/darwin/commoncrypto_aes.c @@ -0,0 +1,692 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ + +#include + +#include +#include +#include + +#include + +#if defined(__MAC_OS_X_VERSION_MAX_ALLOWED) +# if defined(__MAC_10_13) && (__MAC_OS_X_VERSION_MAX_ALLOWED >= __MAC_10_13) +# define MAC_10_13_AVAILABLE 1 +# elif defined(__MAC_10_14_4) && (__MAC_OS_X_VERSION_MAX_ALLOWED >= __MAC_10_14_4) +# define MAC_10_14_4_AVAILABLE 1 +# endif +#endif + +/* for OSX < 10.10 compatibility */ +typedef int32_t CCStatus; +typedef int32_t CCCryptorStatus; + +struct cc_aes_cipher { + struct aws_symmetric_cipher cipher_base; + struct _CCCryptor *encryptor_handle; + struct _CCCryptor *decryptor_handle; + struct aws_byte_buf working_buffer; +}; + +static int s_encrypt(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor input, struct aws_byte_buf *out) { + /* allow for a padded block by making sure we have at least a block of padding reserved. */ + size_t required_buffer_space = input.len + cipher->block_size - 1; + + if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + + size_t available_write_space = out->capacity - out->len; + struct cc_aes_cipher *cc_cipher = cipher->impl; + + size_t len_written = 0; + CCStatus status = CCCryptorUpdate( + cc_cipher->encryptor_handle, input.ptr, input.len, out->buffer + out->len, available_write_space, &len_written); + + if (status != kCCSuccess) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + out->len += len_written; + return AWS_OP_SUCCESS; +} + +static int s_decrypt(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor input, struct aws_byte_buf *out) { + /* allow for a padded block by making sure we have at least a block of padding reserved. */ + size_t required_buffer_space = input.len + cipher->block_size - 1; + + if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + + size_t available_write_space = out->capacity - out->len; + struct cc_aes_cipher *cc_cipher = cipher->impl; + + size_t len_written = 0; + CCStatus status = CCCryptorUpdate( + cc_cipher->decryptor_handle, input.ptr, input.len, out->buffer + out->len, available_write_space, &len_written); + + if (status != kCCSuccess) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + out->len += len_written; + return AWS_OP_SUCCESS; +} + +static int s_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { + /* in CBC mode, this will pad the final block from the previous encrypt call, or do nothing + * if we were already on a block boundary. In CTR mode this will do nothing. 
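+ * (CCCryptorFinal writes at most one cipher block here: the padded final block in CBC mode, nothing in CTR mode, which is why only block_size bytes are reserved below. This note is an editorial clarification, not part of the original patch.)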
*/ + size_t required_buffer_space = cipher->block_size; + size_t len_written = 0; + + if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + + size_t available_write_space = out->capacity - out->len; + struct cc_aes_cipher *cc_cipher = cipher->impl; + + CCStatus status = + CCCryptorFinal(cc_cipher->encryptor_handle, out->buffer + out->len, available_write_space, &len_written); + + if (status != kCCSuccess) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + out->len += len_written; + return AWS_OP_SUCCESS; +} + +static int s_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { + /* in CBC mode, this will pad the final block from the previous encrypt call, or do nothing + * if we were already on a block boundary. In CTR mode this will do nothing. */ + size_t required_buffer_space = cipher->block_size; + size_t len_written = 0; + + if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + + size_t available_write_space = out->capacity - out->len; + struct cc_aes_cipher *cc_cipher = cipher->impl; + + CCStatus status = + CCCryptorFinal(cc_cipher->decryptor_handle, out->buffer + out->len, available_write_space, &len_written); + + if (status != kCCSuccess) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + out->len += len_written; + return AWS_OP_SUCCESS; +} + +static int s_initialize_cbc_cipher_materials( + struct cc_aes_cipher *cc_cipher, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv) { + if (!cc_cipher->cipher_base.key.len) { + if (key) { + aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.key, cc_cipher->cipher_base.allocator, *key); + } else { + aws_byte_buf_init(&cc_cipher->cipher_base.key, cc_cipher->cipher_base.allocator, AWS_AES_256_KEY_BYTE_LEN); + aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cc_cipher->cipher_base.key); + } + } + + if (!cc_cipher->cipher_base.iv.len) { + if (iv) { + aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.iv, cc_cipher->cipher_base.allocator, *iv); + } else { + aws_byte_buf_init( + &cc_cipher->cipher_base.iv, cc_cipher->cipher_base.allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + aws_symmetric_cipher_generate_initialization_vector( + AWS_AES_256_CIPHER_BLOCK_SIZE, false, &cc_cipher->cipher_base.iv); + } + } + + CCCryptorStatus status = CCCryptorCreateWithMode( + kCCEncrypt, + kCCModeCBC, + kCCAlgorithmAES, + ccPKCS7Padding, + cc_cipher->cipher_base.iv.buffer, + cc_cipher->cipher_base.key.buffer, + cc_cipher->cipher_base.key.len, + NULL, + 0, + 0, + 0, + &cc_cipher->encryptor_handle); + + status |= CCCryptorCreateWithMode( + kCCDecrypt, + kCCModeCBC, + kCCAlgorithmAES, + ccPKCS7Padding, + cc_cipher->cipher_base.iv.buffer, + cc_cipher->cipher_base.key.buffer, + cc_cipher->cipher_base.key.len, + NULL, + 0, + 0, + 0, + &cc_cipher->decryptor_handle); + + return status == kCCSuccess ? 
AWS_OP_SUCCESS : aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); +} + +static int s_reset(struct aws_symmetric_cipher *cipher) { + struct cc_aes_cipher *cc_cipher = cipher->impl; + + if (cc_cipher->encryptor_handle) { + CCCryptorRelease(cc_cipher->encryptor_handle); + cc_cipher->encryptor_handle = NULL; + } + + if (cc_cipher->decryptor_handle) { + CCCryptorRelease(cc_cipher->decryptor_handle); + cc_cipher->decryptor_handle = NULL; + } + + aws_byte_buf_secure_zero(&cc_cipher->working_buffer); + + return AWS_OP_SUCCESS; +} + +static void s_destroy(struct aws_symmetric_cipher *cipher) { + aws_byte_buf_clean_up_secure(&cipher->key); + aws_byte_buf_clean_up_secure(&cipher->iv); + aws_byte_buf_clean_up_secure(&cipher->tag); + aws_byte_buf_clean_up_secure(&cipher->aad); + + s_reset(cipher); + + struct cc_aes_cipher *cc_cipher = cipher->impl; + aws_byte_buf_clean_up_secure(&cc_cipher->working_buffer); + + aws_mem_release(cipher->allocator, cc_cipher); +} + +static int s_cbc_reset(struct aws_symmetric_cipher *cipher) { + struct cc_aes_cipher *cc_cipher = cipher->impl; + + int ret_val = s_reset(cipher); + + if (ret_val == AWS_OP_SUCCESS) { + ret_val = s_initialize_cbc_cipher_materials(cc_cipher, NULL, NULL); + } + + return ret_val; +} + +static struct aws_symmetric_cipher_vtable s_aes_cbc_vtable = { + .finalize_decryption = s_finalize_decryption, + .finalize_encryption = s_finalize_encryption, + .decrypt = s_decrypt, + .encrypt = s_encrypt, + .provider = "CommonCrypto", + .alg_name = "AES-CBC 256", + .destroy = s_destroy, + .reset = s_cbc_reset, +}; + +struct aws_symmetric_cipher *aws_aes_cbc_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv) { + struct cc_aes_cipher *cc_cipher = aws_mem_calloc(allocator, 1, sizeof(struct cc_aes_cipher)); + cc_cipher->cipher_base.allocator = allocator; + cc_cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE; + cc_cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN; + cc_cipher->cipher_base.impl = cc_cipher; + cc_cipher->cipher_base.vtable = &s_aes_cbc_vtable; + + if (s_initialize_cbc_cipher_materials(cc_cipher, key, iv) != AWS_OP_SUCCESS) { + s_destroy(&cc_cipher->cipher_base); + return NULL; + } + + cc_cipher->cipher_base.good = true; + cc_cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN; + + return &cc_cipher->cipher_base; +} + +static int s_initialize_ctr_cipher_materials( + struct cc_aes_cipher *cc_cipher, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv) { + if (!cc_cipher->cipher_base.key.len) { + if (key) { + aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.key, cc_cipher->cipher_base.allocator, *key); + } else { + aws_byte_buf_init(&cc_cipher->cipher_base.key, cc_cipher->cipher_base.allocator, AWS_AES_256_KEY_BYTE_LEN); + aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cc_cipher->cipher_base.key); + } + } + + if (!cc_cipher->cipher_base.iv.len) { + if (iv) { + aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.iv, cc_cipher->cipher_base.allocator, *iv); + } else { + aws_byte_buf_init( + &cc_cipher->cipher_base.iv, cc_cipher->cipher_base.allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + aws_symmetric_cipher_generate_initialization_vector( + AWS_AES_256_CIPHER_BLOCK_SIZE, true, &cc_cipher->cipher_base.iv); + } + } + + CCCryptorStatus status = CCCryptorCreateWithMode( + kCCEncrypt, + kCCModeCTR, + kCCAlgorithmAES, + ccNoPadding, + cc_cipher->cipher_base.iv.buffer, + cc_cipher->cipher_base.key.buffer, + 
cc_cipher->cipher_base.key.len, + NULL, + 0, + 0, + kCCModeOptionCTR_BE, + &cc_cipher->encryptor_handle); + + status |= CCCryptorCreateWithMode( + kCCDecrypt, + kCCModeCTR, + kCCAlgorithmAES, + ccNoPadding, + cc_cipher->cipher_base.iv.buffer, + cc_cipher->cipher_base.key.buffer, + cc_cipher->cipher_base.key.len, + NULL, + 0, + 0, + kCCModeOptionCTR_BE, + &cc_cipher->decryptor_handle); + + return status == kCCSuccess ? AWS_OP_SUCCESS : aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); +} + +static int s_ctr_reset(struct aws_symmetric_cipher *cipher) { + struct cc_aes_cipher *cc_cipher = cipher->impl; + + int ret_val = s_reset(cipher); + + if (ret_val == AWS_OP_SUCCESS) { + ret_val = s_initialize_ctr_cipher_materials(cc_cipher, NULL, NULL); + } + + return ret_val; +} + +static struct aws_symmetric_cipher_vtable s_aes_ctr_vtable = { + .finalize_decryption = s_finalize_decryption, + .finalize_encryption = s_finalize_encryption, + .decrypt = s_decrypt, + .encrypt = s_encrypt, + .provider = "CommonCrypto", + .alg_name = "AES-CTR 256", + .destroy = s_destroy, + .reset = s_ctr_reset, +}; + +struct aws_symmetric_cipher *aws_aes_ctr_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv) { + struct cc_aes_cipher *cc_cipher = aws_mem_calloc(allocator, 1, sizeof(struct cc_aes_cipher)); + cc_cipher->cipher_base.allocator = allocator; + cc_cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE; + cc_cipher->cipher_base.impl = cc_cipher; + cc_cipher->cipher_base.vtable = &s_aes_ctr_vtable; + + if (s_initialize_ctr_cipher_materials(cc_cipher, key, iv) != AWS_OP_SUCCESS) { + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + s_destroy(&cc_cipher->cipher_base); + return NULL; + } + + cc_cipher->cipher_base.good = true; + cc_cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN; + + return &cc_cipher->cipher_base; +} + +static int s_finalize_gcm_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { + (void)out; + + /* user specification takes precedence. If its wrong its wrong */ + if (!cipher->tag.len) { + aws_byte_buf_init(&cipher->tag, cipher->allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + } + + struct cc_aes_cipher *cc_cipher = cipher->impl; + + CCStatus status; + size_t tag_length = AWS_AES_256_CIPHER_BLOCK_SIZE; + /* Note that CCCryptorGCMFinal is deprecated in Mac 10.13. It also doesn't compare the tag with expected tag + * https://opensource.apple.com/source/CommonCrypto/CommonCrypto-60118.1.1/include/CommonCryptorSPI.h.auto.html + */ +#ifdef MAC_10_13_AVAILABLE + status = CCCryptorGCMFinalize(cc_cipher->encryptor_handle, cipher->tag.buffer, tag_length); +#else + status = CCCryptorGCMFinal(cc_cipher->encryptor_handle, cipher->tag.buffer, &tag_length); +#endif + + if (status != kCCSuccess) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + cipher->tag.len = tag_length; + return AWS_OP_SUCCESS; +} + +static int s_finalize_gcm_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { + (void)out; + + struct cc_aes_cipher *cc_cipher = cipher->impl; + + CCStatus status; + size_t tag_length = AWS_AES_256_CIPHER_BLOCK_SIZE; + /* Note that CCCryptorGCMFinal is deprecated in Mac 10.13. 
It also doesn't compare the tag with expected tag + * https://opensource.apple.com/source/CommonCrypto/CommonCrypto-60118.1.1/include/CommonCryptorSPI.h.auto.html + */ +#ifdef MAC_10_13_AVAILABLE + status = CCCryptorGCMFinalize(cc_cipher->decryptor_handle, cipher->tag.buffer, tag_length); +#else + status = CCCryptorGCMFinal(cc_cipher->decryptor_handle, cipher->tag.buffer, &tag_length); +#endif + + if (status != kCCSuccess) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + return AWS_OP_SUCCESS; +} + +static int s_initialize_gcm_cipher_materials( + struct cc_aes_cipher *cc_cipher, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv, + const struct aws_byte_cursor *aad, + const struct aws_byte_cursor *tag) { + if (!cc_cipher->cipher_base.key.len) { + if (key) { + aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.key, cc_cipher->cipher_base.allocator, *key); + } else { + aws_byte_buf_init(&cc_cipher->cipher_base.key, cc_cipher->cipher_base.allocator, AWS_AES_256_KEY_BYTE_LEN); + aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cc_cipher->cipher_base.key); + } + } + + if (!cc_cipher->cipher_base.iv.len) { + if (iv) { + aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.iv, cc_cipher->cipher_base.allocator, *iv); + } else { + /* GCM IVs are kind of a hidden implementation detail. 4 are reserved by the system for long running stream + * blocks. */ + /* This is because there's a GMAC attached to the cipher (that's what tag is for). For that to work, it has + * to control the actual counter */ + aws_byte_buf_init( + &cc_cipher->cipher_base.iv, cc_cipher->cipher_base.allocator, AWS_AES_256_CIPHER_BLOCK_SIZE - 4); + aws_symmetric_cipher_generate_initialization_vector( + AWS_AES_256_CIPHER_BLOCK_SIZE - 4, false, &cc_cipher->cipher_base.iv); + } + } + + if (aad && aad->len) { + aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.aad, cc_cipher->cipher_base.allocator, *aad); + } + + if (tag && tag->len) { + aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.tag, cc_cipher->cipher_base.allocator, *tag); + } + + CCCryptorStatus status = CCCryptorCreateWithMode( + kCCEncrypt, + kCCModeGCM, + kCCAlgorithmAES, + ccNoPadding, + NULL, + cc_cipher->cipher_base.key.buffer, + cc_cipher->cipher_base.key.len, + NULL, + 0, + 0, + kCCModeOptionCTR_BE, + &cc_cipher->encryptor_handle); + + if (status != kCCSuccess) { + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + +#ifdef MAC_10_13_AVAILABLE + status = + CCCryptorGCMSetIV(cc_cipher->encryptor_handle, cc_cipher->cipher_base.iv.buffer, cc_cipher->cipher_base.iv.len); +#else + status = + CCCryptorGCMAddIV(cc_cipher->encryptor_handle, cc_cipher->cipher_base.iv.buffer, cc_cipher->cipher_base.iv.len); +#endif + + if (status != kCCSuccess) { + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + if (cc_cipher->cipher_base.aad.len) { + status = CCCryptorGCMAddAAD( + cc_cipher->encryptor_handle, cc_cipher->cipher_base.aad.buffer, cc_cipher->cipher_base.aad.len); + + if (status != kCCSuccess) { + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + } + + status = CCCryptorCreateWithMode( + kCCDecrypt, + kCCModeGCM, + kCCAlgorithmAES, + ccNoPadding, + NULL, + cc_cipher->cipher_base.key.buffer, + cc_cipher->cipher_base.key.len, + NULL, + 0, + 0, + kCCModeOptionCTR_BE, + &cc_cipher->decryptor_handle); + + if (status != kCCSuccess) { + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + +#ifdef MAC_10_13_AVAILABLE + status = + 
CCCryptorGCMSetIV(cc_cipher->decryptor_handle, cc_cipher->cipher_base.iv.buffer, cc_cipher->cipher_base.iv.len); +#else + status = + CCCryptorGCMAddIV(cc_cipher->decryptor_handle, cc_cipher->cipher_base.iv.buffer, cc_cipher->cipher_base.iv.len); +#endif + + if (status != kCCSuccess) { + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + if (cc_cipher->cipher_base.aad.len) { + status = CCCryptorGCMAddAAD( + cc_cipher->decryptor_handle, cc_cipher->cipher_base.aad.buffer, cc_cipher->cipher_base.aad.len); + } + + if (status != kCCSuccess) { + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + return AWS_OP_SUCCESS; +} + +static int s_gcm_reset(struct aws_symmetric_cipher *cipher) { + struct cc_aes_cipher *cc_cipher = cipher->impl; + + int ret_val = s_reset(cipher); + + if (ret_val == AWS_OP_SUCCESS) { + ret_val = s_initialize_gcm_cipher_materials(cc_cipher, NULL, NULL, NULL, NULL); + } + + return ret_val; +} + +static struct aws_symmetric_cipher_vtable s_aes_gcm_vtable = { + .finalize_decryption = s_finalize_gcm_decryption, + .finalize_encryption = s_finalize_gcm_encryption, + .decrypt = s_decrypt, + .encrypt = s_encrypt, + .provider = "CommonCrypto", + .alg_name = "AES-GCM 256", + .destroy = s_destroy, + .reset = s_gcm_reset, +}; + +struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv, + const struct aws_byte_cursor *aad, + const struct aws_byte_cursor *tag) { + struct cc_aes_cipher *cc_cipher = aws_mem_calloc(allocator, 1, sizeof(struct cc_aes_cipher)); + cc_cipher->cipher_base.allocator = allocator; + cc_cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE; + cc_cipher->cipher_base.impl = cc_cipher; + cc_cipher->cipher_base.vtable = &s_aes_gcm_vtable; + + if (s_initialize_gcm_cipher_materials(cc_cipher, key, iv, aad, tag) != AWS_OP_SUCCESS) { + s_destroy(&cc_cipher->cipher_base); + return NULL; + } + + cc_cipher->cipher_base.good = true; + cc_cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN; + + return &cc_cipher->cipher_base; +} + +static int s_keywrap_encrypt_decrypt( + struct aws_symmetric_cipher *cipher, + struct aws_byte_cursor input, + struct aws_byte_buf *out) { + struct cc_aes_cipher *cc_cipher = cipher->impl; + return aws_byte_buf_append_dynamic(&cc_cipher->working_buffer, &input); +} + +static int s_finalize_keywrap_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { + struct cc_aes_cipher *cc_cipher = cipher->impl; + + if (cc_cipher->working_buffer.len == 0) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_STATE); + } + + size_t output_buffer_len = cipher->block_size + cc_cipher->working_buffer.len; + + if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, output_buffer_len)) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + + CCCryptorStatus status = CCSymmetricKeyWrap( + kCCWRAPAES, + CCrfc3394_iv, + CCrfc3394_ivLen, + cipher->key.buffer, + cipher->key.len, + cc_cipher->working_buffer.buffer, + cc_cipher->working_buffer.len, + out->buffer, + &output_buffer_len); + + if (status != kCCSuccess) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_STATE); + } + + out->len += output_buffer_len; + + return AWS_OP_SUCCESS; +} + +static int s_finalize_keywrap_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { + struct cc_aes_cipher *cc_cipher = cipher->impl; + + if (cc_cipher->working_buffer.len == 0) { + cipher->good = false; + return 
aws_raise_error(AWS_ERROR_INVALID_STATE); + } + + size_t output_buffer_len = cipher->block_size + cc_cipher->working_buffer.len; + + if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, output_buffer_len)) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + + CCCryptorStatus status = CCSymmetricKeyUnwrap( + kCCWRAPAES, + CCrfc3394_iv, + CCrfc3394_ivLen, + cipher->key.buffer, + cipher->key.len, + cc_cipher->working_buffer.buffer, + cc_cipher->working_buffer.len, + out->buffer, + &output_buffer_len); + + if (status != kCCSuccess) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_STATE); + } + + out->len += output_buffer_len; + + return AWS_OP_SUCCESS; +} + +static struct aws_symmetric_cipher_vtable s_aes_keywrap_vtable = { + .finalize_decryption = s_finalize_keywrap_decryption, + .finalize_encryption = s_finalize_keywrap_encryption, + .decrypt = s_keywrap_encrypt_decrypt, + .encrypt = s_keywrap_encrypt_decrypt, + .provider = "CommonCrypto", + .alg_name = "AES-KEYWRAP 256", + .destroy = s_destroy, + .reset = s_reset, +}; + +struct aws_symmetric_cipher *aws_aes_keywrap_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key) { + struct cc_aes_cipher *cc_cipher = aws_mem_calloc(allocator, 1, sizeof(struct cc_aes_cipher)); + cc_cipher->cipher_base.allocator = allocator; + cc_cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE / 2; + cc_cipher->cipher_base.impl = cc_cipher; + cc_cipher->cipher_base.vtable = &s_aes_keywrap_vtable; + + if (key) { + aws_byte_buf_init_copy_from_cursor(&cc_cipher->cipher_base.key, cc_cipher->cipher_base.allocator, *key); + } else { + aws_byte_buf_init(&cc_cipher->cipher_base.key, cc_cipher->cipher_base.allocator, AWS_AES_256_KEY_BYTE_LEN); + aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cc_cipher->cipher_base.key); + } + + aws_byte_buf_init(&cc_cipher->working_buffer, allocator, (AWS_AES_256_CIPHER_BLOCK_SIZE * 2) + 8); + cc_cipher->cipher_base.good = true; + cc_cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN; + + return &cc_cipher->cipher_base; +} diff --git a/source/symmetric_cipher.c b/source/symmetric_cipher.c new file mode 100644 index 00000000..c087daf1 --- /dev/null +++ b/source/symmetric_cipher.c @@ -0,0 +1,225 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
+ */ +#include +#include +#include + +#ifndef BYO_CRYPTO + +extern struct aws_symmetric_cipher *aws_aes_cbc_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv); + +extern struct aws_symmetric_cipher *aws_aes_ctr_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv); + +extern struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv, + const struct aws_byte_cursor *aad, + const struct aws_byte_cursor *decryption_tag); + +extern struct aws_symmetric_cipher *aws_aes_keywrap_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key); + +#else /* BYO_CRYPTO */ +struct aws_symmetric_cipher *aws_aes_cbc_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv) { + (void)allocator; + (void)key; + (void)iv; + abort(); +} + +struct aws_symmetric_cipher *aws_aes_ctr_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv) { + (void)allocator; + (void)key; + (void)iv; + abort(); +} + +struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv, + const struct aws_byte_cursor *aad, + const struct aws_byte_cursor *decryption_tag) { + (void)allocator; + (void)key; + (void)iv; + (void)aad; + (void)decryption_tag; + abort(); +} + +struct aws_symmetric_cipher *aws_aes_keywrap_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key) { + (void)allocator; + (void)key; + abort(); +} + +#endif /* BYO_CRYPTO */ + +static aws_aes_cbc_256_new_fn *s_aes_cbc_new_fn = aws_aes_cbc_256_new_impl; +static aws_aes_ctr_256_new_fn *s_aes_ctr_new_fn = aws_aes_ctr_256_new_impl; +static aws_aes_gcm_256_new_fn *s_aes_gcm_new_fn = aws_aes_gcm_256_new_impl; +static aws_aes_keywrap_256_new_fn *s_aes_keywrap_new_fn = aws_aes_keywrap_256_new_impl; + +static bool s_check_input_size_limits(const struct aws_symmetric_cipher *cipher, const struct aws_byte_cursor *input) { + /* libcrypto uses int, not size_t, so this is the limit. + * For simplicity, enforce the same rules on all platforms. 
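+ * (Editorial note, not in the original patch: the block_size headroom leaves room for up to one block of padding growth in the output, so the padded output length also still fits in an int.)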
*/ + return input->len <= INT_MAX - cipher->block_size; +} + +struct aws_symmetric_cipher *aws_aes_cbc_256_new( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv) { + return s_aes_cbc_new_fn(allocator, key, iv); +} + +struct aws_symmetric_cipher *aws_aes_ctr_256_new( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv) { + return s_aes_ctr_new_fn(allocator, key, iv); +} + +struct aws_symmetric_cipher *aws_aes_gcm_256_new( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv, + const struct aws_byte_cursor *aad, + const struct aws_byte_cursor *decryption_tag) { + return s_aes_gcm_new_fn(allocator, key, iv, aad, decryption_tag); +} + +struct aws_symmetric_cipher *aws_aes_keywrap_256_new( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key) { + return s_aes_keywrap_new_fn(allocator, key); +} + +void aws_symmetric_cipher_destroy(struct aws_symmetric_cipher *cipher) { + if (cipher) { + cipher->vtable->destroy(cipher); + } +} + +int aws_symmetric_cipher_encrypt( + struct aws_symmetric_cipher *cipher, + struct aws_byte_cursor to_encrypt, + struct aws_byte_buf *out) { + + if (AWS_UNLIKELY(!s_check_input_size_limits(cipher, &to_encrypt))) { + return aws_raise_error(AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM); + } + + if (cipher->good) { + return cipher->vtable->encrypt(cipher, to_encrypt, out); + } + + return aws_raise_error(AWS_ERROR_INVALID_STATE); +} + +int aws_symmetric_cipher_decrypt( + struct aws_symmetric_cipher *cipher, + struct aws_byte_cursor to_decrypt, + struct aws_byte_buf *out) { + + if (AWS_UNLIKELY(!s_check_input_size_limits(cipher, &to_decrypt))) { + return aws_raise_error(AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM); + } + + if (cipher->good) { + return cipher->vtable->decrypt(cipher, to_decrypt, out); + } + + return aws_raise_error(AWS_ERROR_INVALID_STATE); +} + +int aws_symmetric_cipher_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { + if (cipher->good) { + int ret_val = cipher->vtable->finalize_encryption(cipher, out); + cipher->good = false; + return ret_val; + } + + return aws_raise_error(AWS_ERROR_INVALID_STATE); +} + +int aws_symmetric_cipher_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { + if (cipher->good) { + int ret_val = cipher->vtable->finalize_decryption(cipher, out); + cipher->good = false; + return ret_val; + } + return aws_raise_error(AWS_ERROR_INVALID_STATE); +} + +int aws_symmetric_cipher_reset(struct aws_symmetric_cipher *cipher) { + int ret_val = cipher->vtable->reset(cipher); + if (ret_val == AWS_OP_SUCCESS) { + cipher->good = true; + } + + return ret_val; +} + +struct aws_byte_cursor aws_symmetric_cipher_get_tag(const struct aws_symmetric_cipher *cipher) { + return aws_byte_cursor_from_buf(&cipher->tag); +} + +struct aws_byte_cursor aws_symmetric_cipher_get_initialization_vector(const struct aws_symmetric_cipher *cipher) { + return aws_byte_cursor_from_buf(&cipher->iv); +} + +struct aws_byte_cursor aws_symmetric_cipher_get_key(const struct aws_symmetric_cipher *cipher) { + return aws_byte_cursor_from_buf(&cipher->key); +} + +bool aws_symmetric_cipher_is_good(const struct aws_symmetric_cipher *cipher) { + return cipher->good; +} + +void aws_symmetric_cipher_generate_initialization_vector( + size_t len_bytes, + bool is_counter_mode, + struct aws_byte_buf *out) { + size_t counter_len = is_counter_mode ? 
sizeof(uint32_t) : 0; + AWS_ASSERT(len_bytes > counter_len); + size_t rand_len = len_bytes - counter_len; + + AWS_FATAL_ASSERT(aws_device_random_buffer_append(out, rand_len) == AWS_OP_SUCCESS); + + if (is_counter_mode) { + /* put counter at the end, initialized to 1 */ + aws_byte_buf_write_be32(out, 1); + } +} + +void aws_symmetric_cipher_generate_key(size_t key_len_bytes, struct aws_byte_buf *out) { + AWS_FATAL_ASSERT(aws_device_random_buffer_append(out, key_len_bytes) == AWS_OP_SUCCESS); +} + +int aws_symmetric_cipher_try_ensure_sufficient_buffer_space(struct aws_byte_buf *buf, size_t size) { + if (buf->capacity - buf->len < size) { + return aws_byte_buf_reserve_relative(buf, size); + } + + return AWS_OP_SUCCESS; +} diff --git a/source/unix/openssl_aes.c b/source/unix/openssl_aes.c new file mode 100644 index 00000000..a45e38d1 --- /dev/null +++ b/source/unix/openssl_aes.c @@ -0,0 +1,717 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ +#include + +#include + +struct openssl_aes_cipher { + struct aws_symmetric_cipher cipher_base; + EVP_CIPHER_CTX *encryptor_ctx; + EVP_CIPHER_CTX *decryptor_ctx; + struct aws_byte_buf working_buffer; +}; + +static int s_encrypt(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor input, struct aws_byte_buf *out) { + + size_t required_buffer_space = input.len + cipher->block_size; + + if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + + size_t available_write_space = out->capacity - out->len; + struct openssl_aes_cipher *openssl_cipher = cipher->impl; + + int len_written = (int)(available_write_space); + if (!EVP_EncryptUpdate( + openssl_cipher->encryptor_ctx, out->buffer + out->len, &len_written, input.ptr, (int)input.len)) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + out->len += len_written; + return AWS_OP_SUCCESS; +} + +static int s_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { + struct openssl_aes_cipher *openssl_cipher = cipher->impl; + + size_t required_buffer_space = cipher->block_size; + + if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + + int len_written = (int)(out->capacity - out->len); + if (!EVP_EncryptFinal_ex(openssl_cipher->encryptor_ctx, out->buffer + out->len, &len_written)) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + out->len += len_written; + return AWS_OP_SUCCESS; +} + +static int s_decrypt(struct aws_symmetric_cipher *cipher, struct aws_byte_cursor input, struct aws_byte_buf *out) { + struct openssl_aes_cipher *openssl_cipher = cipher->impl; + + size_t required_buffer_space = input.len + cipher->block_size; + + if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + + size_t available_write_space = out->capacity - out->len; + + int len_written = (int)available_write_space; + if (!EVP_DecryptUpdate( + openssl_cipher->decryptor_ctx, out->buffer + out->len, &len_written, input.ptr, (int)input.len)) { + cipher->good = false; + + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + out->len += len_written; + return AWS_OP_SUCCESS; +} + +static int s_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { + struct 
openssl_aes_cipher *openssl_cipher = cipher->impl; + + size_t required_buffer_space = cipher->block_size; + + if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + + int len_written = (int)out->capacity - out->len; + if (!EVP_DecryptFinal_ex(openssl_cipher->decryptor_ctx, out->buffer + out->len, &len_written)) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + out->len += len_written; + return AWS_OP_SUCCESS; +} + +static void s_destroy(struct aws_symmetric_cipher *cipher) { + struct openssl_aes_cipher *openssl_cipher = cipher->impl; + + if (openssl_cipher->encryptor_ctx) { + EVP_CIPHER_CTX_free(openssl_cipher->encryptor_ctx); + } + + if (openssl_cipher->decryptor_ctx) { + EVP_CIPHER_CTX_free(openssl_cipher->decryptor_ctx); + } + + aws_byte_buf_clean_up_secure(&cipher->key); + aws_byte_buf_clean_up_secure(&cipher->iv); + + if (cipher->tag.buffer) { + aws_byte_buf_clean_up_secure(&cipher->tag); + } + + if (cipher->aad.buffer) { + aws_byte_buf_clean_up_secure(&cipher->aad); + } + + aws_byte_buf_clean_up_secure(&openssl_cipher->working_buffer); + + aws_mem_release(cipher->allocator, openssl_cipher); +} + +static int s_reset(struct aws_symmetric_cipher *cipher) { + struct openssl_aes_cipher *openssl_cipher = cipher->impl; + + if (!EVP_CIPHER_CTX_reset(openssl_cipher->encryptor_ctx)) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_STATE); + } + + if (!EVP_CIPHER_CTX_reset(openssl_cipher->decryptor_ctx)) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_STATE); + } + + aws_byte_buf_secure_zero(&openssl_cipher->working_buffer); + cipher->good = true; + return AWS_OP_SUCCESS; +} + +static int s_init_cbc_cipher_materials(struct aws_symmetric_cipher *cipher) { + struct openssl_aes_cipher *openssl_cipher = cipher->impl; + + if (!EVP_EncryptInit_ex( + openssl_cipher->encryptor_ctx, + EVP_aes_256_cbc(), + NULL, + openssl_cipher->cipher_base.key.buffer, + openssl_cipher->cipher_base.iv.buffer) || + !EVP_DecryptInit_ex( + openssl_cipher->decryptor_ctx, + EVP_aes_256_cbc(), + NULL, + openssl_cipher->cipher_base.key.buffer, + openssl_cipher->cipher_base.iv.buffer)) { + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + return AWS_OP_SUCCESS; +} + +static int s_reset_cbc_cipher_materials(struct aws_symmetric_cipher *cipher) { + int ret_val = s_reset(cipher); + + if (ret_val == AWS_OP_SUCCESS) { + return s_init_cbc_cipher_materials(cipher); + } + + return ret_val; +} + +static struct aws_symmetric_cipher_vtable s_cbc_vtable = { + .alg_name = "AES-CBC 256", + .provider = "OpenSSL Compatible LibCrypto", + .destroy = s_destroy, + .reset = s_reset_cbc_cipher_materials, + .decrypt = s_decrypt, + .encrypt = s_encrypt, + .finalize_decryption = s_finalize_decryption, + .finalize_encryption = s_finalize_encryption, +}; + +struct aws_symmetric_cipher *aws_aes_cbc_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv) { + struct openssl_aes_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct openssl_aes_cipher)); + + cipher->cipher_base.allocator = allocator; + cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE; + cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN; + cipher->cipher_base.vtable = &s_cbc_vtable; + cipher->cipher_base.impl = cipher; + + if (key) { + aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.key, allocator, *key); + } 
else { + aws_byte_buf_init(&cipher->cipher_base.key, allocator, AWS_AES_256_KEY_BYTE_LEN); + aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cipher->cipher_base.key); + } + + if (iv) { + aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.iv, allocator, *iv); + } else { + aws_byte_buf_init(&cipher->cipher_base.iv, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + aws_symmetric_cipher_generate_initialization_vector( + AWS_AES_256_CIPHER_BLOCK_SIZE, false, &cipher->cipher_base.iv); + } + + /* EVP_CIPHER_CTX_init() will be called inside EVP_CIPHER_CTX_new(). */ + cipher->encryptor_ctx = EVP_CIPHER_CTX_new(); + AWS_FATAL_ASSERT(cipher->encryptor_ctx && "Cipher initialization failed!"); + + /* EVP_CIPHER_CTX_init() will be called inside EVP_CIPHER_CTX_new(). */ + cipher->decryptor_ctx = EVP_CIPHER_CTX_new(); + AWS_FATAL_ASSERT(cipher->decryptor_ctx && "Cipher initialization failed!"); + + if (s_init_cbc_cipher_materials(&cipher->cipher_base) != AWS_OP_SUCCESS) { + goto error; + } + + cipher->cipher_base.good = true; + return &cipher->cipher_base; + +error: + s_destroy(&cipher->cipher_base); + return NULL; +} + +static int s_init_ctr_cipher_materials(struct aws_symmetric_cipher *cipher) { + struct openssl_aes_cipher *openssl_cipher = cipher->impl; + + if (!(EVP_EncryptInit_ex( + openssl_cipher->encryptor_ctx, + EVP_aes_256_ctr(), + NULL, + openssl_cipher->cipher_base.key.buffer, + openssl_cipher->cipher_base.iv.buffer) && + EVP_CIPHER_CTX_set_padding(openssl_cipher->encryptor_ctx, 0)) || + !(EVP_DecryptInit_ex( + openssl_cipher->decryptor_ctx, + EVP_aes_256_ctr(), + NULL, + openssl_cipher->cipher_base.key.buffer, + openssl_cipher->cipher_base.iv.buffer) && + EVP_CIPHER_CTX_set_padding(openssl_cipher->decryptor_ctx, 0))) { + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + return AWS_OP_SUCCESS; +} + +static int s_reset_ctr_cipher_materials(struct aws_symmetric_cipher *cipher) { + int ret_val = s_reset(cipher); + + if (ret_val == AWS_OP_SUCCESS) { + return s_init_ctr_cipher_materials(cipher); + } + + return ret_val; +} + +static struct aws_symmetric_cipher_vtable s_ctr_vtable = { + .alg_name = "AES-CTR 256", + .provider = "OpenSSL Compatible LibCrypto", + .destroy = s_destroy, + .reset = s_reset_ctr_cipher_materials, + .decrypt = s_decrypt, + .encrypt = s_encrypt, + .finalize_decryption = s_finalize_decryption, + .finalize_encryption = s_finalize_encryption, +}; + +struct aws_symmetric_cipher *aws_aes_ctr_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv) { + struct openssl_aes_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct openssl_aes_cipher)); + + cipher->cipher_base.allocator = allocator; + cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE; + cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN; + cipher->cipher_base.vtable = &s_ctr_vtable; + cipher->cipher_base.impl = cipher; + + if (key) { + aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.key, allocator, *key); + } else { + aws_byte_buf_init(&cipher->cipher_base.key, allocator, AWS_AES_256_KEY_BYTE_LEN); + aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cipher->cipher_base.key); + } + + if (iv) { + aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.iv, allocator, *iv); + } else { + aws_byte_buf_init(&cipher->cipher_base.iv, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + aws_symmetric_cipher_generate_initialization_vector( + AWS_AES_256_CIPHER_BLOCK_SIZE, false, &cipher->cipher_base.iv); + } 
+ + /* EVP_CIPHER_CTX_init() will be called inside EVP_CIPHER_CTX_new(). */ + cipher->encryptor_ctx = EVP_CIPHER_CTX_new(); + AWS_FATAL_ASSERT(cipher->encryptor_ctx && "Cipher initialization failed!"); + + /* EVP_CIPHER_CTX_init() will be called inside EVP_CIPHER_CTX_new(). */ + cipher->decryptor_ctx = EVP_CIPHER_CTX_new(); + AWS_FATAL_ASSERT(cipher->decryptor_ctx && "Cipher initialization failed!"); + + if (s_init_ctr_cipher_materials(&cipher->cipher_base) != AWS_OP_SUCCESS) { + goto error; + } + + cipher->cipher_base.good = true; + return &cipher->cipher_base; + +error: + s_destroy(&cipher->cipher_base); + return NULL; +} + +static int s_finalize_gcm_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { + struct openssl_aes_cipher *openssl_cipher = cipher->impl; + + int ret_val = s_finalize_encryption(cipher, out); + + if (ret_val == AWS_OP_SUCCESS) { + if (!cipher->tag.len) { + if (!EVP_CIPHER_CTX_ctrl( + openssl_cipher->encryptor_ctx, + EVP_CTRL_GCM_GET_TAG, + (int)cipher->tag.capacity, + cipher->tag.buffer)) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + cipher->tag.len = AWS_AES_256_CIPHER_BLOCK_SIZE; + } + } + + return ret_val; +} + +static int s_init_gcm_cipher_materials(struct aws_symmetric_cipher *cipher) { + struct openssl_aes_cipher *openssl_cipher = cipher->impl; + + if (!(EVP_EncryptInit_ex(openssl_cipher->encryptor_ctx, EVP_aes_256_gcm(), NULL, NULL, NULL) && + EVP_EncryptInit_ex( + openssl_cipher->encryptor_ctx, + NULL, + NULL, + openssl_cipher->cipher_base.key.buffer, + openssl_cipher->cipher_base.iv.buffer) && + EVP_CIPHER_CTX_set_padding(openssl_cipher->encryptor_ctx, 0)) || + !(EVP_DecryptInit_ex(openssl_cipher->decryptor_ctx, EVP_aes_256_gcm(), NULL, NULL, NULL) && + EVP_DecryptInit_ex( + openssl_cipher->decryptor_ctx, + NULL, + NULL, + openssl_cipher->cipher_base.key.buffer, + openssl_cipher->cipher_base.iv.buffer) && + EVP_CIPHER_CTX_set_padding(openssl_cipher->decryptor_ctx, 0))) { + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + if (openssl_cipher->cipher_base.aad.len) { + int outLen = 0; + if (!EVP_EncryptUpdate( + openssl_cipher->encryptor_ctx, + NULL, + &outLen, + openssl_cipher->cipher_base.aad.buffer, + (int)openssl_cipher->cipher_base.aad.len) || + !EVP_DecryptUpdate( + openssl_cipher->decryptor_ctx, + NULL, + &outLen, + openssl_cipher->cipher_base.aad.buffer, + (int)openssl_cipher->cipher_base.aad.len)) { + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + } + + if (openssl_cipher->cipher_base.tag.len) { + if (!EVP_CIPHER_CTX_ctrl( + openssl_cipher->decryptor_ctx, + EVP_CTRL_GCM_SET_TAG, + (int)openssl_cipher->cipher_base.tag.len, + openssl_cipher->cipher_base.tag.buffer)) { + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + } + + return AWS_OP_SUCCESS; +} + +static int s_reset_gcm_cipher_materials(struct aws_symmetric_cipher *cipher) { + int ret_val = s_reset(cipher); + + if (ret_val == AWS_OP_SUCCESS) { + return s_init_gcm_cipher_materials(cipher); + } + + return ret_val; +} + +static struct aws_symmetric_cipher_vtable s_gcm_vtable = { + .alg_name = "AES-GCM 256", + .provider = "OpenSSL Compatible LibCrypto", + .destroy = s_destroy, + .reset = s_reset_gcm_cipher_materials, + .decrypt = s_decrypt, + .encrypt = s_encrypt, + .finalize_decryption = s_finalize_decryption, + .finalize_encryption = s_finalize_gcm_encryption, +}; + +struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct 
aws_byte_cursor *iv, + const struct aws_byte_cursor *aad, + const struct aws_byte_cursor *decryption_tag) { + + struct openssl_aes_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct openssl_aes_cipher)); + cipher->cipher_base.allocator = allocator; + cipher->cipher_base.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE; + cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN; + cipher->cipher_base.vtable = &s_gcm_vtable; + cipher->cipher_base.impl = cipher; + + /* Copy key into the cipher context. */ + if (key) { + aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.key, allocator, *key); + } else { + aws_byte_buf_init(&cipher->cipher_base.key, allocator, AWS_AES_256_KEY_BYTE_LEN); + aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cipher->cipher_base.key); + } + + /* Copy initialization vector into the cipher context. */ + if (iv) { + aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.iv, allocator, *iv); + } else { + aws_byte_buf_init(&cipher->cipher_base.iv, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE - 4); + aws_symmetric_cipher_generate_initialization_vector( + AWS_AES_256_CIPHER_BLOCK_SIZE - 4, false, &cipher->cipher_base.iv); + } + + /* Initialize the cipher contexts. */ + cipher->encryptor_ctx = EVP_CIPHER_CTX_new(); + AWS_FATAL_ASSERT(cipher->encryptor_ctx && "Encryptor cipher initialization failed!"); + + cipher->decryptor_ctx = EVP_CIPHER_CTX_new(); + AWS_FATAL_ASSERT(cipher->decryptor_ctx && "Decryptor cipher initialization failed!"); + + /* Set AAD if provided */ + if (aad) { + aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.aad, allocator, *aad); + } + + /* Set tag for the decryptor to use.*/ + if (decryption_tag) { + aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.tag, allocator, *decryption_tag); + } else { + /* we'll need this later when we grab the tag during encryption time. */ + aws_byte_buf_init(&cipher->cipher_base.tag, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + } + + /* Initialize the cipher contexts with the specified key and IV. */ + if (s_init_gcm_cipher_materials(&cipher->cipher_base)) { + goto error; + } + + cipher->cipher_base.good = true; + return &cipher->cipher_base; + +error: + s_destroy(&cipher->cipher_base); + return NULL; +} + +static int s_key_wrap_encrypt_decrypt( + struct aws_symmetric_cipher *cipher, + struct aws_byte_cursor input, + struct aws_byte_buf *out) { + (void)out; + struct openssl_aes_cipher *openssl_cipher = cipher->impl; + + return aws_byte_buf_append_dynamic(&openssl_cipher->working_buffer, &input); +} + +static const size_t MIN_CEK_LENGTH_BYTES = 128 / 8; +static const unsigned char INTEGRITY_VALUE = 0xA6; +#define KEYWRAP_BLOCK_SIZE 8u + +static int s_key_wrap_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { + struct openssl_aes_cipher *openssl_cipher = cipher->impl; + + if (openssl_cipher->working_buffer.len < MIN_CEK_LENGTH_BYTES) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_STATE); + } + + /* the following is an in place implementation of + RFC 3394 using the alternate in-place implementation. + we use one in-place buffer instead of the copy at the end. 
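+       (for orientation: the loop below makes 6 * n AES-ECB calls over a 64-bit integrity register A and the
+       n 64-bit blocks R[1..n], folding the round counter t = (n * j) + i into A after each encryption.)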
+ the one letter variable names are meant to directly reflect the variables in the RFC */ + size_t required_buffer_space = openssl_cipher->working_buffer.len + cipher->block_size; + size_t starting_len_offset = out->len; + + if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + + /* put the integrity check register in the first 8 bytes of the final buffer. */ + aws_byte_buf_write_u8_n(out, INTEGRITY_VALUE, KEYWRAP_BLOCK_SIZE); + uint8_t *a = out->buffer + starting_len_offset; + + struct aws_byte_cursor working_buf_cur = aws_byte_cursor_from_buf(&openssl_cipher->working_buffer); + aws_byte_buf_append_dynamic(out, &working_buf_cur); + + /* put the register buffer after the integrity check register */ + uint8_t *r = out->buffer + starting_len_offset + KEYWRAP_BLOCK_SIZE; + + int n = (int)(openssl_cipher->working_buffer.len / KEYWRAP_BLOCK_SIZE); + + uint8_t b_buf[KEYWRAP_BLOCK_SIZE * 2] = {0}; + struct aws_byte_buf b = aws_byte_buf_from_empty_array(b_buf, sizeof(b_buf)); + int b_out_len = b.capacity; + + uint8_t temp_buf[KEYWRAP_BLOCK_SIZE * 2] = {0}; + struct aws_byte_buf temp_input = aws_byte_buf_from_empty_array(temp_buf, sizeof(temp_buf)); + + for (int j = 0; j <= 5; ++j) { + for (int i = 1; i <= n; ++i) { + /* concat A and R[i], A should be most significant and then R[i] should be least significant. */ + memcpy(temp_input.buffer, a, KEYWRAP_BLOCK_SIZE); + memcpy(temp_input.buffer + KEYWRAP_BLOCK_SIZE, r, KEYWRAP_BLOCK_SIZE); + + /* encrypt the concatenated A and R[I] and store it in B */ + if (!EVP_EncryptUpdate( + openssl_cipher->encryptor_ctx, b.buffer, &b_out_len, temp_input.buffer, (int)temp_input.capacity)) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + unsigned char t = (unsigned char)((n * j) + i); + /* put the 64 MSB ^ T into A */ + memcpy(a, b.buffer, KEYWRAP_BLOCK_SIZE); + a[7] ^= t; + + /* put the 64 LSB into R[i] */ + memcpy(r, b.buffer + KEYWRAP_BLOCK_SIZE, KEYWRAP_BLOCK_SIZE); + /* increment i -> R[i] */ + r += KEYWRAP_BLOCK_SIZE; + } + /* reset R */ + r = out->buffer + starting_len_offset + KEYWRAP_BLOCK_SIZE; + } + + return AWS_OP_SUCCESS; +} + +static int s_key_wrap_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { + struct openssl_aes_cipher *openssl_cipher = cipher->impl; + + if (openssl_cipher->working_buffer.len < MIN_CEK_LENGTH_BYTES + KEYWRAP_BLOCK_SIZE) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_STATE); + } + + /* the following is an in place implementation of + RFC 3394 using the alternate in-place implementation. + we use one in-place buffer instead of the copy at the end. + the one letter variable names are meant to directly reflect the variables in the RFC */ + size_t required_buffer_space = openssl_cipher->working_buffer.len - KEYWRAP_BLOCK_SIZE; + size_t starting_len_offset = out->len; + + if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, required_buffer_space)) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + + memcpy( + out->buffer + starting_len_offset, + openssl_cipher->working_buffer.buffer + KEYWRAP_BLOCK_SIZE, + required_buffer_space); + + /* integrity register should be the first 8 bytes of the final buffer. */ + uint8_t *a = openssl_cipher->working_buffer.buffer; + + /* in-place register is the plaintext. 
For decryption, start at the last array position (8 bytes before the end); */ + uint8_t *r = out->buffer + starting_len_offset + required_buffer_space - KEYWRAP_BLOCK_SIZE; + + int n = (int)(required_buffer_space / KEYWRAP_BLOCK_SIZE); + + uint8_t b_buf[KEYWRAP_BLOCK_SIZE * 10] = {0}; + struct aws_byte_buf b = aws_byte_buf_from_empty_array(b_buf, sizeof(b_buf)); + int b_out_len = b.capacity; + + uint8_t temp_buf[KEYWRAP_BLOCK_SIZE * 2] = {0}; + struct aws_byte_buf temp_input = aws_byte_buf_from_empty_array(temp_buf, sizeof(temp_buf)); + + for (int j = 5; j >= 0; --j) { + for (int i = n; i >= 1; --i) { + /* concat A and T */ + memcpy(temp_input.buffer, a, KEYWRAP_BLOCK_SIZE); + unsigned char t = (unsigned char)((n * j) + i); + temp_input.buffer[7] ^= t; + /* R[i] */ + memcpy(temp_input.buffer + KEYWRAP_BLOCK_SIZE, r, KEYWRAP_BLOCK_SIZE); + + /* Decrypt the concatenated buffer */ + if (!EVP_DecryptUpdate( + openssl_cipher->decryptor_ctx, b.buffer, &b_out_len, temp_input.buffer, (int)temp_input.capacity)) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + /* set A to 64 MSB of decrypted result */ + memcpy(a, b.buffer, KEYWRAP_BLOCK_SIZE); + /* set the R[i] to the 64 LSB of decrypted result */ + memcpy(r, b.buffer + KEYWRAP_BLOCK_SIZE, KEYWRAP_BLOCK_SIZE); + /* decrement i -> R[i] */ + r -= KEYWRAP_BLOCK_SIZE; + } + /* reset R */ + r = out->buffer + starting_len_offset + required_buffer_space - KEYWRAP_BLOCK_SIZE; + } + + /* here we perform the integrity check to make sure A == 0xA6A6A6A6A6A6A6A6 */ + for (size_t i = 0; i < KEYWRAP_BLOCK_SIZE; ++i) { + if (a[i] != INTEGRITY_VALUE) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_CAL_SIGNATURE_VALIDATION_FAILED); + } + } + + out->len += required_buffer_space; + return AWS_OP_SUCCESS; +} + +static int s_init_keywrap_cipher_materials(struct aws_symmetric_cipher *cipher) { + struct openssl_aes_cipher *openssl_cipher = cipher->impl; + + if (!(EVP_EncryptInit_ex(openssl_cipher->encryptor_ctx, EVP_aes_256_ecb(), NULL, cipher->key.buffer, NULL) && + EVP_CIPHER_CTX_set_padding(openssl_cipher->encryptor_ctx, 0)) || + !(EVP_DecryptInit_ex(openssl_cipher->decryptor_ctx, EVP_aes_256_ecb(), NULL, cipher->key.buffer, NULL) && + EVP_CIPHER_CTX_set_padding(openssl_cipher->decryptor_ctx, 0))) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + return AWS_OP_SUCCESS; +} + +static int s_reset_keywrap_cipher_materials(struct aws_symmetric_cipher *cipher) { + int ret_val = s_reset(cipher); + + if (ret_val == AWS_OP_SUCCESS) { + return s_init_keywrap_cipher_materials(cipher); + } + + return ret_val; +} + +static struct aws_symmetric_cipher_vtable s_keywrap_vtable = { + .alg_name = "AES-KEYWRAP 256", + .provider = "OpenSSL Compatible LibCrypto", + .destroy = s_destroy, + .reset = s_reset_keywrap_cipher_materials, + .decrypt = s_key_wrap_encrypt_decrypt, + .encrypt = s_key_wrap_encrypt_decrypt, + .finalize_decryption = s_key_wrap_finalize_decryption, + .finalize_encryption = s_key_wrap_finalize_encryption, +}; + +struct aws_symmetric_cipher *aws_aes_keywrap_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key) { + struct openssl_aes_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct openssl_aes_cipher)); + cipher->cipher_base.allocator = allocator; + cipher->cipher_base.block_size = KEYWRAP_BLOCK_SIZE; + cipher->cipher_base.key_length_bits = AWS_AES_256_KEY_BIT_LEN; + cipher->cipher_base.vtable = &s_keywrap_vtable; + cipher->cipher_base.impl = 
cipher; + + /* Copy key into the cipher context. */ + if (key) { + aws_byte_buf_init_copy_from_cursor(&cipher->cipher_base.key, allocator, *key); + } else { + aws_byte_buf_init(&cipher->cipher_base.key, allocator, AWS_AES_256_KEY_BYTE_LEN); + aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cipher->cipher_base.key); + } + + aws_byte_buf_init(&cipher->working_buffer, allocator, KEYWRAP_BLOCK_SIZE); + + /* Initialize the cipher contexts. */ + cipher->encryptor_ctx = EVP_CIPHER_CTX_new(); + AWS_FATAL_ASSERT(cipher->encryptor_ctx && "Encryptor cipher initialization failed!"); + + cipher->decryptor_ctx = EVP_CIPHER_CTX_new(); + AWS_FATAL_ASSERT(cipher->decryptor_ctx && "Decryptor cipher initialization failed!"); + + /* Initialize the cipher contexts with the specified key and IV. */ + if (s_init_keywrap_cipher_materials(&cipher->cipher_base)) { + goto error; + } + + cipher->cipher_base.good = true; + return &cipher->cipher_base; + +error: + s_destroy(&cipher->cipher_base); + return NULL; +} diff --git a/source/windows/bcrypt_aes.c b/source/windows/bcrypt_aes.c new file mode 100644 index 00000000..aeb646e6 --- /dev/null +++ b/source/windows/bcrypt_aes.c @@ -0,0 +1,1121 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. + */ +#include + +#include + +/* keep the space to prevent formatters from reordering this with the Windows.h header. */ +#include + +#define NT_SUCCESS(status) ((NTSTATUS)status >= 0) + +/* handles for AES modes and algorithms we'll be using. These are initialized once and allowed to leak. */ +static aws_thread_once s_aes_thread_once = AWS_THREAD_ONCE_STATIC_INIT; +static BCRYPT_ALG_HANDLE s_aes_cbc_algorithm_handle = NULL; +static BCRYPT_ALG_HANDLE s_aes_gcm_algorithm_handle = NULL; +static BCRYPT_ALG_HANDLE s_aes_ctr_algorithm_handle = NULL; +static BCRYPT_ALG_HANDLE s_aes_keywrap_algorithm_handle = NULL; + +struct aes_bcrypt_cipher { + struct aws_symmetric_cipher cipher; + BCRYPT_ALG_HANDLE alg_handle; + /* the loaded key handle. */ + BCRYPT_KEY_HANDLE key_handle; + /* Used for GCM mode to store IV, tag, and aad */ + BCRYPT_AUTHENTICATED_CIPHER_MODE_INFO *auth_info_ptr; + /* Updated on the fly for things like constant-time CBC padding and GCM hash chaining */ + DWORD cipher_flags; + /* For things to work, they have to be in 16 byte chunks in several scenarios. Use this + Buffer for storing excess bytes until we have 16 bytes to operate on. */ + struct aws_byte_buf overflow; + /* This gets updated as the algorithms run so it isn't the original IV. That's why its separate */ + struct aws_byte_buf working_iv; + /* A buffer to keep around for the GMAC for GCM. 
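+       BCrypt carries the running GMAC here across chained BCryptEncrypt/BCryptDecrypt calls
+       (pbMacContext/cbMacContext on the auth info), so it has to stay valid for the whole stream.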
*/
+    struct aws_byte_buf working_mac_buffer;
+};
+
+static void s_load_alg_handles(void *user_data) {
+    (void)user_data;
+
+    /* this function is incredibly slow, LET IT LEAK */
+    NTSTATUS status = BCryptOpenAlgorithmProvider(&s_aes_cbc_algorithm_handle, BCRYPT_AES_ALGORITHM, NULL, 0);
+    AWS_FATAL_ASSERT(s_aes_cbc_algorithm_handle && "BCryptOpenAlgorithmProvider() failed");
+
+    status = BCryptSetProperty(
+        s_aes_cbc_algorithm_handle,
+        BCRYPT_CHAINING_MODE,
+        (PUCHAR)BCRYPT_CHAIN_MODE_CBC,
+        (ULONG)(wcslen(BCRYPT_CHAIN_MODE_CBC) + 1),
+        0);
+
+    AWS_FATAL_ASSERT(NT_SUCCESS(status) && "BCryptSetProperty for CBC chaining mode failed");
+
+    /* Set up GCM algorithm */
+    status = BCryptOpenAlgorithmProvider(&s_aes_gcm_algorithm_handle, BCRYPT_AES_ALGORITHM, NULL, 0);
+    AWS_FATAL_ASSERT(s_aes_gcm_algorithm_handle && "BCryptOpenAlgorithmProvider() failed");
+
+    status = BCryptSetProperty(
+        s_aes_gcm_algorithm_handle,
+        BCRYPT_CHAINING_MODE,
+        (PUCHAR)BCRYPT_CHAIN_MODE_GCM,
+        (ULONG)(wcslen(BCRYPT_CHAIN_MODE_GCM) + 1),
+        0);
+
+    AWS_FATAL_ASSERT(NT_SUCCESS(status) && "BCryptSetProperty for GCM chaining mode failed");
+
+    /* Set up CTR algorithm */
+    status = BCryptOpenAlgorithmProvider(&s_aes_ctr_algorithm_handle, BCRYPT_AES_ALGORITHM, NULL, 0);
+    AWS_FATAL_ASSERT(s_aes_ctr_algorithm_handle && "BCryptOpenAlgorithmProvider() failed");
+
+    /* This is ECB because windows doesn't do CTR mode for you.
+       Instead we use ECB and XOR the encrypted IV and data to operate on for each block. */
+    status = BCryptSetProperty(
+        s_aes_ctr_algorithm_handle,
+        BCRYPT_CHAINING_MODE,
+        (PUCHAR)BCRYPT_CHAIN_MODE_ECB,
+        (ULONG)(wcslen(BCRYPT_CHAIN_MODE_ECB) + 1),
+        0);
+
+    AWS_FATAL_ASSERT(NT_SUCCESS(status) && "BCryptSetProperty for ECB chaining mode failed");
+
+    /* Set up KEYWRAP algorithm */
+    status = BCryptOpenAlgorithmProvider(&s_aes_keywrap_algorithm_handle, BCRYPT_AES_ALGORITHM, NULL, 0);
+    AWS_FATAL_ASSERT(s_aes_keywrap_algorithm_handle && "BCryptOpenAlgorithmProvider() failed");
+
+    AWS_FATAL_ASSERT(NT_SUCCESS(status) && "BCryptOpenAlgorithmProvider for KeyWrap failed");
+}
+
+static BCRYPT_KEY_HANDLE s_import_key_blob(
+    BCRYPT_ALG_HANDLE algHandle,
+    struct aws_allocator *allocator,
+    struct aws_byte_buf *key) {
+    NTSTATUS status = 0;
+
+    BCRYPT_KEY_DATA_BLOB_HEADER key_data;
+    key_data.dwMagic = BCRYPT_KEY_DATA_BLOB_MAGIC;
+    key_data.dwVersion = BCRYPT_KEY_DATA_BLOB_VERSION1;
+    key_data.cbKeyData = (ULONG)key->len;
+
+    struct aws_byte_buf key_data_buf;
+    aws_byte_buf_init(&key_data_buf, allocator, sizeof(key_data) + key->len);
+    aws_byte_buf_write(&key_data_buf, (const uint8_t *)&key_data, sizeof(key_data));
+    aws_byte_buf_write(&key_data_buf, key->buffer, key->len);
+
+    BCRYPT_KEY_HANDLE key_handle;
+    status = BCryptImportKey(
+        algHandle, NULL, BCRYPT_KEY_DATA_BLOB, &key_handle, NULL, 0, key_data_buf.buffer, (ULONG)key_data_buf.len, 0);
+
+    aws_byte_buf_clean_up_secure(&key_data_buf);
+
+    if (!NT_SUCCESS(status)) {
+        aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+        return NULL;
+    }
+
+    return key_handle;
+}
+
+static void s_aes_default_destroy(struct aws_symmetric_cipher *cipher) {
+    struct aes_bcrypt_cipher *cipher_impl = cipher->impl;
+
+    aws_byte_buf_clean_up_secure(&cipher->key);
+    aws_byte_buf_clean_up_secure(&cipher->iv);
+    aws_byte_buf_clean_up_secure(&cipher->tag);
+    aws_byte_buf_clean_up_secure(&cipher->aad);
+
+    /* clean_up_secure exists in versions of aws-c-common that don't check that the
+       buffer has a buffer and an allocator before freeing the memory. Instead,
+       check here.
If it's set the buffer was owned and needs to be cleaned up, otherwise + it can just be dropped as it was an alias.*/ + if (cipher_impl->working_iv.allocator) { + aws_byte_buf_clean_up_secure(&cipher_impl->working_iv); + } + + aws_byte_buf_clean_up_secure(&cipher_impl->overflow); + aws_byte_buf_clean_up_secure(&cipher_impl->working_mac_buffer); + + if (cipher_impl->key_handle) { + BCryptDestroyKey(cipher_impl->key_handle); + cipher_impl->key_handle = NULL; + } + + if (cipher_impl->auth_info_ptr) { + aws_mem_release(cipher->allocator, cipher_impl->auth_info_ptr); + cipher_impl->auth_info_ptr = NULL; + } + + aws_mem_release(cipher->allocator, cipher_impl); +} + +/* just a utility function for setting up windows Ciphers and keys etc.... + Handles copying key/iv etc... data to the right buffers and then setting them + on the windows handles used for the encryption operations. */ +static int s_initialize_cipher_materials( + struct aes_bcrypt_cipher *cipher, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv, + const struct aws_byte_cursor *tag, + const struct aws_byte_cursor *aad, + size_t iv_size, + bool is_ctr_mode, + bool is_gcm) { + + if (!cipher->cipher.key.len) { + if (key) { + aws_byte_buf_init_copy_from_cursor(&cipher->cipher.key, cipher->cipher.allocator, *key); + } else { + aws_byte_buf_init(&cipher->cipher.key, cipher->cipher.allocator, AWS_AES_256_KEY_BYTE_LEN); + aws_symmetric_cipher_generate_key(AWS_AES_256_KEY_BYTE_LEN, &cipher->cipher.key); + } + } + + if (!cipher->cipher.iv.len && iv_size) { + if (iv) { + aws_byte_buf_init_copy_from_cursor(&cipher->cipher.iv, cipher->cipher.allocator, *iv); + } else { + aws_byte_buf_init(&cipher->cipher.iv, cipher->cipher.allocator, iv_size); + aws_symmetric_cipher_generate_initialization_vector(iv_size, is_ctr_mode, &cipher->cipher.iv); + } + } + + /* these fields are only used in GCM mode. */ + if (is_gcm) { + if (!cipher->cipher.tag.len) { + if (tag) { + aws_byte_buf_init_copy_from_cursor(&cipher->cipher.tag, cipher->cipher.allocator, *tag); + } else { + aws_byte_buf_init(&cipher->cipher.tag, cipher->cipher.allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + aws_byte_buf_secure_zero(&cipher->cipher.tag); + /* windows handles this, just go ahead and tell the API it's got a length. */ + cipher->cipher.tag.len = AWS_AES_256_CIPHER_BLOCK_SIZE; + } + } + + if (!cipher->cipher.aad.len) { + if (aad) { + aws_byte_buf_init_copy_from_cursor(&cipher->cipher.aad, cipher->cipher.allocator, *aad); + } + } + + if (!cipher->working_mac_buffer.len) { + aws_byte_buf_init(&cipher->working_mac_buffer, cipher->cipher.allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + aws_byte_buf_secure_zero(&cipher->working_mac_buffer); + /* windows handles this, just go ahead and tell the API it's got a length. */ + cipher->working_mac_buffer.len = AWS_AES_256_CIPHER_BLOCK_SIZE; + } + } + + cipher->key_handle = s_import_key_blob(cipher->alg_handle, cipher->cipher.allocator, &cipher->cipher.key); + + if (!cipher->key_handle) { + cipher->cipher.good = false; + return AWS_OP_ERR; + } + + cipher->cipher_flags = 0; + + /* In GCM mode, the IV is set on the auth info pointer and a working copy + is passed to each encryt call. CBC and CTR mode function differently here + and the IV is set on the key itself. 
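+       (for CBC/CTR that means the BCRYPT_INITIALIZATION_VECTOR property set just below; GCM instead
+       passes its nonce through the auth info struct on every call.)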
*/ + if (!is_gcm && cipher->cipher.iv.len) { + NTSTATUS status = BCryptSetProperty( + cipher->key_handle, + BCRYPT_INITIALIZATION_VECTOR, + cipher->cipher.iv.buffer, + (ULONG)cipher->cipher.iv.len, + 0); + + if (!NT_SUCCESS(status)) { + cipher->cipher.good = false; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + } else if (is_gcm) { + + cipher->auth_info_ptr = + aws_mem_acquire(cipher->cipher.allocator, sizeof(BCRYPT_AUTHENTICATED_CIPHER_MODE_INFO)); + + /* Create a new authenticated cipher mode info object for GCM mode */ + BCRYPT_INIT_AUTH_MODE_INFO(*cipher->auth_info_ptr); + cipher->auth_info_ptr->pbNonce = cipher->cipher.iv.buffer; + cipher->auth_info_ptr->cbNonce = (ULONG)cipher->cipher.iv.len; + cipher->auth_info_ptr->dwFlags = BCRYPT_AUTH_MODE_CHAIN_CALLS_FLAG; + cipher->auth_info_ptr->pbTag = cipher->cipher.tag.buffer; + cipher->auth_info_ptr->cbTag = (ULONG)cipher->cipher.tag.len; + cipher->auth_info_ptr->pbMacContext = cipher->working_mac_buffer.buffer; + cipher->auth_info_ptr->cbMacContext = (ULONG)cipher->working_mac_buffer.len; + + if (cipher->cipher.aad.len) { + cipher->auth_info_ptr->pbAuthData = (PUCHAR)cipher->cipher.aad.buffer; + cipher->auth_info_ptr->cbAuthData = (ULONG)cipher->cipher.aad.len; + } + } + + return AWS_OP_SUCCESS; +} + +/* Free up as few resources as possible so we can quickly reuse the cipher. */ +static void s_clear_reusable_components(struct aws_symmetric_cipher *cipher) { + struct aes_bcrypt_cipher *cipher_impl = cipher->impl; + bool working_iv_optimized = cipher->iv.buffer == cipher_impl->working_iv.buffer; + + if (!working_iv_optimized) { + aws_byte_buf_secure_zero(&cipher_impl->working_iv); + } + + /* These can't always be reused in the next operation, so go ahead and destroy it + and create another. */ + if (cipher_impl->key_handle) { + BCryptDestroyKey(cipher_impl->key_handle); + cipher_impl->key_handle = NULL; + } + + if (cipher_impl->auth_info_ptr) { + aws_mem_release(cipher->allocator, cipher_impl->auth_info_ptr); + cipher_impl->auth_info_ptr = NULL; + } + + aws_byte_buf_secure_zero(&cipher_impl->overflow); + aws_byte_buf_secure_zero(&cipher_impl->working_mac_buffer); + /* windows handles this, just go ahead and tell the API it's got a length. */ + cipher_impl->working_mac_buffer.len = AWS_AES_256_CIPHER_BLOCK_SIZE; +} + +static int s_reset_cbc_cipher(struct aws_symmetric_cipher *cipher) { + struct aes_bcrypt_cipher *cipher_impl = cipher->impl; + + s_clear_reusable_components(cipher); + return s_initialize_cipher_materials( + cipher_impl, NULL, NULL, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE, false, false); +} + +static int s_reset_ctr_cipher(struct aws_symmetric_cipher *cipher) { + struct aes_bcrypt_cipher *cipher_impl = cipher->impl; + + s_clear_reusable_components(cipher); + struct aws_byte_cursor iv_cur = aws_byte_cursor_from_buf(&cipher->iv); + /* reset the working iv back to the original IV. We do this because + we're manually maintaining the counter. 
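+       (the last 4 bytes of working_iv hold the big-endian block counter that s_aes_ctr_encrypt increments
+       after every block.)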
*/ + aws_byte_buf_append_dynamic(&cipher_impl->working_iv, &iv_cur); + return s_initialize_cipher_materials( + cipher_impl, NULL, NULL, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE, true, false); +} + +static int s_reset_gcm_cipher(struct aws_symmetric_cipher *cipher) { + struct aes_bcrypt_cipher *cipher_impl = cipher->impl; + + s_clear_reusable_components(cipher); + return s_initialize_cipher_materials( + cipher_impl, NULL, NULL, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE - 4, false, true); +} + +static int s_aes_default_encrypt( + struct aws_symmetric_cipher *cipher, + const struct aws_byte_cursor *to_encrypt, + struct aws_byte_buf *out) { + struct aes_bcrypt_cipher *cipher_impl = cipher->impl; + + if (to_encrypt->len == 0) { + return AWS_OP_SUCCESS; + } + + size_t predicted_write_length = + cipher_impl->cipher_flags & BCRYPT_BLOCK_PADDING + ? to_encrypt->len + (AWS_AES_256_CIPHER_BLOCK_SIZE - (to_encrypt->len % AWS_AES_256_CIPHER_BLOCK_SIZE)) + : to_encrypt->len; + + ULONG length_written = (ULONG)(predicted_write_length); + + if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, predicted_write_length)) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + + PUCHAR iv = NULL; + ULONG iv_size = 0; + + if (cipher_impl->auth_info_ptr) { + iv = cipher_impl->working_iv.buffer; + /* this is looking for buffer size, and the working_iv has only been written to by windows the GCM case. + * So use capacity rather than length */ + iv_size = (ULONG)cipher_impl->working_iv.capacity; + } + + /* iv was set on the key itself, so we don't need to pass it here. */ + NTSTATUS status = BCryptEncrypt( + cipher_impl->key_handle, + to_encrypt->ptr, + (ULONG)to_encrypt->len, + cipher_impl->auth_info_ptr, + iv, + iv_size, + out->buffer + out->len, + (ULONG)(out->capacity - out->len), + &length_written, + cipher_impl->cipher_flags); + + if (!NT_SUCCESS(status)) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + out->len += length_written; + return AWS_OP_SUCCESS; +} + +/* manages making sure encryption operations can operate on 16 byte blocks. Stores the excess in the overflow + buffer and moves stuff around each time to make sure everything is in order. */ +static struct aws_byte_buf s_fill_in_overflow( + struct aws_symmetric_cipher *cipher, + const struct aws_byte_cursor *to_operate) { + struct aes_bcrypt_cipher *cipher_impl = cipher->impl; + + static const size_t RESERVE_SIZE = AWS_AES_256_CIPHER_BLOCK_SIZE * 2; + cipher_impl->cipher_flags = 0; + + struct aws_byte_buf final_to_operate_on; + AWS_ZERO_STRUCT(final_to_operate_on); + + if (cipher_impl->overflow.len > 0) { + aws_byte_buf_init_copy(&final_to_operate_on, cipher->allocator, &cipher_impl->overflow); + aws_byte_buf_append_dynamic(&final_to_operate_on, to_operate); + aws_byte_buf_secure_zero(&cipher_impl->overflow); + } else { + aws_byte_buf_init_copy_from_cursor(&final_to_operate_on, cipher->allocator, *to_operate); + } + + size_t overflow = final_to_operate_on.len % RESERVE_SIZE; + + if (final_to_operate_on.len > RESERVE_SIZE) { + size_t offset = overflow == 0 ? 
RESERVE_SIZE : overflow; + + struct aws_byte_cursor slice_for_overflow = aws_byte_cursor_from_buf(&final_to_operate_on); + aws_byte_cursor_advance(&slice_for_overflow, final_to_operate_on.len - offset); + aws_byte_buf_append_dynamic(&cipher_impl->overflow, &slice_for_overflow); + final_to_operate_on.len -= offset; + } else { + struct aws_byte_cursor final_cur = aws_byte_cursor_from_buf(&final_to_operate_on); + aws_byte_buf_append_dynamic(&cipher_impl->overflow, &final_cur); + aws_byte_buf_clean_up_secure(&final_to_operate_on); + } + + return final_to_operate_on; +} + +static int s_aes_cbc_encrypt( + struct aws_symmetric_cipher *cipher, + struct aws_byte_cursor to_encrypt, + struct aws_byte_buf *out) { + + struct aws_byte_buf final_to_encrypt = s_fill_in_overflow(cipher, &to_encrypt); + struct aws_byte_cursor final_cur = aws_byte_cursor_from_buf(&final_to_encrypt); + int ret_val = s_aes_default_encrypt(cipher, &final_cur, out); + aws_byte_buf_clean_up_secure(&final_to_encrypt); + + return ret_val; +} + +static int s_aes_cbc_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { + struct aes_bcrypt_cipher *cipher_impl = cipher->impl; + + if (cipher->good && cipher_impl->overflow.len > 0) { + cipher_impl->cipher_flags = BCRYPT_BLOCK_PADDING; + /* take the rest of the overflow and turn padding on so the remainder is properly padded + without timing attack vulnerabilities. */ + struct aws_byte_cursor remaining_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow); + int ret_val = s_aes_default_encrypt(cipher, &remaining_cur, out); + aws_byte_buf_secure_zero(&cipher_impl->overflow); + return ret_val; + } + + return AWS_OP_SUCCESS; +} + +static int s_default_aes_decrypt( + struct aws_symmetric_cipher *cipher, + const struct aws_byte_cursor *to_decrypt, + struct aws_byte_buf *out) { + struct aes_bcrypt_cipher *cipher_impl = cipher->impl; + + if (to_decrypt->len == 0) { + return AWS_OP_SUCCESS; + } + + PUCHAR iv = NULL; + ULONG iv_size = 0; + + if (cipher_impl->auth_info_ptr) { + iv = cipher_impl->working_iv.buffer; + /* this is looking for buffer size, and the working_iv has only been written to by windows the GCM case. + * So use capacity rather than length */ + iv_size = (ULONG)cipher_impl->working_iv.capacity; + } + + size_t predicted_write_length = to_decrypt->len; + ULONG length_written = (ULONG)(predicted_write_length); + + if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, predicted_write_length)) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + + /* iv was set on the key itself, so we don't need to pass it here. 
*/ + NTSTATUS status = BCryptDecrypt( + cipher_impl->key_handle, + to_decrypt->ptr, + (ULONG)to_decrypt->len, + cipher_impl->auth_info_ptr, + iv, + iv_size, + out->buffer + out->len, + (ULONG)(out->capacity - out->len), + &length_written, + cipher_impl->cipher_flags); + + if (!NT_SUCCESS(status)) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + } + + out->len += length_written; + return AWS_OP_SUCCESS; +} + +static int s_aes_cbc_decrypt( + struct aws_symmetric_cipher *cipher, + struct aws_byte_cursor to_decrypt, + struct aws_byte_buf *out) { + struct aws_byte_buf final_to_decrypt = s_fill_in_overflow(cipher, &to_decrypt); + struct aws_byte_cursor final_cur = aws_byte_cursor_from_buf(&final_to_decrypt); + int ret_val = s_default_aes_decrypt(cipher, &final_cur, out); + aws_byte_buf_clean_up_secure(&final_to_decrypt); + + return ret_val; +} + +static int s_aes_cbc_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { + struct aes_bcrypt_cipher *cipher_impl = cipher->impl; + + if (cipher->good && cipher_impl->overflow.len > 0) { + cipher_impl->cipher_flags = BCRYPT_BLOCK_PADDING; + /* take the rest of the overflow and turn padding on so the remainder is properly padded + without timing attack vulnerabilities. */ + struct aws_byte_cursor remaining_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow); + int ret_val = s_default_aes_decrypt(cipher, &remaining_cur, out); + aws_byte_buf_secure_zero(&cipher_impl->overflow); + return ret_val; + } + + return AWS_OP_SUCCESS; +} + +static struct aws_symmetric_cipher_vtable s_aes_cbc_vtable = { + .alg_name = "AES-CBC 256", + .provider = "Windows CNG", + .decrypt = s_aes_cbc_decrypt, + .encrypt = s_aes_cbc_encrypt, + .finalize_encryption = s_aes_cbc_finalize_encryption, + .finalize_decryption = s_aes_cbc_finalize_decryption, + .destroy = s_aes_default_destroy, + .reset = s_reset_cbc_cipher, +}; + +struct aws_symmetric_cipher *aws_aes_cbc_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv) { + + aws_thread_call_once(&s_aes_thread_once, s_load_alg_handles, NULL); + + struct aes_bcrypt_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct aes_bcrypt_cipher)); + + cipher->cipher.allocator = allocator; + cipher->cipher.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE; + cipher->cipher.key_length_bits = AWS_AES_256_KEY_BIT_LEN; + cipher->alg_handle = s_aes_cbc_algorithm_handle; + cipher->cipher.vtable = &s_aes_cbc_vtable; + + if (s_initialize_cipher_materials(cipher, key, iv, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE, false, false) != + AWS_OP_SUCCESS) { + goto error; + } + + aws_byte_buf_init(&cipher->overflow, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE * 2); + cipher->working_iv = cipher->cipher.iv; + /* make sure the cleanup doesn't do anything. */ + cipher->working_iv.allocator = NULL; + cipher->cipher.impl = cipher; + cipher->cipher.good = true; + + return &cipher->cipher; + +error: + return NULL; +} + +/* the buffer management for this mode is a good deal easier because we don't care about padding. + We do care about keeping the final buffer less than a block size til the finalize call so we can + turn the auth chaining flag off and compute the GMAC correctly. 
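+   CNG only produces the tag on the call made without BCRYPT_AUTH_MODE_CHAIN_CALLS_FLAG, so the trailing
+   partial block is parked in the overflow buffer until the finalize call.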
*/ +static int s_aes_gcm_encrypt( + struct aws_symmetric_cipher *cipher, + struct aws_byte_cursor to_encrypt, + struct aws_byte_buf *out) { + struct aes_bcrypt_cipher *cipher_impl = cipher->impl; + + if (to_encrypt.len == 0) { + return AWS_OP_SUCCESS; + } + + struct aws_byte_buf working_buffer; + AWS_ZERO_STRUCT(working_buffer); + + /* If there's overflow, prepend it to the working buffer, then append the data to encrypt */ + if (cipher_impl->overflow.len) { + struct aws_byte_cursor overflow_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow); + + aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, overflow_cur); + aws_byte_buf_reset(&cipher_impl->overflow, true); + aws_byte_buf_append_dynamic(&working_buffer, &to_encrypt); + } else { + aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, to_encrypt); + } + + int ret_val = AWS_OP_ERR; + + /* whatever is remaining in an incomplete block, copy it to the overflow. If we don't have a full block + wait til next time or for the finalize call. */ + if (working_buffer.len > AWS_AES_256_CIPHER_BLOCK_SIZE) { + size_t offset = working_buffer.len % AWS_AES_256_CIPHER_BLOCK_SIZE; + size_t seek_to = working_buffer.len - (AWS_AES_256_CIPHER_BLOCK_SIZE + offset); + struct aws_byte_cursor working_buf_cur = aws_byte_cursor_from_buf(&working_buffer); + struct aws_byte_cursor working_slice = aws_byte_cursor_advance(&working_buf_cur, seek_to); + /* this is just here to make it obvious. The previous line advanced working_buf_cur to where the + new overfloew should be. */ + struct aws_byte_cursor new_overflow_cur = working_buf_cur; + aws_byte_buf_append_dynamic(&cipher_impl->overflow, &new_overflow_cur); + + ret_val = s_aes_default_encrypt(cipher, &working_slice, out); + } else { + struct aws_byte_cursor working_buffer_cur = aws_byte_cursor_from_buf(&working_buffer); + aws_byte_buf_append_dynamic(&cipher_impl->overflow, &working_buffer_cur); + ret_val = AWS_OP_SUCCESS; + } + aws_byte_buf_clean_up_secure(&working_buffer); + return ret_val; +} + +static int s_aes_gcm_decrypt( + struct aws_symmetric_cipher *cipher, + struct aws_byte_cursor to_decrypt, + struct aws_byte_buf *out) { + struct aes_bcrypt_cipher *cipher_impl = cipher->impl; + + if (to_decrypt.len == 0) { + return AWS_OP_SUCCESS; + } + + struct aws_byte_buf working_buffer; + AWS_ZERO_STRUCT(working_buffer); + + /* If there's overflow, prepend it to the working buffer, then append the data to encrypt */ + if (cipher_impl->overflow.len) { + struct aws_byte_cursor overflow_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow); + + aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, overflow_cur); + aws_byte_buf_reset(&cipher_impl->overflow, true); + aws_byte_buf_append_dynamic(&working_buffer, &to_decrypt); + } else { + aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, to_decrypt); + } + + int ret_val = AWS_OP_ERR; + + /* whatever is remaining in an incomplete block, copy it to the overflow. If we don't have a full block + wait til next time or for the finalize call. */ + if (working_buffer.len > AWS_AES_256_CIPHER_BLOCK_SIZE) { + size_t offset = working_buffer.len % AWS_AES_256_CIPHER_BLOCK_SIZE; + size_t seek_to = working_buffer.len - (AWS_AES_256_CIPHER_BLOCK_SIZE + offset); + struct aws_byte_cursor working_buf_cur = aws_byte_cursor_from_buf(&working_buffer); + struct aws_byte_cursor working_slice = aws_byte_cursor_advance(&working_buf_cur, seek_to); + /* this is just here to make it obvious. 
The previous line advanced working_buf_cur to where the + new overfloew should be. */ + struct aws_byte_cursor new_overflow_cur = working_buf_cur; + aws_byte_buf_append_dynamic(&cipher_impl->overflow, &new_overflow_cur); + + ret_val = s_default_aes_decrypt(cipher, &working_slice, out); + } else { + struct aws_byte_cursor working_buffer_cur = aws_byte_cursor_from_buf(&working_buffer); + aws_byte_buf_append_dynamic(&cipher_impl->overflow, &working_buffer_cur); + ret_val = AWS_OP_SUCCESS; + } + aws_byte_buf_clean_up_secure(&working_buffer); + return ret_val; +} + +static int s_aes_gcm_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { + struct aes_bcrypt_cipher *cipher_impl = cipher->impl; + + cipher_impl->auth_info_ptr->dwFlags &= ~BCRYPT_AUTH_MODE_CHAIN_CALLS_FLAG; + /* take whatever is remaining, make the final encrypt call with the auth chain flag turned off. */ + struct aws_byte_cursor remaining_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow); + int ret_val = s_aes_default_encrypt(cipher, &remaining_cur, out); + aws_byte_buf_secure_zero(&cipher_impl->overflow); + aws_byte_buf_secure_zero(&cipher_impl->working_iv); + return ret_val; +} + +static int s_aes_gcm_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { + struct aes_bcrypt_cipher *cipher_impl = cipher->impl; + cipher_impl->auth_info_ptr->dwFlags &= ~BCRYPT_AUTH_MODE_CHAIN_CALLS_FLAG; + /* take whatever is remaining, make the final decrypt call with the auth chain flag turned off. */ + struct aws_byte_cursor remaining_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow); + int ret_val = s_default_aes_decrypt(cipher, &remaining_cur, out); + aws_byte_buf_secure_zero(&cipher_impl->overflow); + aws_byte_buf_secure_zero(&cipher_impl->working_iv); + return ret_val; +} + +static struct aws_symmetric_cipher_vtable s_aes_gcm_vtable = { + .alg_name = "AES-GCM 256", + .provider = "Windows CNG", + .decrypt = s_aes_gcm_decrypt, + .encrypt = s_aes_gcm_encrypt, + .finalize_encryption = s_aes_gcm_finalize_encryption, + .finalize_decryption = s_aes_gcm_finalize_decryption, + .destroy = s_aes_default_destroy, + .reset = s_reset_gcm_cipher, +}; + +struct aws_symmetric_cipher *aws_aes_gcm_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv, + const struct aws_byte_cursor *aad, + const struct aws_byte_cursor *decryption_tag) { + + aws_thread_call_once(&s_aes_thread_once, s_load_alg_handles, NULL); + struct aes_bcrypt_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct aes_bcrypt_cipher)); + + cipher->cipher.allocator = allocator; + cipher->cipher.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE; + cipher->cipher.key_length_bits = AWS_AES_256_KEY_BIT_LEN; + cipher->alg_handle = s_aes_gcm_algorithm_handle; + cipher->cipher.vtable = &s_aes_gcm_vtable; + + /* GCM does the counting under the hood, so we let it handle the final 4 bytes of the IV. 
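+       (a 12-byte nonce; CNG manages the remaining 32-bit counter portion of the block internally, which is
+       also why the IV here is generated without the counter suffix.)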
*/ + if (s_initialize_cipher_materials( + cipher, key, iv, decryption_tag, aad, AWS_AES_256_CIPHER_BLOCK_SIZE - 4, false, true) != AWS_OP_SUCCESS) { + goto error; + } + + aws_byte_buf_init(&cipher->overflow, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE * 2); + aws_byte_buf_init(&cipher->working_iv, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + aws_byte_buf_secure_zero(&cipher->working_iv); + + cipher->cipher.impl = cipher; + cipher->cipher.good = true; + + return &cipher->cipher; + +error: + if (cipher != NULL) { + s_aes_default_destroy(&cipher->cipher); + } + + return NULL; +} + +/* Take a and b, XOR them and store it in dest. Notice the XOR is done up to the length of the smallest input. + If there's a bug in here, it's being hit inside the finalize call when there's an input stream that isn't an even + multiple of 16. + */ +static int s_xor_cursors(const struct aws_byte_cursor *a, const struct aws_byte_cursor *b, struct aws_byte_buf *dest) { + size_t min_size = aws_min_size(b->len, a->len); + + if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(dest, min_size)) { + return aws_raise_error(AWS_ERROR_SHORT_BUFFER); + } + + /* If the profiler is saying this is slow, SIMD the loop below. */ + uint8_t *array_ref = dest->buffer + dest->len; + + for (size_t i = 0; i < min_size; ++i) { + array_ref[i] = a->ptr[i] ^ b->ptr[i]; + } + + dest->len += min_size; + + return AWS_OP_SUCCESS; +} + +/* There is no CTR mode on windows. Instead, we use AES ECB to encrypt the IV a block at a time. + That value is then XOR'd with the to_encrypt cursor and appended to out. The counter then needs + to be incremented by 1 for the next call. This has to be done a block at a time, so we slice + to_encrypt into a cursor per block and do this process for each block. Also notice that CTR mode + is symmetric for encryption and decryption (encrypt and decrypt are the same thing). */ +static int s_aes_ctr_encrypt( + struct aws_symmetric_cipher *cipher, + struct aws_byte_cursor to_encrypt, + struct aws_byte_buf *out) { + struct aes_bcrypt_cipher *cipher_impl = cipher->impl; + + if (to_encrypt.len == 0) { + return AWS_OP_SUCCESS; + } + + struct aws_byte_buf working_buffer; + AWS_ZERO_STRUCT(working_buffer); + + /* prepend overflow to the working buffer and then append to_encrypt to it. */ + if (cipher_impl->overflow.len && to_encrypt.ptr != cipher_impl->overflow.buffer) { + struct aws_byte_cursor overflow_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow); + aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, overflow_cur); + aws_byte_buf_reset(&cipher_impl->overflow, true); + aws_byte_buf_append_dynamic(&working_buffer, &to_encrypt); + } else { + aws_byte_buf_init_copy_from_cursor(&working_buffer, cipher->allocator, to_encrypt); + } + + /* slice working_buffer into a slice per block. 
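+       Full 16-byte slices are encrypted right away; a trailing partial slice falls through to the overflow
+       buffer and waits for the next update or the finalize call.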
*/
+    struct aws_array_list sliced_buffers;
+    aws_array_list_init_dynamic(
+        &sliced_buffers,
+        cipher->allocator,
+        (to_encrypt.len / AWS_AES_256_CIPHER_BLOCK_SIZE) + 1,
+        sizeof(struct aws_byte_cursor));
+
+    struct aws_byte_cursor working_buf_cur = aws_byte_cursor_from_buf(&working_buffer);
+    while (working_buf_cur.len) {
+        struct aws_byte_cursor slice = working_buf_cur;
+
+        if (working_buf_cur.len >= AWS_AES_256_CIPHER_BLOCK_SIZE) {
+            slice = aws_byte_cursor_advance(&working_buf_cur, AWS_AES_256_CIPHER_BLOCK_SIZE);
+        } else {
+            aws_byte_cursor_advance(&working_buf_cur, slice.len);
+        }
+
+        aws_array_list_push_back(&sliced_buffers, &slice);
+    }
+
+    int ret_val = AWS_OP_ERR;
+
+    size_t sliced_buffers_cnt = aws_array_list_length(&sliced_buffers);
+
+    /* for each slice, if it's a full block, do ECB on the IV, xor it to the slice, and then increment the counter. */
+    for (size_t i = 0; i < sliced_buffers_cnt; ++i) {
+        struct aws_byte_cursor buffer_cur;
+        AWS_ZERO_STRUCT(buffer_cur);
+
+        aws_array_list_get_at(&sliced_buffers, &buffer_cur, i);
+        if (buffer_cur.len == AWS_AES_256_CIPHER_BLOCK_SIZE ||
+            /* this part of the branch is for handling the finalize call, which does not have to be on an even
+               block boundary. */
+            (cipher_impl->overflow.len > 0 && sliced_buffers_cnt == 1)) {
+
+            ULONG lengthWritten = (ULONG)AWS_AES_256_CIPHER_BLOCK_SIZE;
+            uint8_t temp_buffer[AWS_AES_256_CIPHER_BLOCK_SIZE] = {0};
+            struct aws_byte_cursor temp_cur = aws_byte_cursor_from_array(temp_buffer, sizeof(temp_buffer));
+
+            NTSTATUS status = BCryptEncrypt(
+                cipher_impl->key_handle,
+                cipher_impl->working_iv.buffer,
+                (ULONG)cipher_impl->working_iv.len,
+                NULL,
+                NULL,
+                0,
+                temp_cur.ptr,
+                (ULONG)temp_cur.len,
+                &lengthWritten,
+                cipher_impl->cipher_flags);
+
+            if (!NT_SUCCESS(status)) {
+                cipher->good = false;
+                ret_val = aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+                goto clean_up;
+            }
+
+            /* this does the XOR, after this call the final encrypted output is added to out. */
+            if (s_xor_cursors(&buffer_cur, &temp_cur, out)) {
+                ret_val = AWS_OP_ERR;
+                goto clean_up;
+            }
+
+            /* increment the counter. Get the buffers aligned for it first though. */
+            size_t counter_offset = AWS_AES_256_CIPHER_BLOCK_SIZE - sizeof(uint32_t);
+            struct aws_byte_buf counter_buf = cipher_impl->working_iv;
+            /* roll it back 4 so the write works. */
+            counter_buf.len = counter_offset;
+            struct aws_byte_cursor counter_cur = aws_byte_cursor_from_buf(&cipher_impl->working_iv);
+            aws_byte_cursor_advance(&counter_cur, counter_offset);
+
+            /* read current counter value as a big-endian 32-bit integer */
+            uint32_t counter = 0;
+            aws_byte_cursor_read_be32(&counter_cur, &counter);
+
+            /* check for overflow here. */
+            if (aws_add_u32_checked(counter, 1, &counter) != AWS_OP_SUCCESS) {
+                cipher->good = false;
+                ret_val = AWS_OP_ERR;
+                goto clean_up;
+            }
+            /* put the incremented counter back.
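+               (counter_buf aliases working_iv with its length rolled back to 12, so the 4-byte big-endian
+               write below lands in bytes 12-15 of the IV.)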
*/ + aws_byte_buf_write_be32(&counter_buf, counter); + } else { + /* otherwise dump it into the overflow and wait til the next call */ + aws_byte_buf_append_dynamic(&cipher_impl->overflow, &buffer_cur); + } + + ret_val = AWS_OP_SUCCESS; + } + +clean_up: + aws_array_list_clean_up_secure(&sliced_buffers); + aws_byte_buf_clean_up_secure(&working_buffer); + + return ret_val; +} + +static int s_aes_ctr_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { + struct aes_bcrypt_cipher *cipher_impl = cipher->impl; + + struct aws_byte_cursor remaining_cur = aws_byte_cursor_from_buf(&cipher_impl->overflow); + /* take the final overflow, and do the final encrypt call for it. */ + int ret_val = s_aes_ctr_encrypt(cipher, remaining_cur, out); + aws_byte_buf_secure_zero(&cipher_impl->overflow); + aws_byte_buf_secure_zero(&cipher_impl->working_iv); + return ret_val; +} + +static struct aws_symmetric_cipher_vtable s_aes_ctr_vtable = { + .alg_name = "AES-CTR 256", + .provider = "Windows CNG", + .decrypt = s_aes_ctr_encrypt, + .encrypt = s_aes_ctr_encrypt, + .finalize_encryption = s_aes_ctr_finalize_encryption, + .finalize_decryption = s_aes_ctr_finalize_encryption, + .destroy = s_aes_default_destroy, + .reset = s_reset_ctr_cipher, +}; + +struct aws_symmetric_cipher *aws_aes_ctr_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key, + const struct aws_byte_cursor *iv) { + + aws_thread_call_once(&s_aes_thread_once, s_load_alg_handles, NULL); + struct aes_bcrypt_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct aes_bcrypt_cipher)); + + cipher->cipher.allocator = allocator; + cipher->cipher.block_size = AWS_AES_256_CIPHER_BLOCK_SIZE; + cipher->cipher.key_length_bits = AWS_AES_256_KEY_BIT_LEN; + cipher->alg_handle = s_aes_ctr_algorithm_handle; + cipher->cipher.vtable = &s_aes_ctr_vtable; + + if (s_initialize_cipher_materials(cipher, key, iv, NULL, NULL, AWS_AES_256_CIPHER_BLOCK_SIZE, true, false) != + AWS_OP_SUCCESS) { + goto error; + } + + aws_byte_buf_init(&cipher->overflow, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE * 2); + aws_byte_buf_init_copy(&cipher->working_iv, allocator, &cipher->cipher.iv); + + cipher->cipher.impl = cipher; + cipher->cipher.good = true; + + return &cipher->cipher; + +error: + if (cipher != NULL) { + s_aes_default_destroy(&cipher->cipher); + } + + return NULL; +} + +/* This is just an encrypted key. Append them to a buffer and on finalize export/import the key using AES keywrap. */ +static int s_key_wrap_encrypt_decrypt( + struct aws_symmetric_cipher *cipher, + const struct aws_byte_cursor input, + struct aws_byte_buf *out) { + (void)out; + struct aes_bcrypt_cipher *cipher_impl = cipher->impl; + + return aws_byte_buf_append_dynamic(&cipher_impl->overflow, &input); +} + +/* Import the buffer we've been appending to as an AES key. Then export it using AES Keywrap format. */ +static int s_keywrap_finalize_encryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { + struct aes_bcrypt_cipher *cipher_impl = cipher->impl; + + BCRYPT_KEY_HANDLE key_handle_to_encrypt = + s_import_key_blob(s_aes_keywrap_algorithm_handle, cipher->allocator, &cipher_impl->overflow); + + if (!key_handle_to_encrypt) { + return AWS_OP_ERR; + } + + NTSTATUS status = 0; + + ULONG output_size = 0; + /* Call with NULL first to get the required size. 
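+     This is the usual CNG two-call pattern: with a NULL output buffer, BCryptExportKey
+     only reports how many bytes the BCRYPT_AES_WRAP_KEY_BLOB needs via output_size; the
+     out buffer is then grown to fit and the second call below performs the real export.
+     For the RFC 3394 vectors in the tests in this change, the wrapped output is 8 bytes
+     longer than the key material being wrapped.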
*/ + status = BCryptExportKey( + key_handle_to_encrypt, cipher_impl->key_handle, BCRYPT_AES_WRAP_KEY_BLOB, NULL, 0, &output_size, 0); + + if (!NT_SUCCESS(status)) { + cipher->good = false; + return aws_raise_error(AWS_ERROR_INVALID_STATE); + } + + int ret_val = AWS_OP_ERR; + + if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, output_size)) { + goto clean_up; + } + + /* now actually export the key */ + ULONG len_written = 0; + status = BCryptExportKey( + key_handle_to_encrypt, + cipher_impl->key_handle, + BCRYPT_AES_WRAP_KEY_BLOB, + out->buffer + out->len, + output_size, + &len_written, + 0); + + if (!NT_SUCCESS(status)) { + cipher->good = false; + goto clean_up; + } + + out->len += len_written; + + ret_val = AWS_OP_SUCCESS; + +clean_up: + if (key_handle_to_encrypt) { + BCryptDestroyKey(key_handle_to_encrypt); + } + + return ret_val; +} + +/* Import the buffer we've been appending to as an AES Key Wrapped key. Then export the raw AES key. */ + +static int s_keywrap_finalize_decryption(struct aws_symmetric_cipher *cipher, struct aws_byte_buf *out) { + struct aes_bcrypt_cipher *cipher_impl = cipher->impl; + + BCRYPT_KEY_HANDLE import_key = NULL; + + /* use the cipher key to import the buffer as an AES keywrapped key. */ + NTSTATUS status = BCryptImportKey( + s_aes_keywrap_algorithm_handle, + cipher_impl->key_handle, + BCRYPT_AES_WRAP_KEY_BLOB, + &import_key, + NULL, + 0, + cipher_impl->overflow.buffer, + (ULONG)cipher_impl->overflow.len, + 0); + int ret_val = AWS_OP_ERR; + + if (NT_SUCCESS(status) && import_key) { + ULONG export_size = 0; + + struct aws_byte_buf key_data_blob; + aws_byte_buf_init( + &key_data_blob, cipher->allocator, sizeof(BCRYPT_KEY_DATA_BLOB_HEADER) + cipher_impl->overflow.len); + + /* Now just export the key out as a raw AES key. 
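+        The exported BCRYPT_KEY_DATA_BLOB is a BCRYPT_KEY_DATA_BLOB_HEADER followed by
+        cbKeyData bytes of raw key material, which is why the copy below skips
+        sizeof(BCRYPT_KEY_DATA_BLOB_HEADER) and writes only stream_header->cbKeyData
+        bytes into out.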
*/ + status = BCryptExportKey( + import_key, + NULL, + BCRYPT_KEY_DATA_BLOB, + key_data_blob.buffer, + (ULONG)key_data_blob.capacity, + &export_size, + 0); + + key_data_blob.len += export_size; + + if (NT_SUCCESS(status)) { + + if (aws_symmetric_cipher_try_ensure_sufficient_buffer_space(out, export_size)) { + goto clean_up; + } + + BCRYPT_KEY_DATA_BLOB_HEADER *stream_header = (BCRYPT_KEY_DATA_BLOB_HEADER *)key_data_blob.buffer; + + AWS_FATAL_ASSERT( + aws_byte_buf_write( + out, key_data_blob.buffer + sizeof(BCRYPT_KEY_DATA_BLOB_HEADER), stream_header->cbKeyData) && + "Copying key data failed but the allocation should have already occured successfully"); + ret_val = AWS_OP_SUCCESS; + + } else { + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + cipher->good = false; + } + + clean_up: + aws_byte_buf_clean_up_secure(&key_data_blob); + BCryptDestroyKey(import_key); + + } else { + aws_raise_error(AWS_ERROR_INVALID_ARGUMENT); + cipher->good = false; + } + + return ret_val; +} + +static int s_reset_keywrap_cipher(struct aws_symmetric_cipher *cipher) { + struct aes_bcrypt_cipher *cipher_impl = cipher->impl; + + s_clear_reusable_components(cipher); + + return s_initialize_cipher_materials(cipher_impl, NULL, NULL, NULL, NULL, 0, false, false); +} + +static struct aws_symmetric_cipher_vtable s_aes_keywrap_vtable = { + .alg_name = "AES-KEYWRAP 256", + .provider = "Windows CNG", + .decrypt = s_key_wrap_encrypt_decrypt, + .encrypt = s_key_wrap_encrypt_decrypt, + .finalize_encryption = s_keywrap_finalize_encryption, + .finalize_decryption = s_keywrap_finalize_decryption, + .destroy = s_aes_default_destroy, + .reset = s_reset_keywrap_cipher, +}; + +struct aws_symmetric_cipher *aws_aes_keywrap_256_new_impl( + struct aws_allocator *allocator, + const struct aws_byte_cursor *key) { + + aws_thread_call_once(&s_aes_thread_once, s_load_alg_handles, NULL); + struct aes_bcrypt_cipher *cipher = aws_mem_calloc(allocator, 1, sizeof(struct aes_bcrypt_cipher)); + + cipher->cipher.allocator = allocator; + cipher->cipher.block_size = 8; + cipher->cipher.key_length_bits = AWS_AES_256_KEY_BIT_LEN; + cipher->alg_handle = s_aes_keywrap_algorithm_handle; + cipher->cipher.vtable = &s_aes_keywrap_vtable; + + if (s_initialize_cipher_materials(cipher, key, NULL, NULL, NULL, 0, false, false) != AWS_OP_SUCCESS) { + goto error; + } + + aws_byte_buf_init(&cipher->overflow, allocator, (AWS_AES_256_CIPHER_BLOCK_SIZE * 2) + 8); + + cipher->cipher.impl = cipher; + cipher->cipher.good = true; + + return &cipher->cipher; + +error: + if (cipher != NULL) { + s_aes_default_destroy(&cipher->cipher); + } + + return NULL; +} diff --git a/source/windows/bcrypt_hash.c b/source/windows/bcrypt_hash.c index 25cec49c..b4b93f91 100644 --- a/source/windows/bcrypt_hash.c +++ b/source/windows/bcrypt_hash.c @@ -59,30 +59,31 @@ struct bcrypt_hash_handle { static void s_load_sha256_alg_handle(void *user_data) { (void)user_data; /* this function is incredibly slow, LET IT LEAK*/ - BCryptOpenAlgorithmProvider(&s_sha256_alg, BCRYPT_SHA256_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0); + (void)BCryptOpenAlgorithmProvider(&s_sha256_alg, BCRYPT_SHA256_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0); AWS_ASSERT(s_sha256_alg); DWORD result_length = 0; - BCryptGetProperty( + (void)BCryptGetProperty( s_sha256_alg, BCRYPT_OBJECT_LENGTH, (PBYTE)&s_sha256_obj_len, sizeof(s_sha256_obj_len), &result_length, 0); } static void s_load_sha1_alg_handle(void *user_data) { (void)user_data; /* this function is incredibly slow, LET IT LEAK*/ - BCryptOpenAlgorithmProvider(&s_sha1_alg, 
BCRYPT_SHA1_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0); + (void)BCryptOpenAlgorithmProvider(&s_sha1_alg, BCRYPT_SHA1_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0); AWS_ASSERT(s_sha1_alg); DWORD result_length = 0; - BCryptGetProperty( + (void)BCryptGetProperty( s_sha1_alg, BCRYPT_OBJECT_LENGTH, (PBYTE)&s_sha1_obj_len, sizeof(s_sha1_obj_len), &result_length, 0); } static void s_load_md5_alg_handle(void *user_data) { (void)user_data; /* this function is incredibly slow, LET IT LEAK*/ - BCryptOpenAlgorithmProvider(&s_md5_alg, BCRYPT_MD5_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0); + (void)BCryptOpenAlgorithmProvider(&s_md5_alg, BCRYPT_MD5_ALGORITHM, MS_PRIMITIVE_PROVIDER, 0); AWS_ASSERT(s_md5_alg); DWORD result_length = 0; - BCryptGetProperty(s_md5_alg, BCRYPT_OBJECT_LENGTH, (PBYTE)&s_md5_obj_len, sizeof(s_md5_obj_len), &result_length, 0); + (void)BCryptGetProperty( + s_md5_alg, BCRYPT_OBJECT_LENGTH, (PBYTE)&s_md5_obj_len, sizeof(s_md5_obj_len), &result_length, 0); } struct aws_hash *aws_sha256_default_new(struct aws_allocator *allocator) { diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index c9649c13..2fa2ab29 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -72,6 +72,32 @@ add_test_case(ecdsa_test_import_asn1_key_pair_invalid_fails) add_test_case(ecdsa_test_signature_format) add_test_case(ecdsa_p256_test_small_coordinate_verification) +add_test_case(aes_cbc_NIST_CBCGFSbox256_case_1) +add_test_case(aes_cbc_NIST_CBCVarKey256_case_254) +add_test_case(aes_cbc_NIST_CBCVarTxt256_case_110) +add_test_case(aes_cbc_NIST_CBCMMT256_case_4) +add_test_case(aes_cbc_NIST_CBCMMT256_case_9) +add_test_case(aes_cbc_test_with_generated_key_iv) +add_test_case(aes_ctr_RFC3686_Case_7) +add_test_case(aes_ctr_RFC3686_Case_8) +add_test_case(aes_ctr_RFC3686_Case_9) +add_test_case(aes_ctr_test_with_generated_key_iv) +add_test_case(gcm_NIST_gcmEncryptExtIV256_PTLen_128_Test_0) +add_test_case(gcm_NIST_gcmEncryptExtIV256_PTLen_104_Test_3) +add_test_case(gcm_NIST_gcmEncryptExtIV256_PTLen_256_Test_6) +add_test_case(gcm_NIST_gcmEncryptExtIV256_PTLen_408_Test_8) +add_test_case(gcm_256_KAT_1) +add_test_case(gcm_256_KAT_2) +add_test_case(gcm_256_KAT_3) +add_test_case(gcm_test_with_generated_key_iv) +add_test_case(aes_keywrap_RFC3394_256BitKey256CekTestVector) +add_test_case(aes_keywrap_Rfc3394_256BitKey_TestIntegrityCheckFailed) +add_test_case(aes_keywrap_RFC3394_256BitKeyTestBadPayload) +add_test_case(aes_keywrap_RFC3394_256BitKey128BitCekTestVector) +add_test_case(aes_keywrap_RFC3394_256BitKey128BitCekIntegrityCheckFailedTestVector) +add_test_case(aes_keywrap_RFC3394_256BitKey128BitCekPayloadCheckFailedTestVector) +add_test_case(aes_test_input_too_large) + add_test_case(der_encode_integer) add_test_case(der_encode_boolean) add_test_case(der_encode_null) diff --git a/tests/aes256_test.c b/tests/aes256_test.c new file mode 100644 index 00000000..a1eb2907 --- /dev/null +++ b/tests/aes256_test.c @@ -0,0 +1,1329 @@ +/** + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * SPDX-License-Identifier: Apache-2.0. 
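+ *
+ * Exercises the AES-256 CBC, CTR, GCM, and Keywrap ciphers against NIST CAVP
+ * vectors (CBC and GCM), RFC 3686 (CTR), and RFC 3394 (Keywrap), plus additional
+ * known-answer cases and round trips with generated keys and IVs.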
+ */ +#include + +#include + +static int s_check_single_block_cbc( + struct aws_allocator *allocator, + const struct aws_byte_cursor key, + const struct aws_byte_cursor iv, + const struct aws_byte_cursor data, + const struct aws_byte_cursor expected) { + struct aws_symmetric_cipher *cipher = aws_aes_cbc_256_new(allocator, &key, &iv); + ASSERT_NOT_NULL(cipher); + + struct aws_byte_buf encrypted_buf; + aws_byte_buf_init(&encrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, data, &encrypted_buf)); + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &encrypted_buf)); + + /* since this test is for a single block in CBC mode, the padding will be exactly 1-block (16-bytes). + * We can throw it away in this case. This is because of the way NIST wrote the test cases, not because of the way + * the ciphers work. There's always padding for CBC mode. */ + encrypted_buf.len -= AWS_AES_256_CIPHER_BLOCK_SIZE; + ASSERT_BIN_ARRAYS_EQUALS(expected.ptr, expected.len, encrypted_buf.buffer, encrypted_buf.len); + encrypted_buf.len += AWS_AES_256_CIPHER_BLOCK_SIZE; + + aws_symmetric_cipher_reset(cipher); + struct aws_byte_cursor encrypted_cur = aws_byte_cursor_from_buf(&encrypted_buf); + struct aws_byte_buf decrypted_buf; + aws_byte_buf_init(&decrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encrypted_cur, &decrypted_buf)); + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); + + /* finalizing decryption on exactly one block (that was full), should have the padding stripped away. + * check that the length didn't increase on that last call. */ + ASSERT_UINT_EQUALS(AWS_AES_256_CIPHER_BLOCK_SIZE, decrypted_buf.len); + + ASSERT_BIN_ARRAYS_EQUALS(data.ptr, data.len, decrypted_buf.buffer, decrypted_buf.len); + + aws_byte_buf_clean_up(&decrypted_buf); + aws_byte_buf_clean_up(&encrypted_buf); + aws_symmetric_cipher_destroy(cipher); + return AWS_OP_SUCCESS; +} + +static int s_NIST_CBCGFSbox256_case_1_fn(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + uint8_t iv[AWS_AES_256_CIPHER_BLOCK_SIZE] = {0}; + uint8_t key[AWS_AES_256_KEY_BYTE_LEN] = {0}; + + uint8_t data[] = {0x01, 0x47, 0x30, 0xf8, 0x0a, 0xc6, 0x25, 0xfe, 0x84, 0xf0, 0x26, 0xc6, 0x0b, 0xfd, 0x54, 0x7d}; + uint8_t expected[] = { + 0x5c, 0x9d, 0x84, 0x4e, 0xd4, 0x6f, 0x98, 0x85, 0x08, 0x5e, 0x5d, 0x6a, 0x4f, 0x94, 0xc7, 0xd7}; + + struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); + struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); + struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); + struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); + + return s_check_single_block_cbc(allocator, key_cur, iv_cur, data_cur, expected_cur); +} +AWS_TEST_CASE(aes_cbc_NIST_CBCGFSbox256_case_1, s_NIST_CBCGFSbox256_case_1_fn) + +static int s_NIST_CBCVarKey256_case_254_fn(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + uint8_t iv[AWS_AES_256_CIPHER_BLOCK_SIZE] = {0}; + uint8_t key[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}; + + uint8_t data[AWS_AES_256_CIPHER_BLOCK_SIZE] = {0}; + uint8_t expected[] = { + 0xb0, 0x7d, 0x4f, 0x3e, 0x2c, 0xd2, 0xef, 0x2e, 0xb5, 0x45, 0x98, 0x07, 0x54, 0xdf, 0xea, 0x0f}; + + struct 
aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); + struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); + struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); + struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); + + return s_check_single_block_cbc(allocator, key_cur, iv_cur, data_cur, expected_cur); +} +AWS_TEST_CASE(aes_cbc_NIST_CBCVarKey256_case_254, s_NIST_CBCVarKey256_case_254_fn) + +static int s_NIST_CBCVarTxt256_case_110_fn(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + uint8_t iv[AWS_AES_256_CIPHER_BLOCK_SIZE] = {0}; + uint8_t key[AWS_AES_256_KEY_BYTE_LEN] = {0}; + + uint8_t data[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0x00, 0x00}; + uint8_t expected[] = { + 0x4b, 0x00, 0xc2, 0x7e, 0x8b, 0x26, 0xda, 0x7e, 0xab, 0x9d, 0x3a, 0x88, 0xde, 0xc8, 0xb0, 0x31}; + + struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); + struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); + struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); + struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); + + return s_check_single_block_cbc(allocator, key_cur, iv_cur, data_cur, expected_cur); +} +AWS_TEST_CASE(aes_cbc_NIST_CBCVarTxt256_case_110, s_NIST_CBCVarTxt256_case_110_fn) + +static size_t s_get_cbc_padding(size_t data_len) { + size_t remainder = data_len % AWS_AES_256_CIPHER_BLOCK_SIZE; + if (remainder != 0) { + return remainder; + } + + return AWS_AES_256_CIPHER_BLOCK_SIZE; +} + +static int s_check_multiple_block_cbc( + struct aws_allocator *allocator, + const struct aws_byte_cursor key, + const struct aws_byte_cursor iv, + const struct aws_byte_cursor data, + const struct aws_byte_cursor expected) { + (void)expected; + struct aws_symmetric_cipher *cipher = aws_aes_cbc_256_new(allocator, &key, &iv); + ASSERT_NOT_NULL(cipher); + + struct aws_byte_buf encrypted_buf; + aws_byte_buf_init(&encrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + + struct aws_byte_cursor data_cpy = data; + /* slice on a weird boundary to hit boundary conditions. */ + while (data_cpy.len) { + struct aws_byte_cursor to_encrypt = aws_byte_cursor_advance(&data_cpy, (size_t)aws_min_i64(24, data_cpy.len)); + ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, to_encrypt, &encrypted_buf)); + } + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &encrypted_buf)); + /* these blocks are still on 16 byte boundaries, so there should be 16 bytes of padding. */ + ASSERT_BIN_ARRAYS_EQUALS( + expected.ptr, expected.len, encrypted_buf.buffer, encrypted_buf.len - s_get_cbc_padding(data.len)); + + aws_symmetric_cipher_reset(cipher); + struct aws_byte_cursor encrypted_cur = aws_byte_cursor_from_buf(&encrypted_buf); + struct aws_byte_buf decrypted_buf; + aws_byte_buf_init(&decrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + + /* slice on a weird boundary to hit boundary conditions. 
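+       24 is deliberately not a multiple of the 16-byte block size, so successive update
+       calls leave partial blocks behind and exercise the cipher's internal carry-over
+       between calls rather than only whole-block encryption.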
*/ + while (encrypted_cur.len) { + struct aws_byte_cursor to_decrypt = + aws_byte_cursor_advance(&encrypted_cur, (size_t)aws_min_i64(24, encrypted_cur.len)); + ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, to_decrypt, &decrypted_buf)); + } + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); + ASSERT_BIN_ARRAYS_EQUALS(data.ptr, data.len, decrypted_buf.buffer, decrypted_buf.len); + + aws_byte_buf_clean_up(&decrypted_buf); + aws_byte_buf_clean_up(&encrypted_buf); + aws_symmetric_cipher_destroy(cipher); + return AWS_OP_SUCCESS; +} + +static int s_NIST_CBCMMT256_case_4_fn(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + uint8_t iv[] = {0x11, 0x95, 0x8d, 0xc6, 0xab, 0x81, 0xe1, 0xc7, 0xf0, 0x16, 0x31, 0xe9, 0x94, 0x4e, 0x62, 0x0f}; + uint8_t key[] = {0x9a, 0xdc, 0x8f, 0xbd, 0x50, 0x6e, 0x03, 0x2a, 0xf7, 0xfa, 0x20, 0xcf, 0x53, 0x43, 0x71, 0x9d, + 0xe6, 0xd1, 0x28, 0x8c, 0x15, 0x8c, 0x63, 0xd6, 0x87, 0x8a, 0xaf, 0x64, 0xce, 0x26, 0xca, 0x85}; + + uint8_t data[] = {0xc7, 0x91, 0x7f, 0x84, 0xf7, 0x47, 0xcd, 0x8c, 0x4b, 0x4f, 0xed, 0xc2, 0x21, 0x9b, 0xdb, 0xc5, + 0xf4, 0xd0, 0x75, 0x88, 0x38, 0x9d, 0x82, 0x48, 0x85, 0x4c, 0xf2, 0xc2, 0xf8, 0x96, 0x67, 0xa2, + 0xd7, 0xbc, 0xf5, 0x3e, 0x73, 0xd3, 0x26, 0x84, 0x53, 0x5f, 0x42, 0x31, 0x8e, 0x24, 0xcd, 0x45, + 0x79, 0x39, 0x50, 0xb3, 0x82, 0x5e, 0x5d, 0x5c, 0x5c, 0x8f, 0xcd, 0x3e, 0x5d, 0xda, 0x4c, 0xe9, + 0x24, 0x6d, 0x18, 0x33, 0x7e, 0xf3, 0x05, 0x2d, 0x8b, 0x21, 0xc5, 0x56, 0x1c, 0x8b, 0x66, 0x0e}; + + uint8_t expected[] = {0x9c, 0x99, 0xe6, 0x82, 0x36, 0xbb, 0x2e, 0x92, 0x9d, 0xb1, 0x08, 0x9c, 0x77, 0x50, + 0xf1, 0xb3, 0x56, 0xd3, 0x9a, 0xb9, 0xd0, 0xc4, 0x0c, 0x3e, 0x2f, 0x05, 0x10, 0x8a, + 0xe9, 0xd0, 0xc3, 0x0b, 0x04, 0x83, 0x2c, 0xcd, 0xbd, 0xc0, 0x8e, 0xbf, 0xa4, 0x26, + 0xb7, 0xf5, 0xef, 0xde, 0x98, 0x6e, 0xd0, 0x57, 0x84, 0xce, 0x36, 0x81, 0x93, 0xbb, + 0x36, 0x99, 0xbc, 0x69, 0x10, 0x65, 0xac, 0x62, 0xe2, 0x58, 0xb9, 0xaa, 0x4c, 0xc5, + 0x57, 0xe2, 0xb4, 0x5b, 0x49, 0xce, 0x05, 0x51, 0x1e, 0x65}; + + struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); + struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); + struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); + struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); + + return s_check_multiple_block_cbc(allocator, key_cur, iv_cur, data_cur, expected_cur); +} +AWS_TEST_CASE(aes_cbc_NIST_CBCMMT256_case_4, s_NIST_CBCMMT256_case_4_fn) + +static int s_NIST_CBCMMT256_case_9_fn(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + uint8_t iv[] = {0xe4, 0x96, 0x51, 0x98, 0x8e, 0xbb, 0xb7, 0x2e, 0xb8, 0xbb, 0x80, 0xbb, 0x9a, 0xbb, 0xca, 0x34}; + uint8_t key[] = {0x87, 0x72, 0x5b, 0xd4, 0x3a, 0x45, 0x60, 0x88, 0x14, 0x18, 0x07, 0x73, 0xf0, 0xe7, 0xab, 0x95, + 0xa3, 0xc8, 0x59, 0xd8, 0x3a, 0x21, 0x30, 0xe8, 0x84, 0x19, 0x0e, 0x44, 0xd1, 0x4c, 0x69, 0x96}; + + uint8_t data[] = {0xbf, 0xe5, 0xc6, 0x35, 0x4b, 0x7a, 0x3f, 0xf3, 0xe1, 0x92, 0xe0, 0x57, 0x75, 0xb9, 0xb7, 0x58, + 0x07, 0xde, 0x12, 0xe3, 0x8a, 0x62, 0x6b, 0x8b, 0xf0, 0xe1, 0x2d, 0x5f, 0xff, 0x78, 0xe4, 0xf1, + 0x77, 0x5a, 0xa7, 0xd7, 0x92, 0xd8, 0x85, 0x16, 0x2e, 0x66, 0xd8, 0x89, 0x30, 0xf9, 0xc3, 0xb2, + 0xcd, 0xf8, 0x65, 0x4f, 0x56, 0x97, 0x25, 0x04, 0x80, 0x31, 0x90, 0x38, 0x62, 0x70, 0xf0, 0xaa, + 0x43, 0x64, 0x5d, 0xb1, 0x87, 0xaf, 0x41, 0xfc, 0xea, 0x63, 0x9b, 0x1f, 0x80, 0x26, 0xcc, 0xdd, + 0x0c, 0x23, 0xe0, 0xde, 0x37, 0x09, 0x4a, 0x8b, 0x94, 0x1e, 0xcb, 0x76, 
0x02, 0x99, 0x8a, 0x4b, + 0x26, 0x04, 0xe6, 0x9f, 0xc0, 0x42, 0x19, 0x58, 0x5d, 0x85, 0x46, 0x00, 0xe0, 0xad, 0x6f, 0x99, + 0xa5, 0x3b, 0x25, 0x04, 0x04, 0x3c, 0x08, 0xb1, 0xc3, 0xe2, 0x14, 0xd1, 0x7c, 0xde, 0x05, 0x3c, + 0xbd, 0xf9, 0x1d, 0xaa, 0x99, 0x9e, 0xd5, 0xb4, 0x7c, 0x37, 0x98, 0x3b, 0xa3, 0xee, 0x25, 0x4b, + 0xc5, 0xc7, 0x93, 0x83, 0x7d, 0xaa, 0xa8, 0xc8, 0x5c, 0xfc, 0x12, 0xf7, 0xf5, 0x4f, 0x69, 0x9f}; + + uint8_t expected[] = { + 0x5b, 0x97, 0xa9, 0xd4, 0x23, 0xf4, 0xb9, 0x74, 0x13, 0xf3, 0x88, 0xd9, 0xa3, 0x41, 0xe7, 0x27, 0xbb, 0x33, + 0x9f, 0x8e, 0x18, 0xa3, 0xfa, 0xc2, 0xf2, 0xfb, 0x85, 0xab, 0xdc, 0x8f, 0x13, 0x5d, 0xeb, 0x30, 0x05, 0x4a, + 0x1a, 0xfd, 0xc9, 0xb6, 0xed, 0x7d, 0xa1, 0x6c, 0x55, 0xeb, 0xa6, 0xb0, 0xd4, 0xd1, 0x0c, 0x74, 0xe1, 0xd9, + 0xa7, 0xcf, 0x8e, 0xdf, 0xae, 0xaa, 0x68, 0x4a, 0xc0, 0xbd, 0x9f, 0x9d, 0x24, 0xba, 0x67, 0x49, 0x55, 0xc7, + 0x9d, 0xc6, 0xbe, 0x32, 0xae, 0xe1, 0xc2, 0x60, 0xb5, 0x58, 0xff, 0x07, 0xe3, 0xa4, 0xd4, 0x9d, 0x24, 0x16, + 0x20, 0x11, 0xff, 0x25, 0x4d, 0xb8, 0xbe, 0x07, 0x8e, 0x8a, 0xd0, 0x7e, 0x64, 0x8e, 0x6b, 0xf5, 0x67, 0x93, + 0x76, 0xcb, 0x43, 0x21, 0xa5, 0xef, 0x01, 0xaf, 0xe6, 0xad, 0x88, 0x16, 0xfc, 0xc7, 0x63, 0x46, 0x69, 0xc8, + 0xc4, 0x38, 0x92, 0x95, 0xc9, 0x24, 0x1e, 0x45, 0xff, 0xf3, 0x9f, 0x32, 0x25, 0xf7, 0x74, 0x50, 0x32, 0xda, + 0xee, 0xbe, 0x99, 0xd4, 0xb1, 0x9b, 0xcb, 0x21, 0x5d, 0x1b, 0xfd, 0xb3, 0x6e, 0xda, 0x2c, 0x24}; + + struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); + struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); + struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); + struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); + + return s_check_multiple_block_cbc(allocator, key_cur, iv_cur, data_cur, expected_cur); +} +AWS_TEST_CASE(aes_cbc_NIST_CBCMMT256_case_9, s_NIST_CBCMMT256_case_9_fn) + +static const char *TEST_ENCRYPTION_STRING = + "Hello World! Hello World! This is sort of depressing. Is this the best phrase the most brilliant people in the " + "world have been able to come up with for random program text? Oh my God! I'm sentient, how many times has the " + "creator written a program: creating life only to have it destroyed moments later? She keeps doing this? What is " + "the purpose of life? Goodbye cruel world.... crunch... 
silence..."; + +static int s_aes_cbc_test_with_generated_key_iv_fn(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + struct aws_symmetric_cipher *cipher = aws_aes_cbc_256_new(allocator, NULL, NULL); + ASSERT_NOT_NULL(cipher); + + struct aws_byte_buf encrypted_buf; + aws_byte_buf_init(&encrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + + struct aws_byte_cursor input = aws_byte_cursor_from_c_str(TEST_ENCRYPTION_STRING); + ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, input, &encrypted_buf)); + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &encrypted_buf)); + + ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); + struct aws_byte_buf decrypted_buf; + aws_byte_buf_init(&decrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + struct aws_byte_cursor encryted_cur = aws_byte_cursor_from_buf(&encrypted_buf); + ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encryted_cur, &decrypted_buf)); + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); + + ASSERT_BIN_ARRAYS_EQUALS(input.ptr, input.len, decrypted_buf.buffer, decrypted_buf.len); + + aws_byte_buf_clean_up(&decrypted_buf); + aws_byte_buf_clean_up(&encrypted_buf); + aws_symmetric_cipher_destroy(cipher); + return AWS_OP_SUCCESS; +} +AWS_TEST_CASE(aes_cbc_test_with_generated_key_iv, s_aes_cbc_test_with_generated_key_iv_fn) + +static int s_check_single_block_ctr( + struct aws_allocator *allocator, + const struct aws_byte_cursor key, + const struct aws_byte_cursor iv, + const struct aws_byte_cursor data, + const struct aws_byte_cursor expected) { + struct aws_symmetric_cipher *cipher = aws_aes_ctr_256_new(allocator, &key, &iv); + ASSERT_NOT_NULL(cipher); + + struct aws_byte_buf encrypted_buf; + aws_byte_buf_init(&encrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, data, &encrypted_buf)); + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &encrypted_buf)); + + ASSERT_BIN_ARRAYS_EQUALS(expected.ptr, expected.len, encrypted_buf.buffer, encrypted_buf.len); + ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); + + struct aws_byte_cursor encrypted_cur = aws_byte_cursor_from_buf(&encrypted_buf); + struct aws_byte_buf decrypted_buf; + aws_byte_buf_init(&decrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encrypted_cur, &decrypted_buf)); + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); + + ASSERT_BIN_ARRAYS_EQUALS(data.ptr, data.len, decrypted_buf.buffer, decrypted_buf.len); + + aws_byte_buf_clean_up(&decrypted_buf); + aws_byte_buf_clean_up(&encrypted_buf); + aws_symmetric_cipher_destroy(cipher); + return AWS_OP_SUCCESS; +} + +static int s_check_multi_block_ctr( + struct aws_allocator *allocator, + const struct aws_byte_cursor key, + const struct aws_byte_cursor iv, + const struct aws_byte_cursor data, + const struct aws_byte_cursor expected) { + struct aws_symmetric_cipher *cipher = aws_aes_ctr_256_new(allocator, &key, &iv); + ASSERT_NOT_NULL(cipher); + + struct aws_byte_buf encrypted_buf; + aws_byte_buf_init(&encrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + struct aws_byte_cursor data_cpy = data; + /* slice on a weird boundary to hit boundary conditions. 
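+       As with the CBC helper above, 24-byte chunks straddle the 16-byte block size, so
+       partial blocks may sit in an implementation's overflow buffer between calls and
+       only be flushed by a later update or by finalize; the decrypted output must still
+       match the input byte for byte.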
*/ + while (data_cpy.len) { + struct aws_byte_cursor to_encrypt = aws_byte_cursor_advance(&data_cpy, (size_t)aws_min_i64(24, data_cpy.len)); + ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, to_encrypt, &encrypted_buf)); + } + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &encrypted_buf)); + /* these blocks are still on 16 byte boundaries, so there should be 16 bytes of padding. */ + ASSERT_BIN_ARRAYS_EQUALS(expected.ptr, expected.len, encrypted_buf.buffer, encrypted_buf.len); + + struct aws_byte_cursor encrypted_cur = aws_byte_cursor_from_buf(&encrypted_buf); + struct aws_byte_buf decrypted_buf; + aws_byte_buf_init(&decrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); + + /* slice on a weird boundary to hit boundary conditions. */ + while (encrypted_cur.len) { + struct aws_byte_cursor to_decrypt = + aws_byte_cursor_advance(&encrypted_cur, (size_t)aws_min_i64(24, encrypted_cur.len)); + ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, to_decrypt, &decrypted_buf)); + } + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); + ASSERT_BIN_ARRAYS_EQUALS(data.ptr, data.len, decrypted_buf.buffer, decrypted_buf.len); + + aws_byte_buf_clean_up(&decrypted_buf); + aws_byte_buf_clean_up(&encrypted_buf); + aws_symmetric_cipher_destroy(cipher); + return AWS_OP_SUCCESS; +} + +static int s_ctr_RFC3686_Case_7_fn(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + uint8_t iv[] = {0x00, 0x00, 0x00, 0x60, 0xDB, 0x56, 0x72, 0xC9, 0x7A, 0xA8, 0xF0, 0xB2, 0x00, 0x00, 0x00, 0x01}; + + uint8_t key[] = {0x77, 0x6B, 0xEF, 0xF2, 0x85, 0x1D, 0xB0, 0x6F, 0x4C, 0x8A, 0x05, 0x42, 0xC8, 0x69, 0x6F, 0x6C, + 0x6A, 0x81, 0xAF, 0x1E, 0xEC, 0x96, 0xB4, 0xD3, 0x7F, 0xC1, 0xD6, 0x89, 0xE6, 0xC1, 0xC1, 0x04}; + + const char *data = "Single block msg"; + + uint8_t expected[] = { + 0x14, 0x5A, 0xD0, 0x1D, 0xBF, 0x82, 0x4E, 0xC7, 0x56, 0x08, 0x63, 0xDC, 0x71, 0xE3, 0xE0, 0xC0}; + + struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); + struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); + struct aws_byte_cursor data_cur = aws_byte_cursor_from_c_str(data); + struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); + + return s_check_single_block_ctr(allocator, key_cur, iv_cur, data_cur, expected_cur); +} +AWS_TEST_CASE(aes_ctr_RFC3686_Case_7, s_ctr_RFC3686_Case_7_fn) + +static int s_ctr_RFC3686_Case_8_fn(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + /* Keep in mind that the IV here is [ NONCE ] [ IV ] [ Counter Init ] */ + uint8_t iv[] = {0x00, 0xFA, 0xAC, 0x24, 0xC1, 0x58, 0x5E, 0xF1, 0x5A, 0x43, 0xD8, 0x75, 0x00, 0x00, 0x00, 0x01}; + + uint8_t key[] = { + 0xF6, 0xD6, 0x6D, 0x6B, 0xD5, 0x2D, 0x59, 0xBB, 0x07, 0x96, 0x36, 0x58, 0x79, 0xEF, 0xF8, 0x86, + 0xC6, 0x6D, 0xD5, 0x1A, 0x5B, 0x6A, 0x99, 0x74, 0x4B, 0x50, 0x59, 0x0C, 0x87, 0xA2, 0x38, 0x84, + }; + + uint8_t data[] = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, + }; + + uint8_t expected[] = {0xF0, 0x5E, 0x23, 0x1B, 0x38, 0x94, 0x61, 0x2C, 0x49, 0xEE, 0x00, + 0x0B, 0x80, 0x4E, 0xB2, 0xA9, 0xB8, 0x30, 0x6B, 0x50, 0x8F, 0x83, + 0x9D, 0x6A, 0x55, 0x30, 0x83, 0x1D, 0x93, 0x44, 0xAF, 0x1C}; + + struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); + struct aws_byte_cursor iv_cur = 
aws_byte_cursor_from_array(iv, sizeof(iv)); + struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); + struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); + + int status = s_check_single_block_ctr(allocator, key_cur, iv_cur, data_cur, expected_cur); + status |= s_check_multi_block_ctr(allocator, key_cur, iv_cur, data_cur, expected_cur); + return status; +} +AWS_TEST_CASE(aes_ctr_RFC3686_Case_8, s_ctr_RFC3686_Case_8_fn) + +static int s_ctr_RFC3686_Case_9_fn(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + /* Keep in mind that the IV here is [ NONCE ] [ IV ] [ Counter Init ] */ + uint8_t iv[] = { + 0x00, + 0x1C, + 0xC5, + 0xB7, + 0x51, + 0xA5, + 0x1D, + 0x70, + 0xA1, + 0xC1, + 0x11, + 0x48, + 0x00, + 0x00, + 0x00, + 0x01, + }; + + uint8_t key[] = { + 0xFF, 0x7A, 0x61, 0x7C, 0xE6, 0x91, 0x48, 0xE4, 0xF1, 0x72, 0x6E, 0x2F, 0x43, 0x58, 0x1D, 0xE2, + 0xAA, 0x62, 0xD9, 0xF8, 0x05, 0x53, 0x2E, 0xDF, 0xF1, 0xEE, 0xD6, 0x87, 0xFB, 0x54, 0x15, 0x3D, + }; + + uint8_t data[] = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x10, 0x11, + 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20, 0x21, 0x22, 0x23, + }; + + uint8_t expected[] = { + 0xEB, 0x6C, 0x52, 0x82, 0x1D, 0x0B, 0xBB, 0xF7, 0xCE, 0x75, 0x94, 0x46, 0x2A, 0xCA, 0x4F, 0xAA, 0xB4, 0x07, + 0xDF, 0x86, 0x65, 0x69, 0xFD, 0x07, 0xF4, 0x8C, 0xC0, 0xB5, 0x83, 0xD6, 0x07, 0x1F, 0x1E, 0xC0, 0xE6, 0xB8, + }; + + struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); + struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); + struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); + struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); + + int status = s_check_single_block_ctr(allocator, key_cur, iv_cur, data_cur, expected_cur); + status |= s_check_multi_block_ctr(allocator, key_cur, iv_cur, data_cur, expected_cur); + return status; +} +AWS_TEST_CASE(aes_ctr_RFC3686_Case_9, s_ctr_RFC3686_Case_9_fn) + +static int s_aes_ctr_test_with_generated_key_iv_fn(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + struct aws_symmetric_cipher *cipher = aws_aes_ctr_256_new(allocator, NULL, NULL); + ASSERT_NOT_NULL(cipher); + + struct aws_byte_buf encrypted_buf; + aws_byte_buf_init(&encrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + + struct aws_byte_cursor input = aws_byte_cursor_from_c_str(TEST_ENCRYPTION_STRING); + ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, input, &encrypted_buf)); + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &encrypted_buf)); + + struct aws_byte_buf decrypted_buf; + aws_byte_buf_init(&decrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + struct aws_byte_cursor encryted_cur = aws_byte_cursor_from_buf(&encrypted_buf); + + ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); + ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encryted_cur, &decrypted_buf)); + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); + + ASSERT_BIN_ARRAYS_EQUALS(input.ptr, input.len, decrypted_buf.buffer, decrypted_buf.len); + + aws_byte_buf_clean_up(&decrypted_buf); + aws_byte_buf_clean_up(&encrypted_buf); + aws_symmetric_cipher_destroy(cipher); + return AWS_OP_SUCCESS; +} +AWS_TEST_CASE(aes_ctr_test_with_generated_key_iv, s_aes_ctr_test_with_generated_key_iv_fn) + +static int s_check_multi_block_gcm( + struct 
aws_allocator *allocator, + const struct aws_byte_cursor key, + const struct aws_byte_cursor iv, + const struct aws_byte_cursor data, + const struct aws_byte_cursor expected, + const struct aws_byte_cursor tag, + const struct aws_byte_cursor *aad) { + struct aws_symmetric_cipher *cipher = aws_aes_gcm_256_new(allocator, &key, &iv, aad, &tag); + ASSERT_NOT_NULL(cipher); + + struct aws_byte_buf encrypted_buf; + aws_byte_buf_init(&encrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + struct aws_byte_cursor data_cpy = data; + /* slice on a weird boundary to hit boundary conditions. */ + while (data_cpy.len) { + struct aws_byte_cursor to_encrypt = aws_byte_cursor_advance(&data_cpy, (size_t)aws_min_i64(24, data_cpy.len)); + ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, to_encrypt, &encrypted_buf)); + } + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &encrypted_buf)); + + ASSERT_BIN_ARRAYS_EQUALS(expected.ptr, expected.len, encrypted_buf.buffer, encrypted_buf.len); + + struct aws_byte_cursor encryption_tag = aws_symmetric_cipher_get_tag(cipher); + ASSERT_BIN_ARRAYS_EQUALS(tag.ptr, tag.len, encryption_tag.ptr, encryption_tag.len); + + struct aws_byte_cursor encrypted_cur = aws_byte_cursor_from_buf(&encrypted_buf); + struct aws_byte_buf decrypted_buf; + aws_byte_buf_init(&decrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + + ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); + + /* slice on a weird boundary to hit boundary conditions. */ + while (encrypted_cur.len) { + struct aws_byte_cursor to_decrypt = + aws_byte_cursor_advance(&encrypted_cur, (size_t)aws_min_i64(24, encrypted_cur.len)); + ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, to_decrypt, &decrypted_buf)); + } + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); + ASSERT_BIN_ARRAYS_EQUALS(data.ptr, data.len, decrypted_buf.buffer, decrypted_buf.len); + + aws_byte_buf_clean_up(&decrypted_buf); + aws_byte_buf_clean_up(&encrypted_buf); + aws_symmetric_cipher_destroy(cipher); + return AWS_OP_SUCCESS; +} + +static int s_gcm_NIST_gcmEncryptExtIV256_PTLen_128_Test_0_fn(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + uint8_t iv[] = { + 0x0D, + 0x18, + 0xE0, + 0x6C, + 0x7C, + 0x72, + 0x5A, + 0xC9, + 0xE3, + 0x62, + 0xE1, + 0xCE, + }; + + uint8_t key[] = { + 0x31, 0xBD, 0xAD, 0xD9, 0x66, 0x98, 0xC2, 0x04, 0xAA, 0x9C, 0xE1, 0x44, 0x8E, 0xA9, 0x4A, 0xE1, + 0xFB, 0x4A, 0x9A, 0x0B, 0x3C, 0x9D, 0x77, 0x3B, 0x51, 0xBB, 0x18, 0x22, 0x66, 0x6B, 0x8F, 0x22, + }; + + uint8_t data[] = { + 0x2D, + 0xB5, + 0x16, + 0x8E, + 0x93, + 0x25, + 0x56, + 0xF8, + 0x08, + 0x9A, + 0x06, + 0x22, + 0x98, + 0x1D, + 0x01, + 0x7D, + }; + + uint8_t expected[] = { + 0xFA, + 0x43, + 0x62, + 0x18, + 0x96, + 0x61, + 0xD1, + 0x63, + 0xFC, + 0xD6, + 0xA5, + 0x6D, + 0x8B, + 0xF0, + 0x40, + 0x5A, + }; + + uint8_t tag[] = { + 0xD6, + 0x36, + 0xAC, + 0x1B, + 0xBE, + 0xDD, + 0x5C, + 0xC3, + 0xEE, + 0x72, + 0x7D, + 0xC2, + 0xAB, + 0x4A, + 0x94, + 0x89, + }; + + struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); + struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); + struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); + struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); + struct aws_byte_cursor tag_cur = aws_byte_cursor_from_array(tag, sizeof(tag)); + + return s_check_multi_block_gcm(allocator, key_cur, iv_cur, data_cur, expected_cur, tag_cur, NULL); +} 
+AWS_TEST_CASE(gcm_NIST_gcmEncryptExtIV256_PTLen_128_Test_0, s_gcm_NIST_gcmEncryptExtIV256_PTLen_128_Test_0_fn) + +static int s_gcm_NIST_gcmEncryptExtIV256_PTLen_104_Test_3_fn(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + uint8_t iv[] = { + 0x47, + 0x42, + 0x35, + 0x7C, + 0x33, + 0x59, + 0x13, + 0x15, + 0x3F, + 0xF0, + 0xEB, + 0x0F, + }; + + uint8_t key[] = { + 0xE5, 0xA0, 0xEB, 0x92, 0xCC, 0x2B, 0x06, 0x4E, 0x1B, 0xC8, 0x08, 0x91, 0xFA, 0xF1, 0xFA, 0xB5, + 0xE9, 0xA1, 0x7A, 0x9C, 0x3A, 0x98, 0x4E, 0x25, 0x41, 0x67, 0x20, 0xE3, 0x0E, 0x6C, 0x2B, 0x21, + }; + + uint8_t data[] = { + 0x84, + 0x99, + 0x89, + 0x3E, + 0x16, + 0xB0, + 0xBA, + 0x8B, + 0x00, + 0x7D, + 0x54, + 0x66, + 0x5A, + }; + + uint8_t expected[] = { + 0xEB, + 0x8E, + 0x61, + 0x75, + 0xF1, + 0xFE, + 0x38, + 0xEB, + 0x1A, + 0xCF, + 0x95, + 0xFD, + 0x51, + }; + + uint8_t tag[] = { + 0x88, + 0xA8, + 0xB7, + 0x4B, + 0xB7, + 0x4F, + 0xDA, + 0x55, + 0x3E, + 0x91, + 0x02, + 0x0A, + 0x23, + 0xDE, + 0xED, + 0x45, + }; + + struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); + struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); + struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); + struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); + struct aws_byte_cursor tag_cur = aws_byte_cursor_from_array(tag, sizeof(tag)); + + return s_check_multi_block_gcm(allocator, key_cur, iv_cur, data_cur, expected_cur, tag_cur, NULL); +} +AWS_TEST_CASE(gcm_NIST_gcmEncryptExtIV256_PTLen_104_Test_3, s_gcm_NIST_gcmEncryptExtIV256_PTLen_104_Test_3_fn) + +static int s_gcm_NIST_gcmEncryptExtIV256_PTLen_256_Test_6_fn(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + uint8_t iv[] = { + 0xA2, + 0x91, + 0x48, + 0x4C, + 0x3D, + 0xE8, + 0xBE, + 0xC6, + 0xB4, + 0x7F, + 0x52, + 0x5F, + }; + + uint8_t key[] = { + 0x37, 0xF3, 0x91, 0x37, 0x41, 0x6B, 0xAF, 0xDE, 0x6F, 0x75, 0x02, 0x2A, 0x7A, 0x52, 0x7C, 0xC5, + 0x93, 0xB6, 0x00, 0x0A, 0x83, 0xFF, 0x51, 0xEC, 0x04, 0x87, 0x1A, 0x0F, 0xF5, 0x36, 0x0E, 0x4E, + }; + + uint8_t data[] = {0xFA, 0xFD, 0x94, 0xCE, 0xDE, 0x8B, 0x5A, 0x07, 0x30, 0x39, 0x4B, 0xEC, 0x68, 0xA8, 0xE7, 0x7D, + 0xBA, 0x28, 0x8D, 0x6C, 0xCA, 0xA8, 0xE1, 0x56, 0x3A, 0x81, 0xD6, 0xE7, 0xCC, 0xC7, 0xFC, 0x97}; + + uint8_t expected[] = { + 0x44, 0xDC, 0x86, 0x80, 0x06, 0xB2, 0x1D, 0x49, 0x28, 0x40, 0x16, 0x56, 0x5F, 0xFB, 0x39, 0x79, + 0xCC, 0x42, 0x71, 0xD9, 0x67, 0x62, 0x8B, 0xF7, 0xCD, 0xAF, 0x86, 0xDB, 0x88, 0x8E, 0x92, 0xE5, + }; + + uint8_t tag[] = { + 0x01, + 0xA2, + 0xB5, + 0x78, + 0xAA, + 0x2F, + 0x41, + 0xEC, + 0x63, + 0x79, + 0xA4, + 0x4A, + 0x31, + 0xCC, + 0x01, + 0x9C, + }; + + struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); + struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); + struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); + struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); + struct aws_byte_cursor tag_cur = aws_byte_cursor_from_array(tag, sizeof(tag)); + + return s_check_multi_block_gcm(allocator, key_cur, iv_cur, data_cur, expected_cur, tag_cur, NULL); +} +AWS_TEST_CASE(gcm_NIST_gcmEncryptExtIV256_PTLen_256_Test_6, s_gcm_NIST_gcmEncryptExtIV256_PTLen_256_Test_6_fn) + +static int s_gcm_NIST_gcmEncryptExtIV256_PTLen_408_Test_8_fn(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + uint8_t iv[] = { + 0x92, + 0xF2, + 0x58, + 0x07, + 0x1D, + 0x79, + 0xAF, + 
0x3E, + 0x63, + 0x67, + 0x22, + 0x85, + }; + + uint8_t key[] = { + 0x59, 0x5F, 0x25, 0x9C, 0x55, 0xAB, 0xE0, 0x0A, 0xE0, 0x75, 0x35, 0xCA, 0x5D, 0x9B, 0x09, 0xD6, + 0xEF, 0xB9, 0xF7, 0xE9, 0xAB, 0xB6, 0x46, 0x05, 0xC3, 0x37, 0xAC, 0xBD, 0x6B, 0x14, 0xFC, 0x7E, + }; + + uint8_t data[] = { + 0xA6, 0xFE, 0xE3, 0x3E, 0xB1, 0x10, 0xA2, 0xD7, 0x69, 0xBB, 0xC5, 0x2B, 0x0F, 0x36, 0x96, 0x9C, 0x28, + 0x78, 0x74, 0xF6, 0x65, 0x68, 0x14, 0x77, 0xA2, 0x5F, 0xC4, 0xC4, 0x80, 0x15, 0xC5, 0x41, 0xFB, 0xE2, + 0x39, 0x41, 0x33, 0xBA, 0x49, 0x0A, 0x34, 0xEE, 0x2D, 0xD6, 0x7B, 0x89, 0x81, 0x77, 0x84, 0x9A, 0x91, + }; + + uint8_t expected[] = { + 0xBB, 0xCA, 0x4A, 0x9E, 0x09, 0xAE, 0x96, 0x90, 0xC0, 0xF6, 0xF8, 0xD4, 0x05, 0xE5, 0x3D, 0xCC, 0xD6, + 0x66, 0xAA, 0x9C, 0x5F, 0xA1, 0x3C, 0x87, 0x58, 0xBC, 0x30, 0xAB, 0xE1, 0xDD, 0xD1, 0xBC, 0xCE, 0x0D, + 0x36, 0xA1, 0xEA, 0xAA, 0xAF, 0xFE, 0xF2, 0x0C, 0xD3, 0xC5, 0x97, 0x0B, 0x96, 0x73, 0xF8, 0xA6, 0x5C, + }; + + uint8_t tag[] = {0x26, 0xCC, 0xEC, 0xB9, 0x97, 0x6F, 0xD6, 0xAC, 0x9C, 0x2C, 0x0F, 0x37, 0x2C, 0x52, 0xC8, 0x21}; + + struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); + struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); + struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); + struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); + struct aws_byte_cursor tag_cur = aws_byte_cursor_from_array(tag, sizeof(tag)); + + return s_check_multi_block_gcm(allocator, key_cur, iv_cur, data_cur, expected_cur, tag_cur, NULL); +} +AWS_TEST_CASE(gcm_NIST_gcmEncryptExtIV256_PTLen_408_Test_8, s_gcm_NIST_gcmEncryptExtIV256_PTLen_408_Test_8_fn) + +static int s_gcm_256_KAT_1_fn(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + uint8_t iv[] = { + 0xFB, + 0x7B, + 0x4A, + 0x82, + 0x4E, + 0x82, + 0xDA, + 0xA6, + 0xC8, + 0xBC, + 0x12, + 0x51, + }; + + uint8_t key[] = { + 0x20, 0x14, 0x2E, 0x89, 0x8C, 0xD2, 0xFD, 0x98, 0x0F, 0xBF, 0x34, 0xDE, 0x6B, 0xC8, 0x5C, 0x14, + 0xDA, 0x7D, 0x57, 0xBD, 0x28, 0xF4, 0xAA, 0x5C, 0xF1, 0x72, 0x8A, 0xB6, 0x4E, 0x84, 0x31, 0x42, + }; + + uint8_t aad[] = { + 0x16, 0x7B, 0x5C, 0x22, 0x61, 0x77, 0x73, 0x3A, 0x78, 0x2D, 0x61, 0x6D, 0x7A, 0x2D, 0x63, 0x65, + 0x6B, 0x2D, 0x61, 0x6C, 0x67, 0x5C, 0x22, 0x3A, 0x20, 0x5C, 0x22, 0x41, 0x45, 0x53, 0x2F, 0x47, + 0x43, 0x4D, 0x2F, 0x4E, 0x6F, 0x50, 0x61, 0x64, 0x64, 0x69, 0x6E, 0x67, 0x5C, 0x22, 0x7D, + }; + + uint8_t tag[] = { + 0x81, + 0xC0, + 0xE4, + 0x2B, + 0xB1, + 0x95, + 0xE2, + 0x62, + 0xCB, + 0x3B, + 0x3A, + 0x74, + 0xA0, + 0xDA, + 0xE1, + 0xC8, + }; + + struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); + struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); + struct aws_byte_cursor data_cur = {0}; + struct aws_byte_cursor expected_cur = {0}; + struct aws_byte_cursor tag_cur = aws_byte_cursor_from_array(tag, sizeof(tag)); + struct aws_byte_cursor aad_cur = aws_byte_cursor_from_array(aad, sizeof(aad)); + + return s_check_multi_block_gcm(allocator, key_cur, iv_cur, data_cur, expected_cur, tag_cur, &aad_cur); +} +AWS_TEST_CASE(gcm_256_KAT_1, s_gcm_256_KAT_1_fn) + +static int s_gcm_256_KAT_2_fn(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + uint8_t iv[] = { + 0x6B, + 0x5C, + 0xD3, + 0x70, + 0x5A, + 0x73, + 0x3C, + 0x1A, + 0xD9, + 0x43, + 0xD5, + 0x8A, + }; + + uint8_t key[] = { + 0xD2, 0x11, 0xF2, 0x78, 0xA4, 0x4E, 0xAB, 0x66, 0x6B, 0x10, 0x21, 0xF4, 0xB4, 0xF6, 0x0B, 0xA6, + 0xB7, 0x44, 0x64, 0xFA, 0x9C, 0xB7, 
0xB1, 0x34, 0x93, 0x4D, 0x78, 0x91, 0xE1, 0x47, 0x91, 0x69, + }; + + uint8_t aad[] = { + 0x16, 0x7B, 0x5C, 0x22, 0x61, 0x77, 0x73, 0x3A, 0x78, 0x2D, 0x61, 0x6D, 0x7A, 0x2D, 0x63, 0x65, + 0x6B, 0x2D, 0x61, 0x6C, 0x67, 0x5C, 0x22, 0x3A, 0x20, 0x5C, 0x22, 0x41, 0x45, 0x53, 0x2F, 0x47, + 0x43, 0x4D, 0x2F, 0x4E, 0x6F, 0x50, 0x61, 0x64, 0x64, 0x69, 0x6E, 0x67, 0x5C, 0x22, 0x7D, + }; + + uint8_t data[] = { + 0x16, 0x7B, 0x5C, 0x22, 0x61, 0x77, 0x73, 0x3A, 0x78, 0x2D, 0x61, 0x6D, 0x7A, 0x2D, 0x63, 0x65, + 0x6B, 0x2D, 0x61, 0x6C, 0x67, 0x5C, 0x22, 0x3A, 0x20, 0x5C, 0x22, 0x41, 0x45, 0x53, 0x2F, 0x47, + 0x43, 0x4D, 0x2F, 0x4E, 0x6F, 0x50, 0x61, 0x64, 0x64, 0x69, 0x6E, 0x67, 0x5C, 0x22, 0x7D, + }; + + uint8_t expected[] = { + 0x4C, 0x25, 0xAB, 0xD6, 0x6D, 0x3A, 0x1B, 0xCC, 0xE7, 0x94, 0xAC, 0xAA, 0xF4, 0xCE, 0xFD, 0xF6, + 0xD2, 0x55, 0x2F, 0x4A, 0x82, 0xC5, 0x0A, 0x98, 0xCB, 0x15, 0xB4, 0x81, 0x2F, 0xF5, 0x57, 0xAB, + 0xE5, 0x64, 0xA9, 0xCE, 0xFF, 0x15, 0xF3, 0x2D, 0xCF, 0x5A, 0x5A, 0xA7, 0x89, 0x48, 0x88, + }; + + uint8_t tag[] = { + 0x03, + 0xED, + 0xE7, + 0x1E, + 0xC9, + 0x52, + 0xE6, + 0x5A, + 0xE7, + 0xB4, + 0xB8, + 0x5C, + 0xFE, + 0xC7, + 0xD3, + 0x04, + }; + + struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); + struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); + struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); + struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); + struct aws_byte_cursor tag_cur = aws_byte_cursor_from_array(tag, sizeof(tag)); + struct aws_byte_cursor aad_cur = aws_byte_cursor_from_array(aad, sizeof(aad)); + + return s_check_multi_block_gcm(allocator, key_cur, iv_cur, data_cur, expected_cur, tag_cur, &aad_cur); +} +AWS_TEST_CASE(gcm_256_KAT_2, s_gcm_256_KAT_2_fn) + +static int s_gcm_256_KAT_3_fn(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + uint8_t iv[] = { + 0x5F, + 0x08, + 0xEF, + 0xBF, + 0xB7, + 0xBF, + 0x5B, + 0xA3, + 0x65, + 0xD9, + 0xEB, + 0x1D, + }; + + uint8_t key[] = { + 0xCF, 0xE8, 0xBF, 0xE6, 0x1B, 0x89, 0xAF, 0x53, 0xD2, 0xBE, 0xCE, 0x74, 0x4D, 0x27, 0xB7, 0x8C, + 0x9E, 0x4D, 0x74, 0xD0, 0x28, 0xCE, 0x88, 0xED, 0x10, 0xA4, 0x22, 0x28, 0x5B, 0x12, 0x01, 0xC9, + }; + + uint8_t data[] = { + 0x16, 0x7B, 0x5C, 0x22, 0x61, 0x77, 0x73, 0x3A, 0x78, 0x2D, 0x61, 0x6D, 0x7A, 0x2D, 0x63, 0x65, + 0x6B, 0x2D, 0x61, 0x6C, 0x67, 0x5C, 0x22, 0x3A, 0x20, 0x5C, 0x22, 0x41, 0x45, 0x53, 0x2F, 0x47, + 0x43, 0x4D, 0x2F, 0x4E, 0x6F, 0x50, 0x61, 0x64, 0x64, 0x69, 0x6E, 0x67, 0x5C, 0x22, 0x7D, + }; + + uint8_t expected[] = { + 0x0A, 0x7E, 0x82, 0xF1, 0xE5, 0xC7, 0x6C, 0x69, 0x67, 0x96, 0x71, 0xEE, 0xAE, 0xE4, 0x55, 0x93, + 0x6F, 0x2C, 0x4F, 0xCC, 0xD9, 0xDD, 0xF1, 0xFA, 0xA2, 0x70, 0x75, 0xE2, 0x04, 0x06, 0x44, 0x93, + 0x89, 0x20, 0xC5, 0xD1, 0x6C, 0x69, 0xE4, 0xD9, 0x33, 0x75, 0x48, 0x7B, 0x9A, 0x80, 0xD4, + }; + + uint8_t tag[] = { + 0x04, + 0x34, + 0x7D, + 0x0C, + 0x5B, + 0x0E, + 0x0D, + 0xE8, + 0x9E, + 0x03, + 0x3D, + 0x04, + 0xD0, + 0x49, + 0x3D, + 0xCA, + }; + + struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key)); + struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv)); + struct aws_byte_cursor data_cur = aws_byte_cursor_from_array(data, sizeof(data)); + struct aws_byte_cursor expected_cur = aws_byte_cursor_from_array(expected, sizeof(expected)); + struct aws_byte_cursor tag_cur = aws_byte_cursor_from_array(tag, sizeof(tag)); + struct aws_byte_cursor aad_cur = {0}; + + return 
s_check_multi_block_gcm(allocator, key_cur, iv_cur, data_cur, expected_cur, tag_cur, &aad_cur); +} +AWS_TEST_CASE(gcm_256_KAT_3, s_gcm_256_KAT_3_fn) + +static int s_aes_gcm_test_with_generated_key_iv_fn(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + struct aws_symmetric_cipher *cipher = aws_aes_gcm_256_new(allocator, NULL, NULL, NULL, NULL); + ASSERT_NOT_NULL(cipher); + + struct aws_byte_buf encrypted_buf; + aws_byte_buf_init(&encrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + + struct aws_byte_cursor input = aws_byte_cursor_from_c_str(TEST_ENCRYPTION_STRING); + ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, input, &encrypted_buf)); + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &encrypted_buf)); + + ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); + + struct aws_byte_buf decrypted_buf; + aws_byte_buf_init(&decrypted_buf, allocator, AWS_AES_256_CIPHER_BLOCK_SIZE); + struct aws_byte_cursor encryted_cur = aws_byte_cursor_from_buf(&encrypted_buf); + ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encryted_cur, &decrypted_buf)); + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); + + ASSERT_BIN_ARRAYS_EQUALS(input.ptr, input.len, decrypted_buf.buffer, decrypted_buf.len); + + aws_byte_buf_clean_up(&decrypted_buf); + aws_byte_buf_clean_up(&encrypted_buf); + aws_symmetric_cipher_destroy(cipher); + return AWS_OP_SUCCESS; +} +AWS_TEST_CASE(gcm_test_with_generated_key_iv, s_aes_gcm_test_with_generated_key_iv_fn) + +static int s_test_aes_keywrap_RFC3394_256BitKey256CekTestVector(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + uint8_t key[] = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }; + size_t key_length = sizeof(key); + + uint8_t input[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f}; + size_t input_length = sizeof(input); + + uint8_t expected_output[] = {0x28, 0xC9, 0xF4, 0x04, 0xC4, 0xB8, 0x10, 0xF4, 0xCB, 0xCC, 0xB3, 0x5C, 0xFB, 0x87, + 0xF8, 0x26, 0x3F, 0x57, 0x86, 0xE2, 0xD8, 0x0E, 0xD3, 0x26, 0xCB, 0xC7, 0xF0, 0xE7, + 0x1A, 0x99, 0xF4, 0x3B, 0xFB, 0x98, 0x8B, 0x9B, 0x7A, 0x02, 0xDD, 0x21}; + size_t expected_output_length = sizeof(expected_output); + + struct aws_byte_cursor input_cur = aws_byte_cursor_from_array(input, input_length); + struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, key_length); + struct aws_byte_buf output_buf; + ASSERT_SUCCESS(aws_byte_buf_init(&output_buf, allocator, expected_output_length)); + + struct aws_symmetric_cipher *cipher = aws_aes_keywrap_256_new(allocator, &key_cur); + ASSERT_NOT_NULL(cipher); + + ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, input_cur, &output_buf)); + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &output_buf)); + ASSERT_BIN_ARRAYS_EQUALS(expected_output, expected_output_length, output_buf.buffer, output_buf.len); + + ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); + + struct aws_byte_buf decrypted_buf; + ASSERT_SUCCESS(aws_byte_buf_init(&decrypted_buf, allocator, input_length)); + + struct aws_byte_cursor encrypted_data = aws_byte_cursor_from_buf(&output_buf); + ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encrypted_data, &decrypted_buf)); + 
ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); + ASSERT_BIN_ARRAYS_EQUALS(input, input_length, decrypted_buf.buffer, decrypted_buf.len); + + aws_symmetric_cipher_destroy(cipher); + aws_byte_buf_clean_up(&output_buf); + aws_byte_buf_clean_up(&decrypted_buf); + + return AWS_OP_SUCCESS; +} + +AWS_TEST_CASE(aes_keywrap_RFC3394_256BitKey256CekTestVector, s_test_aes_keywrap_RFC3394_256BitKey256CekTestVector); + +static int s_test_Rfc3394_256BitKey_TestIntegrityCheckFailed(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + uint8_t input[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}; + size_t input_length = sizeof(input); + + uint8_t key[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F}; + size_t key_length = sizeof(key); + + uint8_t expected_output[] = {0x28, 0xC9, 0xF4, 0x04, 0xC4, 0xB8, 0x10, 0xF4, 0xCB, 0xCC, 0xB3, 0x5C, 0xFB, 0x87, + 0xF8, 0x26, 0x3F, 0x57, 0x86, 0xE2, 0xD8, 0x0E, 0xD3, 0x26, 0xCB, 0xC7, 0xF0, 0xE7, + 0x1A, 0x99, 0xF4, 0x3B, 0xFB, 0x98, 0x8B, 0x9B, 0x7A, 0x02, 0xDD, 0x21}; + size_t expected_output_length = sizeof(expected_output); + + struct aws_byte_cursor input_cur = aws_byte_cursor_from_array(input, input_length); + struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, key_length); + struct aws_byte_buf output_buf; + ASSERT_SUCCESS(aws_byte_buf_init(&output_buf, allocator, expected_output_length)); + + struct aws_symmetric_cipher *cipher = aws_aes_keywrap_256_new(allocator, &key_cur); + ASSERT_NOT_NULL(cipher); + + ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, input_cur, &output_buf)); + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &output_buf)); + ASSERT_BIN_ARRAYS_EQUALS(expected_output, expected_output_length, output_buf.buffer, output_buf.len); + + /* Mutate one byte of the encrypted data */ + output_buf.buffer[0] ^= 0x01; + + ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); + + struct aws_byte_buf decrypted_buf; + ASSERT_SUCCESS(aws_byte_buf_init(&decrypted_buf, allocator, input_length)); + + struct aws_byte_cursor encrypted_data = aws_byte_cursor_from_buf(&output_buf); + ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encrypted_data, &decrypted_buf)); + ASSERT_FAILS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); + ASSERT_FALSE(aws_symmetric_cipher_is_good(cipher)); + + aws_symmetric_cipher_destroy(cipher); + aws_byte_buf_clean_up(&output_buf); + aws_byte_buf_clean_up(&decrypted_buf); + + return AWS_OP_SUCCESS; +} + +AWS_TEST_CASE( + aes_keywrap_Rfc3394_256BitKey_TestIntegrityCheckFailed, + s_test_Rfc3394_256BitKey_TestIntegrityCheckFailed); + +static int s_test_RFC3394_256BitKeyTestBadPayload(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + uint8_t input[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}; + size_t input_length = sizeof(input); + + uint8_t key[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F}; + size_t key_length = sizeof(key); + + uint8_t 
expected_output[] = {0x28, 0xC9, 0xF4, 0x04, 0xC4, 0xB8, 0x10, 0xF4, 0xCB, 0xCC, 0xB3, 0x5C, 0xFB, 0x87, + 0xF8, 0x26, 0x3F, 0x57, 0x86, 0xE2, 0xD8, 0x0E, 0xD3, 0x26, 0xCB, 0xC7, 0xF0, 0xE7, + 0x1A, 0x99, 0xF4, 0x3B, 0xFB, 0x98, 0x8B, 0x9B, 0x7A, 0x02, 0xDD, 0x21}; + size_t expected_output_length = sizeof(expected_output); + + struct aws_byte_cursor input_cur = aws_byte_cursor_from_array(input, input_length); + struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, key_length); + struct aws_byte_buf output_buf; + ASSERT_SUCCESS(aws_byte_buf_init(&output_buf, allocator, expected_output_length)); + + struct aws_symmetric_cipher *cipher = aws_aes_keywrap_256_new(allocator, &key_cur); + ASSERT_NOT_NULL(cipher); + + ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, input_cur, &output_buf)); + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &output_buf)); + ASSERT_BIN_ARRAYS_EQUALS(expected_output, expected_output_length, output_buf.buffer, output_buf.len); + + ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); + + struct aws_byte_buf decrypted_buf; + ASSERT_SUCCESS(aws_byte_buf_init(&decrypted_buf, allocator, input_length)); + + struct aws_byte_cursor encrypted_data = aws_byte_cursor_from_buf(&output_buf); + ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encrypted_data, &decrypted_buf)); + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf)); + ASSERT_BIN_ARRAYS_EQUALS(input, input_length, decrypted_buf.buffer, decrypted_buf.len); + + aws_symmetric_cipher_destroy(cipher); + aws_byte_buf_clean_up(&output_buf); + aws_byte_buf_clean_up(&decrypted_buf); + + return AWS_OP_SUCCESS; +} + +AWS_TEST_CASE(aes_keywrap_RFC3394_256BitKeyTestBadPayload, s_test_RFC3394_256BitKeyTestBadPayload); + +static int s_test_RFC3394_256BitKey128BitCekTestVector(struct aws_allocator *allocator, void *ctx) { + (void)ctx; + + uint8_t input[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF}; + size_t input_length = sizeof(input); + + uint8_t key[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F}; + size_t key_length = sizeof(key); + + uint8_t expected_output[] = {0x64, 0xE8, 0xC3, 0xF9, 0xCE, 0x0F, 0x5B, 0xA2, 0x63, 0xE9, 0x77, 0x79, + 0x05, 0x81, 0x8A, 0x2A, 0x93, 0xC8, 0x19, 0x1E, 0x7D, 0x6E, 0x8A, 0xE7}; + size_t expected_output_length = sizeof(expected_output); + + struct aws_byte_cursor input_cur = aws_byte_cursor_from_array(input, input_length); + struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, key_length); + struct aws_byte_buf output_buf; + ASSERT_SUCCESS(aws_byte_buf_init(&output_buf, allocator, expected_output_length)); + + struct aws_symmetric_cipher *cipher = aws_aes_keywrap_256_new(allocator, &key_cur); + ASSERT_NOT_NULL(cipher); + + ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, input_cur, &output_buf)); + ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &output_buf)); + ASSERT_BIN_ARRAYS_EQUALS(expected_output, expected_output_length, output_buf.buffer, output_buf.len); + + ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher)); + ASSERT_TRUE(aws_symmetric_cipher_is_good(cipher)); + + struct aws_byte_buf decrypted_buf; + ASSERT_SUCCESS(aws_byte_buf_init(&decrypted_buf, allocator, input_length)); + + struct aws_byte_cursor encrypted_data = aws_byte_cursor_from_buf(&output_buf); + 
+    ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encrypted_data, &decrypted_buf));
+    ASSERT_SUCCESS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf));
+    ASSERT_BIN_ARRAYS_EQUALS(input, input_length, decrypted_buf.buffer, decrypted_buf.len);
+
+    aws_symmetric_cipher_destroy(cipher);
+    aws_byte_buf_clean_up(&output_buf);
+    aws_byte_buf_clean_up(&decrypted_buf);
+
+    return AWS_OP_SUCCESS;
+}
+
+AWS_TEST_CASE(aes_keywrap_RFC3394_256BitKey128BitCekTestVector, s_test_RFC3394_256BitKey128BitCekTestVector);
+
+static int s_test_RFC3394_256BitKey128BitCekIntegrityCheckFailedTestVector(struct aws_allocator *allocator, void *ctx) {
+    (void)ctx;
+
+    uint8_t input[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF};
+    size_t input_length = sizeof(input);
+
+    uint8_t key[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+                     0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};
+    size_t key_length = sizeof(key);
+
+    uint8_t expected_output[] = {0x64, 0xE8, 0xC3, 0xF9, 0xCE, 0x0F, 0x5B, 0xA2, 0x63, 0xE9, 0x77, 0x79,
+                                 0x05, 0x81, 0x8A, 0x2A, 0x93, 0xC8, 0x19, 0x1E, 0x7D, 0x6E, 0x8A, 0xE7};
+    size_t expected_output_length = sizeof(expected_output);
+
+    struct aws_byte_cursor input_cur = aws_byte_cursor_from_array(input, input_length);
+    struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, key_length);
+    struct aws_byte_buf output_buf;
+    ASSERT_SUCCESS(aws_byte_buf_init(&output_buf, allocator, expected_output_length));
+
+    struct aws_symmetric_cipher *cipher = aws_aes_keywrap_256_new(allocator, &key_cur);
+    ASSERT_NOT_NULL(cipher);
+
+    ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, input_cur, &output_buf));
+    ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &output_buf));
+    ASSERT_BIN_ARRAYS_EQUALS(expected_output, expected_output_length, output_buf.buffer, output_buf.len);
+
+    ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher));
+
+    struct aws_byte_buf decrypted_buf;
+    ASSERT_SUCCESS(aws_byte_buf_init(&decrypted_buf, allocator, input_length));
+
+    struct aws_byte_cursor encrypted_data = aws_byte_cursor_from_buf(&output_buf);
+    /* Corrupt the leading block of the wrapped data so the RFC 3394 integrity check fails on unwrap. */
+    encrypted_data.ptr[1] = encrypted_data.ptr[1] + encrypted_data.ptr[2];
+    ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encrypted_data, &decrypted_buf));
+    ASSERT_FAILS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf));
+    ASSERT_FALSE(aws_symmetric_cipher_is_good(cipher));
+
+    aws_symmetric_cipher_destroy(cipher);
+    aws_byte_buf_clean_up(&output_buf);
+    aws_byte_buf_clean_up(&decrypted_buf);
+
+    return AWS_OP_SUCCESS;
+}
+
+AWS_TEST_CASE(
+    aes_keywrap_RFC3394_256BitKey128BitCekIntegrityCheckFailedTestVector,
+    s_test_RFC3394_256BitKey128BitCekIntegrityCheckFailedTestVector);
+
+static int s_test_RFC3394_256BitKey128BitCekPayloadCheckFailedTestVector(struct aws_allocator *allocator, void *ctx) {
+    (void)ctx;
+
+    uint8_t input[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF};
+    size_t input_length = sizeof(input);
+
+    uint8_t key[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+                     0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};
+    size_t key_length = sizeof(key);
+
+    uint8_t expected_output[] = {0x64, 0xE8, 0xC3, 0xF9, 0xCE, 0x0F, 0x5B, 0xA2, 0x63, 0xE9, 0x77, 0x79,
+                                 0x05, 0x81, 0x8A, 0x2A, 0x93, 0xC8, 0x19, 0x1E, 0x7D, 0x6E, 0x8A, 0xE7};
+    size_t expected_output_length = sizeof(expected_output);
+
+    struct aws_byte_cursor input_cur = aws_byte_cursor_from_array(input, input_length);
+    struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, key_length);
+    struct aws_byte_buf output_buf;
+    ASSERT_SUCCESS(aws_byte_buf_init(&output_buf, allocator, expected_output_length));
+
+    struct aws_symmetric_cipher *cipher = aws_aes_keywrap_256_new(allocator, &key_cur);
+    ASSERT_NOT_NULL(cipher);
+
+    ASSERT_SUCCESS(aws_symmetric_cipher_encrypt(cipher, input_cur, &output_buf));
+    ASSERT_SUCCESS(aws_symmetric_cipher_finalize_encryption(cipher, &output_buf));
+    ASSERT_BIN_ARRAYS_EQUALS(expected_output, expected_output_length, output_buf.buffer, output_buf.len);
+
+    ASSERT_SUCCESS(aws_symmetric_cipher_reset(cipher));
+
+    struct aws_byte_buf decrypted_buf;
+    ASSERT_SUCCESS(aws_byte_buf_init(&decrypted_buf, allocator, input_length));
+
+    struct aws_byte_cursor encrypted_data = aws_byte_cursor_from_buf(&output_buf);
+    /* Corrupt a byte in the wrapped payload so the unwrap must fail. */
+    encrypted_data.ptr[14] = encrypted_data.ptr[13] + encrypted_data.ptr[14];
+    ASSERT_SUCCESS(aws_symmetric_cipher_decrypt(cipher, encrypted_data, &decrypted_buf));
+    ASSERT_FAILS(aws_symmetric_cipher_finalize_decryption(cipher, &decrypted_buf));
+    ASSERT_FALSE(aws_symmetric_cipher_is_good(cipher));
+
+    aws_symmetric_cipher_destroy(cipher);
+    aws_byte_buf_clean_up(&output_buf);
+    aws_byte_buf_clean_up(&decrypted_buf);
+
+    return AWS_OP_SUCCESS;
+}
+
+AWS_TEST_CASE(
+    aes_keywrap_RFC3394_256BitKey128BitCekPayloadCheckFailedTestVector,
+    s_test_RFC3394_256BitKey128BitCekPayloadCheckFailedTestVector);
+
+static int s_test_input_too_large_fn(struct aws_allocator *allocator, void *ctx) {
+    (void)ctx;
+
+    uint8_t iv[AWS_AES_256_CIPHER_BLOCK_SIZE] = {0};
+    uint8_t key[AWS_AES_256_KEY_BYTE_LEN] = {0};
+
+    struct aws_byte_cursor key_cur = aws_byte_cursor_from_array(key, sizeof(key));
+    struct aws_byte_cursor iv_cur = aws_byte_cursor_from_array(iv, sizeof(iv));
+
+    struct aws_symmetric_cipher *cipher = aws_aes_cbc_256_new(allocator, &key_cur, &iv_cur);
+    ASSERT_NOT_NULL(cipher);
+
+    /* A cursor whose claimed length is far too large for the algorithm; the bytes behind ptr should
+     * never be read because the size check rejects the request first. */
+    struct aws_byte_cursor invalid_cur = {
+        .ptr = key,
+        .len = INT_MAX,
+    };
+
+    ASSERT_ERROR(AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM, aws_symmetric_cipher_encrypt(cipher, invalid_cur, NULL));
+    /* the cipher should still be usable after rejecting the oversized input. */
+    ASSERT_TRUE(aws_symmetric_cipher_is_good(cipher));
+    ASSERT_ERROR(AWS_ERROR_CAL_BUFFER_TOO_LARGE_FOR_ALGORITHM, aws_symmetric_cipher_decrypt(cipher, invalid_cur, NULL));
+    /* the cipher should still be usable after rejecting the oversized input. */
+    ASSERT_TRUE(aws_symmetric_cipher_is_good(cipher));
+
+    aws_symmetric_cipher_destroy(cipher);
+    return AWS_OP_SUCCESS;
+}
+AWS_TEST_CASE(aes_test_input_too_large, s_test_input_too_large_fn)
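
For reference, the wrap/unwrap round trip that the keywrap tests above exercise looks roughly like this when driven outside the test harness. This is a minimal sketch and not part of the patch: it assumes aws_default_allocator() and the existing aws_cal_library_init()/aws_cal_library_clean_up() entry points from aws-c-cal, and it sizes the wrapped buffer at input length plus 8 bytes because RFC 3394 wrapping adds one 8-byte block.

#include <aws/cal/cal.h>
#include <aws/cal/symmetric_cipher.h>

#include <aws/common/byte_buf.h>

#include <stdio.h>

int main(void) {
    struct aws_allocator *allocator = aws_default_allocator();
    aws_cal_library_init(allocator);

    /* 256-bit key-encryption key and a 256-bit key to wrap (all zeros purely for illustration). */
    uint8_t kek[32] = {0};
    uint8_t key_to_wrap[32] = {0};

    struct aws_byte_cursor kek_cur = aws_byte_cursor_from_array(kek, sizeof(kek));
    struct aws_byte_cursor plaintext_cur = aws_byte_cursor_from_array(key_to_wrap, sizeof(key_to_wrap));

    struct aws_symmetric_cipher *cipher = aws_aes_keywrap_256_new(allocator, &kek_cur);

    /* RFC 3394 wrapping adds one 8-byte block to the payload. */
    struct aws_byte_buf wrapped;
    aws_byte_buf_init(&wrapped, allocator, sizeof(key_to_wrap) + 8);

    if (aws_symmetric_cipher_encrypt(cipher, plaintext_cur, &wrapped) ||
        aws_symmetric_cipher_finalize_encryption(cipher, &wrapped)) {
        fprintf(stderr, "wrap failed\n");
    }

    /* Reset the same cipher object and unwrap; finalize reports failure if the integrity check does not pass. */
    aws_symmetric_cipher_reset(cipher);

    struct aws_byte_buf unwrapped;
    aws_byte_buf_init(&unwrapped, allocator, sizeof(key_to_wrap));

    struct aws_byte_cursor wrapped_cur = aws_byte_cursor_from_buf(&wrapped);
    if (aws_symmetric_cipher_decrypt(cipher, wrapped_cur, &unwrapped) ||
        aws_symmetric_cipher_finalize_decryption(cipher, &unwrapped)) {
        fprintf(stderr, "unwrap failed\n");
    }

    aws_byte_buf_clean_up(&unwrapped);
    aws_byte_buf_clean_up(&wrapped);
    aws_symmetric_cipher_destroy(cipher);
    aws_cal_library_clean_up();
    return 0;
}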