| /* |
| * Copyright 2017 The OpenSSL Project Authors. All Rights Reserved. |
| * |
| * Licensed under the OpenSSL license (the "License"). You may not use |
| * this file except in compliance with the License. You can obtain a copy |
| * in the file LICENSE in the source distribution or at |
| * https://www.openssl.org/source/license.html |
| */ |
| |
| /* ==================================================================== |
| * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved. |
| */ |
| |
| #include <assert.h> |
| #include <openssl/e_os2.h> |
| #include <string.h> |
| #include "internal/aria.h" |
| |
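/* ARIA S-box SB1 (RFC 5794); identical to the AES S-box */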
| static const unsigned char sb1[256] = { |
| 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, |
| 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, |
| 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, |
| 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, |
| 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, |
| 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, |
| 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, |
| 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, |
| 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, |
| 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, |
| 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, |
| 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, |
| 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, |
| 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, |
| 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, |
| 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, |
| 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, |
| 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, |
| 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, |
| 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, |
| 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, |
| 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, |
| 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, |
| 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, |
| 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, |
| 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, |
| 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, |
| 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, |
| 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, |
| 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, |
| 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, |
| 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 |
| }; |
| |
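/* ARIA S-box SB2 (RFC 5794) */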
| static const unsigned char sb2[256] = { |
| 0xe2, 0x4e, 0x54, 0xfc, 0x94, 0xc2, 0x4a, 0xcc, |
| 0x62, 0x0d, 0x6a, 0x46, 0x3c, 0x4d, 0x8b, 0xd1, |
| 0x5e, 0xfa, 0x64, 0xcb, 0xb4, 0x97, 0xbe, 0x2b, |
| 0xbc, 0x77, 0x2e, 0x03, 0xd3, 0x19, 0x59, 0xc1, |
| 0x1d, 0x06, 0x41, 0x6b, 0x55, 0xf0, 0x99, 0x69, |
| 0xea, 0x9c, 0x18, 0xae, 0x63, 0xdf, 0xe7, 0xbb, |
| 0x00, 0x73, 0x66, 0xfb, 0x96, 0x4c, 0x85, 0xe4, |
| 0x3a, 0x09, 0x45, 0xaa, 0x0f, 0xee, 0x10, 0xeb, |
| 0x2d, 0x7f, 0xf4, 0x29, 0xac, 0xcf, 0xad, 0x91, |
| 0x8d, 0x78, 0xc8, 0x95, 0xf9, 0x2f, 0xce, 0xcd, |
| 0x08, 0x7a, 0x88, 0x38, 0x5c, 0x83, 0x2a, 0x28, |
| 0x47, 0xdb, 0xb8, 0xc7, 0x93, 0xa4, 0x12, 0x53, |
| 0xff, 0x87, 0x0e, 0x31, 0x36, 0x21, 0x58, 0x48, |
| 0x01, 0x8e, 0x37, 0x74, 0x32, 0xca, 0xe9, 0xb1, |
| 0xb7, 0xab, 0x0c, 0xd7, 0xc4, 0x56, 0x42, 0x26, |
| 0x07, 0x98, 0x60, 0xd9, 0xb6, 0xb9, 0x11, 0x40, |
| 0xec, 0x20, 0x8c, 0xbd, 0xa0, 0xc9, 0x84, 0x04, |
| 0x49, 0x23, 0xf1, 0x4f, 0x50, 0x1f, 0x13, 0xdc, |
| 0xd8, 0xc0, 0x9e, 0x57, 0xe3, 0xc3, 0x7b, 0x65, |
| 0x3b, 0x02, 0x8f, 0x3e, 0xe8, 0x25, 0x92, 0xe5, |
| 0x15, 0xdd, 0xfd, 0x17, 0xa9, 0xbf, 0xd4, 0x9a, |
| 0x7e, 0xc5, 0x39, 0x67, 0xfe, 0x76, 0x9d, 0x43, |
| 0xa7, 0xe1, 0xd0, 0xf5, 0x68, 0xf2, 0x1b, 0x34, |
| 0x70, 0x05, 0xa3, 0x8a, 0xd5, 0x79, 0x86, 0xa8, |
| 0x30, 0xc6, 0x51, 0x4b, 0x1e, 0xa6, 0x27, 0xf6, |
| 0x35, 0xd2, 0x6e, 0x24, 0x16, 0x82, 0x5f, 0xda, |
| 0xe6, 0x75, 0xa2, 0xef, 0x2c, 0xb2, 0x1c, 0x9f, |
| 0x5d, 0x6f, 0x80, 0x0a, 0x72, 0x44, 0x9b, 0x6c, |
| 0x90, 0x0b, 0x5b, 0x33, 0x7d, 0x5a, 0x52, 0xf3, |
| 0x61, 0xa1, 0xf7, 0xb0, 0xd6, 0x3f, 0x7c, 0x6d, |
| 0xed, 0x14, 0xe0, 0xa5, 0x3d, 0x22, 0xb3, 0xf8, |
| 0x89, 0xde, 0x71, 0x1a, 0xaf, 0xba, 0xb5, 0x81 |
| }; |
| |
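/* ARIA S-box SB3 (RFC 5794); the inverse of SB1, i.e. the AES inverse S-box */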
| static const unsigned char sb3[256] = { |
| 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, |
| 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, |
| 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, |
| 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, |
| 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, |
| 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, |
| 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, |
| 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, |
| 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, |
| 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, |
| 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, |
| 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, |
| 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, |
| 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, |
| 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, |
| 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, |
| 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, |
| 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, |
| 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, |
| 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, |
| 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, |
| 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, |
| 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, |
| 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, |
| 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, |
| 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, |
| 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, |
| 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, |
| 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, |
| 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, |
| 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, |
| 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d |
| }; |
| |
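/* ARIA S-box SB4 (RFC 5794); the inverse of SB2 */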
| static const unsigned char sb4[256] = { |
| 0x30, 0x68, 0x99, 0x1b, 0x87, 0xb9, 0x21, 0x78, |
| 0x50, 0x39, 0xdb, 0xe1, 0x72, 0x09, 0x62, 0x3c, |
| 0x3e, 0x7e, 0x5e, 0x8e, 0xf1, 0xa0, 0xcc, 0xa3, |
| 0x2a, 0x1d, 0xfb, 0xb6, 0xd6, 0x20, 0xc4, 0x8d, |
| 0x81, 0x65, 0xf5, 0x89, 0xcb, 0x9d, 0x77, 0xc6, |
| 0x57, 0x43, 0x56, 0x17, 0xd4, 0x40, 0x1a, 0x4d, |
| 0xc0, 0x63, 0x6c, 0xe3, 0xb7, 0xc8, 0x64, 0x6a, |
| 0x53, 0xaa, 0x38, 0x98, 0x0c, 0xf4, 0x9b, 0xed, |
| 0x7f, 0x22, 0x76, 0xaf, 0xdd, 0x3a, 0x0b, 0x58, |
| 0x67, 0x88, 0x06, 0xc3, 0x35, 0x0d, 0x01, 0x8b, |
| 0x8c, 0xc2, 0xe6, 0x5f, 0x02, 0x24, 0x75, 0x93, |
| 0x66, 0x1e, 0xe5, 0xe2, 0x54, 0xd8, 0x10, 0xce, |
| 0x7a, 0xe8, 0x08, 0x2c, 0x12, 0x97, 0x32, 0xab, |
| 0xb4, 0x27, 0x0a, 0x23, 0xdf, 0xef, 0xca, 0xd9, |
| 0xb8, 0xfa, 0xdc, 0x31, 0x6b, 0xd1, 0xad, 0x19, |
| 0x49, 0xbd, 0x51, 0x96, 0xee, 0xe4, 0xa8, 0x41, |
| 0xda, 0xff, 0xcd, 0x55, 0x86, 0x36, 0xbe, 0x61, |
| 0x52, 0xf8, 0xbb, 0x0e, 0x82, 0x48, 0x69, 0x9a, |
| 0xe0, 0x47, 0x9e, 0x5c, 0x04, 0x4b, 0x34, 0x15, |
| 0x79, 0x26, 0xa7, 0xde, 0x29, 0xae, 0x92, 0xd7, |
| 0x84, 0xe9, 0xd2, 0xba, 0x5d, 0xf3, 0xc5, 0xb0, |
| 0xbf, 0xa4, 0x3b, 0x71, 0x44, 0x46, 0x2b, 0xfc, |
| 0xeb, 0x6f, 0xd5, 0xf6, 0x14, 0xfe, 0x7c, 0x70, |
| 0x5a, 0x7d, 0xfd, 0x2f, 0x18, 0x83, 0x16, 0xa5, |
| 0x91, 0x1f, 0x05, 0x95, 0x74, 0xa9, 0xc1, 0x5b, |
| 0x4a, 0x85, 0x6d, 0x13, 0x07, 0x4f, 0x4e, 0x45, |
| 0xb2, 0x0f, 0xc9, 0x1c, 0xa6, 0xbc, 0xec, 0x73, |
| 0x90, 0x7b, 0xcf, 0x59, 0x8f, 0xa1, 0xf9, 0x2d, |
| 0xf2, 0xb1, 0x00, 0x94, 0x37, 0x9f, 0xd0, 0x2e, |
| 0x9c, 0x6e, 0x28, 0x3f, 0x80, 0xf0, 0x3d, 0xd3, |
| 0x25, 0x8a, 0xb5, 0xe7, 0x42, 0xb3, 0xc7, 0xea, |
| 0xf7, 0x4c, 0x11, 0x33, 0x03, 0xa2, 0xac, 0x60 |
| }; |
| |
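/*
 * Key schedule constants CK1, CK2 and CK3 (RFC 5794), derived from the
 * fractional part of 1/pi.
 */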
| static const ARIA_u128 c1 = { |
| 0x51, 0x7c, 0xc1, 0xb7, 0x27, 0x22, 0x0a, 0x94, |
| 0xfe, 0x13, 0xab, 0xe8, 0xfa, 0x9a, 0x6e, 0xe0 |
| }; |
| |
| static const ARIA_u128 c2 = { |
| 0x6d, 0xb1, 0x4a, 0xcc, 0x9e, 0x21, 0xc8, 0x20, |
| 0xff, 0x28, 0xb1, 0xd5, 0xef, 0x5d, 0xe2, 0xb0 |
| }; |
| |
| static const ARIA_u128 c3 = { |
| 0xdb, 0x92, 0x37, 0x1d, 0x21, 0x26, 0xe9, 0x70, |
| 0x03, 0x24, 0x97, 0x75, 0x04, 0xe8, 0xc9, 0x0e |
| }; |
| |
| /* |
| * Exclusive or two 128 bit values into the result. |
| * It is safe for the result to be the same as the either input. |
| */ |
| static void xor128(ARIA_u128 o, const ARIA_u128 x, const ARIA_u128 y) |
| { |
| int i; |
| |
| for (i = 0; i < ARIA_BLOCK_SIZE; i++) |
| o[i] = x[i] ^ y[i]; |
| } |
| |
| /* |
| * Generalised circular rotate right and exclusive or function. |
| * It is safe for the output to overlap either input. |
| */ |
| static ossl_inline void rotnr(unsigned int n, ARIA_u128 o, const ARIA_u128 xor, |
| const ARIA_u128 z) |
| { |
| const unsigned int bytes = n / 8, bits = n % 8; |
| unsigned int i; |
| ARIA_u128 t; |
| |
| for (i = 0; i < ARIA_BLOCK_SIZE; i++) |
| t[(i + bytes) % ARIA_BLOCK_SIZE] = z[i]; |
| for (i = 0; i < ARIA_BLOCK_SIZE; i++) |
| o[i] = ((t[i] >> bits) | |
| (t[i ? i - 1 : ARIA_BLOCK_SIZE - 1] << (8 - bits))) ^ xor[i]; |
| } |
| |
| /* |
| * Circular rotate 19 bits right and xor. |
| * It is safe for the output to overlap either input. |
| */ |
| static void rot19r(ARIA_u128 o, const ARIA_u128 xor, const ARIA_u128 z) |
| { |
| rotnr(19, o, xor, z); |
| } |
| |
| /* |
| * Circular rotate 31 bits right and xor. |
| * It is safe for the output to overlap either input. |
| */ |
| static void rot31r(ARIA_u128 o, const ARIA_u128 xor, const ARIA_u128 z) |
| { |
| rotnr(31, o, xor, z); |
| } |
| |
| /* |
| * Circular rotate 61 bits left and xor. |
| * It is safe for the output to overlap either input. |
| */ |
| static void rot61l(ARIA_u128 o, const ARIA_u128 xor, const ARIA_u128 z) |
| { |
| rotnr(8 * ARIA_BLOCK_SIZE - 61, o, xor, z); |
| } |
| |
| /* |
| * Circular rotate 31 bits left and xor. |
| * It is safe for the output to overlap either input. |
| */ |
| static void rot31l(ARIA_u128 o, const ARIA_u128 xor, const ARIA_u128 z) |
| { |
| rotnr(8 * ARIA_BLOCK_SIZE - 31, o, xor, z); |
| } |
| |
| /* |
| * Circular rotate 19 bits left and xor. |
| * It is safe for the output to overlap either input. |
| */ |
| static void rot19l(ARIA_u128 o, const ARIA_u128 xor, const ARIA_u128 z) |
| { |
| rotnr(8 * ARIA_BLOCK_SIZE - 19, o, xor, z); |
| } |
| |
| /* |
| * First substitution and xor layer, used for odd steps. |
| * It is safe for the input and output to be the same. |
| */ |
| static void sl1(ARIA_u128 o, const ARIA_u128 x, const ARIA_u128 y) |
| { |
| unsigned int i; |
| for (i = 0; i < ARIA_BLOCK_SIZE; i += 4) { |
| o[i ] = sb1[x[i ] ^ y[i ]]; |
| o[i + 1] = sb2[x[i + 1] ^ y[i + 1]]; |
| o[i + 2] = sb3[x[i + 2] ^ y[i + 2]]; |
| o[i + 3] = sb4[x[i + 3] ^ y[i + 3]]; |
| } |
| } |
| |
| /* |
| * Second substitution and xor layer, used for even steps. |
| * It is safe for the input and output to be the same. |
| */ |
| static void sl2(ARIA_u128 o, const ARIA_u128 x, const ARIA_u128 y) |
| { |
| unsigned int i; |
| for (i = 0; i < ARIA_BLOCK_SIZE; i += 4) { |
| o[i ] = sb3[x[i ] ^ y[i ]]; |
| o[i + 1] = sb4[x[i + 1] ^ y[i + 1]]; |
| o[i + 2] = sb1[x[i + 2] ^ y[i + 2]]; |
| o[i + 3] = sb2[x[i + 3] ^ y[i + 3]]; |
| } |
| } |
| |
| /* |
| * Diffusion layer step |
| * It is NOT safe for the input and output to overlap. |
| */ |
| static void a(ARIA_u128 y, const ARIA_u128 x) |
| { |
| y[ 0] = x[3] ^ x[4] ^ x[6] ^ x[ 8] ^ x[ 9] ^ x[13] ^ x[14]; |
| y[ 1] = x[2] ^ x[5] ^ x[7] ^ x[ 8] ^ x[ 9] ^ x[12] ^ x[15]; |
| y[ 2] = x[1] ^ x[4] ^ x[6] ^ x[10] ^ x[11] ^ x[12] ^ x[15]; |
| y[ 3] = x[0] ^ x[5] ^ x[7] ^ x[10] ^ x[11] ^ x[13] ^ x[14]; |
| y[ 4] = x[0] ^ x[2] ^ x[5] ^ x[ 8] ^ x[11] ^ x[14] ^ x[15]; |
| y[ 5] = x[1] ^ x[3] ^ x[4] ^ x[ 9] ^ x[10] ^ x[14] ^ x[15]; |
| y[ 6] = x[0] ^ x[2] ^ x[7] ^ x[ 9] ^ x[10] ^ x[12] ^ x[13]; |
| y[ 7] = x[1] ^ x[3] ^ x[6] ^ x[ 8] ^ x[11] ^ x[12] ^ x[13]; |
| y[ 8] = x[0] ^ x[1] ^ x[4] ^ x[ 7] ^ x[10] ^ x[13] ^ x[15]; |
| y[ 9] = x[0] ^ x[1] ^ x[5] ^ x[ 6] ^ x[11] ^ x[12] ^ x[14]; |
| y[10] = x[2] ^ x[3] ^ x[5] ^ x[ 6] ^ x[ 8] ^ x[13] ^ x[15]; |
| y[11] = x[2] ^ x[3] ^ x[4] ^ x[ 7] ^ x[ 9] ^ x[12] ^ x[14]; |
| y[12] = x[1] ^ x[2] ^ x[6] ^ x[ 7] ^ x[ 9] ^ x[11] ^ x[12]; |
| y[13] = x[0] ^ x[3] ^ x[6] ^ x[ 7] ^ x[ 8] ^ x[10] ^ x[13]; |
| y[14] = x[0] ^ x[3] ^ x[4] ^ x[ 5] ^ x[ 9] ^ x[11] ^ x[14]; |
| y[15] = x[1] ^ x[2] ^ x[4] ^ x[ 5] ^ x[ 8] ^ x[10] ^ x[15]; |
| } |
| |
| /* |
| * Odd round function |
| * Apply the first substitution layer and then a diffusion step. |
| * It is safe for the input and output to overlap. |
| */ |
| static ossl_inline void FO(ARIA_u128 o, const ARIA_u128 d, const ARIA_u128 rk) |
| { |
| ARIA_u128 y; |
| |
| sl1(y, d, rk); |
| a(o, y); |
| } |
| |
| /* |
| * Even round function |
| * Apply the second substitution layer and then a diffusion step. |
| * It is safe for the input and output to overlap. |
| */ |
| static ossl_inline void FE(ARIA_u128 o, const ARIA_u128 d, const ARIA_u128 rk) |
| { |
| ARIA_u128 y; |
| |
| sl2(y, d, rk); |
| a(o, y); |
| } |
| |
| /* |
| * Encrypt or decrypt a single block |
| * in and out can overlap |
| */ |
| static void do_encrypt(ARIA_u128 o, const ARIA_u128 pin, unsigned int rounds, |
| const ARIA_u128 *keys) |
| { |
| ARIA_u128 p; |
| unsigned int i; |
| |
| memcpy(p, pin, sizeof(p)); |
| for (i = 0; i < rounds - 2; i += 2) { |
| FO(p, p, keys[i]); |
| FE(p, p, keys[i + 1]); |
| } |
| FO(p, p, keys[rounds - 2]); |
| sl2(o, p, keys[rounds - 1]); |
| xor128(o, o, keys[rounds]); |
| } |
| |
| /* |
| * Encrypt a single block |
| * in and out can overlap |
| */ |
| void aria_encrypt(const unsigned char *in, unsigned char *out, |
| const ARIA_KEY *key) |
| { |
| assert(in != NULL && out != NULL && key != NULL); |
| do_encrypt(out, in, key->rounds, key->rd_key); |
| } |
| |
| |
| /* |
| * Expand the cipher key into the encryption key schedule. |
| * We short circuit execution of the last two |
| * or four rotations based on the key size. |
| */ |
| int aria_set_encrypt_key(const unsigned char *userKey, const int bits, |
| ARIA_KEY *key) |
| { |
| const unsigned char *ck1, *ck2, *ck3; |
| ARIA_u128 kr, w0, w1, w2, w3; |
| |
| if (!userKey || !key) |
| return -1; |
| memcpy(w0, userKey, sizeof(w0)); |
| switch (bits) { |
| default: |
| return -2; |
| case 128: |
| key->rounds = 12; |
| ck1 = c1; |
| ck2 = c2; |
| ck3 = c3; |
| memset(kr, 0, sizeof(kr)); |
| break; |
| |
| case 192: |
| key->rounds = 14; |
| ck1 = c2; |
| ck2 = c3; |
| ck3 = c1; |
| memcpy(kr, userKey + ARIA_BLOCK_SIZE, sizeof(kr) / 2); |
| memset(kr + ARIA_BLOCK_SIZE / 2, 0, sizeof(kr) / 2); |
| break; |
| |
| case 256: |
| key->rounds = 16; |
| ck1 = c3; |
| ck2 = c1; |
| ck3 = c2; |
| memcpy(kr, userKey + ARIA_BLOCK_SIZE, sizeof(kr)); |
| break; |
| } |
| |
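    /*
     * Intermediate key schedule values (RFC 5794):
     *     W1 = FO(W0, CK1) ^ KR
     *     W2 = FE(W1, CK2) ^ W0
     *     W3 = FO(W2, CK3) ^ W1
     * w3 and kr are reused as scratch space for the FO/FE outputs.
     */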
| FO(w3, w0, ck1); xor128(w1, w3, kr); |
| FE(w3, w1, ck2); xor128(w2, w3, w0); |
| FO(kr, w2, ck3); xor128(w3, kr, w1); |
| |
| rot19r(key->rd_key[ 0], w0, w1); |
| rot19r(key->rd_key[ 1], w1, w2); |
| rot19r(key->rd_key[ 2], w2, w3); |
| rot19r(key->rd_key[ 3], w3, w0); |
| |
| rot31r(key->rd_key[ 4], w0, w1); |
| rot31r(key->rd_key[ 5], w1, w2); |
| rot31r(key->rd_key[ 6], w2, w3); |
| rot31r(key->rd_key[ 7], w3, w0); |
| |
| rot61l(key->rd_key[ 8], w0, w1); |
| rot61l(key->rd_key[ 9], w1, w2); |
| rot61l(key->rd_key[10], w2, w3); |
| rot61l(key->rd_key[11], w3, w0); |
| |
| rot31l(key->rd_key[12], w0, w1); |
| if (key->rounds > 12) { |
| rot31l(key->rd_key[13], w1, w2); |
| rot31l(key->rd_key[14], w2, w3); |
| |
| if (key->rounds > 14) { |
| rot31l(key->rd_key[15], w3, w0); |
| rot19l(key->rd_key[16], w0, w1); |
| } |
| } |
| return 0; |
| } |
| |
| /* |
| * Expand the cipher key into the decryption key schedule. |
| */ |
| int aria_set_decrypt_key(const unsigned char *userKey, const int bits, |
| ARIA_KEY *key) |
| { |
| ARIA_KEY ek; |
| const int r = aria_set_encrypt_key(userKey, bits, &ek); |
    unsigned int i, rounds;

    if (r == 0) {
        rounds = ek.rounds;     /* only valid when key setup succeeded */
        key->rounds = rounds;
| memcpy(key->rd_key[0], ek.rd_key[rounds], sizeof(key->rd_key[0])); |
| for (i = 1; i < rounds; i++) |
| a(key->rd_key[i], ek.rd_key[rounds - i]); |
| memcpy(key->rd_key[rounds], ek.rd_key[0], sizeof(key->rd_key[rounds])); |
| } |
| return r; |
| } |