/*
 * Copyright 2014-2018 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */

#include <string.h>
#include <openssl/crypto.h>
#include <openssl/err.h>
#include "modes_lcl.h"

#ifndef OPENSSL_NO_OCB

/*
 * Calculate the number of binary trailing zeros in any given number
 */
static u32 ocb_ntz(u64 n)
{
    u32 cnt = 0;

    /*
     * We do a simple right-to-left sequential search. This is surprisingly
     * efficient as the distribution of trailing zeros is not uniform:
     * e.g. the number of possible inputs with no trailing zeros equals the
     * number with 1 or more; the number with exactly 1 equals the number
     * with 2 or more, and so on. Checking the last two bits covers 75% of
     * all numbers; checking the last three covers 87.5%.
     */
    while (!(n & 1)) {
        n >>= 1;
        cnt++;
    }
    return cnt;
}
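
/*
 * Illustrative sketch only (not part of this file's build): on GCC/Clang
 * the same count is available as a single builtin. ocb_ntz() is never
 * called with n == 0, so the zero guard below is purely defensive:
 *
 *     static u32 ocb_ntz_builtin(u64 n)
 *     {
 *         return n == 0 ? 64 : (u32)__builtin_ctzll(n);
 *     }
 *
 * The portable loop above is kept here because OCB only passes block
 * indices, for which the loop rarely makes more than a couple of passes.
 */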

/*
 * Shift a block of 16 bytes left by shift bits
 */
static void ocb_block_lshift(const unsigned char *in, size_t shift,
                             unsigned char *out)
{
    unsigned char shift_mask;
    int i;
    unsigned char mask[15];

    shift_mask = 0xff;
    shift_mask <<= (8 - shift);
    for (i = 15; i >= 0; i--) {
        if (i > 0) {
            mask[i - 1] = in[i] & shift_mask;
            mask[i - 1] >>= 8 - shift;
        }
        out[i] = in[i] << shift;

        if (i != 15) {
            out[i] ^= mask[i];
        }
    }
}
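
/*
 * Worked example (editorial sketch): shift a block whose first two bytes
 * are 0x80 0x01 (all other bytes zero) left by one bit. shift_mask is
 * 0x80, so mask[0] = (0x01 & 0x80) >> 7 == 0x00 and out[1] = 0x01 << 1
 * == 0x02. For i == 0, out[0] = 0x80 << 1 truncates to 0x00: the top bit
 * falls off the block, which is exactly the case ocb_double() below
 * compensates for with its reduction mask.
 */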

/*
 * Perform a "double" operation as per OCB spec
 */
static void ocb_double(OCB_BLOCK *in, OCB_BLOCK *out)
{
    unsigned char mask;

    /*
     * Calculate the mask based on the most significant bit. There are more
     * efficient ways to do this - but this way is constant time
     */
    mask = in->c[0] & 0x80;
    mask >>= 7;
    mask *= 135;

    ocb_block_lshift(in->c, 1, out->c);

    out->c[15] ^= mask;
}
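
/*
 * Background note (editorial): "double" is multiplication by x in
 * GF(2^128) with the reduction polynomial x^128 + x^7 + x^2 + x + 1.
 * When the bit shifted out of the top is 1, the low byte is xored with
 * 135 (0x87), the low byte of that polynomial:
 *
 *     OCB_BLOCK d;
 *     ocb_double(&blk, &d);    // d = 2 * blk in GF(2^128), blk unchanged
 *
 * Because mask is either 0x87 or 0x00, the reduction happens without a
 * data-dependent branch.
 */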

/*
 * Perform an xor on in1 and in2 - each of len bytes. Store result in out
 */
static void ocb_block_xor(const unsigned char *in1,
                          const unsigned char *in2, size_t len,
                          unsigned char *out)
{
    size_t i;

    for (i = 0; i < len; i++) {
        out[i] = in1[i] ^ in2[i];
    }
}

/*
 * Look up L_index in our lookup table. If we haven't already got it then we
 * need to calculate it.
 */
static OCB_BLOCK *ocb_lookup_l(OCB128_CONTEXT *ctx, size_t idx)
{
    size_t l_index = ctx->l_index;

    if (idx <= l_index) {
        return ctx->l + idx;
    }

    /* We don't have it - so calculate it */
    if (idx >= ctx->max_l_index) {
        void *tmp_ptr;

        /*
         * Each additional entry allows us to process almost twice as much
         * data, so in a linear world the table would need to be expanded
         * with smaller and smaller increments. Originally it doubled in
         * size, which was a waste. Growing it linearly is not formally
         * optimal, but is simpler to implement. We grow the table by the
         * minimal multiple of 4 that accommodates the index.
         */
        ctx->max_l_index += (idx - ctx->max_l_index + 4) & ~3;
        tmp_ptr =
            OPENSSL_realloc(ctx->l, ctx->max_l_index * sizeof(OCB_BLOCK));
        if (tmp_ptr == NULL) /* prevent ctx->l from being clobbered */
            return NULL;
        ctx->l = tmp_ptr;
    }
    while (l_index < idx) {
        ocb_double(ctx->l + l_index, ctx->l + l_index + 1);
        l_index++;
    }
    ctx->l_index = l_index;

    return ctx->l + idx;
}
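
/*
 * Illustrative trace (editorial sketch, starting from the initial table
 * built in CRYPTO_ocb128_init() below): with max_l_index == 5 and
 * l_index == 4, a request for idx == 9 grows the table by
 * (9 - 5 + 4) & ~3 == 8 entries to max_l_index == 13, then doubles
 * forward from L_4 to L_9. L_i is only needed once every 2^i blocks, so
 * the table stays small even for very long messages.
 */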

/*
 * Create a new OCB128_CONTEXT
 */
OCB128_CONTEXT *CRYPTO_ocb128_new(void *keyenc, void *keydec,
                                  block128_f encrypt, block128_f decrypt,
                                  ocb128_f stream)
{
    OCB128_CONTEXT *octx;
    int ret;

    if ((octx = OPENSSL_malloc(sizeof(*octx))) != NULL) {
        ret = CRYPTO_ocb128_init(octx, keyenc, keydec, encrypt, decrypt,
                                 stream);
        if (ret)
            return octx;
        OPENSSL_free(octx);
    }

    return NULL;
}

/*
 * Initialise an existing OCB128_CONTEXT
 */
int CRYPTO_ocb128_init(OCB128_CONTEXT *ctx, void *keyenc, void *keydec,
                       block128_f encrypt, block128_f decrypt,
                       ocb128_f stream)
{
    memset(ctx, 0, sizeof(*ctx));
    ctx->l_index = 0;
    ctx->max_l_index = 5;
    if ((ctx->l = OPENSSL_malloc(ctx->max_l_index * 16)) == NULL) {
        CRYPTOerr(CRYPTO_F_CRYPTO_OCB128_INIT, ERR_R_MALLOC_FAILURE);
        return 0;
    }

    /*
     * We set both the encryption and decryption key schedules - decryption
     * needs both. We don't really need the decryption schedule when only
     * encrypting - but it simplifies things to take it anyway.
     */
    ctx->encrypt = encrypt;
    ctx->decrypt = decrypt;
    ctx->stream = stream;
    ctx->keyenc = keyenc;
    ctx->keydec = keydec;

    /* L_* = ENCIPHER(K, zeros(128)) */
    ctx->encrypt(ctx->l_star.c, ctx->l_star.c, ctx->keyenc);

    /* L_$ = double(L_*) */
    ocb_double(&ctx->l_star, &ctx->l_dollar);

    /* L_0 = double(L_$) */
    ocb_double(&ctx->l_dollar, ctx->l);

    /* L_{i} = double(L_{i-1}) */
    ocb_double(ctx->l, ctx->l + 1);
    ocb_double(ctx->l + 1, ctx->l + 2);
    ocb_double(ctx->l + 2, ctx->l + 3);
    ocb_double(ctx->l + 3, ctx->l + 4);
    ctx->l_index = 4;           /* enough to process up to 496 bytes */

    return 1;
}
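
/*
 * Minimal usage sketch (editorial; assumes AES from <openssl/aes.h>, with
 * the casts to block128_f following the pattern used by the EVP OCB code):
 *
 *     AES_KEY ksenc, ksdec;
 *     OCB128_CONTEXT octx;
 *
 *     AES_set_encrypt_key(key, 128, &ksenc);
 *     AES_set_decrypt_key(key, 128, &ksdec);
 *     if (!CRYPTO_ocb128_init(&octx, &ksenc, &ksdec,
 *                             (block128_f)AES_encrypt,
 *                             (block128_f)AES_decrypt, NULL))
 *         goto err;
 */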

/*
 * Copy an OCB128_CONTEXT object
 */
int CRYPTO_ocb128_copy_ctx(OCB128_CONTEXT *dest, OCB128_CONTEXT *src,
                           void *keyenc, void *keydec)
{
    memcpy(dest, src, sizeof(OCB128_CONTEXT));
    if (keyenc)
        dest->keyenc = keyenc;
    if (keydec)
        dest->keydec = keydec;
    if (src->l) {
        if ((dest->l = OPENSSL_malloc(src->max_l_index * 16)) == NULL) {
            CRYPTOerr(CRYPTO_F_CRYPTO_OCB128_COPY_CTX, ERR_R_MALLOC_FAILURE);
            return 0;
        }
        memcpy(dest->l, src->l, (src->l_index + 1) * 16);
    }
    return 1;
}

/*
 * Set the IV to be used for this operation. Must be 1-15 bytes.
 */
int CRYPTO_ocb128_setiv(OCB128_CONTEXT *ctx, const unsigned char *iv,
                        size_t len, size_t taglen)
{
    unsigned char ktop[16], tmp[16], mask;
    unsigned char stretch[24], nonce[16];
    size_t bottom, shift;

    /*
     * The spec says the IV is 120 bits or fewer - it allows non-byte-aligned
     * lengths. We don't support this at this stage.
     */
    if ((len > 15) || (len < 1) || (taglen > 16) || (taglen < 1)) {
        return -1;
    }

    /* Nonce = num2str(TAGLEN mod 128, 7) || zeros(120 - bitlen(N)) || 1 || N */
    nonce[0] = ((taglen * 8) % 128) << 1;
    memset(nonce + 1, 0, 15);
    memcpy(nonce + 16 - len, iv, len);
    nonce[15 - len] |= 1;

    /* Ktop = ENCIPHER(K, Nonce[1..122] || zeros(6)) */
    memcpy(tmp, nonce, 16);
    tmp[15] &= 0xc0;
    ctx->encrypt(tmp, ktop, ctx->keyenc);

    /* Stretch = Ktop || (Ktop[1..64] xor Ktop[9..72]) */
    memcpy(stretch, ktop, 16);
    ocb_block_xor(ktop, ktop + 1, 8, stretch + 16);

    /* bottom = str2num(Nonce[123..128]) */
    bottom = nonce[15] & 0x3f;

    /* Offset_0 = Stretch[1+bottom..128+bottom] */
    shift = bottom % 8;
    ocb_block_lshift(stretch + (bottom / 8), shift, ctx->offset.c);
    mask = 0xff;
    mask <<= 8 - shift;
    ctx->offset.c[15] |=
        (*(stretch + (bottom / 8) + 16) & mask) >> (8 - shift);

    return 1;
}
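
/*
 * Worked example (editorial): for a 12-byte IV with taglen == 16,
 * (16 * 8) % 128 == 0 gives nonce[0] = 0x00; with taglen == 12 it is
 * (96 << 1) & 0xff == 0xc0. The IV itself lands in nonce[4..15] and
 * nonce[3] has its low bit set as the separator, yielding the
 * TAGLEN-bits || zero padding || 1 || N layout the spec requires.
 */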

/*
 * Provide any AAD. This can be called multiple times. Only the final time can
 * have a partial block.
 */
int CRYPTO_ocb128_aad(OCB128_CONTEXT *ctx, const unsigned char *aad,
                      size_t len)
{
    u64 i, all_num_blocks;
    size_t num_blocks, last_len;
    OCB_BLOCK tmp;

    /* Calculate the number of blocks of AAD provided now, and so far */
    num_blocks = len / 16;
    all_num_blocks = num_blocks + ctx->blocks_hashed;

    /* Loop through all full blocks of AAD */
    for (i = ctx->blocks_hashed + 1; i <= all_num_blocks; i++) {
        OCB_BLOCK *lookup;

        /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
        lookup = ocb_lookup_l(ctx, ocb_ntz(i));
        if (lookup == NULL)
            return 0;
        ocb_block16_xor(&ctx->offset_aad, lookup, &ctx->offset_aad);

        memcpy(tmp.c, aad, 16);
        aad += 16;

        /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */
        ocb_block16_xor(&ctx->offset_aad, &tmp, &tmp);
        ctx->encrypt(tmp.c, tmp.c, ctx->keyenc);
        ocb_block16_xor(&tmp, &ctx->sum, &ctx->sum);
    }

    /*
     * Check if we have any partial blocks left over. This is only valid in
     * the last call to this function.
     */
    last_len = len % 16;

    if (last_len > 0) {
        /* Offset_* = Offset_m xor L_* */
        ocb_block16_xor(&ctx->offset_aad, &ctx->l_star, &ctx->offset_aad);

        /* CipherInput = (A_* || 1 || zeros(127-bitlen(A_*))) xor Offset_* */
        memset(tmp.c, 0, 16);
        memcpy(tmp.c, aad, last_len);
        tmp.c[last_len] = 0x80;
        ocb_block16_xor(&ctx->offset_aad, &tmp, &tmp);

        /* Sum = Sum_m xor ENCIPHER(K, CipherInput) */
        ctx->encrypt(tmp.c, tmp.c, ctx->keyenc);
        ocb_block16_xor(&tmp, &ctx->sum, &ctx->sum);
    }

    ctx->blocks_hashed = all_num_blocks;

    return 1;
}
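
/*
 * Usage note (editorial sketch; hdr is a hypothetical buffer): AAD may be
 * streamed across calls, but only the last chunk may be partial:
 *
 *     CRYPTO_ocb128_aad(&octx, hdr, 32);      // two full blocks
 *     CRYPTO_ocb128_aad(&octx, hdr + 32, 5);  // final, partial chunk
 *
 * Calling again after a partial chunk would corrupt the sum, since
 * blocks_hashed only records full blocks and the 0x80 padding marker has
 * already been absorbed.
 */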

/*
 * Provide any data to be encrypted. This can be called multiple times. Only
 * the final time can have a partial block.
 */
int CRYPTO_ocb128_encrypt(OCB128_CONTEXT *ctx,
                          const unsigned char *in, unsigned char *out,
                          size_t len)
{
    u64 i, all_num_blocks;
    size_t num_blocks, last_len;

    /*
     * Calculate the number of blocks of data to be encrypted provided now,
     * and so far
     */
    num_blocks = len / 16;
    all_num_blocks = num_blocks + ctx->blocks_processed;

    if (num_blocks && all_num_blocks == (size_t)all_num_blocks
        && ctx->stream != NULL) {
        size_t max_idx = 0, top = (size_t)all_num_blocks;

        /*
         * See how many L_{i} entries we need to process the data at hand
         * and pre-compute any missing entries in the table...
         */
        while (top >>= 1)
            max_idx++;
        if (ocb_lookup_l(ctx, max_idx) == NULL)
            return 0;

        ctx->stream(in, out, num_blocks, ctx->keyenc,
                    (size_t)ctx->blocks_processed + 1, ctx->offset.c,
                    (const unsigned char (*)[16])ctx->l, ctx->checksum.c);
    } else {
        /* Loop through all full blocks to be encrypted */
        for (i = ctx->blocks_processed + 1; i <= all_num_blocks; i++) {
            OCB_BLOCK *lookup;
            OCB_BLOCK tmp;

            /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
            lookup = ocb_lookup_l(ctx, ocb_ntz(i));
            if (lookup == NULL)
                return 0;
            ocb_block16_xor(&ctx->offset, lookup, &ctx->offset);

            memcpy(tmp.c, in, 16);
            in += 16;

            /* Checksum_i = Checksum_{i-1} xor P_i */
            ocb_block16_xor(&tmp, &ctx->checksum, &ctx->checksum);

            /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
            ocb_block16_xor(&ctx->offset, &tmp, &tmp);
            ctx->encrypt(tmp.c, tmp.c, ctx->keyenc);
            ocb_block16_xor(&ctx->offset, &tmp, &tmp);

            memcpy(out, tmp.c, 16);
            out += 16;
        }
    }

    /*
     * Check if we have any partial blocks left over. This is only valid in
     * the last call to this function.
     */
    last_len = len % 16;

    if (last_len > 0) {
        OCB_BLOCK pad;

        /* Offset_* = Offset_m xor L_* */
        ocb_block16_xor(&ctx->offset, &ctx->l_star, &ctx->offset);

        /* Pad = ENCIPHER(K, Offset_*) */
        ctx->encrypt(ctx->offset.c, pad.c, ctx->keyenc);

        /* C_* = P_* xor Pad[1..bitlen(P_*)] */
        ocb_block_xor(in, pad.c, last_len, out);

        /* Checksum_* = Checksum_m xor (P_* || 1 || zeros(127-bitlen(P_*))) */
        memset(pad.c, 0, 16);   /* borrow pad */
        memcpy(pad.c, in, last_len);
        pad.c[last_len] = 0x80;
        ocb_block16_xor(&pad, &ctx->checksum, &ctx->checksum);
    }

    ctx->blocks_processed = all_num_blocks;

    return 1;
}
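
/*
 * One-shot flow (editorial sketch; iv, aad, pt and ct are hypothetical
 * buffers, continuing the init snippet above):
 *
 *     CRYPTO_ocb128_setiv(&octx, iv, 12, 16);
 *     CRYPTO_ocb128_aad(&octx, aad, aadlen);
 *     CRYPTO_ocb128_encrypt(&octx, pt, ct, ptlen);
 *
 * The all_num_blocks == (size_t)all_num_blocks test above guards the
 * stream path on 32-bit builds: if the 64-bit block count does not
 * round-trip through size_t, the generic loop is used instead.
 */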

/*
 * Provide any data to be decrypted. This can be called multiple times. Only
 * the final time can have a partial block.
 */
int CRYPTO_ocb128_decrypt(OCB128_CONTEXT *ctx,
                          const unsigned char *in, unsigned char *out,
                          size_t len)
{
    u64 i, all_num_blocks;
    size_t num_blocks, last_len;

    /*
     * Calculate the number of blocks of data to be decrypted provided now,
     * and so far
     */
    num_blocks = len / 16;
    all_num_blocks = num_blocks + ctx->blocks_processed;

    if (num_blocks && all_num_blocks == (size_t)all_num_blocks
        && ctx->stream != NULL) {
        size_t max_idx = 0, top = (size_t)all_num_blocks;

        /*
         * See how many L_{i} entries we need to process the data at hand
         * and pre-compute any missing entries in the table...
         */
        while (top >>= 1)
            max_idx++;
        if (ocb_lookup_l(ctx, max_idx) == NULL)
            return 0;

        ctx->stream(in, out, num_blocks, ctx->keydec,
                    (size_t)ctx->blocks_processed + 1, ctx->offset.c,
                    (const unsigned char (*)[16])ctx->l, ctx->checksum.c);
    } else {
        OCB_BLOCK tmp;

        /* Loop through all full blocks to be decrypted */
        for (i = ctx->blocks_processed + 1; i <= all_num_blocks; i++) {

            /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
            OCB_BLOCK *lookup = ocb_lookup_l(ctx, ocb_ntz(i));

            if (lookup == NULL)
                return 0;
            ocb_block16_xor(&ctx->offset, lookup, &ctx->offset);

            memcpy(tmp.c, in, 16);
            in += 16;

            /* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i) */
            ocb_block16_xor(&ctx->offset, &tmp, &tmp);
            ctx->decrypt(tmp.c, tmp.c, ctx->keydec);
            ocb_block16_xor(&ctx->offset, &tmp, &tmp);

            /* Checksum_i = Checksum_{i-1} xor P_i */
            ocb_block16_xor(&tmp, &ctx->checksum, &ctx->checksum);

            memcpy(out, tmp.c, 16);
            out += 16;
        }
    }

    /*
     * Check if we have any partial blocks left over. This is only valid in
     * the last call to this function.
     */
    last_len = len % 16;

    if (last_len > 0) {
        OCB_BLOCK pad;

        /* Offset_* = Offset_m xor L_* */
        ocb_block16_xor(&ctx->offset, &ctx->l_star, &ctx->offset);

        /* Pad = ENCIPHER(K, Offset_*) */
        ctx->encrypt(ctx->offset.c, pad.c, ctx->keyenc);

        /* P_* = C_* xor Pad[1..bitlen(C_*)] */
        ocb_block_xor(in, pad.c, last_len, out);

        /* Checksum_* = Checksum_m xor (P_* || 1 || zeros(127-bitlen(P_*))) */
        memset(pad.c, 0, 16);   /* borrow pad */
        memcpy(pad.c, out, last_len);
        pad.c[last_len] = 0x80;
        ocb_block16_xor(&pad, &ctx->checksum, &ctx->checksum);
    }

    ctx->blocks_processed = all_num_blocks;

    return 1;
}

/*
 * Calculate the tag and verify it against the supplied tag
 */
int CRYPTO_ocb128_finish(OCB128_CONTEXT *ctx, const unsigned char *tag,
                         size_t len)
{
    OCB_BLOCK tmp;

    /*
     * Tag = ENCIPHER(K, Checksum_* xor Offset_* xor L_$) xor HASH(K, A)
     */
    ocb_block16_xor(&ctx->checksum, &ctx->offset, &tmp);
    ocb_block16_xor(&ctx->l_dollar, &tmp, &tmp);
    ctx->encrypt(tmp.c, tmp.c, ctx->keyenc);
    ocb_block16_xor(&tmp, &ctx->sum, &ctx->tag);

    if (len > 16 || len < 1) {
        return -1;
    }

    /* Compare the tag if we've been given one */
    if (tag)
        return CRYPTO_memcmp(&ctx->tag, tag, len);
    else
        return -1;
}
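
/*
 * Usage note (editorial): the return value follows CRYPTO_memcmp()
 * semantics, so a successfully verified tag yields 0, not 1:
 *
 *     if (CRYPTO_ocb128_finish(&octx, expected_tag, 16) != 0)
 *         goto err;    /* authentication failed - discard any output */
 *
 * CRYPTO_memcmp() is constant time, which matters here: a data-dependent
 * comparison would leak tag bytes to a timing attacker.
 */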

/*
 * Retrieve the calculated tag
 */
int CRYPTO_ocb128_tag(OCB128_CONTEXT *ctx, unsigned char *tag, size_t len)
{
    if (len > 16 || len < 1) {
        return -1;
    }

    /* Calculate the tag */
    CRYPTO_ocb128_finish(ctx, NULL, 0);

    /* Copy the tag into the supplied buffer */
    memcpy(tag, ctx->tag.c, len);

    return 1;
}

/*
 * Release all resources
 */
void CRYPTO_ocb128_cleanup(OCB128_CONTEXT *ctx)
{
    if (ctx) {
        OPENSSL_clear_free(ctx->l, ctx->max_l_index * 16);
        OPENSSL_cleanse(ctx, sizeof(*ctx));
    }
}
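
/*
 * Sender-side close-out (editorial sketch, continuing the snippets above):
 *
 *     unsigned char tagbuf[16];
 *
 *     CRYPTO_ocb128_tag(&octx, tagbuf, sizeof(tagbuf)); // send with ct
 *     CRYPTO_ocb128_cleanup(&octx);                     // wipe secrets
 *
 * cleanup() zeroises and frees the L table and scrubs the context itself,
 * so key-derived material does not linger after the operation.
 */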

#endif                          /* OPENSSL_NO_OCB */