/* ====================================================================
 * Copyright (c) 2014 The OpenSSL Project. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    openssl-core@openssl.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 */

#include <string.h>
#include <openssl/crypto.h>
#include "modes_lcl.h"

#ifndef OPENSSL_NO_OCB

/*
 * Calculate the number of binary trailing zeros in any given number
 */
static u32 ocb_ntz(u64 n)
{
    u32 cnt = 0;

    /*
     * We do a right-to-left simple sequential search. This is surprisingly
     * efficient as the distribution of trailing zeros is not uniform,
     * e.g. the number of possible inputs with no trailing zeros is equal to
     * the number with 1 or more; the number with exactly 1 is equal to the
     * number with 2 or more, etc. Checking the last two bits covers 75% of
     * all numbers. Checking the last three covers 87.5%.
     */
    while (!(n & 1)) {
        n >>= 1;
        cnt++;
    }
    return cnt;
}
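
/*
 * For example, ocb_ntz(1) == 0, ocb_ntz(2) == 1, ocb_ntz(12) == 2 and
 * ocb_ntz(16) == 4. The callers below only ever pass the non-zero block
 * counter i, so the loop always terminates.
 */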

/*
 * Shift a block of 16 bytes left by shift bits
 */
static void ocb_block_lshift(OCB_BLOCK *in, size_t shift, OCB_BLOCK *out)
{
    unsigned char shift_mask;
    int i;
    unsigned char mask[15];

    shift_mask = 0xff;
    shift_mask <<= (8 - shift);
    for (i = 15; i >= 0; i--) {
        if (i > 0) {
            mask[i - 1] = in->c[i] & shift_mask;
            mask[i - 1] >>= 8 - shift;
        }
        out->c[i] = in->c[i] << shift;

        if (i != 15) {
            out->c[i] ^= mask[i];
        }
    }
}
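
/*
 * The top |shift| bits of each byte are saved in mask[] and xored into the
 * low bits of the byte to its left, so bits carry across byte boundaries;
 * bits shifted out of in->c[0] are discarded. Callers only ever use shifts
 * of 0-7 bits.
 */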

/*
 * Perform a "double" operation as per OCB spec
 */
static void ocb_double(OCB_BLOCK *in, OCB_BLOCK *out)
{
    unsigned char mask;

    /*
     * Calculate the mask based on the most significant bit. There are more
     * efficient ways to do this - but this way is constant time
     */
    mask = in->c[0] & 0x80;
    mask >>= 7;
    mask *= 135;

    ocb_block_lshift(in, 1, out);

    out->c[15] ^= mask;
}
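
/*
 * This is doubling in GF(2^128) with the reduction polynomial
 * x^128 + x^7 + x^2 + x + 1: shift the block left one bit and, if the bit
 * shifted out was set, xor the low byte with 135 (0x87). Deriving the mask
 * arithmetically from the top bit avoids a secret-dependent branch.
 */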

/*
 * Perform an xor on in1 and in2 - each of len bytes. Store result in out
 */
static void ocb_block_xor(const unsigned char *in1,
                          const unsigned char *in2, size_t len,
                          unsigned char *out)
{
    size_t i;
    for (i = 0; i < len; i++) {
        out[i] = in1[i] ^ in2[i];
    }
}

/*
 * Look up L_{idx} in our lookup table. If we haven't already got it we need
 * to calculate it.
 */
static OCB_BLOCK *ocb_lookup_l(OCB128_CONTEXT *ctx, size_t idx)
{
    size_t l_index = ctx->l_index;

    if (idx <= l_index) {
        return ctx->l + idx;
    }

    /* We don't have it - so calculate it */
    if (idx >= ctx->max_l_index) {
        OCB_BLOCK *tmp_ptr;

        /*
         * Each additional entry allows us to process almost twice as much
         * data, so in a linear world the table needs to be expanded with
         * smaller and smaller increments. Originally it doubled in size,
         * which was a waste. Growing it linearly is not formally optimal,
         * but is simpler to implement. We grow the table by the minimal
         * multiple of 4 entries that accommodates the index.
         */
        ctx->max_l_index += (idx - ctx->max_l_index + 4) & ~3;
        tmp_ptr =
            OPENSSL_realloc(ctx->l, ctx->max_l_index * sizeof(OCB_BLOCK));
        if (tmp_ptr == NULL)    /* don't clobber (and leak) ctx->l on failure */
            return NULL;
        ctx->l = tmp_ptr;
    }
    while (l_index <= idx) {
        ocb_double(ctx->l + l_index, ctx->l + l_index + 1);
        l_index++;
    }
    ctx->l_index = l_index;

    return ctx->l + idx;
}
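
/*
 * The masked addition always grows the table by a multiple of four entries:
 * e.g. with max_l_index == 5, a request for idx == 5 computes
 * (5 - 5 + 4) & ~3 == 4 and grows the table to 9 entries.
 */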

/*
 * Encrypt a block from |in| and store the result in |out|
 */
static void ocb_encrypt(OCB128_CONTEXT *ctx, OCB_BLOCK *in, OCB_BLOCK *out,
                        void *keyenc)
{
    ctx->encrypt(in->c, out->c, keyenc);
}

/*
 * Decrypt a block from |in| and store the result in |out|
 */
static void ocb_decrypt(OCB128_CONTEXT *ctx, OCB_BLOCK *in, OCB_BLOCK *out,
                        void *keydec)
{
    ctx->decrypt(in->c, out->c, keydec);
}

/*
 * Create a new OCB128_CONTEXT
 */
OCB128_CONTEXT *CRYPTO_ocb128_new(void *keyenc, void *keydec,
                                  block128_f encrypt, block128_f decrypt)
{
    OCB128_CONTEXT *octx;
    int ret;

    if ((octx = OPENSSL_malloc(sizeof(*octx))) != NULL) {
        ret = CRYPTO_ocb128_init(octx, keyenc, keydec, encrypt, decrypt);
        if (ret)
            return octx;
        OPENSSL_free(octx);
    }

    return NULL;
}
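
/*
 * A minimal usage sketch, assuming AES key schedules |enc_ks| and |dec_ks|
 * already set up with AES_set_encrypt_key()/AES_set_decrypt_key() (the
 * variable names here are illustrative, not part of this API):
 *
 *     OCB128_CONTEXT *octx = CRYPTO_ocb128_new(&enc_ks, &dec_ks,
 *                                              (block128_f)AES_encrypt,
 *                                              (block128_f)AES_decrypt);
 *     if (octx == NULL
 *         || CRYPTO_ocb128_setiv(octx, iv, 12, 16) != 1
 *         || !CRYPTO_ocb128_aad(octx, aad, aadlen)
 *         || !CRYPTO_ocb128_encrypt(octx, pt, ct, ptlen)
 *         || CRYPTO_ocb128_tag(octx, tag, 16) != 1)
 *         goto err;
 *     CRYPTO_ocb128_cleanup(octx);
 *     OPENSSL_free(octx);
 *
 * Note that CRYPTO_ocb128_cleanup() cleanses but does not free a context
 * allocated by this function, hence the explicit OPENSSL_free().
 */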

/*
 * Initialise an existing OCB128_CONTEXT
 */
int CRYPTO_ocb128_init(OCB128_CONTEXT *ctx, void *keyenc, void *keydec,
                       block128_f encrypt, block128_f decrypt)
{
    memset(ctx, 0, sizeof(*ctx));
    ctx->l_index = 0;
    ctx->max_l_index = 5;
    ctx->l = OPENSSL_malloc(ctx->max_l_index * 16);
    if (ctx->l == NULL)
        return 0;

    /*
     * We set both the encryption and decryption key schedules - decryption
     * needs both. We don't strictly need the decryption schedule if we are
     * only encrypting - but it simplifies things to take it anyway.
     */
    ctx->encrypt = encrypt;
    ctx->decrypt = decrypt;
    ctx->keyenc = keyenc;
    ctx->keydec = keydec;

    /* L_* = ENCIPHER(K, zeros(128)) */
    ocb_encrypt(ctx, &ctx->l_star, &ctx->l_star, ctx->keyenc);

    /* L_$ = double(L_*) */
    ocb_double(&ctx->l_star, &ctx->l_dollar);

    /* L_0 = double(L_$) */
    ocb_double(&ctx->l_dollar, ctx->l);

    /* L_{i} = double(L_{i-1}) */
    ocb_double(ctx->l, ctx->l + 1);
    ocb_double(ctx->l + 1, ctx->l + 2);
    ocb_double(ctx->l + 2, ctx->l + 3);
    ocb_double(ctx->l + 3, ctx->l + 4);
    ctx->l_index = 4;           /* enough to process up to 496 bytes */

    return 1;
}
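
/*
 * Why 496 bytes: with L_0..L_4 precomputed, every block index i up to 31
 * satisfies ntz(i) <= 4, and 31 blocks * 16 bytes = 496 bytes. Block 32
 * (ntz(32) == 5) is the first that forces ocb_lookup_l() to extend the
 * table.
 */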

/*
 * Copy an OCB128_CONTEXT object
 */
int CRYPTO_ocb128_copy_ctx(OCB128_CONTEXT *dest, OCB128_CONTEXT *src,
                           void *keyenc, void *keydec)
{
    memcpy(dest, src, sizeof(OCB128_CONTEXT));
    if (keyenc)
        dest->keyenc = keyenc;
    if (keydec)
        dest->keydec = keydec;
    if (src->l) {
        dest->l = OPENSSL_malloc(src->max_l_index * 16);
        if (dest->l == NULL)
            return 0;
        memcpy(dest->l, src->l, (src->l_index + 1) * 16);
    }
    return 1;
}

/*
 * Set the IV to be used for this operation. Must be 1 - 15 bytes.
 */
int CRYPTO_ocb128_setiv(OCB128_CONTEXT *ctx, const unsigned char *iv,
                        size_t len, size_t taglen)
{
    unsigned char ktop[16], tmp[16], mask;
    unsigned char stretch[24], nonce[16];
    size_t bottom, shift;

    /*
     * Spec says IV is 120 bits or fewer - it allows non byte aligned lengths.
     * We don't support this at this stage.
     */
    if ((len > 15) || (len < 1) || (taglen > 16) || (taglen < 1)) {
        return -1;
    }

    /* Nonce = num2str(TAGLEN mod 128, 7) || zeros(120 - bitlen(N)) || 1 || N */
    nonce[0] = ((taglen * 8) % 128) << 1;
    memset(nonce + 1, 0, 15);
    memcpy(nonce + 16 - len, iv, len);
    nonce[15 - len] |= 1;

    /* Ktop = ENCIPHER(K, Nonce[1..122] || zeros(6)) */
    memcpy(tmp, nonce, 16);
    tmp[15] &= 0xc0;
    ctx->encrypt(tmp, ktop, ctx->keyenc);

    /* Stretch = Ktop || (Ktop[1..64] xor Ktop[9..72]) */
    memcpy(stretch, ktop, 16);
    ocb_block_xor(ktop, ktop + 1, 8, stretch + 16);

    /* bottom = str2num(Nonce[123..128]) */
    bottom = nonce[15] & 0x3f;

    /* Offset_0 = Stretch[1+bottom..128+bottom] */
    shift = bottom % 8;
    ocb_block_lshift((OCB_BLOCK *)(stretch + (bottom / 8)), shift,
                     &ctx->offset);
    mask = 0xff;
    mask <<= 8 - shift;
    ctx->offset.c[15] |=
        (*(stretch + (bottom / 8) + 16) & mask) >> (8 - shift);

    return 1;
}
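
/*
 * For example, with a 12 byte IV and a 12 byte (96 bit) tag the nonce is
 * laid out as 0xc0 0x00 0x00 0x01 || N: num2str(96, 7) supplies the top
 * seven bits 1100000, followed by 24 zero bits, a single 1 bit and then
 * the 96 bit IV.
 */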

/*
 * Provide any AAD. This can be called multiple times. Only the final time
 * can have a partial block
 */
int CRYPTO_ocb128_aad(OCB128_CONTEXT *ctx, const unsigned char *aad,
                      size_t len)
{
    u64 all_num_blocks, num_blocks;
    u64 i;
    OCB_BLOCK tmp1;
    OCB_BLOCK tmp2;
    int last_len;

    /* Calculate the number of blocks of AAD provided now, and so far */
    num_blocks = len / 16;
    all_num_blocks = num_blocks + ctx->blocks_hashed;

    /* Loop through all full blocks of AAD */
    for (i = ctx->blocks_hashed + 1; i <= all_num_blocks; i++) {
        OCB_BLOCK *lookup;
        OCB_BLOCK *aad_block;

        /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
        lookup = ocb_lookup_l(ctx, ocb_ntz(i));
        if (!lookup)
            return 0;
        ocb_block16_xor(&ctx->offset_aad, lookup, &ctx->offset_aad);

        /* Sum_i = Sum_{i-1} xor ENCIPHER(K, A_i xor Offset_i) */
        aad_block = (OCB_BLOCK *)(aad + ((i - ctx->blocks_hashed - 1) * 16));
        ocb_block16_xor(&ctx->offset_aad, aad_block, &tmp1);
        ocb_encrypt(ctx, &tmp1, &tmp2, ctx->keyenc);
        ocb_block16_xor(&ctx->sum, &tmp2, &ctx->sum);
    }

    /*
     * Check if we have any partial blocks left over. This is only valid in
     * the last call to this function
     */
    last_len = len % 16;

    if (last_len > 0) {
        /* Offset_* = Offset_m xor L_* */
        ocb_block16_xor(&ctx->offset_aad, &ctx->l_star, &ctx->offset_aad);

        /* CipherInput = (A_* || 1 || zeros(127-bitlen(A_*))) xor Offset_* */
        memset(&tmp1, 0, 16);
        memcpy(&tmp1, aad + (num_blocks * 16), last_len);
        ((unsigned char *)&tmp1)[last_len] = 0x80;
        ocb_block16_xor(&ctx->offset_aad, &tmp1, &tmp2);

        /* Sum = Sum_m xor ENCIPHER(K, CipherInput) */
        ocb_encrypt(ctx, &tmp2, &tmp1, ctx->keyenc);
        ocb_block16_xor(&ctx->sum, &tmp1, &ctx->sum);
    }

    ctx->blocks_hashed = all_num_blocks;

    return 1;
}
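
/*
 * The AAD chain (offset_aad, sum, blocks_hashed) is tracked separately from
 * the data chain (offset, checksum, blocks_processed), so AAD calls can be
 * freely interleaved with encrypt/decrypt calls.
 */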

/*
 * Provide any data to be encrypted. This can be called multiple times. Only
 * the final time can have a partial block
 */
int CRYPTO_ocb128_encrypt(OCB128_CONTEXT *ctx,
                          const unsigned char *in, unsigned char *out,
                          size_t len)
{
    u64 i;
    u64 all_num_blocks, num_blocks;
    OCB_BLOCK tmp1;
    OCB_BLOCK tmp2;
    OCB_BLOCK pad;
    int last_len;

    /*
     * Calculate the number of blocks of data to be encrypted provided now,
     * and so far
     */
    num_blocks = len / 16;
    all_num_blocks = num_blocks + ctx->blocks_processed;

    /* Loop through all full blocks to be encrypted */
    for (i = ctx->blocks_processed + 1; i <= all_num_blocks; i++) {
        OCB_BLOCK *lookup;
        OCB_BLOCK *inblock;
        OCB_BLOCK *outblock;

        /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
        lookup = ocb_lookup_l(ctx, ocb_ntz(i));
        if (!lookup)
            return 0;
        ocb_block16_xor(&ctx->offset, lookup, &ctx->offset);

        /* C_i = Offset_i xor ENCIPHER(K, P_i xor Offset_i) */
        inblock = (OCB_BLOCK *)(in + ((i - ctx->blocks_processed - 1) * 16));
        ocb_block16_xor_misaligned(&ctx->offset, inblock, &tmp1);
        /* Checksum_i = Checksum_{i-1} xor P_i */
        ocb_block16_xor_misaligned(&ctx->checksum, inblock, &ctx->checksum);
        ocb_encrypt(ctx, &tmp1, &tmp2, ctx->keyenc);
        outblock =
            (OCB_BLOCK *)(out + ((i - ctx->blocks_processed - 1) * 16));
        ocb_block16_xor_misaligned(&ctx->offset, &tmp2, outblock);
    }

    /*
     * Check if we have any partial blocks left over. This is only valid in
     * the last call to this function
     */
    last_len = len % 16;

    if (last_len > 0) {
        /* Offset_* = Offset_m xor L_* */
        ocb_block16_xor(&ctx->offset, &ctx->l_star, &ctx->offset);

        /* Pad = ENCIPHER(K, Offset_*) */
        ocb_encrypt(ctx, &ctx->offset, &pad, ctx->keyenc);

        /* C_* = P_* xor Pad[1..bitlen(P_*)] */
        ocb_block_xor(in + (len / 16) * 16, (unsigned char *)&pad, last_len,
                      out + (num_blocks * 16));

        /* Checksum_* = Checksum_m xor (P_* || 1 || zeros(127-bitlen(P_*))) */
        memset(&tmp1, 0, 16);
        memcpy(&tmp1, in + (len / 16) * 16, last_len);
        ((unsigned char *)(&tmp1))[last_len] = 0x80;
        ocb_block16_xor(&ctx->checksum, &tmp1, &ctx->checksum);
    }

    ctx->blocks_processed = all_num_blocks;

    return 1;
}

/*
 * Provide any data to be decrypted. This can be called multiple times. Only
 * the final time can have a partial block
 */
int CRYPTO_ocb128_decrypt(OCB128_CONTEXT *ctx,
                          const unsigned char *in, unsigned char *out,
                          size_t len)
{
    u64 i;
    u64 all_num_blocks, num_blocks;
    OCB_BLOCK tmp1;
    OCB_BLOCK tmp2;
    OCB_BLOCK pad;
    int last_len;

    /*
     * Calculate the number of blocks of data to be decrypted provided now,
     * and so far
     */
    num_blocks = len / 16;
    all_num_blocks = num_blocks + ctx->blocks_processed;

    /* Loop through all full blocks to be decrypted */
    for (i = ctx->blocks_processed + 1; i <= all_num_blocks; i++) {
        OCB_BLOCK *inblock;
        OCB_BLOCK *outblock;

        /* Offset_i = Offset_{i-1} xor L_{ntz(i)} */
        OCB_BLOCK *lookup = ocb_lookup_l(ctx, ocb_ntz(i));
        if (!lookup)
            return 0;
        ocb_block16_xor(&ctx->offset, lookup, &ctx->offset);

        /* P_i = Offset_i xor DECIPHER(K, C_i xor Offset_i) */
        inblock = (OCB_BLOCK *)(in + ((i - ctx->blocks_processed - 1) * 16));
        ocb_block16_xor_misaligned(&ctx->offset, inblock, &tmp1);
        ocb_decrypt(ctx, &tmp1, &tmp2, ctx->keydec);
        outblock =
            (OCB_BLOCK *)(out + ((i - ctx->blocks_processed - 1) * 16));
        ocb_block16_xor_misaligned(&ctx->offset, &tmp2, outblock);

        /* Checksum_i = Checksum_{i-1} xor P_i */
        ocb_block16_xor_misaligned(&ctx->checksum, outblock, &ctx->checksum);
    }

    /*
     * Check if we have any partial blocks left over. This is only valid in
     * the last call to this function
     */
    last_len = len % 16;

    if (last_len > 0) {
        /* Offset_* = Offset_m xor L_* */
        ocb_block16_xor(&ctx->offset, &ctx->l_star, &ctx->offset);

        /* Pad = ENCIPHER(K, Offset_*) */
        ocb_encrypt(ctx, &ctx->offset, &pad, ctx->keyenc);

        /* P_* = C_* xor Pad[1..bitlen(C_*)] */
        ocb_block_xor(in + (len / 16) * 16, (unsigned char *)&pad, last_len,
                      out + (num_blocks * 16));

        /* Checksum_* = Checksum_m xor (P_* || 1 || zeros(127-bitlen(P_*))) */
        memset(&tmp1, 0, 16);
        memcpy(&tmp1, out + (len / 16) * 16, last_len);
        ((unsigned char *)(&tmp1))[last_len] = 0x80;
        ocb_block16_xor(&ctx->checksum, &tmp1, &ctx->checksum);
    }

    ctx->blocks_processed = all_num_blocks;

    return 1;
}

/*
 * Calculate the tag and verify it against the supplied tag
 */
int CRYPTO_ocb128_finish(OCB128_CONTEXT *ctx, const unsigned char *tag,
                         size_t len)
{
    OCB_BLOCK tmp1, tmp2;

    /*
     * Tag = ENCIPHER(K, Checksum_* xor Offset_* xor L_$) xor HASH(K, A)
     */
    ocb_block16_xor(&ctx->checksum, &ctx->offset, &tmp1);
    ocb_block16_xor(&tmp1, &ctx->l_dollar, &tmp2);
    ocb_encrypt(ctx, &tmp2, &tmp1, ctx->keyenc);
    ocb_block16_xor(&tmp1, &ctx->sum, &ctx->tag);

    if (len > 16 || len < 1) {
        return -1;
    }

    /* Compare the tag if we've been given one */
    if (tag)
        return CRYPTO_memcmp(&ctx->tag, tag, len);
    else
        return -1;
}
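
/*
 * CRYPTO_memcmp() compares in constant time and returns 0 on a match, so
 * callers should treat any non-zero return from this function as an
 * authentication failure and discard the decrypted data.
 */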

/*
 * Retrieve the calculated tag
 */
int CRYPTO_ocb128_tag(OCB128_CONTEXT *ctx, unsigned char *tag, size_t len)
{
    if (len > 16 || len < 1) {
        return -1;
    }

    /* Calculate the tag */
    CRYPTO_ocb128_finish(ctx, NULL, 0);

    /* Copy the tag into the supplied buffer */
    memcpy(tag, &ctx->tag, len);

    return 1;
}

/*
 * Release all resources
 */
void CRYPTO_ocb128_cleanup(OCB128_CONTEXT *ctx)
{
    if (ctx) {
        OPENSSL_clear_free(ctx->l, ctx->max_l_index * 16);
        OPENSSL_cleanse(ctx, sizeof(*ctx));
    }
}

#endif                          /* OPENSSL_NO_OCB */