blob: de4a580032d5f3ccda7a5334665365b71527d7ce [file] [log] [blame]
Rich Salz2039c422016-05-17 14:51:34 -04001/*
Matt Caswell8020d792021-03-11 13:27:36 +00002 * Copyright 1995-2021 The OpenSSL Project Authors. All Rights Reserved.
Ralf S. Engelschall58964a41998-12-21 10:56:39 +00003 *
Richard Levitte2a7b6f32018-12-06 13:54:02 +01004 * Licensed under the Apache License 2.0 (the "License"). You may not use
Rich Salz2039c422016-05-17 14:51:34 -04005 * this file except in compliance with the License. You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
Bodo Möller46a64372005-05-16 01:43:31 +00008 */
Ralf S. Engelschall58964a41998-12-21 10:56:39 +00009
Paulic5f87132020-02-12 15:03:51 +100010/*
11 * RSA low level APIs are deprecated for public use, but still ok for
12 * internal use.
13 */
14#include "internal/deprecated.h"
15
Richard Levitteb39fc562015-05-14 16:56:48 +020016#include "internal/cryptlib.h"
Dr. Matthias St. Pierre25f21382019-09-28 00:45:33 +020017#include "crypto/bn.h"
Dr. Matthias St. Pierre706457b2019-09-28 00:45:40 +020018#include "rsa_local.h"
19#include "internal/constant_time.h"
Ralf S. Engelschall58964a41998-12-21 10:56:39 +000020
/*
 * Forward declarations for the callbacks installed in the default
 * "OpenSSL PKCS#1 RSA" method table defined below.
 */
static int rsa_ossl_public_encrypt(int flen, const unsigned char *from,
                                   unsigned char *to, RSA *rsa, int padding);
static int rsa_ossl_private_encrypt(int flen, const unsigned char *from,
                                    unsigned char *to, RSA *rsa, int padding);
static int rsa_ossl_public_decrypt(int flen, const unsigned char *from,
                                   unsigned char *to, RSA *rsa, int padding);
static int rsa_ossl_private_decrypt(int flen, const unsigned char *from,
                                    unsigned char *to, RSA *rsa, int padding);
static int rsa_ossl_mod_exp(BIGNUM *r0, const BIGNUM *i, RSA *rsa,
                            BN_CTX *ctx);
static int rsa_ossl_init(RSA *rsa);
static int rsa_ossl_finish(RSA *rsa);
33static RSA_METHOD rsa_pkcs1_ossl_meth = {
Rich Salz076fc552017-04-07 12:07:42 -040034 "OpenSSL PKCS#1 RSA",
Rich Salzbf160552015-11-01 19:55:56 -050035 rsa_ossl_public_encrypt,
36 rsa_ossl_public_decrypt, /* signature verification */
37 rsa_ossl_private_encrypt, /* signing */
38 rsa_ossl_private_decrypt,
39 rsa_ossl_mod_exp,
Matt Caswell0f113f32015-01-22 03:40:55 +000040 BN_mod_exp_mont, /* XXX probably we should not use Montgomery
41 * if e == 3 */
Rich Salzbf160552015-11-01 19:55:56 -050042 rsa_ossl_init,
43 rsa_ossl_finish,
Matt Caswell0f113f32015-01-22 03:40:55 +000044 RSA_FLAG_FIPS_METHOD, /* flags */
45 NULL,
46 0, /* rsa_sign */
47 0, /* rsa_verify */
Paul Yang665d8992017-08-02 02:19:43 +080048 NULL, /* rsa_keygen */
49 NULL /* rsa_multi_prime_keygen */
Matt Caswell0f113f32015-01-22 03:40:55 +000050};
Ralf S. Engelschall58964a41998-12-21 10:56:39 +000051
Rich Salz076fc552017-04-07 12:07:42 -040052static const RSA_METHOD *default_RSA_meth = &rsa_pkcs1_ossl_meth;
53
54void RSA_set_default_method(const RSA_METHOD *meth)
55{
56 default_RSA_meth = meth;
57}
58
59const RSA_METHOD *RSA_get_default_method(void)
60{
61 return default_RSA_meth;
62}
63
Rich Salzb0700d22015-10-27 15:11:48 -040064const RSA_METHOD *RSA_PKCS1_OpenSSL(void)
Matt Caswell0f113f32015-01-22 03:40:55 +000065{
Rich Salzbf160552015-11-01 19:55:56 -050066 return &rsa_pkcs1_ossl_meth;
Matt Caswell0f113f32015-01-22 03:40:55 +000067}
Ralf S. Engelschall58964a41998-12-21 10:56:39 +000068
Rich Salz076fc552017-04-07 12:07:42 -040069const RSA_METHOD *RSA_null_method(void)
70{
71 return NULL;
72}
73
/*
 * RSA public-key encryption: pad |from| (|flen| bytes), then compute
 * m^e mod n and write the result, zero-padded to the modulus size, into
 * |to|. Returns the number of bytes written on success, -1 on error.
 * |to| must have room for BN_num_bytes(rsa->n) bytes.
 */
static int rsa_ossl_public_encrypt(int flen, const unsigned char *from,
                                   unsigned char *to, RSA *rsa, int padding)
{
    BIGNUM *f, *ret;
    int i, num = 0, r = -1;
    unsigned char *buf = NULL;
    BN_CTX *ctx = NULL;

    /* Reject oversized moduli before doing any work. */
    if (BN_num_bits(rsa->n) > OPENSSL_RSA_MAX_MODULUS_BITS) {
        ERR_raise(ERR_LIB_RSA, RSA_R_MODULUS_TOO_LARGE);
        return -1;
    }

    /* A sane public exponent is always smaller than the modulus. */
    if (BN_ucmp(rsa->n, rsa->e) <= 0) {
        ERR_raise(ERR_LIB_RSA, RSA_R_BAD_E_VALUE);
        return -1;
    }

    /* for large moduli, enforce exponent limit */
    if (BN_num_bits(rsa->n) > OPENSSL_RSA_SMALL_MODULUS_BITS) {
        if (BN_num_bits(rsa->e) > OPENSSL_RSA_MAX_PUBEXP_BITS) {
            ERR_raise(ERR_LIB_RSA, RSA_R_BAD_E_VALUE);
            return -1;
        }
    }

    if ((ctx = BN_CTX_new_ex(rsa->libctx)) == NULL)
        goto err;
    BN_CTX_start(ctx);
    f = BN_CTX_get(ctx);
    ret = BN_CTX_get(ctx);
    num = BN_num_bytes(rsa->n);
    buf = OPENSSL_malloc(num);
    /* Checking |ret| covers |f| too: BN_CTX_get fails sticky once it fails. */
    if (ret == NULL || buf == NULL) {
        ERR_raise(ERR_LIB_RSA, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    /* Apply the requested encryption padding into |buf| (modulus-sized). */
    switch (padding) {
    case RSA_PKCS1_PADDING:
        i = ossl_rsa_padding_add_PKCS1_type_2_ex(rsa->libctx, buf, num,
                                                 from, flen);
        break;
    case RSA_PKCS1_OAEP_PADDING:
        /* Default (NULL) label and digests: SHA-1 per the OAEP defaults. */
        i = ossl_rsa_padding_add_PKCS1_OAEP_mgf1_ex(rsa->libctx, buf, num,
                                                    from, flen, NULL, 0,
                                                    NULL, NULL);
        break;
    case RSA_NO_PADDING:
        i = RSA_padding_add_none(buf, num, from, flen);
        break;
    default:
        ERR_raise(ERR_LIB_RSA, RSA_R_UNKNOWN_PADDING_TYPE);
        goto err;
    }
    if (i <= 0)
        goto err;

    if (BN_bin2bn(buf, num, f) == NULL)
        goto err;

    if (BN_ucmp(f, rsa->n) >= 0) {
        /* usually the padding functions would catch this */
        ERR_raise(ERR_LIB_RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS);
        goto err;
    }

    /* Lazily set up (and cache) the Montgomery context for n. */
    if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
        if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, rsa->lock,
                                    rsa->n, ctx))
            goto err;

    /* ret = f^e mod n */
    if (!rsa->meth->bn_mod_exp(ret, f, rsa->e, rsa->n, ctx,
                               rsa->_method_mod_n))
        goto err;

    /*
     * BN_bn2binpad puts in leading 0 bytes if the number is less than
     * the length of the modulus.
     */
    r = BN_bn2binpad(ret, to, num);
 err:
    BN_CTX_end(ctx);
    BN_CTX_free(ctx);
    /* |buf| held padded plaintext: wipe before freeing. */
    OPENSSL_clear_free(buf, num);
    return r;
}
Ralf S. Engelschall58964a41998-12-21 10:56:39 +0000161
/*
 * Fetch (lazily creating) the blinding structure for |rsa|, under the key's
 * lock. On success *local is set to 1 if the returned BN_BLINDING belongs to
 * the calling thread (rsa->blinding), or 0 if the shared rsa->mt_blinding is
 * returned and accesses to it must be serialized. Returns NULL on error.
 */
static BN_BLINDING *rsa_get_blinding(RSA *rsa, int *local, BN_CTX *ctx)
{
    BN_BLINDING *ret;

    if (!CRYPTO_THREAD_write_lock(rsa->lock))
        return NULL;

    if (rsa->blinding == NULL) {
        rsa->blinding = RSA_setup_blinding(rsa, ctx);
    }

    ret = rsa->blinding;
    if (ret == NULL)
        goto err;

    if (BN_BLINDING_is_current_thread(ret)) {
        /* rsa->blinding is ours! */

        *local = 1;
    } else {
        /* resort to rsa->mt_blinding instead */

        /*
         * instructs rsa_blinding_convert(), rsa_blinding_invert() that the
         * BN_BLINDING is shared, meaning that accesses require locks, and
         * that the blinding factor must be stored outside the BN_BLINDING
         */
        *local = 0;

        if (rsa->mt_blinding == NULL) {
            rsa->mt_blinding = RSA_setup_blinding(rsa, ctx);
        }
        ret = rsa->mt_blinding;
    }

 err:
    CRYPTO_THREAD_unlock(rsa->lock);
    return ret;
}
201
Bodo Möllere5641d72011-10-19 14:59:27 +0000202static int rsa_blinding_convert(BN_BLINDING *b, BIGNUM *f, BIGNUM *unblind,
Matt Caswell0f113f32015-01-22 03:40:55 +0000203 BN_CTX *ctx)
204{
Paul Yang90862ab2017-08-23 01:36:49 +0800205 if (unblind == NULL) {
Matt Caswell0f113f32015-01-22 03:40:55 +0000206 /*
207 * Local blinding: store the unblinding factor in BN_BLINDING.
208 */
209 return BN_BLINDING_convert_ex(f, NULL, b, ctx);
Paul Yang90862ab2017-08-23 01:36:49 +0800210 } else {
Matt Caswell0f113f32015-01-22 03:40:55 +0000211 /*
212 * Shared blinding: store the unblinding factor outside BN_BLINDING.
213 */
214 int ret;
Alessandro Ghedini0b1a07c2016-03-08 22:37:01 +0000215
Jiasheng Jiangaefbcde2022-02-05 18:00:51 +0800216 if (!BN_BLINDING_lock(b))
217 return 0;
218
Matt Caswell0f113f32015-01-22 03:40:55 +0000219 ret = BN_BLINDING_convert_ex(f, unblind, b, ctx);
Alessandro Ghedini0b1a07c2016-03-08 22:37:01 +0000220 BN_BLINDING_unlock(b);
221
Matt Caswell0f113f32015-01-22 03:40:55 +0000222 return ret;
223 }
224}
Bodo Möllere5641d72011-10-19 14:59:27 +0000225
/*
 * Remove the blinding from |f| in place. Counterpart of
 * rsa_blinding_convert(); returns 1 on success, 0 on failure.
 */
static int rsa_blinding_invert(BN_BLINDING *b, BIGNUM *f, BIGNUM *unblind,
                               BN_CTX *ctx)
{
    /*
     * For local blinding, unblind is set to NULL, and BN_BLINDING_invert_ex
     * will use the unblinding factor stored in BN_BLINDING. If BN_BLINDING
     * is shared between threads, unblind must be non-null:
     * BN_BLINDING_invert_ex will then use the local unblinding factor, and
     * will only read the modulus from BN_BLINDING. In both cases it's safe
     * to access the blinding without a lock.
     */
    return BN_BLINDING_invert_ex(f, unblind, b, ctx);
}
Bodo Möller5679bcc2003-04-02 09:50:22 +0000239
/* signing */
/*
 * RSA private-key operation used for signing: pad |from| (|flen| bytes),
 * blind, compute m^d mod n (via CRT when the full private key is present),
 * unblind, and write the zero-padded result to |to|. Returns the number of
 * bytes written (the modulus size) on success, -1 on error.
 */
static int rsa_ossl_private_encrypt(int flen, const unsigned char *from,
                                    unsigned char *to, RSA *rsa, int padding)
{
    BIGNUM *f, *ret, *res;
    int i, num = 0, r = -1;
    unsigned char *buf = NULL;
    BN_CTX *ctx = NULL;
    int local_blinding = 0;
    /*
     * Used only if the blinding structure is shared. A non-NULL unblind
     * instructs rsa_blinding_convert() and rsa_blinding_invert() to store
     * the unblinding factor outside the blinding structure.
     */
    BIGNUM *unblind = NULL;
    BN_BLINDING *blinding = NULL;

    if ((ctx = BN_CTX_new_ex(rsa->libctx)) == NULL)
        goto err;
    BN_CTX_start(ctx);
    f = BN_CTX_get(ctx);
    ret = BN_CTX_get(ctx);
    num = BN_num_bytes(rsa->n);
    buf = OPENSSL_malloc(num);
    /* Checking |ret| covers |f| too: BN_CTX_get fails sticky once it fails. */
    if (ret == NULL || buf == NULL) {
        ERR_raise(ERR_LIB_RSA, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    /* Apply the requested signature padding into |buf| (modulus-sized). */
    switch (padding) {
    case RSA_PKCS1_PADDING:
        i = RSA_padding_add_PKCS1_type_1(buf, num, from, flen);
        break;
    case RSA_X931_PADDING:
        i = RSA_padding_add_X931(buf, num, from, flen);
        break;
    case RSA_NO_PADDING:
        i = RSA_padding_add_none(buf, num, from, flen);
        break;
    default:
        ERR_raise(ERR_LIB_RSA, RSA_R_UNKNOWN_PADDING_TYPE);
        goto err;
    }
    if (i <= 0)
        goto err;

    if (BN_bin2bn(buf, num, f) == NULL)
        goto err;

    if (BN_ucmp(f, rsa->n) >= 0) {
        /* usually the padding functions would catch this */
        ERR_raise(ERR_LIB_RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS);
        goto err;
    }

    /* Cache the Montgomery context for n; also needed by blinding setup. */
    if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
        if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, rsa->lock,
                                    rsa->n, ctx))
            goto err;

    /* Blinding defends the private operation against timing attacks. */
    if (!(rsa->flags & RSA_FLAG_NO_BLINDING)) {
        blinding = rsa_get_blinding(rsa, &local_blinding, ctx);
        if (blinding == NULL) {
            ERR_raise(ERR_LIB_RSA, ERR_R_INTERNAL_ERROR);
            goto err;
        }
    }

    if (blinding != NULL) {
        /* Shared blinding needs a per-call home for the unblinding factor. */
        if (!local_blinding && ((unblind = BN_CTX_get(ctx)) == NULL)) {
            ERR_raise(ERR_LIB_RSA, ERR_R_MALLOC_FAILURE);
            goto err;
        }
        if (!rsa_blinding_convert(blinding, f, unblind, ctx))
            goto err;
    }

    /* Use CRT (rsa_mod_exp) whenever the full private key is available. */
    if ((rsa->flags & RSA_FLAG_EXT_PKEY) ||
        (rsa->version == RSA_ASN1_VERSION_MULTI) ||
        ((rsa->p != NULL) &&
         (rsa->q != NULL) &&
         (rsa->dmp1 != NULL) && (rsa->dmq1 != NULL) && (rsa->iqmp != NULL))) {
        if (!rsa->meth->rsa_mod_exp(ret, f, rsa, ctx))
            goto err;
    } else {
        /* Plain m^d mod n with a constant-time copy of d. */
        BIGNUM *d = BN_new();
        if (d == NULL) {
            ERR_raise(ERR_LIB_RSA, ERR_R_MALLOC_FAILURE);
            goto err;
        }
        if (rsa->d == NULL) {
            ERR_raise(ERR_LIB_RSA, RSA_R_MISSING_PRIVATE_KEY);
            BN_free(d);
            goto err;
        }
        BN_with_flags(d, rsa->d, BN_FLG_CONSTTIME);

        if (!rsa->meth->bn_mod_exp(ret, f, d, rsa->n, ctx,
                                   rsa->_method_mod_n)) {
            BN_free(d);
            goto err;
        }
        /* We MUST free d before any further use of rsa->d */
        BN_free(d);
    }

    if (blinding)
        if (!rsa_blinding_invert(blinding, ret, unblind, ctx))
            goto err;

    if (padding == RSA_X931_PADDING) {
        /* X9.31: emit min(s, n - s); reuse |f| for n - s. */
        if (!BN_sub(f, rsa->n, ret))
            goto err;
        if (BN_cmp(ret, f) > 0)
            res = f;
        else
            res = ret;
    } else {
        res = ret;
    }

    /*
     * BN_bn2binpad puts in leading 0 bytes if the number is less than
     * the length of the modulus.
     */
    r = BN_bn2binpad(res, to, num);
 err:
    BN_CTX_end(ctx);
    BN_CTX_free(ctx);
    /* |buf| held the padded message: wipe before freeing. */
    OPENSSL_clear_free(buf, num);
    return r;
}
Ralf S. Engelschall58964a41998-12-21 10:56:39 +0000372
/*
 * RSA private-key decryption: compute c^d mod n (blinded; CRT when the full
 * private key is present), then strip the requested padding. Returns the
 * plaintext length on success, -1 on error. The padding check and error
 * reporting are arranged to be constant-time to avoid Bleichenbacher-style
 * padding oracles.
 */
static int rsa_ossl_private_decrypt(int flen, const unsigned char *from,
                                    unsigned char *to, RSA *rsa, int padding)
{
    BIGNUM *f, *ret;
    int j, num = 0, r = -1;
    unsigned char *buf = NULL;
    BN_CTX *ctx = NULL;
    int local_blinding = 0;
    /*
     * Used only if the blinding structure is shared. A non-NULL unblind
     * instructs rsa_blinding_convert() and rsa_blinding_invert() to store
     * the unblinding factor outside the blinding structure.
     */
    BIGNUM *unblind = NULL;
    BN_BLINDING *blinding = NULL;

    if ((ctx = BN_CTX_new_ex(rsa->libctx)) == NULL)
        goto err;
    BN_CTX_start(ctx);
    f = BN_CTX_get(ctx);
    ret = BN_CTX_get(ctx);
    num = BN_num_bytes(rsa->n);
    buf = OPENSSL_malloc(num);
    /* Checking |ret| covers |f| too: BN_CTX_get fails sticky once it fails. */
    if (ret == NULL || buf == NULL) {
        ERR_raise(ERR_LIB_RSA, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    /*
     * This check was for equality but PGP does evil things and chops off the
     * top '0' bytes
     */
    if (flen > num) {
        ERR_raise(ERR_LIB_RSA, RSA_R_DATA_GREATER_THAN_MOD_LEN);
        goto err;
    }

    /* make data into a big number */
    if (BN_bin2bn(from, (int)flen, f) == NULL)
        goto err;

    if (BN_ucmp(f, rsa->n) >= 0) {
        ERR_raise(ERR_LIB_RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS);
        goto err;
    }

    /* Blinding defends the private operation against timing attacks. */
    if (!(rsa->flags & RSA_FLAG_NO_BLINDING)) {
        blinding = rsa_get_blinding(rsa, &local_blinding, ctx);
        if (blinding == NULL) {
            ERR_raise(ERR_LIB_RSA, ERR_R_INTERNAL_ERROR);
            goto err;
        }
    }

    if (blinding != NULL) {
        /* Shared blinding needs a per-call home for the unblinding factor. */
        if (!local_blinding && ((unblind = BN_CTX_get(ctx)) == NULL)) {
            ERR_raise(ERR_LIB_RSA, ERR_R_MALLOC_FAILURE);
            goto err;
        }
        if (!rsa_blinding_convert(blinding, f, unblind, ctx))
            goto err;
    }

    /* do the decrypt */
    if ((rsa->flags & RSA_FLAG_EXT_PKEY) ||
        (rsa->version == RSA_ASN1_VERSION_MULTI) ||
        ((rsa->p != NULL) &&
         (rsa->q != NULL) &&
         (rsa->dmp1 != NULL) && (rsa->dmq1 != NULL) && (rsa->iqmp != NULL))) {
        if (!rsa->meth->rsa_mod_exp(ret, f, rsa, ctx))
            goto err;
    } else {
        /* Plain c^d mod n with a constant-time copy of d. */
        BIGNUM *d = BN_new();
        if (d == NULL) {
            ERR_raise(ERR_LIB_RSA, ERR_R_MALLOC_FAILURE);
            goto err;
        }
        if (rsa->d == NULL) {
            ERR_raise(ERR_LIB_RSA, RSA_R_MISSING_PRIVATE_KEY);
            BN_free(d);
            goto err;
        }
        BN_with_flags(d, rsa->d, BN_FLG_CONSTTIME);

        if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
            if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, rsa->lock,
                                        rsa->n, ctx)) {
                BN_free(d);
                goto err;
            }
        if (!rsa->meth->bn_mod_exp(ret, f, d, rsa->n, ctx,
                                   rsa->_method_mod_n)) {
            BN_free(d);
            goto err;
        }
        /* We MUST free d before any further use of rsa->d */
        BN_free(d);
    }

    if (blinding)
        if (!rsa_blinding_invert(blinding, ret, unblind, ctx))
            goto err;

    /* Serialize, zero-padded to the modulus size, for the padding check. */
    j = BN_bn2binpad(ret, buf, num);
    if (j < 0)
        goto err;

    switch (padding) {
    case RSA_PKCS1_PADDING:
        r = RSA_padding_check_PKCS1_type_2(to, num, buf, j, num);
        break;
    case RSA_PKCS1_OAEP_PADDING:
        r = RSA_padding_check_PKCS1_OAEP(to, num, buf, j, num, NULL, 0);
        break;
    case RSA_NO_PADDING:
        memcpy(to, buf, (r = j));
        break;
    default:
        ERR_raise(ERR_LIB_RSA, RSA_R_UNKNOWN_PADDING_TYPE);
        goto err;
    }
#ifndef FIPS_MODULE
    /*
     * This trick doesn't work in the FIPS provider because libcrypto manages
     * the error stack. Instead we opt not to put an error on the stack at all
     * in case of padding failure in the FIPS provider.
     */
    /*
     * Raise the error unconditionally, then erase it in constant time when
     * r >= 0, so success and failure take the same code path.
     */
    ERR_raise(ERR_LIB_RSA, RSA_R_PADDING_CHECK_FAILED);
    err_clear_last_constant_time(1 & ~constant_time_msb(r));
#endif

 err:
    BN_CTX_end(ctx);
    BN_CTX_free(ctx);
    /* |buf| held the recovered (padded) plaintext: wipe before freeing. */
    OPENSSL_clear_free(buf, num);
    return r;
}
Ralf S. Engelschall58964a41998-12-21 10:56:39 +0000510
/* signature verification */
/*
 * RSA public-key operation used for signature verification: compute
 * s^e mod n and strip the requested padding from the result. Returns the
 * recovered data length on success, -1 on error.
 */
static int rsa_ossl_public_decrypt(int flen, const unsigned char *from,
                                   unsigned char *to, RSA *rsa, int padding)
{
    BIGNUM *f, *ret;
    int i, num = 0, r = -1;
    unsigned char *buf = NULL;
    BN_CTX *ctx = NULL;

    /* Reject oversized moduli before doing any work. */
    if (BN_num_bits(rsa->n) > OPENSSL_RSA_MAX_MODULUS_BITS) {
        ERR_raise(ERR_LIB_RSA, RSA_R_MODULUS_TOO_LARGE);
        return -1;
    }

    /* A sane public exponent is always smaller than the modulus. */
    if (BN_ucmp(rsa->n, rsa->e) <= 0) {
        ERR_raise(ERR_LIB_RSA, RSA_R_BAD_E_VALUE);
        return -1;
    }

    /* for large moduli, enforce exponent limit */
    if (BN_num_bits(rsa->n) > OPENSSL_RSA_SMALL_MODULUS_BITS) {
        if (BN_num_bits(rsa->e) > OPENSSL_RSA_MAX_PUBEXP_BITS) {
            ERR_raise(ERR_LIB_RSA, RSA_R_BAD_E_VALUE);
            return -1;
        }
    }

    if ((ctx = BN_CTX_new_ex(rsa->libctx)) == NULL)
        goto err;
    BN_CTX_start(ctx);
    f = BN_CTX_get(ctx);
    ret = BN_CTX_get(ctx);
    num = BN_num_bytes(rsa->n);
    buf = OPENSSL_malloc(num);
    /* Checking |ret| covers |f| too: BN_CTX_get fails sticky once it fails. */
    if (ret == NULL || buf == NULL) {
        ERR_raise(ERR_LIB_RSA, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    /*
     * This check was for equality but PGP does evil things and chops off the
     * top '0' bytes
     */
    if (flen > num) {
        ERR_raise(ERR_LIB_RSA, RSA_R_DATA_GREATER_THAN_MOD_LEN);
        goto err;
    }

    if (BN_bin2bn(from, flen, f) == NULL)
        goto err;

    if (BN_ucmp(f, rsa->n) >= 0) {
        ERR_raise(ERR_LIB_RSA, RSA_R_DATA_TOO_LARGE_FOR_MODULUS);
        goto err;
    }

    /* Lazily set up (and cache) the Montgomery context for n. */
    if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
        if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, rsa->lock,
                                    rsa->n, ctx))
            goto err;

    /* ret = f^e mod n */
    if (!rsa->meth->bn_mod_exp(ret, f, rsa->e, rsa->n, ctx,
                               rsa->_method_mod_n))
        goto err;

    /*
     * X9.31 signatures end in 0xc (12); if this one doesn't, the signer
     * emitted n - s, so undo that here before the padding check.
     */
    if ((padding == RSA_X931_PADDING) && ((bn_get_words(ret)[0] & 0xf) != 12))
        if (!BN_sub(ret, rsa->n, ret))
            goto err;

    /* Serialize, zero-padded to the modulus size, for the padding check. */
    i = BN_bn2binpad(ret, buf, num);
    if (i < 0)
        goto err;

    switch (padding) {
    case RSA_PKCS1_PADDING:
        r = RSA_padding_check_PKCS1_type_1(to, num, buf, i, num);
        break;
    case RSA_X931_PADDING:
        r = RSA_padding_check_X931(to, num, buf, i, num);
        break;
    case RSA_NO_PADDING:
        memcpy(to, buf, (r = i));
        break;
    default:
        ERR_raise(ERR_LIB_RSA, RSA_R_UNKNOWN_PADDING_TYPE);
        goto err;
    }
    if (r < 0)
        ERR_raise(ERR_LIB_RSA, RSA_R_PADDING_CHECK_FAILED);

 err:
    BN_CTX_end(ctx);
    BN_CTX_free(ctx);
    OPENSSL_clear_free(buf, num);
    return r;
}
Ralf S. Engelschall58964a41998-12-21 10:56:39 +0000607
/*
 * Private-key modular exponentiation r0 = I^d mod n using the CRT:
 * compute I^dmp1 mod p and I^dmq1 mod q, then recombine via iqmp
 * (Garner's algorithm).  The result is verified against the public
 * exponent when available; on mismatch a plain (non-CRT) mod_exp is
 * used instead so a miscomputed CRT value is never leaked (fault-attack
 * countermeasure).  Returns 1 on success, 0 on error.
 *
 * Timing note: statement order, BN_FLG_CONSTTIME temporaries and the
 * *_fixed_top helpers are deliberate; do not "simplify" them.
 */
static int rsa_ossl_mod_exp(BIGNUM *r0, const BIGNUM *I, RSA *rsa, BN_CTX *ctx)
{
    BIGNUM *r1, *m1, *vrfy;
    int ret = 0, smooth = 0;
#ifndef FIPS_MODULE
    /* Multi-prime (more than two factors) support is excluded from FIPS. */
    BIGNUM *r2, *m[RSA_MAX_PRIME_NUM - 2];
    int i, ex_primes = 0;
    RSA_PRIME_INFO *pinfo;
#endif

    BN_CTX_start(ctx);

    r1 = BN_CTX_get(ctx);
#ifndef FIPS_MODULE
    r2 = BN_CTX_get(ctx);
#endif
    m1 = BN_CTX_get(ctx);
    vrfy = BN_CTX_get(ctx);
    /* BN_CTX_get returns NULL for all later calls once one fails, so
     * checking only the last allocation covers the whole group. */
    if (vrfy == NULL)
        goto err;

#ifndef FIPS_MODULE
    /* For multi-prime keys, count the extra primes and sanity-check them. */
    if (rsa->version == RSA_ASN1_VERSION_MULTI
        && ((ex_primes = sk_RSA_PRIME_INFO_num(rsa->prime_infos)) <= 0
             || ex_primes > RSA_MAX_PRIME_NUM - 2))
        goto err;
#endif

    if (rsa->flags & RSA_FLAG_CACHE_PRIVATE) {
        BIGNUM *factor = BN_new();

        if (factor == NULL)
            goto err;

        /*
         * Make sure BN_mod_inverse in Montgomery initialization uses the
         * BN_FLG_CONSTTIME flag
         */
        if (!(BN_with_flags(factor, rsa->p, BN_FLG_CONSTTIME),
              BN_MONT_CTX_set_locked(&rsa->_method_mod_p, rsa->lock,
                                     factor, ctx))
            || !(BN_with_flags(factor, rsa->q, BN_FLG_CONSTTIME),
                 BN_MONT_CTX_set_locked(&rsa->_method_mod_q, rsa->lock,
                                        factor, ctx))) {
            BN_free(factor);
            goto err;
        }
#ifndef FIPS_MODULE
        /* Cache a Montgomery context for each extra prime as well. */
        for (i = 0; i < ex_primes; i++) {
            pinfo = sk_RSA_PRIME_INFO_value(rsa->prime_infos, i);
            BN_with_flags(factor, pinfo->r, BN_FLG_CONSTTIME);
            if (!BN_MONT_CTX_set_locked(&pinfo->m, rsa->lock, factor, ctx)) {
                BN_free(factor);
                goto err;
            }
        }
#endif
        /*
         * We MUST free |factor| before any further use of the prime factors
         */
        BN_free(factor);

        /*
         * "smooth" selects the fully constant-time Montgomery path below;
         * it requires the default exponentiation method, a two-prime key,
         * and p and q of equal bit length.
         */
        smooth = (rsa->meth->bn_mod_exp == BN_mod_exp_mont)
#ifndef FIPS_MODULE
                 && (ex_primes == 0)
#endif
                 && (BN_num_bits(rsa->q) == BN_num_bits(rsa->p));
    }

    if (rsa->flags & RSA_FLAG_CACHE_PUBLIC)
        if (!BN_MONT_CTX_set_locked(&rsa->_method_mod_n, rsa->lock,
                                    rsa->n, ctx))
            goto err;

    if (smooth) {
        /*
         * Conversion from Montgomery domain, a.k.a. Montgomery reduction,
         * accepts values in [0-m*2^w) range. w is m's bit width rounded up
         * to limb width. So that at the very least if |I| is fully reduced,
         * i.e. less than p*q, we can count on from-to round to perform
         * below modulo operations on |I|. Unlike BN_mod it's constant time.
         */
        if (/* m1 = I mod q */
            !bn_from_mont_fixed_top(m1, I, rsa->_method_mod_q, ctx)
            || !bn_to_mont_fixed_top(m1, m1, rsa->_method_mod_q, ctx)
            /* r1 = I mod p */
            || !bn_from_mont_fixed_top(r1, I, rsa->_method_mod_p, ctx)
            || !bn_to_mont_fixed_top(r1, r1, rsa->_method_mod_p, ctx)
            /*
             * Use parallel exponentiations optimization if possible,
             * otherwise fallback to two sequential exponentiations:
             *    m1 = m1^dmq1 mod q
             *    r1 = r1^dmp1 mod p
             */
            || !BN_mod_exp_mont_consttime_x2(m1, m1, rsa->dmq1, rsa->q,
                                             rsa->_method_mod_q,
                                             r1, r1, rsa->dmp1, rsa->p,
                                             rsa->_method_mod_p,
                                             ctx)
            /* r1 = (r1 - m1) mod p */
            /*
             * bn_mod_sub_fixed_top is not regular modular subtraction,
             * it can tolerate subtrahend to be larger than modulus, but
             * not bit-wise wider. This makes up for uncommon q>p case,
             * when |m1| can be larger than |rsa->p|.
             */
            || !bn_mod_sub_fixed_top(r1, r1, m1, rsa->p)

            /* r1 = r1 * iqmp mod p */
            || !bn_to_mont_fixed_top(r1, r1, rsa->_method_mod_p, ctx)
            || !bn_mul_mont_fixed_top(r1, r1, rsa->iqmp, rsa->_method_mod_p,
                                      ctx)
            /* r0 = r1 * q + m1 */
            || !bn_mul_fixed_top(r0, r1, rsa->q, ctx)
            || !bn_mod_add_fixed_top(r0, r0, m1, rsa->n))
            goto err;

        goto tail;
    }

    /* --- generic (non-smooth) CRT path --- */

    /* compute I mod q */
    {
        /* |c| is a CONSTTIME-flagged view of I; it owns no bignum data. */
        BIGNUM *c = BN_new();
        if (c == NULL)
            goto err;
        BN_with_flags(c, I, BN_FLG_CONSTTIME);

        if (!BN_mod(r1, c, rsa->q, ctx)) {
            BN_free(c);
            goto err;
        }

        {
            BIGNUM *dmq1 = BN_new();
            if (dmq1 == NULL) {
                BN_free(c);
                goto err;
            }
            BN_with_flags(dmq1, rsa->dmq1, BN_FLG_CONSTTIME);

            /* compute r1^dmq1 mod q */
            if (!rsa->meth->bn_mod_exp(m1, r1, dmq1, rsa->q, ctx,
                                       rsa->_method_mod_q)) {
                BN_free(c);
                BN_free(dmq1);
                goto err;
            }
            /* We MUST free dmq1 before any further use of rsa->dmq1 */
            BN_free(dmq1);
        }

        /* compute I mod p */
        if (!BN_mod(r1, c, rsa->p, ctx)) {
            BN_free(c);
            goto err;
        }
        /* We MUST free c before any further use of I */
        BN_free(c);
    }

    {
        BIGNUM *dmp1 = BN_new();
        if (dmp1 == NULL)
            goto err;
        BN_with_flags(dmp1, rsa->dmp1, BN_FLG_CONSTTIME);

        /* compute r1^dmp1 mod p */
        if (!rsa->meth->bn_mod_exp(r0, r1, dmp1, rsa->p, ctx,
                                   rsa->_method_mod_p)) {
            BN_free(dmp1);
            goto err;
        }
        /* We MUST free dmp1 before any further use of rsa->dmp1 */
        BN_free(dmp1);
    }

#ifndef FIPS_MODULE
    /* Multi-prime: compute m_i = (I mod r_i)^d_i mod r_i for each extra prime. */
    if (ex_primes > 0) {
        BIGNUM *di = BN_new(), *cc = BN_new();

        if (cc == NULL || di == NULL) {
            BN_free(cc);
            BN_free(di);
            goto err;
        }

        for (i = 0; i < ex_primes; i++) {
            /* prepare m_i */
            if ((m[i] = BN_CTX_get(ctx)) == NULL) {
                BN_free(cc);
                BN_free(di);
                goto err;
            }

            pinfo = sk_RSA_PRIME_INFO_value(rsa->prime_infos, i);

            /* prepare c and d_i */
            BN_with_flags(cc, I, BN_FLG_CONSTTIME);
            BN_with_flags(di, pinfo->d, BN_FLG_CONSTTIME);

            if (!BN_mod(r1, cc, pinfo->r, ctx)) {
                BN_free(cc);
                BN_free(di);
                goto err;
            }
            /* compute r1 ^ d_i mod r_i */
            if (!rsa->meth->bn_mod_exp(m[i], r1, di, pinfo->r, ctx, pinfo->m)) {
                BN_free(cc);
                BN_free(di);
                goto err;
            }
        }

        BN_free(cc);
        BN_free(di);
    }
#endif

    /* Garner recombination: r0 = m1 + q * ((r0 - m1) * iqmp mod p) */
    if (!BN_sub(r0, r0, m1))
        goto err;
    /*
     * This will help stop the size of r0 increasing, which does affect the
     * multiply if it optimised for a power of 2 size
     */
    if (BN_is_negative(r0))
        if (!BN_add(r0, r0, rsa->p))
            goto err;

    if (!BN_mul(r1, r0, rsa->iqmp, ctx))
        goto err;

    {
        /* CONSTTIME view of r1 for the reduction mod p. */
        BIGNUM *pr1 = BN_new();
        if (pr1 == NULL)
            goto err;
        BN_with_flags(pr1, r1, BN_FLG_CONSTTIME);

        if (!BN_mod(r0, pr1, rsa->p, ctx)) {
            BN_free(pr1);
            goto err;
        }
        /* We MUST free pr1 before any further use of r1 */
        BN_free(pr1);
    }

    /*
     * If p < q it is occasionally possible for the correction of adding 'p'
     * if r0 is negative above to leave the result still negative. This can
     * break the private key operations: the following second correction
     * should *always* correct this rare occurrence. This will *never* happen
     * with OpenSSL generated keys because they ensure p > q [steve]
     */
    if (BN_is_negative(r0))
        if (!BN_add(r0, r0, rsa->p))
            goto err;
    if (!BN_mul(r1, r0, rsa->q, ctx))
        goto err;
    if (!BN_add(r0, r1, m1))
        goto err;

#ifndef FIPS_MODULE
    /* add m_i to m in multi-prime case */
    if (ex_primes > 0) {
        BIGNUM *pr2 = BN_new();

        if (pr2 == NULL)
            goto err;

        for (i = 0; i < ex_primes; i++) {
            pinfo = sk_RSA_PRIME_INFO_value(rsa->prime_infos, i);
            if (!BN_sub(r1, m[i], r0)) {
                BN_free(pr2);
                goto err;
            }

            if (!BN_mul(r2, r1, pinfo->t, ctx)) {
                BN_free(pr2);
                goto err;
            }

            BN_with_flags(pr2, r2, BN_FLG_CONSTTIME);

            if (!BN_mod(r1, pr2, pinfo->r, ctx)) {
                BN_free(pr2);
                goto err;
            }

            if (BN_is_negative(r1))
                if (!BN_add(r1, r1, pinfo->r)) {
                    BN_free(pr2);
                    goto err;
                }
            if (!BN_mul(r1, r1, pinfo->pp, ctx)) {
                BN_free(pr2);
                goto err;
            }
            if (!BN_add(r0, r0, r1)) {
                BN_free(pr2);
                goto err;
            }
        }
        BN_free(pr2);
    }
#endif

 tail:
    /* Verify the CRT result with the public key, when e and n are present. */
    if (rsa->e && rsa->n) {
        if (rsa->meth->bn_mod_exp == BN_mod_exp_mont) {
            if (!BN_mod_exp_mont(vrfy, r0, rsa->e, rsa->n, ctx,
                                 rsa->_method_mod_n))
                goto err;
        } else {
            /* Custom methods may not tolerate fixed-top inputs. */
            bn_correct_top(r0);
            if (!rsa->meth->bn_mod_exp(vrfy, r0, rsa->e, rsa->n, ctx,
                                       rsa->_method_mod_n))
                goto err;
        }
        /*
         * If 'I' was greater than (or equal to) rsa->n, the operation will
         * be equivalent to using 'I mod n'. However, the result of the
         * verify will *always* be less than 'n' so we don't check for
         * absolute equality, just congruency.
         */
        if (!BN_sub(vrfy, vrfy, I))
            goto err;
        if (BN_is_zero(vrfy)) {
            bn_correct_top(r0);
            ret = 1;
            goto err;           /* not actually error */
        }
        if (!BN_mod(vrfy, vrfy, rsa->n, ctx))
            goto err;
        if (BN_is_negative(vrfy))
            if (!BN_add(vrfy, vrfy, rsa->n))
                goto err;
        if (!BN_is_zero(vrfy)) {
            /*
             * 'I' and 'vrfy' aren't congruent mod n. Don't leak
             * miscalculated CRT output, just do a raw (slower) mod_exp and
             * return that instead.
             */

            BIGNUM *d = BN_new();
            if (d == NULL)
                goto err;
            BN_with_flags(d, rsa->d, BN_FLG_CONSTTIME);

            if (!rsa->meth->bn_mod_exp(r0, I, d, rsa->n, ctx,
                                       rsa->_method_mod_n)) {
                BN_free(d);
                goto err;
            }
            /* We MUST free d before any further use of rsa->d */
            BN_free(d);
        }
    }
    /*
     * It's unfortunate that we have to bn_correct_top(r0). What hopefully
     * saves the day is that correction is highly unlikely, and private key
     * operations are customarily performed on blinded message. Which means
     * that attacker won't observe correlation with chosen plaintext.
     * Secondly, remaining code would still handle it in same computational
     * time and even conceal memory access pattern around corrected top.
     */
    bn_correct_top(r0);
    ret = 1;
 err:
    BN_CTX_end(ctx);
    return ret;
}
Ralf S. Engelschall58964a41998-12-21 10:56:39 +0000978
Rich Salzbf160552015-11-01 19:55:56 -0500979static int rsa_ossl_init(RSA *rsa)
Matt Caswell0f113f32015-01-22 03:40:55 +0000980{
981 rsa->flags |= RSA_FLAG_CACHE_PUBLIC | RSA_FLAG_CACHE_PRIVATE;
Paul Yang8686c472017-08-23 01:25:23 +0800982 return 1;
Matt Caswell0f113f32015-01-22 03:40:55 +0000983}
Ralf S. Engelschall58964a41998-12-21 10:56:39 +0000984
Rich Salzbf160552015-11-01 19:55:56 -0500985static int rsa_ossl_finish(RSA *rsa)
Matt Caswell0f113f32015-01-22 03:40:55 +0000986{
Richard Levittef844f9e2020-04-13 22:34:56 +0200987#ifndef FIPS_MODULE
Paul Yang665d8992017-08-02 02:19:43 +0800988 int i;
989 RSA_PRIME_INFO *pinfo;
990
Paul Yang665d8992017-08-02 02:19:43 +0800991 for (i = 0; i < sk_RSA_PRIME_INFO_num(rsa->prime_infos); i++) {
992 pinfo = sk_RSA_PRIME_INFO_value(rsa->prime_infos, i);
993 BN_MONT_CTX_free(pinfo->m);
994 }
Matt Caswellafb638f2020-01-17 14:47:18 +0000995#endif
996
997 BN_MONT_CTX_free(rsa->_method_mod_n);
998 BN_MONT_CTX_free(rsa->_method_mod_p);
999 BN_MONT_CTX_free(rsa->_method_mod_q);
Paul Yang8686c472017-08-23 01:25:23 +08001000 return 1;
Matt Caswell0f113f32015-01-22 03:40:55 +00001001}