Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 1 | /* |
Matt Caswell | 6738bf1 | 2018-02-13 12:51:29 +0000 | [diff] [blame] | 2 | * Copyright 2015-2018 The OpenSSL Project Authors. All Rights Reserved. |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 3 | * |
Rich Salz | 846e33c | 2016-05-17 14:18:30 -0400 | [diff] [blame] | 4 | * Licensed under the OpenSSL license (the "License"). You may not use |
| 5 | * this file except in compliance with the License. You can obtain a copy |
| 6 | * in the file LICENSE in the source distribution or at |
| 7 | * https://www.openssl.org/source/license.html |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 8 | */ |
| 9 | |
Pauli | 198c42f | 2017-08-24 09:14:10 +1000 | [diff] [blame] | 10 | #include "internal/cryptlib.h" |
Pauli | 07016a8 | 2017-08-24 09:05:07 +1000 | [diff] [blame] | 11 | #include <openssl/rand.h> |
Matt Caswell | 8ba708e | 2015-09-11 10:48:59 +0100 | [diff] [blame] | 12 | #include "../ssl_locl.h" |
Matt Caswell | 61ae935 | 2015-09-11 11:23:20 +0100 | [diff] [blame] | 13 | #include "statem_locl.h" |
Matt Caswell | f9f674e | 2017-11-23 12:33:11 +0000 | [diff] [blame] | 14 | #include <assert.h> |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 15 | |
| 16 | /* |
| 17 | * This file implements the SSL/TLS/DTLS state machines. |
| 18 | * |
| 19 | * There are two primary state machines: |
| 20 | * |
| 21 | * 1) Message flow state machine |
| 22 | * 2) Handshake state machine |
| 23 | * |
| 24 | * The Message flow state machine controls the reading and sending of messages |
| 25 | * including handling of non-blocking IO events, flushing of the underlying |
| 26 | * write BIO, handling unexpected messages, etc. It is itself broken into two |
| 27 | * separate sub-state machines which control reading and writing respectively. |
| 28 | * |
| 29 | * The Handshake state machine keeps track of the current SSL/TLS handshake |
| 30 | * state. Transitions of the handshake state are the result of events that |
| 31 | * occur within the Message flow state machine. |
| 32 | * |
| 33 | * Overall it looks like this: |
| 34 | * |
| 35 | * --------------------------------------------- ------------------- |
| 36 | * | | | | |
| 37 | * | Message flow state machine | | | |
| 38 | * | | | | |
| 39 | * | -------------------- -------------------- | Transition | Handshake state | |
Matt Caswell | 61ae935 | 2015-09-11 11:23:20 +0100 | [diff] [blame] | 40 | * | | MSG_FLOW_READING | | MSG_FLOW_WRITING | | Event | machine | |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 41 | * | | sub-state | | sub-state | |----------->| | |
| 42 | * | | machine for | | machine for | | | | |
| 43 | * | | reading messages | | writing messages | | | | |
| 44 | * | -------------------- -------------------- | | | |
| 45 | * | | | | |
| 46 | * --------------------------------------------- ------------------- |
| 47 | * |
| 48 | */ |
| 49 | |
| 50 | /* Sub state machine return values */ |
Emilia Kasper | a230b26 | 2016-08-05 19:03:17 +0200 | [diff] [blame] | 51 | typedef enum { |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 52 | /* Something bad happened or NBIO */ |
| 53 | SUB_STATE_ERROR, |
| 54 | /* Sub state finished go to the next sub state */ |
| 55 | SUB_STATE_FINISHED, |
| 56 | /* Sub state finished and handshake was completed */ |
| 57 | SUB_STATE_END_HANDSHAKE |
Matt Caswell | d78052c | 2015-10-05 11:03:27 +0100 | [diff] [blame] | 58 | } SUB_STATE_RETURN; |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 59 | |
Matt Caswell | 8723588 | 2015-09-07 16:36:53 +0100 | [diff] [blame] | 60 | static int state_machine(SSL *s, int server); |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 61 | static void init_read_state_machine(SSL *s); |
Matt Caswell | d78052c | 2015-10-05 11:03:27 +0100 | [diff] [blame] | 62 | static SUB_STATE_RETURN read_state_machine(SSL *s); |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 63 | static void init_write_state_machine(SSL *s); |
Matt Caswell | d78052c | 2015-10-05 11:03:27 +0100 | [diff] [blame] | 64 | static SUB_STATE_RETURN write_state_machine(SSL *s); |
Matt Caswell | 49ae742 | 2015-09-08 09:13:50 +0100 | [diff] [blame] | 65 | |
Matt Caswell | 5998e29 | 2015-10-05 10:49:15 +0100 | [diff] [blame] | 66 | OSSL_HANDSHAKE_STATE SSL_get_state(const SSL *ssl) |
Matt Caswell | 49ae742 | 2015-09-08 09:13:50 +0100 | [diff] [blame] | 67 | { |
| 68 | return ssl->statem.hand_state; |
| 69 | } |
| 70 | |
Matt Caswell | 49ae742 | 2015-09-08 09:13:50 +0100 | [diff] [blame] | 71 | int SSL_in_init(SSL *s) |
| 72 | { |
| 73 | return s->statem.in_init; |
| 74 | } |
| 75 | |
| 76 | int SSL_is_init_finished(SSL *s) |
| 77 | { |
| 78 | return !(s->statem.in_init) && (s->statem.hand_state == TLS_ST_OK); |
| 79 | } |
| 80 | |
| 81 | int SSL_in_before(SSL *s) |
| 82 | { |
| 83 | /* |
| 84 | * Historically being "in before" meant before anything had happened. In the |
| 85 | * current code though we remain in the "before" state for a while after we |
| 86 | * have started the handshake process (e.g. as a server waiting for the |
| 87 | * first message to arrive). There "in before" is taken to mean "in before" |
| 88 | * and not started any handshake process yet. |
| 89 | */ |
| 90 | return (s->statem.hand_state == TLS_ST_BEFORE) |
| 91 | && (s->statem.state == MSG_FLOW_UNINITED); |
| 92 | } |
| 93 | |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 94 | /* |
| 95 | * Clear the state machine state and reset back to MSG_FLOW_UNINITED |
| 96 | */ |
Matt Caswell | fe3a329 | 2015-10-05 10:39:54 +0100 | [diff] [blame] | 97 | void ossl_statem_clear(SSL *s) |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 98 | { |
| 99 | s->statem.state = MSG_FLOW_UNINITED; |
Matt Caswell | 49ae742 | 2015-09-08 09:13:50 +0100 | [diff] [blame] | 100 | s->statem.hand_state = TLS_ST_BEFORE; |
| 101 | s->statem.in_init = 1; |
Matt Caswell | a71a496 | 2015-10-05 10:44:41 +0100 | [diff] [blame] | 102 | s->statem.no_cert_verify = 0; |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 103 | } |
| 104 | |
/*
 * Set the state machine up ready for a renegotiation handshake
 */
void ossl_statem_set_renegotiate(SSL *s)
{
    /* Re-enter init so the state machine runs again on the next I/O call */
    s->statem.in_init = 1;
    /*
     * Request that the write machine starts from the HelloRequest state
     * (TLS_ST_SW_HELLO_REQ is a server-write state; presumably this drives
     * server-initiated renegotiation — confirm against the write transitions,
     * which are not visible in this file section).
     */
    s->statem.request_state = TLS_ST_SW_HELLO_REQ;
}
| 113 | |
/*
 * Put the state machine into an error state and send an alert if appropriate.
 * This is a permanent error for the current connection.
 */
void ossl_statem_fatal(SSL *s, int al, int func, int reason, const char *file,
                       int line)
{
    /* We shouldn't call SSLfatal() twice. Once is enough */
    assert(s->statem.state != MSG_FLOW_ERROR);
    /* Mark the connection as in-handshake and permanently failed */
    s->statem.in_init = 1;
    s->statem.state = MSG_FLOW_ERROR;
    /* Record the error, with its source location, on the error queue */
    ERR_put_error(ERR_LIB_SSL, func, reason, file, line);
    /*
     * Send a fatal alert to the peer unless the caller explicitly asked for
     * no alert (SSL_AD_NO_ALERT) or the write encryption context is known to
     * be unusable (in which case the alert could not be sent correctly anyway)
     */
    if (al != SSL_AD_NO_ALERT && !s->statem.invalid_enc_write_ctx)
        ssl3_send_alert(s, SSL3_AL_FATAL, al);
}
| 129 | |
/*
 * This macro should only be called if we are already expecting to be in
 * a fatal error state. We verify that we are (in_init set and the message
 * flow machine in MSG_FLOW_ERROR), and set it if not (this would indicate a
 * bug: some earlier code path reported failure without calling SSLfatal()).
 */
#define check_fatal(s, f) \
    do { \
        if (!ossl_assert((s)->statem.in_init \
                         && (s)->statem.state == MSG_FLOW_ERROR)) \
            SSLfatal(s, SSL_AD_INTERNAL_ERROR, (f), \
                     SSL_R_MISSING_FATAL); \
    } while (0)
| 142 | |
| 143 | /* |
Matt Caswell | 49ae742 | 2015-09-08 09:13:50 +0100 | [diff] [blame] | 144 | * Discover whether the current connection is in the error state. |
| 145 | * |
| 146 | * Valid return values are: |
| 147 | * 1: Yes |
| 148 | * 0: No |
| 149 | */ |
Matt Caswell | fe3a329 | 2015-10-05 10:39:54 +0100 | [diff] [blame] | 150 | int ossl_statem_in_error(const SSL *s) |
Matt Caswell | 49ae742 | 2015-09-08 09:13:50 +0100 | [diff] [blame] | 151 | { |
| 152 | if (s->statem.state == MSG_FLOW_ERROR) |
| 153 | return 1; |
| 154 | |
| 155 | return 0; |
| 156 | } |
| 157 | |
/* Set (1) or clear (0) the flag saying a handshake is in progress. */
void ossl_statem_set_in_init(SSL *s, int init)
{
    s->statem.in_init = init;
}
| 162 | |
/*
 * Return the handshake nesting depth: state_machine() increments this on
 * entry and decrements it on exit, so nonzero means we are inside one or
 * more nested state_machine() invocations.
 */
int ossl_statem_get_in_handshake(SSL *s)
{
    return s->statem.in_handshake;
}
| 167 | |
| 168 | void ossl_statem_set_in_handshake(SSL *s, int inhand) |
| 169 | { |
| 170 | if (inhand) |
| 171 | s->statem.in_handshake++; |
| 172 | else |
| 173 | s->statem.in_handshake--; |
| 174 | } |
| 175 | |
Matt Caswell | 0a87d0a | 2017-02-20 16:35:03 +0000 | [diff] [blame] | 176 | /* Are we in a sensible state to skip over unreadable early data? */ |
| 177 | int ossl_statem_skip_early_data(SSL *s) |
| 178 | { |
Matt Caswell | 1ea4d09 | 2017-02-22 13:01:48 +0000 | [diff] [blame] | 179 | if (s->ext.early_data != SSL_EARLY_DATA_REJECTED) |
Matt Caswell | 0a87d0a | 2017-02-20 16:35:03 +0000 | [diff] [blame] | 180 | return 0; |
| 181 | |
Matt Caswell | d4504fe | 2017-07-14 14:50:48 +0100 | [diff] [blame] | 182 | if (!s->server || s->statem.hand_state != TLS_ST_EARLY_DATA) |
| 183 | return 0; |
Matt Caswell | 0a87d0a | 2017-02-20 16:35:03 +0000 | [diff] [blame] | 184 | |
| 185 | return 1; |
| 186 | } |
| 187 | |
/*
 * Called when we are in SSL_read*(), SSL_write*(), or SSL_accept()
 * /SSL_connect()/SSL_do_handshake(). Used to test whether we are in an early
 * data state and whether we should attempt to move the handshake on if so.
 * |sending| is 1 if we are attempting to send data (SSL_write*()), 0 if we are
 * attempting to read data (SSL_read*()), or -1 if we are in SSL_do_handshake()
 * or similar.
 */
void ossl_statem_check_finish_init(SSL *s, int sending)
{
    if (sending == -1) {
        /* SSL_connect()/SSL_do_handshake(): drive the handshake forwards */
        if (s->statem.hand_state == TLS_ST_PENDING_EARLY_DATA_END
                || s->statem.hand_state == TLS_ST_EARLY_DATA) {
            ossl_statem_set_in_init(s, 1);
            if (s->early_data_state == SSL_EARLY_DATA_WRITE_RETRY) {
                /*
                 * SSL_connect() or SSL_do_handshake() has been called directly.
                 * We don't allow any more writing of early data.
                 */
                s->early_data_state = SSL_EARLY_DATA_FINISHED_WRITING;
            }
        }
    } else if (!s->server) {
        /*
         * Client side: re-enter init if (a) we are writing while in an early
         * data handshake state and not actively in the middle of an early
         * data write, or (b) we are reading while still in TLS_ST_EARLY_DATA.
         */
        if ((sending && (s->statem.hand_state == TLS_ST_PENDING_EARLY_DATA_END
                         || s->statem.hand_state == TLS_ST_EARLY_DATA)
                     && s->early_data_state != SSL_EARLY_DATA_WRITING)
                || (!sending && s->statem.hand_state == TLS_ST_EARLY_DATA)) {
            ossl_statem_set_in_init(s, 1);
            /*
             * SSL_write() has been called directly. We don't allow any more
             * writing of early data.
             */
            if (sending && s->early_data_state == SSL_EARLY_DATA_WRITE_RETRY)
                s->early_data_state = SSL_EARLY_DATA_FINISHED_WRITING;
        }
    } else {
        /*
         * Server side: once we have finished reading early data but the
         * handshake state is still TLS_ST_EARLY_DATA, resume the handshake.
         */
        if (s->early_data_state == SSL_EARLY_DATA_FINISHED_READING
                && s->statem.hand_state == TLS_ST_EARLY_DATA)
            ossl_statem_set_in_init(s, 1);
    }
}
| 229 | |
/*
 * Restart the message flow machine while leaving the handshake state showing
 * that a ClientHello is being read (TLS_ST_SR_CLNT_HELLO). Presumably used
 * after a DTLS HelloVerifyRequest/cookie exchange, given the name — confirm
 * against callers, which are not visible in this file.
 */
void ossl_statem_set_hello_verify_done(SSL *s)
{
    s->statem.state = MSG_FLOW_UNINITED;
    s->statem.in_init = 1;
    /*
     * This will get reset (briefly) back to TLS_ST_BEFORE when we enter
     * state_machine() because |state| is MSG_FLOW_UNINITED, but until then any
     * calls to SSL_in_before() will return false. Also calls to
     * SSL_state_string() and SSL_state_string_long() will return something
     * sensible.
     */
    s->statem.hand_state = TLS_ST_SR_CLNT_HELLO;
}
| 243 | |
/* Begin or continue the handshake as a client (server flag == 0). */
int ossl_statem_connect(SSL *s)
{
    return state_machine(s, 0);
}
| 248 | |
/* Begin or continue the handshake as a server (server flag == 1). */
int ossl_statem_accept(SSL *s)
{
    return state_machine(s, 1);
}
| 253 | |
Emilia Kasper | a230b26 | 2016-08-05 19:03:17 +0200 | [diff] [blame] | 254 | typedef void (*info_cb) (const SSL *, int, int); |
| 255 | |
| 256 | static info_cb get_callback(SSL *s) |
Matt Caswell | 91eac8d | 2015-10-05 11:28:51 +0100 | [diff] [blame] | 257 | { |
| 258 | if (s->info_callback != NULL) |
| 259 | return s->info_callback; |
| 260 | else if (s->ctx->info_callback != NULL) |
| 261 | return s->ctx->info_callback; |
| 262 | |
| 263 | return NULL; |
| 264 | } |
| 265 | |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 266 | /* |
| 267 | * The main message flow state machine. We start in the MSG_FLOW_UNINITED or |
Matt Caswell | c7f4778 | 2017-01-10 23:02:28 +0000 | [diff] [blame] | 268 | * MSG_FLOW_FINISHED state and finish in MSG_FLOW_FINISHED. Valid states and |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 269 | * transitions are as follows: |
| 270 | * |
Matt Caswell | c7f4778 | 2017-01-10 23:02:28 +0000 | [diff] [blame] | 271 | * MSG_FLOW_UNINITED MSG_FLOW_FINISHED |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 272 | * | | |
| 273 | * +-----------------------+ |
| 274 | * v |
| 275 | * MSG_FLOW_WRITING <---> MSG_FLOW_READING |
| 276 | * | |
| 277 | * V |
| 278 | * MSG_FLOW_FINISHED |
| 279 | * | |
| 280 | * V |
| 281 | * [SUCCESS] |
| 282 | * |
| 283 | * We may exit at any point due to an error or NBIO event. If an NBIO event |
| 284 | * occurs then we restart at the point we left off when we are recalled. |
| 285 | * MSG_FLOW_WRITING and MSG_FLOW_READING have sub-state machines associated with them. |
| 286 | * |
| 287 | * In addition to the above there is also the MSG_FLOW_ERROR state. We can move |
| 288 | * into that state at any point in the event that an irrecoverable error occurs. |
| 289 | * |
| 290 | * Valid return values are: |
| 291 | * 1: Success |
| 292 | * <=0: NBIO or error |
| 293 | */ |
static int state_machine(SSL *s, int server)
{
    BUF_MEM *buf = NULL;
    void (*cb) (const SSL *ssl, int type, int val) = NULL;
    OSSL_STATEM *st = &s->statem;
    int ret = -1;                /* pessimistic default: NBIO or error */
    int ssret;

    if (st->state == MSG_FLOW_ERROR) {
        /* Shouldn't have been called if we're already in the error state */
        return -1;
    }

    /* Start each (re)entry with clean error queues */
    ERR_clear_error();
    clear_sys_error();

    cb = get_callback(s);

    st->in_handshake++;
    if (!SSL_in_init(s) || SSL_in_before(s)) {
        /*
         * If we are stateless then we already called SSL_clear() - don't do
         * it again and clear the STATELESS flag itself.
         */
        if ((s->s3->flags & TLS1_FLAGS_STATELESS) == 0 && !SSL_clear(s))
            /*
             * NOTE(review): this return bypasses the "end:" label, so
             * |in_handshake| stays incremented on this path — confirm
             * whether that is intentional.
             */
            return -1;
    }
#ifndef OPENSSL_NO_SCTP
    if (SSL_IS_DTLS(s) && BIO_dgram_is_sctp(SSL_get_wbio(s))) {
        /*
         * Notify SCTP BIO socket to enter handshake mode and prevent stream
         * identifier other than 0.
         */
        BIO_ctrl(SSL_get_wbio(s), BIO_CTRL_DGRAM_SCTP_SET_IN_HANDSHAKE,
                 st->in_handshake, NULL);
    }
#endif

    /* Initialise state machine */
    if (st->state == MSG_FLOW_UNINITED
            || st->state == MSG_FLOW_FINISHED) {
        if (st->state == MSG_FLOW_UNINITED) {
            st->hand_state = TLS_ST_BEFORE;
            st->request_state = TLS_ST_BEFORE;
        }

        s->server = server;
        if (cb != NULL)
            cb(s, SSL_CB_HANDSHAKE_START, 1);

        /*
         * Fatal errors in this block don't send an alert because we have
         * failed to even initialise properly. Sending an alert is probably
         * doomed to failure.
         */

        /* Sanity-check the protocol version against the method in use */
        if (SSL_IS_DTLS(s)) {
            if ((s->version & 0xff00) != (DTLS1_VERSION & 0xff00) &&
                (server || (s->version & 0xff00) != (DTLS1_BAD_VER & 0xff00))) {
                SSLfatal(s, SSL_AD_NO_ALERT, SSL_F_STATE_MACHINE,
                         ERR_R_INTERNAL_ERROR);
                goto end;
            }
        } else {
            if ((s->version >> 8) != SSL3_VERSION_MAJOR) {
                SSLfatal(s, SSL_AD_NO_ALERT, SSL_F_STATE_MACHINE,
                         ERR_R_INTERNAL_ERROR);
                goto end;
            }
        }

        if (!ssl_security(s, SSL_SECOP_VERSION, 0, s->version, NULL)) {
            SSLfatal(s, SSL_AD_NO_ALERT, SSL_F_STATE_MACHINE,
                     ERR_R_INTERNAL_ERROR);
            goto end;
        }

        /* Lazily allocate the handshake message buffer */
        if (s->init_buf == NULL) {
            if ((buf = BUF_MEM_new()) == NULL) {
                SSLfatal(s, SSL_AD_NO_ALERT, SSL_F_STATE_MACHINE,
                         ERR_R_INTERNAL_ERROR);
                goto end;
            }
            if (!BUF_MEM_grow(buf, SSL3_RT_MAX_PLAIN_LENGTH)) {
                SSLfatal(s, SSL_AD_NO_ALERT, SSL_F_STATE_MACHINE,
                         ERR_R_INTERNAL_ERROR);
                goto end;
            }
            s->init_buf = buf;
            buf = NULL;   /* ownership transferred; BUF_MEM_free(NULL) at end is a no-op */
        }

        if (!ssl3_setup_buffers(s)) {
            SSLfatal(s, SSL_AD_NO_ALERT, SSL_F_STATE_MACHINE,
                     ERR_R_INTERNAL_ERROR);
            goto end;
        }
        s->init_num = 0;

        /*
         * Should have been reset by tls_process_finished, too.
         */
        s->s3->change_cipher_spec = 0;

        /*
         * Ok, we now need to push on a buffering BIO ...but not with
         * SCTP
         */
#ifndef OPENSSL_NO_SCTP
        if (!SSL_IS_DTLS(s) || !BIO_dgram_is_sctp(SSL_get_wbio(s)))
#endif
            if (!ssl_init_wbio_buffer(s)) {
                SSLfatal(s, SSL_AD_NO_ALERT, SSL_F_STATE_MACHINE,
                         ERR_R_INTERNAL_ERROR);
                goto end;
            }

        if ((SSL_in_before(s))
                || s->renegotiate) {
            if (!tls_setup_handshake(s)) {
                /* SSLfatal() already called */
                goto end;
            }

            if (SSL_IS_FIRST_HANDSHAKE(s))
                st->read_state_first_init = 1;
        }

        /* Both client and server start the handshake by writing */
        st->state = MSG_FLOW_WRITING;
        init_write_state_machine(s);
    }

    /* Alternate between the read and write sub-state machines until done */
    while (st->state != MSG_FLOW_FINISHED) {
        if (st->state == MSG_FLOW_READING) {
            ssret = read_state_machine(s);
            if (ssret == SUB_STATE_FINISHED) {
                st->state = MSG_FLOW_WRITING;
                init_write_state_machine(s);
            } else {
                /* NBIO or error */
                goto end;
            }
        } else if (st->state == MSG_FLOW_WRITING) {
            ssret = write_state_machine(s);
            if (ssret == SUB_STATE_FINISHED) {
                st->state = MSG_FLOW_READING;
                init_read_state_machine(s);
            } else if (ssret == SUB_STATE_END_HANDSHAKE) {
                st->state = MSG_FLOW_FINISHED;
            } else {
                /* NBIO or error */
                goto end;
            }
        } else {
            /* Error: any other state is a bug; it should already be fatal */
            check_fatal(s, SSL_F_STATE_MACHINE);
            SSLerr(SSL_F_STATE_MACHINE, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
            goto end;
        }
    }

    ret = 1;

 end:
    st->in_handshake--;

#ifndef OPENSSL_NO_SCTP
    if (SSL_IS_DTLS(s) && BIO_dgram_is_sctp(SSL_get_wbio(s))) {
        /*
         * Notify SCTP BIO socket to leave handshake mode and allow stream
         * identifier other than 0.
         */
        BIO_ctrl(SSL_get_wbio(s), BIO_CTRL_DGRAM_SCTP_SET_IN_HANDSHAKE,
                 st->in_handshake, NULL);
    }
#endif

    /* Only non-NULL if |buf| was allocated but never handed to s->init_buf */
    BUF_MEM_free(buf);
    if (cb != NULL) {
        if (server)
            cb(s, SSL_CB_ACCEPT_EXIT, ret);
        else
            cb(s, SSL_CB_CONNECT_EXIT, ret);
    }
    return ret;
}
| 480 | |
| 481 | /* |
| 482 | * Initialise the MSG_FLOW_READING sub-state machine |
| 483 | */ |
| 484 | static void init_read_state_machine(SSL *s) |
| 485 | { |
Matt Caswell | d6f1a6e | 2015-10-05 10:58:52 +0100 | [diff] [blame] | 486 | OSSL_STATEM *st = &s->statem; |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 487 | |
| 488 | st->read_state = READ_STATE_HEADER; |
| 489 | } |
| 490 | |
/*
 * Grow |s->init_buf| to at least |size| bytes while keeping |s->init_msg|
 * pointing at the same offset within the (possibly reallocated) buffer.
 * Returns 1 on success, 0 on failure.
 */
static int grow_init_buf(SSL *s, size_t size) {

    /* Remember where init_msg sits relative to the start of the buffer */
    size_t msg_offset = (char *)s->init_msg - s->init_buf->data;

    /*
     * NOTE(review): BUF_MEM_grow_clean takes the size as int here via the
     * cast; a |size| above INT_MAX would be truncated — confirm callers
     * bound |size| (e.g. via max_message_size) before reaching this point.
     */
    if (!BUF_MEM_grow_clean(s->init_buf, (int)size))
        return 0;

    /*
     * NOTE(review): on this failure path the buffer has already been grown
     * (and may have moved) while init_msg still points at the old offset;
     * callers must treat a 0 return as fatal and not use init_msg.
     */
    if (size < msg_offset)
        return 0;

    /* Rebase init_msg against the possibly relocated buffer */
    s->init_msg = s->init_buf->data + msg_offset;

    return 1;
}
| 505 | |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 506 | /* |
| 507 | * This function implements the sub-state machine when the message flow is in |
| 508 | * MSG_FLOW_READING. The valid sub-states and transitions are: |
| 509 | * |
| 510 | * READ_STATE_HEADER <--+<-------------+ |
| 511 | * | | | |
| 512 | * v | | |
| 513 | * READ_STATE_BODY -----+-->READ_STATE_POST_PROCESS |
| 514 | * | | |
| 515 | * +----------------------------+ |
| 516 | * v |
| 517 | * [SUB_STATE_FINISHED] |
| 518 | * |
| 519 | * READ_STATE_HEADER has the responsibility for reading in the message header |
| 520 | * and transitioning the state of the handshake state machine. |
| 521 | * |
| 522 | * READ_STATE_BODY reads in the rest of the message and then subsequently |
| 523 | * processes it. |
| 524 | * |
| 525 | * READ_STATE_POST_PROCESS is an optional step that may occur if some post |
| 526 | * processing activity performed on the message may block. |
| 527 | * |
FdaSilvaYY | 0d4fb84 | 2016-02-05 15:23:54 -0500 | [diff] [blame] | 528 | * Any of the above states could result in an NBIO event occurring in which case |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 529 | * control returns to the calling application. When this function is recalled we |
| 530 | * will resume in the same state where we left off. |
| 531 | */ |
/*
 * Run the MSG_FLOW_READING sub-state machine described in the comment above.
 *
 * Returns SUB_STATE_FINISHED once a complete flight of messages has been read
 * and processed, or SUB_STATE_ERROR on failure. Note that an NBIO event is
 * also reported as SUB_STATE_ERROR: in that case no fatal error has been
 * raised and, when we are called again, we resume in the same sub-state
 * (recorded in st->read_state) where we left off.
 */
static SUB_STATE_RETURN read_state_machine(SSL *s)
{
    OSSL_STATEM *st = &s->statem;
    int ret, mt;
    size_t len = 0;
    /* Role specific handlers, selected below based on client/server */
    int (*transition) (SSL *s, int mt);
    PACKET pkt;
    MSG_PROCESS_RETURN(*process_message) (SSL *s, PACKET *pkt);
    WORK_STATE(*post_process_message) (SSL *s, WORK_STATE wst);
    size_t (*max_message_size) (SSL *s);
    void (*cb) (const SSL *ssl, int type, int val) = NULL;

    cb = get_callback(s);

    if (s->server) {
        transition = ossl_statem_server_read_transition;
        process_message = ossl_statem_server_process_message;
        max_message_size = ossl_statem_server_max_message_size;
        post_process_message = ossl_statem_server_post_process_message;
    } else {
        transition = ossl_statem_client_read_transition;
        process_message = ossl_statem_client_process_message;
        max_message_size = ossl_statem_client_max_message_size;
        post_process_message = ossl_statem_client_post_process_message;
    }

    if (st->read_state_first_init) {
        /* Flag the first packet of the flow; cleared once we read a body */
        s->first_packet = 1;
        st->read_state_first_init = 0;
    }

    while (1) {
        switch (st->read_state) {
        case READ_STATE_HEADER:
            /* Get the state the peer wants to move to */
            if (SSL_IS_DTLS(s)) {
                /*
                 * In DTLS we get the whole message in one go - header and body
                 */
                ret = dtls_get_message(s, &mt, &len);
            } else {
                ret = tls_get_message_header(s, &mt);
            }

            if (ret == 0) {
                /* Could be non-blocking IO */
                return SUB_STATE_ERROR;
            }

            if (cb != NULL) {
                /* Notify callback of an impending state change */
                if (s->server)
                    cb(s, SSL_CB_ACCEPT_LOOP, 1);
                else
                    cb(s, SSL_CB_CONNECT_LOOP, 1);
            }
            /*
             * Validate that we are allowed to move to the new state and move
             * to that state if so
             */
            if (!transition(s, mt))
                return SUB_STATE_ERROR;

            /* Reject messages exceeding the role/state specific maximum */
            if (s->s3->tmp.message_size > max_message_size(s)) {
                SSLfatal(s, SSL_AD_ILLEGAL_PARAMETER, SSL_F_READ_STATE_MACHINE,
                         SSL_R_EXCESSIVE_MESSAGE_SIZE);
                return SUB_STATE_ERROR;
            }

            /* dtls_get_message already did this */
            if (!SSL_IS_DTLS(s)
                    && s->s3->tmp.message_size > 0
                    && !grow_init_buf(s, s->s3->tmp.message_size
                                         + SSL3_HM_HEADER_LENGTH)) {
                SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_READ_STATE_MACHINE,
                         ERR_R_BUF_LIB);
                return SUB_STATE_ERROR;
            }

            st->read_state = READ_STATE_BODY;
            /* Fall through */

        case READ_STATE_BODY:
            if (!SSL_IS_DTLS(s)) {
                /* We already got this above for DTLS */
                ret = tls_get_message_body(s, &len);
                if (ret == 0) {
                    /* Could be non-blocking IO */
                    return SUB_STATE_ERROR;
                }
            }

            s->first_packet = 0;
            /* Wrap the message body (held in init_msg) for safe parsing */
            if (!PACKET_buf_init(&pkt, s->init_msg, len)) {
                SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_READ_STATE_MACHINE,
                         ERR_R_INTERNAL_ERROR);
                return SUB_STATE_ERROR;
            }
            ret = process_message(s, &pkt);

            /* Discard the packet data */
            s->init_num = 0;

            switch (ret) {
            case MSG_PROCESS_ERROR:
                /* process_message is responsible for raising the fatal error */
                check_fatal(s, SSL_F_READ_STATE_MACHINE);
                return SUB_STATE_ERROR;

            case MSG_PROCESS_FINISHED_READING:
                if (SSL_IS_DTLS(s)) {
                    dtls1_stop_timer(s);
                }
                return SUB_STATE_FINISHED;

            case MSG_PROCESS_CONTINUE_PROCESSING:
                /* The message needs (possibly blocking) post processing */
                st->read_state = READ_STATE_POST_PROCESS;
                st->read_state_work = WORK_MORE_A;
                break;

            default:
                /* MSG_PROCESS_CONTINUE_READING: move on to the next message */
                st->read_state = READ_STATE_HEADER;
                break;
            }
            break;

        case READ_STATE_POST_PROCESS:
            st->read_state_work = post_process_message(s, st->read_state_work);
            switch (st->read_state_work) {
            case WORK_ERROR:
                check_fatal(s, SSL_F_READ_STATE_MACHINE);
                /* Fall through */
            case WORK_MORE_A:
            case WORK_MORE_B:
            case WORK_MORE_C:
                /* WORK_MORE_*: NBIO - we will resume here when called again */
                return SUB_STATE_ERROR;

            case WORK_FINISHED_CONTINUE:
                st->read_state = READ_STATE_HEADER;
                break;

            case WORK_FINISHED_STOP:
                if (SSL_IS_DTLS(s)) {
                    dtls1_stop_timer(s);
                }
                return SUB_STATE_FINISHED;
            }
            break;

        default:
            /* Shouldn't happen */
            SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_READ_STATE_MACHINE,
                     ERR_R_INTERNAL_ERROR);
            return SUB_STATE_ERROR;
        }
    }
}
| 688 | |
| 689 | /* |
| 690 | * Send a previously constructed message to the peer. |
| 691 | */ |
| 692 | static int statem_do_write(SSL *s) |
| 693 | { |
Matt Caswell | d6f1a6e | 2015-10-05 10:58:52 +0100 | [diff] [blame] | 694 | OSSL_STATEM *st = &s->statem; |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 695 | |
| 696 | if (st->hand_state == TLS_ST_CW_CHANGE |
Emilia Kasper | a230b26 | 2016-08-05 19:03:17 +0200 | [diff] [blame] | 697 | || st->hand_state == TLS_ST_SW_CHANGE) { |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 698 | if (SSL_IS_DTLS(s)) |
| 699 | return dtls1_do_write(s, SSL3_RT_CHANGE_CIPHER_SPEC); |
| 700 | else |
| 701 | return ssl3_do_write(s, SSL3_RT_CHANGE_CIPHER_SPEC); |
| 702 | } else { |
| 703 | return ssl_do_write(s); |
| 704 | } |
| 705 | } |
| 706 | |
| 707 | /* |
| 708 | * Initialise the MSG_FLOW_WRITING sub-state machine |
| 709 | */ |
| 710 | static void init_write_state_machine(SSL *s) |
| 711 | { |
Matt Caswell | d6f1a6e | 2015-10-05 10:58:52 +0100 | [diff] [blame] | 712 | OSSL_STATEM *st = &s->statem; |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 713 | |
| 714 | st->write_state = WRITE_STATE_TRANSITION; |
| 715 | } |
| 716 | |
| 717 | /* |
| 718 | * This function implements the sub-state machine when the message flow is in |
| 719 | * MSG_FLOW_WRITING. The valid sub-states and transitions are: |
| 720 | * |
| 721 | * +-> WRITE_STATE_TRANSITION ------> [SUB_STATE_FINISHED] |
| 722 | * | | |
| 723 | * | v |
| 724 | * | WRITE_STATE_PRE_WORK -----> [SUB_STATE_END_HANDSHAKE] |
| 725 | * | | |
| 726 | * | v |
| 727 | * | WRITE_STATE_SEND |
| 728 | * | | |
| 729 | * | v |
| 730 | * | WRITE_STATE_POST_WORK |
| 731 | * | | |
| 732 | * +-------------+ |
| 733 | * |
 * WRITE_STATE_TRANSITION transitions the state of the handshake state machine
 *
| 736 | * WRITE_STATE_PRE_WORK performs any work necessary to prepare the later |
FdaSilvaYY | 0d4fb84 | 2016-02-05 15:23:54 -0500 | [diff] [blame] | 737 | * sending of the message. This could result in an NBIO event occurring in |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 738 | * which case control returns to the calling application. When this function |
| 739 | * is recalled we will resume in the same state where we left off. |
| 740 | * |
| 741 | * WRITE_STATE_SEND sends the message and performs any work to be done after |
| 742 | * sending. |
| 743 | * |
| 744 | * WRITE_STATE_POST_WORK performs any work necessary after the sending of the |
| 745 | * message has been completed. As for WRITE_STATE_PRE_WORK this could also |
| 746 | * result in an NBIO event. |
| 747 | */ |
Matt Caswell | d78052c | 2015-10-05 11:03:27 +0100 | [diff] [blame] | 748 | static SUB_STATE_RETURN write_state_machine(SSL *s) |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 749 | { |
Matt Caswell | d6f1a6e | 2015-10-05 10:58:52 +0100 | [diff] [blame] | 750 | OSSL_STATEM *st = &s->statem; |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 751 | int ret; |
Emilia Kasper | a230b26 | 2016-08-05 19:03:17 +0200 | [diff] [blame] | 752 | WRITE_TRAN(*transition) (SSL *s); |
| 753 | WORK_STATE(*pre_work) (SSL *s, WORK_STATE wst); |
| 754 | WORK_STATE(*post_work) (SSL *s, WORK_STATE wst); |
Matt Caswell | 6392fb8 | 2016-09-30 11:17:57 +0100 | [diff] [blame] | 755 | int (*get_construct_message_f) (SSL *s, WPACKET *pkt, |
| 756 | int (**confunc) (SSL *s, WPACKET *pkt), |
| 757 | int *mt); |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 758 | void (*cb) (const SSL *ssl, int type, int val) = NULL; |
Matt Caswell | 6392fb8 | 2016-09-30 11:17:57 +0100 | [diff] [blame] | 759 | int (*confunc) (SSL *s, WPACKET *pkt); |
| 760 | int mt; |
Matt Caswell | 7cea05d | 2016-09-29 23:28:29 +0100 | [diff] [blame] | 761 | WPACKET pkt; |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 762 | |
Matt Caswell | 91eac8d | 2015-10-05 11:28:51 +0100 | [diff] [blame] | 763 | cb = get_callback(s); |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 764 | |
FdaSilvaYY | e8aa8b6 | 2016-06-29 00:18:50 +0200 | [diff] [blame] | 765 | if (s->server) { |
Matt Caswell | 8481f58 | 2015-10-26 11:54:17 +0000 | [diff] [blame] | 766 | transition = ossl_statem_server_write_transition; |
| 767 | pre_work = ossl_statem_server_pre_work; |
| 768 | post_work = ossl_statem_server_post_work; |
Matt Caswell | 6392fb8 | 2016-09-30 11:17:57 +0100 | [diff] [blame] | 769 | get_construct_message_f = ossl_statem_server_construct_message; |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 770 | } else { |
Matt Caswell | 8481f58 | 2015-10-26 11:54:17 +0000 | [diff] [blame] | 771 | transition = ossl_statem_client_write_transition; |
| 772 | pre_work = ossl_statem_client_pre_work; |
| 773 | post_work = ossl_statem_client_post_work; |
Matt Caswell | 6392fb8 | 2016-09-30 11:17:57 +0100 | [diff] [blame] | 774 | get_construct_message_f = ossl_statem_client_construct_message; |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 775 | } |
| 776 | |
FdaSilvaYY | e8aa8b6 | 2016-06-29 00:18:50 +0200 | [diff] [blame] | 777 | while (1) { |
| 778 | switch (st->write_state) { |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 779 | case WRITE_STATE_TRANSITION: |
| 780 | if (cb != NULL) { |
| 781 | /* Notify callback of an impending state change */ |
| 782 | if (s->server) |
| 783 | cb(s, SSL_CB_ACCEPT_LOOP, 1); |
| 784 | else |
| 785 | cb(s, SSL_CB_CONNECT_LOOP, 1); |
| 786 | } |
FdaSilvaYY | e8aa8b6 | 2016-06-29 00:18:50 +0200 | [diff] [blame] | 787 | switch (transition(s)) { |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 788 | case WRITE_TRAN_CONTINUE: |
| 789 | st->write_state = WRITE_STATE_PRE_WORK; |
| 790 | st->write_state_work = WORK_MORE_A; |
| 791 | break; |
| 792 | |
| 793 | case WRITE_TRAN_FINISHED: |
| 794 | return SUB_STATE_FINISHED; |
| 795 | break; |
| 796 | |
Rich Salz | f3b3d7f | 2016-08-30 13:31:18 -0400 | [diff] [blame] | 797 | case WRITE_TRAN_ERROR: |
Matt Caswell | 47e2ee0 | 2017-11-23 12:10:54 +0000 | [diff] [blame] | 798 | check_fatal(s, SSL_F_WRITE_STATE_MACHINE); |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 799 | return SUB_STATE_ERROR; |
| 800 | } |
| 801 | break; |
| 802 | |
| 803 | case WRITE_STATE_PRE_WORK: |
FdaSilvaYY | e8aa8b6 | 2016-06-29 00:18:50 +0200 | [diff] [blame] | 804 | switch (st->write_state_work = pre_work(s, st->write_state_work)) { |
Rich Salz | f3b3d7f | 2016-08-30 13:31:18 -0400 | [diff] [blame] | 805 | case WORK_ERROR: |
Matt Caswell | 47e2ee0 | 2017-11-23 12:10:54 +0000 | [diff] [blame] | 806 | check_fatal(s, SSL_F_WRITE_STATE_MACHINE); |
| 807 | /* Fall through */ |
Rich Salz | f3b3d7f | 2016-08-30 13:31:18 -0400 | [diff] [blame] | 808 | case WORK_MORE_A: |
| 809 | case WORK_MORE_B: |
Benjamin Kaduk | ddf9725 | 2017-02-06 15:33:28 -0600 | [diff] [blame] | 810 | case WORK_MORE_C: |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 811 | return SUB_STATE_ERROR; |
| 812 | |
| 813 | case WORK_FINISHED_CONTINUE: |
| 814 | st->write_state = WRITE_STATE_SEND; |
| 815 | break; |
| 816 | |
| 817 | case WORK_FINISHED_STOP: |
| 818 | return SUB_STATE_END_HANDSHAKE; |
| 819 | } |
Matt Caswell | f7e393b | 2017-02-27 11:19:57 +0000 | [diff] [blame] | 820 | if (!get_construct_message_f(s, &pkt, &confunc, &mt)) { |
Matt Caswell | f63a17d | 2017-11-21 17:18:43 +0000 | [diff] [blame] | 821 | /* SSLfatal() already called */ |
Matt Caswell | f7e393b | 2017-02-27 11:19:57 +0000 | [diff] [blame] | 822 | return SUB_STATE_ERROR; |
| 823 | } |
| 824 | if (mt == SSL3_MT_DUMMY) { |
| 825 | /* Skip construction and sending. This isn't a "real" state */ |
| 826 | st->write_state = WRITE_STATE_POST_WORK; |
| 827 | st->write_state_work = WORK_MORE_A; |
| 828 | break; |
| 829 | } |
Matt Caswell | 7cea05d | 2016-09-29 23:28:29 +0100 | [diff] [blame] | 830 | if (!WPACKET_init(&pkt, s->init_buf) |
Matt Caswell | f63a17d | 2017-11-21 17:18:43 +0000 | [diff] [blame] | 831 | || !ssl_set_handshake_header(s, &pkt, mt)) { |
| 832 | WPACKET_cleanup(&pkt); |
| 833 | SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_WRITE_STATE_MACHINE, |
| 834 | ERR_R_INTERNAL_ERROR); |
| 835 | return SUB_STATE_ERROR; |
| 836 | } |
| 837 | if (confunc != NULL && !confunc(s, &pkt)) { |
| 838 | WPACKET_cleanup(&pkt); |
Matt Caswell | 47e2ee0 | 2017-11-23 12:10:54 +0000 | [diff] [blame] | 839 | check_fatal(s, SSL_F_WRITE_STATE_MACHINE); |
Matt Caswell | f63a17d | 2017-11-21 17:18:43 +0000 | [diff] [blame] | 840 | return SUB_STATE_ERROR; |
| 841 | } |
| 842 | if (!ssl_close_construct_packet(s, &pkt, mt) |
Matt Caswell | 7cea05d | 2016-09-29 23:28:29 +0100 | [diff] [blame] | 843 | || !WPACKET_finish(&pkt)) { |
| 844 | WPACKET_cleanup(&pkt); |
Matt Caswell | f63a17d | 2017-11-21 17:18:43 +0000 | [diff] [blame] | 845 | SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_WRITE_STATE_MACHINE, |
| 846 | ERR_R_INTERNAL_ERROR); |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 847 | return SUB_STATE_ERROR; |
Matt Caswell | 7cea05d | 2016-09-29 23:28:29 +0100 | [diff] [blame] | 848 | } |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 849 | |
| 850 | /* Fall through */ |
| 851 | |
| 852 | case WRITE_STATE_SEND: |
| 853 | if (SSL_IS_DTLS(s) && st->use_timer) { |
| 854 | dtls1_start_timer(s); |
| 855 | } |
| 856 | ret = statem_do_write(s); |
| 857 | if (ret <= 0) { |
| 858 | return SUB_STATE_ERROR; |
| 859 | } |
| 860 | st->write_state = WRITE_STATE_POST_WORK; |
| 861 | st->write_state_work = WORK_MORE_A; |
| 862 | /* Fall through */ |
| 863 | |
| 864 | case WRITE_STATE_POST_WORK: |
FdaSilvaYY | e8aa8b6 | 2016-06-29 00:18:50 +0200 | [diff] [blame] | 865 | switch (st->write_state_work = post_work(s, st->write_state_work)) { |
Rich Salz | f3b3d7f | 2016-08-30 13:31:18 -0400 | [diff] [blame] | 866 | case WORK_ERROR: |
Matt Caswell | 47e2ee0 | 2017-11-23 12:10:54 +0000 | [diff] [blame] | 867 | check_fatal(s, SSL_F_WRITE_STATE_MACHINE); |
| 868 | /* Fall through */ |
Rich Salz | f3b3d7f | 2016-08-30 13:31:18 -0400 | [diff] [blame] | 869 | case WORK_MORE_A: |
| 870 | case WORK_MORE_B: |
Benjamin Kaduk | ddf9725 | 2017-02-06 15:33:28 -0600 | [diff] [blame] | 871 | case WORK_MORE_C: |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 872 | return SUB_STATE_ERROR; |
| 873 | |
| 874 | case WORK_FINISHED_CONTINUE: |
| 875 | st->write_state = WRITE_STATE_TRANSITION; |
| 876 | break; |
| 877 | |
| 878 | case WORK_FINISHED_STOP: |
| 879 | return SUB_STATE_END_HANDSHAKE; |
| 880 | } |
| 881 | break; |
| 882 | |
| 883 | default: |
Matt Caswell | 47e2ee0 | 2017-11-23 12:10:54 +0000 | [diff] [blame] | 884 | SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_WRITE_STATE_MACHINE, |
| 885 | ERR_R_INTERNAL_ERROR); |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 886 | return SUB_STATE_ERROR; |
| 887 | } |
| 888 | } |
| 889 | } |
| 890 | |
| 891 | /* |
Matt Caswell | 8723588 | 2015-09-07 16:36:53 +0100 | [diff] [blame] | 892 | * Flush the write BIO |
| 893 | */ |
Matt Caswell | 61ae935 | 2015-09-11 11:23:20 +0100 | [diff] [blame] | 894 | int statem_flush(SSL *s) |
Matt Caswell | 8723588 | 2015-09-07 16:36:53 +0100 | [diff] [blame] | 895 | { |
| 896 | s->rwstate = SSL_WRITING; |
| 897 | if (BIO_flush(s->wbio) <= 0) { |
| 898 | return 0; |
| 899 | } |
| 900 | s->rwstate = SSL_NOTHING; |
| 901 | |
| 902 | return 1; |
| 903 | } |
| 904 | |
| 905 | /* |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 906 | * Called by the record layer to determine whether application data is |
Matt Caswell | c7f4778 | 2017-01-10 23:02:28 +0000 | [diff] [blame] | 907 | * allowed to be received in the current handshake state or not. |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 908 | * |
| 909 | * Return values are: |
| 910 | * 1: Yes (application data allowed) |
| 911 | * 0: No (application data not allowed) |
| 912 | */ |
Matt Caswell | fe3a329 | 2015-10-05 10:39:54 +0100 | [diff] [blame] | 913 | int ossl_statem_app_data_allowed(SSL *s) |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 914 | { |
Matt Caswell | d6f1a6e | 2015-10-05 10:58:52 +0100 | [diff] [blame] | 915 | OSSL_STATEM *st = &s->statem; |
Matt Caswell | f8e0a55 | 2015-07-29 14:23:56 +0100 | [diff] [blame] | 916 | |
Matt Caswell | c7f4778 | 2017-01-10 23:02:28 +0000 | [diff] [blame] | 917 | if (st->state == MSG_FLOW_UNINITED) |
Matt Caswell | 94836de | 2015-09-08 09:19:22 +0100 | [diff] [blame] | 918 | return 0; |
| 919 | |
Matt Caswell | 8723588 | 2015-09-07 16:36:53 +0100 | [diff] [blame] | 920 | if (!s->s3->in_read_app_data || (s->s3->total_renegotiations == 0)) |
| 921 | return 0; |
| 922 | |
Matt Caswell | 94836de | 2015-09-08 09:19:22 +0100 | [diff] [blame] | 923 | if (s->server) { |
| 924 | /* |
| 925 | * If we're a server and we haven't got as far as writing our |
| 926 | * ServerHello yet then we allow app data |
| 927 | */ |
| 928 | if (st->hand_state == TLS_ST_BEFORE |
Emilia Kasper | a230b26 | 2016-08-05 19:03:17 +0200 | [diff] [blame] | 929 | || st->hand_state == TLS_ST_SR_CLNT_HELLO) |
Matt Caswell | 8723588 | 2015-09-07 16:36:53 +0100 | [diff] [blame] | 930 | return 1; |
Matt Caswell | 94836de | 2015-09-08 09:19:22 +0100 | [diff] [blame] | 931 | } else { |
| 932 | /* |
| 933 | * If we're a client and we haven't read the ServerHello yet then we |
| 934 | * allow app data |
| 935 | */ |
| 936 | if (st->hand_state == TLS_ST_CW_CLNT_HELLO) |
| 937 | return 1; |
Matt Caswell | 8723588 | 2015-09-07 16:36:53 +0100 | [diff] [blame] | 938 | } |
| 939 | |
Matt Caswell | 8723588 | 2015-09-07 16:36:53 +0100 | [diff] [blame] | 940 | return 0; |
| 941 | } |
Tatsuhiro Tsujikawa | 1f5878b | 2018-01-21 11:30:36 +0900 | [diff] [blame] | 942 | |
| 943 | /* |
| 944 | * This function returns 1 if TLS exporter is ready to export keying |
| 945 | * material, or 0 if otherwise. |
| 946 | */ |
| 947 | int ossl_statem_export_allowed(SSL *s) |
| 948 | { |
| 949 | return s->s3->previous_server_finished_len != 0 |
| 950 | && s->statem.hand_state != TLS_ST_SW_FINISHED; |
| 951 | } |
Tatsuhiro Tsujikawa | b38ede8 | 2018-02-04 12:20:37 +0900 | [diff] [blame] | 952 | |
| 953 | /* |
| 954 | * Return 1 if early TLS exporter is ready to export keying material, |
| 955 | * or 0 if otherwise. |
| 956 | */ |
| 957 | int ossl_statem_export_early_allowed(SSL *s) |
| 958 | { |
| 959 | /* |
| 960 | * The early exporter secret is only present on the server if we |
| 961 | * have accepted early_data. It is present on the client as long |
| 962 | * as we have sent early_data. |
| 963 | */ |
| 964 | return s->ext.early_data == SSL_EARLY_DATA_ACCEPTED |
| 965 | || (!s->server && s->ext.early_data != SSL_EARLY_DATA_NOT_SENT); |
| 966 | } |