/* ====================================================================
 * Copyright (c) 1998-2003 The OpenSSL Project. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    openssl-core@openssl.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com). This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com). */

#include <openssl/bio.h>

#include <assert.h>
#include <string.h>

#include <openssl/buf.h>
#include <openssl/err.h>
#include <openssl/mem.h>


struct bio_bio_st {
  BIO *peer; /* NULL if buf == NULL.
              * If peer != NULL, then peer->ptr is also a bio_bio_st,
              * and its "peer" member points back to us.
              * peer != NULL iff init != 0 in the BIO. */

  /* This is for what we write (i.e. reading uses peer's struct): */
  int closed;    /* valid iff peer != NULL */
  size_t len;    /* valid iff buf != NULL; 0 if peer == NULL */
  size_t offset; /* valid iff buf != NULL; 0 if len == 0 */
  size_t size;
  uint8_t *buf;  /* "size" elements (if != NULL) */
  char buf_externally_allocated; /* true iff buf was externally allocated. */

  char zero_copy_read_lock;  /* true iff a zero copy read operation
                              * is in progress. */
  char zero_copy_write_lock; /* true iff a zero copy write operation
                              * is in progress. */

  size_t request; /* valid iff peer != NULL; 0 if len != 0,
                   * otherwise set by peer to number of bytes
                   * it (unsuccessfully) tried to read,
                   * never more than buffer space (size-len) warrants. */
};

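/* Each half of the pair owns the ring buffer it writes into; the peer reads
 * from that buffer. The readable region starts at |offset| and holds |len|
 * bytes, and new data is appended at write_offset = (offset + len) % size,
 * wrapping around the end of |buf|. As an illustrative example (values
 * chosen here, not taken from any caller): with size = 8, offset = 6 and
 * len = 5, reads start at index 6 and wrap after two bytes, while the next
 * write lands at index (6 + 5) % 8 = 3. */
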
static int bio_new(BIO *bio) {
  struct bio_bio_st *b;

  b = OPENSSL_malloc(sizeof *b);
  if (b == NULL) {
    return 0;
  }
  memset(b, 0, sizeof(struct bio_bio_st));

  b->size = 17 * 1024; /* enough for one TLS record (just a default) */
  bio->ptr = b;
  return 1;
}

static void bio_destroy_pair(BIO *bio) {
  struct bio_bio_st *b = bio->ptr;
  BIO *peer_bio;
  struct bio_bio_st *peer_b;

  if (b == NULL) {
    return;
  }

  peer_bio = b->peer;
  if (peer_bio == NULL) {
    return;
  }

  peer_b = peer_bio->ptr;

  assert(peer_b != NULL);
  assert(peer_b->peer == bio);

  peer_b->peer = NULL;
  peer_bio->init = 0;
  assert(peer_b->buf != NULL);
  peer_b->len = 0;
  peer_b->offset = 0;

  b->peer = NULL;
  bio->init = 0;
  assert(b->buf != NULL);
  b->len = 0;
  b->offset = 0;
}

static int bio_free(BIO *bio) {
  struct bio_bio_st *b;

  if (bio == NULL) {
    return 0;
  }
  b = bio->ptr;

  assert(b != NULL);

  if (b->peer) {
    bio_destroy_pair(bio);
  }

  if (!b->buf_externally_allocated) {
    OPENSSL_free(b->buf);
  }

  OPENSSL_free(b);

  return 1;
}

static size_t bio_zero_copy_get_read_buf(struct bio_bio_st* peer_b,
                                          uint8_t** out_read_buf,
                                          size_t* out_buf_offset) {
  size_t max_available;
  if (peer_b->len > peer_b->size - peer_b->offset) {
    /* Only the first half of the ring buffer can be read. */
    max_available = peer_b->size - peer_b->offset;
  } else {
    max_available = peer_b->len;
  }

  *out_read_buf = peer_b->buf;
  *out_buf_offset = peer_b->offset;
  return max_available;
}

int BIO_zero_copy_get_read_buf(BIO* bio, uint8_t** out_read_buf,
                               size_t* out_buf_offset,
                               size_t* out_available_bytes) {
  struct bio_bio_st* b;
  struct bio_bio_st* peer_b;
  size_t max_available;
  *out_available_bytes = 0;

  BIO_clear_retry_flags(bio);

  if (!bio->init) {
    OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_read_buf, BIO_R_UNINITIALIZED);
    return 0;
  }

  b = bio->ptr;

  if (!b || !b->peer) {
    OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_read_buf,
                      BIO_R_UNSUPPORTED_METHOD);
    return 0;
  }

  peer_b = b->peer->ptr;
  if (!peer_b || !peer_b->peer || peer_b->peer->ptr != b) {
    OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_read_buf,
                      BIO_R_UNSUPPORTED_METHOD);
    return 0;
  }

  if (peer_b->zero_copy_read_lock) {
    OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_read_buf, BIO_R_INVALID_ARGUMENT);
    return 0;
  }

  peer_b->request = 0; /* Is not used by zero-copy API. */

  max_available =
      bio_zero_copy_get_read_buf(peer_b, out_read_buf, out_buf_offset);

  assert(peer_b->buf != NULL);
  if (max_available > 0) {
    peer_b->zero_copy_read_lock = 1;
  }

  *out_available_bytes = max_available;
  return 1;
}

int BIO_zero_copy_get_read_buf_done(BIO* bio, size_t bytes_read) {
  struct bio_bio_st* b;
  struct bio_bio_st* peer_b;
  size_t max_available;
  size_t dummy_read_offset;
  uint8_t* dummy_read_buf;

  assert(BIO_get_retry_flags(bio) == 0);

  if (!bio->init) {
    OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_read_buf_done,
                      BIO_R_UNINITIALIZED);
    return 0;
  }

  b = bio->ptr;

  if (!b || !b->peer) {
    OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_read_buf_done,
                      BIO_R_UNSUPPORTED_METHOD);
    return 0;
  }

  peer_b = b->peer->ptr;
  if (!peer_b || !peer_b->peer || peer_b->peer->ptr != b) {
    OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_read_buf_done,
                      BIO_R_UNSUPPORTED_METHOD);
    return 0;
  }

  if (!peer_b->zero_copy_read_lock) {
    OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_read_buf_done,
                      BIO_R_INVALID_ARGUMENT);
    return 0;
  }

  max_available =
      bio_zero_copy_get_read_buf(peer_b, &dummy_read_buf, &dummy_read_offset);
  if (bytes_read > max_available) {
    OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_read_buf_done,
                      BIO_R_INVALID_ARGUMENT);
    return 0;
  }

  peer_b->len -= bytes_read;
  assert(peer_b->len >= 0);
  assert(peer_b->offset + bytes_read <= peer_b->size);

  /* Move read offset. If zero_copy_write_lock == 1 we must advance the
   * offset even if buffer becomes empty, to make sure
   * write_offset = (offset + len) mod size does not change. */
  if (peer_b->offset + bytes_read == peer_b->size ||
      (!peer_b->zero_copy_write_lock && peer_b->len == 0)) {
    peer_b->offset = 0;
  } else {
    peer_b->offset += bytes_read;
  }

  bio->num_read += bytes_read;
  peer_b->zero_copy_read_lock = 0;
  return 1;
}

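/* Illustrative use of the zero-copy read API above (a sketch, not part of
 * this file; |consume| stands for whatever the caller does with the bytes
 * and must return at most |available|):
 *
 *   uint8_t *read_buf;
 *   size_t read_offset, available;
 *   if (BIO_zero_copy_get_read_buf(bio, &read_buf, &read_offset,
 *                                  &available) &&
 *       available > 0) {
 *     size_t used = consume(read_buf + read_offset, available);
 *     BIO_zero_copy_get_read_buf_done(bio, used);
 *   }
 */
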
static size_t bio_zero_copy_get_write_buf(struct bio_bio_st* b,
                                           uint8_t** out_write_buf,
                                           size_t* out_buf_offset) {
  size_t write_offset;
  size_t max_available;

  assert(b->len <= b->size);

  write_offset = b->offset + b->len;

  if (write_offset >= b->size) {
    /* Only the first half of the ring buffer can be written to. */
    write_offset -= b->size;
    /* Write up to the start of the ring buffer. */
    max_available = b->offset - write_offset;
  } else {
    /* Write up to the end of the buffer. */
    max_available = b->size - write_offset;
  }

  *out_write_buf = b->buf;
  *out_buf_offset = write_offset;
  return max_available;
}

int BIO_zero_copy_get_write_buf(BIO* bio, uint8_t** out_write_buf,
                                size_t* out_buf_offset,
                                size_t* out_available_bytes) {
  struct bio_bio_st* b;
  struct bio_bio_st* peer_b;
  size_t max_available;

  *out_available_bytes = 0;
  BIO_clear_retry_flags(bio);

  if (!bio->init) {
    OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf, BIO_R_UNINITIALIZED);
    return 0;
  }

  b = bio->ptr;

  if (!b || !b->buf || !b->peer) {
    OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf,
                      BIO_R_UNSUPPORTED_METHOD);
    return 0;
  }
  peer_b = b->peer->ptr;
  if (!peer_b || !peer_b->peer || peer_b->peer->ptr != b) {
    OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf,
                      BIO_R_UNSUPPORTED_METHOD);
    return 0;
  }

  assert(b->buf != NULL);

  if (b->zero_copy_write_lock) {
    OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf, BIO_R_INVALID_ARGUMENT);
    return 0;
  }

  b->request = 0;
  if (b->closed) {
    /* Bio is already closed. */
    OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf, BIO_R_BROKEN_PIPE);
    return 0;
  }

  max_available = bio_zero_copy_get_write_buf(b, out_write_buf, out_buf_offset);

  if (max_available > 0) {
    b->zero_copy_write_lock = 1;
  }

  *out_available_bytes = max_available;
  return 1;
}

int BIO_zero_copy_get_write_buf_done(BIO* bio, size_t bytes_written) {
  struct bio_bio_st* b;
  struct bio_bio_st* peer_b;

  size_t rest;
  size_t dummy_write_offset;
  uint8_t* dummy_write_buf;

  if (!bio->init) {
    OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf_done,
                      BIO_R_UNINITIALIZED);
    return 0;
  }

  b = bio->ptr;

  if (!b || !b->buf || !b->peer) {
    OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf_done,
                      BIO_R_UNSUPPORTED_METHOD);
    return 0;
  }
  peer_b = b->peer->ptr;
  if (!peer_b || !peer_b->peer || peer_b->peer->ptr != b) {
    OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf_done,
                      BIO_R_UNSUPPORTED_METHOD);
    return 0;
  }

  b->request = 0;
  if (b->closed) {
    /* BIO is already closed. */
    OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf_done, BIO_R_BROKEN_PIPE);
    return 0;
  }

  if (!b->zero_copy_write_lock) {
    OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf_done,
                      BIO_R_INVALID_ARGUMENT);
    return 0;
  }

  rest = bio_zero_copy_get_write_buf(b, &dummy_write_buf, &dummy_write_offset);

  if (bytes_written > rest) {
    OPENSSL_PUT_ERROR(BIO, BIO_zero_copy_get_write_buf_done,
                      BIO_R_INVALID_ARGUMENT);
    return 0;
  }

  bio->num_write += bytes_written;
  /* Move write offset. */
  b->len += bytes_written;
  b->zero_copy_write_lock = 0;
  return 1;
}

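/* Illustrative use of the zero-copy write API above (a sketch, not part of
 * this file; |produce| is a hypothetical data source that fills at most
 * |available| bytes and returns how many it wrote):
 *
 *   uint8_t *write_buf;
 *   size_t write_offset, available;
 *   if (BIO_zero_copy_get_write_buf(bio, &write_buf, &write_offset,
 *                                   &available) &&
 *       available > 0) {
 *     size_t written = produce(write_buf + write_offset, available);
 *     BIO_zero_copy_get_write_buf_done(bio, written);
 *   }
 */
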
static int bio_read(BIO *bio, char *buf, int size_) {
  size_t size = size_;
  size_t rest;
  struct bio_bio_st *b, *peer_b;

  BIO_clear_retry_flags(bio);

  if (!bio->init) {
    return 0;
  }

  b = bio->ptr;
  assert(b != NULL);
  assert(b->peer != NULL);
  peer_b = b->peer->ptr;
  assert(peer_b != NULL);
  assert(peer_b->buf != NULL);

  peer_b->request = 0; /* will be set in "retry_read" situation */

  if (buf == NULL || size == 0 || peer_b->zero_copy_read_lock) {
    return 0;
  }

  if (peer_b->len == 0) {
    if (peer_b->closed) {
      return 0; /* writer has closed, and no data is left */
    } else {
      BIO_set_retry_read(bio); /* buffer is empty */
      if (size <= peer_b->size) {
        peer_b->request = size;
      } else {
        /* don't ask for more than the peer can
         * deliver in one write */
        peer_b->request = peer_b->size;
      }
      return -1;
    }
  }

  /* we can read */
  if (peer_b->len < size) {
    size = peer_b->len;
  }

  /* now read "size" bytes */
  rest = size;

  assert(rest > 0);
  /* one or two iterations */
  do {
    size_t chunk;

    assert(rest <= peer_b->len);
    if (peer_b->offset + rest <= peer_b->size) {
      chunk = rest;
    } else {
      /* wrap around ring buffer */
      chunk = peer_b->size - peer_b->offset;
    }
    assert(peer_b->offset + chunk <= peer_b->size);

    memcpy(buf, peer_b->buf + peer_b->offset, chunk);

    peer_b->len -= chunk;
    /* If zero_copy_write_lock == 1 we must advance the offset even if buffer
     * becomes empty, to make sure write_offset = (offset + len) % size
     * does not change. */
    if (peer_b->len || peer_b->zero_copy_write_lock) {
      peer_b->offset += chunk;
      assert(peer_b->offset <= peer_b->size);
      if (peer_b->offset == peer_b->size) {
        peer_b->offset = 0;
      }
      buf += chunk;
    } else {
      /* buffer now empty, no need to advance "buf" */
      assert(chunk == rest);
      peer_b->offset = 0;
    }
    rest -= chunk;
  } while (rest);

  return size;
}

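/* The copy loop in bio_read() runs at most twice: once for the bytes between
 * |offset| and the end of the buffer, and once for the bytes that wrapped to
 * the front. As a worked example (values chosen for illustration): with
 * size = 8, offset = 6 and len = 5, a 5-byte read copies 2 bytes from
 * indices 6..7 and then 3 bytes from indices 0..2. The offset is then reset
 * to 0 because the buffer is empty, or left at 3 if a zero-copy write is in
 * progress, so that write_offset = (offset + len) % size stays unchanged. */
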
static int bio_write(BIO *bio, const char *buf, int num_) {
  size_t num = num_;
  size_t rest;
  struct bio_bio_st *b;

  BIO_clear_retry_flags(bio);

  if (!bio->init || buf == NULL || num == 0) {
    return 0;
  }

  b = bio->ptr;
  assert(b != NULL);
  assert(b->peer != NULL);
  assert(b->buf != NULL);

  if (b->zero_copy_write_lock) {
    return 0;
  }

  b->request = 0;
  if (b->closed) {
    /* we already closed */
    OPENSSL_PUT_ERROR(BIO, bio_write, BIO_R_BROKEN_PIPE);
    return -1;
  }

  assert(b->len <= b->size);

  if (b->len == b->size) {
    BIO_set_retry_write(bio); /* buffer is full */
    return -1;
  }

  /* we can write */
  if (num > b->size - b->len) {
    num = b->size - b->len;
  }

  /* now write "num" bytes */
  rest = num;

  assert(rest > 0);
  /* one or two iterations */
  do {
    size_t write_offset;
    size_t chunk;

    assert(b->len + rest <= b->size);

    write_offset = b->offset + b->len;
    if (write_offset >= b->size) {
      write_offset -= b->size;
    }
    /* b->buf[write_offset] is the first byte we can write to. */

    if (write_offset + rest <= b->size) {
      chunk = rest;
    } else {
      /* wrap around ring buffer */
      chunk = b->size - write_offset;
    }

    memcpy(b->buf + write_offset, buf, chunk);

    b->len += chunk;

    assert(b->len <= b->size);

    rest -= chunk;
    buf += chunk;
  } while (rest);

  return num;
}

static int bio_make_pair(BIO* bio1, BIO* bio2,
                         size_t writebuf1_len, uint8_t* ext_writebuf1,
                         size_t writebuf2_len, uint8_t* ext_writebuf2) {
  struct bio_bio_st *b1, *b2;

  assert(bio1 != NULL);
  assert(bio2 != NULL);

  b1 = bio1->ptr;
  b2 = bio2->ptr;

  if (b1->peer != NULL || b2->peer != NULL) {
    OPENSSL_PUT_ERROR(BIO, bio_make_pair, BIO_R_IN_USE);
    return 0;
  }

  assert(b1->buf_externally_allocated == 0);
  assert(b2->buf_externally_allocated == 0);

  if (b1->buf == NULL) {
    if (writebuf1_len) {
      b1->size = writebuf1_len;
    }
    if (!ext_writebuf1) {
      b1->buf_externally_allocated = 0;
      b1->buf = OPENSSL_malloc(b1->size);
      if (b1->buf == NULL) {
        OPENSSL_PUT_ERROR(BIO, bio_make_pair, ERR_R_MALLOC_FAILURE);
        return 0;
      }
    } else {
      b1->buf = ext_writebuf1;
      b1->buf_externally_allocated = 1;
    }
    b1->len = 0;
    b1->offset = 0;
  }

  if (b2->buf == NULL) {
    if (writebuf2_len) {
      b2->size = writebuf2_len;
    }
    if (!ext_writebuf2) {
      b2->buf_externally_allocated = 0;
      b2->buf = OPENSSL_malloc(b2->size);
      if (b2->buf == NULL) {
        OPENSSL_PUT_ERROR(BIO, bio_make_pair, ERR_R_MALLOC_FAILURE);
        return 0;
      }
    } else {
      b2->buf = ext_writebuf2;
      b2->buf_externally_allocated = 1;
    }
    b2->len = 0;
    b2->offset = 0;
  }

  b1->peer = bio2;
  b1->closed = 0;
  b1->request = 0;
  b1->zero_copy_read_lock = 0;
  b1->zero_copy_write_lock = 0;
  b2->peer = bio1;
  b2->closed = 0;
  b2->request = 0;
  b2->zero_copy_read_lock = 0;
  b2->zero_copy_write_lock = 0;

  bio1->init = 1;
  bio2->init = 1;

  return 1;
}

static long bio_ctrl(BIO *bio, int cmd, long num, void *ptr) {
  long ret;
  struct bio_bio_st *b = bio->ptr;

  assert(b != NULL);

  switch (cmd) {
    /* specific CTRL codes */

    case BIO_C_GET_WRITE_BUF_SIZE:
      ret = (long)b->size;
      break;

    case BIO_C_GET_WRITE_GUARANTEE:
      /* How many bytes can the caller feed to the next write
       * without having to keep any? */
      if (b->peer == NULL || b->closed) {
        ret = 0;
      } else {
        ret = (long)b->size - b->len;
      }
      break;

    case BIO_C_GET_READ_REQUEST:
      /* If the peer unsuccessfully tried to read, how many bytes
       * were requested? (As with BIO_CTRL_PENDING, that number
       * can usually be treated as boolean.) */
      ret = (long)b->request;
      break;

    case BIO_C_RESET_READ_REQUEST:
      /* Reset request. (Can be useful after read attempts
       * at the other side that are meant to be non-blocking,
       * e.g. when probing SSL_read to see if any data is
       * available.) */
      b->request = 0;
      ret = 1;
      break;

    case BIO_C_SHUTDOWN_WR:
      /* similar to shutdown(..., SHUT_WR) */
      b->closed = 1;
      ret = 1;
      break;

    /* standard CTRL codes follow */

    case BIO_CTRL_GET_CLOSE:
      ret = bio->shutdown;
      break;

    case BIO_CTRL_SET_CLOSE:
      bio->shutdown = (int)num;
      ret = 1;
      break;

    case BIO_CTRL_PENDING:
      if (b->peer != NULL) {
        struct bio_bio_st *peer_b = b->peer->ptr;
        ret = (long)peer_b->len;
      } else {
        ret = 0;
      }
      break;

    case BIO_CTRL_WPENDING:
      ret = 0;
      if (b->buf != NULL) {
        ret = (long)b->len;
      }
      break;

    case BIO_CTRL_FLUSH:
      ret = 1;
      break;

    case BIO_CTRL_EOF: {
      BIO *other_bio = ptr;

      if (other_bio) {
        struct bio_bio_st *other_b = other_bio->ptr;
        assert(other_b != NULL);
        ret = other_b->len == 0 && other_b->closed;
      } else {
        ret = 1;
      }
    } break;

    default:
      ret = 0;
  }
  return ret;
}

static int bio_puts(BIO *bio, const char *str) {
  return bio_write(bio, str, strlen(str));
}

static const BIO_METHOD methods_biop = {
    BIO_TYPE_BIO, "BIO pair", bio_write, bio_read,
    bio_puts, NULL /* no bio_gets */, bio_ctrl, bio_new,
    bio_free, NULL /* no bio_callback_ctrl */
};

const BIO_METHOD *bio_s_bio(void) { return &methods_biop; }

int BIO_new_bio_pair(BIO** bio1_p, size_t writebuf1,
                     BIO** bio2_p, size_t writebuf2) {
  return BIO_new_bio_pair_external_buf(bio1_p, writebuf1, NULL, bio2_p,
                                       writebuf2, NULL);
}

int BIO_new_bio_pair_external_buf(BIO** bio1_p, size_t writebuf1_len,
                                  uint8_t* ext_writebuf1,
                                  BIO** bio2_p, size_t writebuf2_len,
                                  uint8_t* ext_writebuf2) {
  BIO *bio1 = NULL, *bio2 = NULL;
  int ret = 0;

  /* External buffers must have sizes greater than 0. */
  if ((ext_writebuf1 && !writebuf1_len) || (ext_writebuf2 && !writebuf2_len)) {
    goto err;
  }

  bio1 = BIO_new(bio_s_bio());
  if (bio1 == NULL) {
    goto err;
  }
  bio2 = BIO_new(bio_s_bio());
  if (bio2 == NULL) {
    goto err;
  }

  if (!bio_make_pair(bio1, bio2, writebuf1_len, ext_writebuf1, writebuf2_len,
                     ext_writebuf2)) {
    goto err;
  }
  ret = 1;

err:
  if (ret == 0) {
    BIO_free(bio1);
    bio1 = NULL;
    BIO_free(bio2);
    bio2 = NULL;
  }

  *bio1_p = bio1;
  *bio2_p = bio2;
  return ret;
}

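/* Illustrative use (a sketch): creating a connected pair with the default
 * 17KB write buffers, then releasing both halves. Bytes written into bio1
 * become readable from bio2 and vice versa. Error handling is elided.
 *
 *   BIO *bio1, *bio2;
 *   if (BIO_new_bio_pair(&bio1, 0, &bio2, 0)) {
 *     BIO_write(bio1, "hello", 5);
 *     char tmp[5];
 *     BIO_read(bio2, tmp, sizeof(tmp));
 *     BIO_free(bio1);
 *     BIO_free(bio2);
 *   }
 */
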
size_t BIO_ctrl_get_read_request(BIO *bio) {
  return BIO_ctrl(bio, BIO_C_GET_READ_REQUEST, 0, NULL);
}

size_t BIO_ctrl_get_write_guarantee(BIO *bio) {
  return BIO_ctrl(bio, BIO_C_GET_WRITE_GUARANTEE, 0, NULL);
}

int BIO_shutdown_wr(BIO *bio) {
  return BIO_ctrl(bio, BIO_C_SHUTDOWN_WR, 0, NULL);
}
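
/* Illustrative flow control with the helpers above (a sketch; |network_bio|
 * names one half of a pair and |read_from_socket| is a hypothetical helper,
 * neither defined in this file). A write of at most the reported guarantee
 * is accepted in full by bio_write():
 *
 *   uint8_t tmp[4096];
 *   size_t can_write = BIO_ctrl_get_write_guarantee(network_bio);
 *   if (can_write > sizeof(tmp)) {
 *     can_write = sizeof(tmp);
 *   }
 *   if (can_write > 0) {
 *     ssize_t got = read_from_socket(tmp, can_write);
 *     if (got > 0) {
 *       BIO_write(network_bio, tmp, (int)got);
 *     }
 *   }
 *
 * BIO_ctrl_get_read_request() reports how many bytes the peer's last failed
 * read asked for, which can be used to bound |can_write| further. */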