1 /*
2  * libwebsockets - small server side websockets and web server implementation
3  *
4  * Copyright (C) 2010 - 2019 Andy Green <andy@warmcat.com>
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to
8  * deal in the Software without restriction, including without limitation the
9  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10  * sell copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  */
24 
25 #include <private-lib-core.h>
26 
27 #define LWS_CPYAPP(ptr, str) { strcpy(ptr, str); ptr += strlen(str); }
28 
29 /*
30  * client-parser.c: lws_ws_client_rx_sm() needs to be roughly kept in
31  *   sync with changes here, esp related to ext draining
32  */
33 
34 int
35 lws_ws_rx_sm(struct lws *wsi, char already_processed, unsigned char c)
36 {
37 	int callback_action = LWS_CALLBACK_RECEIVE;
38 	struct lws_ext_pm_deflate_rx_ebufs pmdrx;
39 	unsigned short close_code;
40 	unsigned char *pp;
41 	int ret = 0;
42 	int n = 0;
43 #if !defined(LWS_WITHOUT_EXTENSIONS)
44 	int rx_draining_ext = 0;
45 	int lin;
46 #endif
47 
48 	pmdrx.eb_in.token = NULL;
49 	pmdrx.eb_in.len = 0;
50 	pmdrx.eb_out.token = NULL;
51 	pmdrx.eb_out.len = 0;
52 
53 	if (wsi->socket_is_permanently_unusable)
54 		return -1;
55 
56 	switch (wsi->lws_rx_parse_state) {
57 	case LWS_RXPS_NEW:
58 #if !defined(LWS_WITHOUT_EXTENSIONS)
59 		if (wsi->ws->rx_draining_ext) {
60 			pmdrx.eb_in.token = NULL;
61 			pmdrx.eb_in.len = 0;
62 			pmdrx.eb_out.token = NULL;
63 			pmdrx.eb_out.len = 0;
64 			lws_remove_wsi_from_draining_ext_list(wsi);
65 			rx_draining_ext = 1;
66 			lwsl_debug("%s: doing draining flow\n", __func__);
67 
68 			goto drain_extension;
69 		}
70 #endif
71 		switch (wsi->ws->ietf_spec_revision) {
72 		case 13:
73 			/*
74 			 * no prepended frame key any more
75 			 */
76 			wsi->ws->all_zero_nonce = 1;
77 			goto handle_first;
78 
79 		default:
80 			lwsl_warn("lws_ws_rx_sm: unknown spec version %d\n",
81 				  wsi->ws->ietf_spec_revision);
82 			break;
83 		}
84 		break;
85 	case LWS_RXPS_04_mask_1:
86 		wsi->ws->mask[1] = c;
87 		if (c)
88 			wsi->ws->all_zero_nonce = 0;
89 		wsi->lws_rx_parse_state = LWS_RXPS_04_mask_2;
90 		break;
91 	case LWS_RXPS_04_mask_2:
92 		wsi->ws->mask[2] = c;
93 		if (c)
94 			wsi->ws->all_zero_nonce = 0;
95 		wsi->lws_rx_parse_state = LWS_RXPS_04_mask_3;
96 		break;
97 	case LWS_RXPS_04_mask_3:
98 		wsi->ws->mask[3] = c;
99 		if (c)
100 			wsi->ws->all_zero_nonce = 0;
101 
102 		/*
103 		 * start from the zero'th byte in the XOR key buffer since
104 		 * this is the start of a frame with a new key
105 		 */
106 
107 		wsi->ws->mask_idx = 0;
108 
109 		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_1;
110 		break;
111 
112 	/*
113 	 *  04 logical framing from the spec (all this is masked when incoming
114 	 *  and has to be unmasked)
115 	 *
116 	 * We ignore the possibility of extension data because we don't
117 	 * negotiate any extensions at the moment.
118 	 *
119 	 *    0                   1                   2                   3
120 	 *    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
121 	 *   +-+-+-+-+-------+-+-------------+-------------------------------+
122 	 *   |F|R|R|R| opcode|R| Payload len |    Extended payload length    |
123 	 *   |I|S|S|S|  (4)  |S|     (7)     |             (16/63)           |
124 	 *   |N|V|V|V|       |V|             |   (if payload len==126/127)   |
125 	 *   | |1|2|3|       |4|             |                               |
126 	 *   +-+-+-+-+-------+-+-------------+ - - - - - - - - - - - - - - - +
127 	 *   |     Extended payload length continued, if payload len == 127  |
128 	 *   + - - - - - - - - - - - - - - - +-------------------------------+
129 	 *   |                               |         Extension data        |
130 	 *   +-------------------------------+ - - - - - - - - - - - - - - - +
131 	 *   :                                                               :
132 	 *   +---------------------------------------------------------------+
133 	 *   :                       Application data                        :
134 	 *   +---------------------------------------------------------------+
135 	 *
136 	 *  We pass payload through to userland as soon as we get it, ignoring
137 	 *  FIN.  It's up to userland to buffer it up if it wants to see a
138 	 *  whole unfragmented block of the original size (which may be up to
139 	 *  2^63 long!)
140 	 */
141 
142 	case LWS_RXPS_04_FRAME_HDR_1:
143 handle_first:
144 
145 		wsi->ws->opcode = c & 0xf;
146 		wsi->ws->rsv = c & 0x70;
147 		wsi->ws->final = !!((c >> 7) & 1);
148 		wsi->ws->defeat_check_utf8 = 0;
149 
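		/* RFC 6455: control frames (opcode & 8) may not be fragmented */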
150 		if (((wsi->ws->opcode) & 8) && !wsi->ws->final) {
151 			lws_close_reason(wsi, LWS_CLOSE_STATUS_PROTOCOL_ERR,
152 					(uint8_t *)"frag ctl", 8);
153 			return -1;
154 		}
155 
156 		switch (wsi->ws->opcode) {
157 		case LWSWSOPC_TEXT_FRAME:
158 			wsi->ws->check_utf8 = lws_check_opt(
159 				wsi->context->options,
160 				LWS_SERVER_OPTION_VALIDATE_UTF8);
161 			/* fallthru */
162 		case LWSWSOPC_BINARY_FRAME:
163 			if (wsi->ws->opcode == LWSWSOPC_BINARY_FRAME)
164 				wsi->ws->check_utf8 = 0;
165 			if (wsi->ws->continuation_possible) {
166 				lws_close_reason(wsi,
167 					LWS_CLOSE_STATUS_PROTOCOL_ERR,
168 					(uint8_t *)"bad cont", 8);
169 				return -1;
170 			}
171 			wsi->ws->rsv_first_msg = (c & 0x70);
172 #if !defined(LWS_WITHOUT_EXTENSIONS)
173 			/*
174 			 *  set the expectation that we will have to
175 			 * fake up the zlib trailer to the inflator for this
176 			 * frame
177 			 */
178 			wsi->ws->pmd_trailer_application = !!(c & 0x40);
179 #endif
180 			wsi->ws->frame_is_binary =
181 			     wsi->ws->opcode == LWSWSOPC_BINARY_FRAME;
182 			wsi->ws->first_fragment = 1;
183 			wsi->ws->continuation_possible = !wsi->ws->final;
184 			break;
185 		case LWSWSOPC_CONTINUATION:
186 			if (!wsi->ws->continuation_possible) {
187 				lws_close_reason(wsi,
188 					LWS_CLOSE_STATUS_PROTOCOL_ERR,
189 					(uint8_t *)"bad cont", 8);
190 				return -1;
191 			}
192 			break;
193 		case LWSWSOPC_CLOSE:
194 			wsi->ws->check_utf8 = 0;
195 			wsi->ws->utf8 = 0;
196 			break;
197 		case 3:
198 		case 4:
199 		case 5:
200 		case 6:
201 		case 7:
202 		case 0xb:
203 		case 0xc:
204 		case 0xd:
205 		case 0xe:
206 		case 0xf:
207 			lws_close_reason(wsi, LWS_CLOSE_STATUS_PROTOCOL_ERR,
208 					(uint8_t *)"bad opc", 7);
209 			lwsl_info("illegal opcode\n");
210 			return -1;
211 		}
212 
213 		if (wsi->ws->owed_a_fin &&
214 		    (wsi->ws->opcode == LWSWSOPC_TEXT_FRAME ||
215 		     wsi->ws->opcode == LWSWSOPC_BINARY_FRAME)) {
216 			lwsl_info("hey you owed us a FIN\n");
217 			lws_close_reason(wsi, LWS_CLOSE_STATUS_PROTOCOL_ERR,
218 					(uint8_t *)"bad fin", 7);
219 			return -1;
220 		}
221 		if ((!(wsi->ws->opcode & 8)) && wsi->ws->final) {
222 			wsi->ws->continuation_possible = 0;
223 			wsi->ws->owed_a_fin = 0;
224 		}
225 
226 		if (!wsi->ws->final)
227 			wsi->ws->owed_a_fin = 1;
228 
229 		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN;
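		/*
		 * RSV bits are only acceptable if an extension negotiated
		 * them, and then only RSV1 (0x40, eg, permessage-deflate)
		 */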
230 		if (wsi->ws->rsv &&
231 		    (
232 #if !defined(LWS_WITHOUT_EXTENSIONS)
233 				    !wsi->ws->count_act_ext ||
234 #endif
235 				    (wsi->ws->rsv & ~0x40))) {
236 			lws_close_reason(wsi, LWS_CLOSE_STATUS_PROTOCOL_ERR,
237 					 (uint8_t *)"rsv bits", 8);
238 			return -1;
239 		}
240 		break;
241 
242 	case LWS_RXPS_04_FRAME_HDR_LEN:
243 
244 		wsi->ws->this_frame_masked = !!(c & 0x80);
245 
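		/* 7-bit length: 0..125 literal, 126 = 16-bit follows, 127 = 64-bit follows */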
246 		switch (c & 0x7f) {
247 		case 126:
248 			/* control frames are not allowed to have big lengths */
249 			if (wsi->ws->opcode & 8)
250 				goto illegal_ctl_length;
251 
252 			wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN16_2;
253 			break;
254 		case 127:
255 			/* control frames are not allowed to have big lengths */
256 			if (wsi->ws->opcode & 8)
257 				goto illegal_ctl_length;
258 
259 			wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_8;
260 			break;
261 		default:
262 			wsi->ws->rx_packet_length = c & 0x7f;
263 
264 
265 			if (wsi->ws->this_frame_masked)
266 				wsi->lws_rx_parse_state =
267 						LWS_RXPS_07_COLLECT_FRAME_KEY_1;
268 			else
269 				if (wsi->ws->rx_packet_length) {
270 					wsi->lws_rx_parse_state =
271 					LWS_RXPS_WS_FRAME_PAYLOAD;
272 				} else {
273 					wsi->lws_rx_parse_state = LWS_RXPS_NEW;
274 					goto spill;
275 				}
276 			break;
277 		}
278 		break;
279 
280 	case LWS_RXPS_04_FRAME_HDR_LEN16_2:
281 		wsi->ws->rx_packet_length = c << 8;
282 		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN16_1;
283 		break;
284 
285 	case LWS_RXPS_04_FRAME_HDR_LEN16_1:
286 		wsi->ws->rx_packet_length |= c;
287 		if (wsi->ws->this_frame_masked)
288 			wsi->lws_rx_parse_state =
289 					LWS_RXPS_07_COLLECT_FRAME_KEY_1;
290 		else {
291 			wsi->lws_rx_parse_state =
292 				LWS_RXPS_WS_FRAME_PAYLOAD;
293 		}
294 		break;
295 
296 	case LWS_RXPS_04_FRAME_HDR_LEN64_8:
297 		if (c & 0x80) {
298 			lwsl_warn("b63 of length must be zero\n");
299 			/* kill the connection */
300 			return -1;
301 		}
302 #if defined __LP64__
303 		wsi->ws->rx_packet_length = ((size_t)c) << 56;
304 #else
305 		wsi->ws->rx_packet_length = 0;
306 #endif
307 		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_7;
308 		break;
309 
310 	case LWS_RXPS_04_FRAME_HDR_LEN64_7:
311 #if defined __LP64__
312 		wsi->ws->rx_packet_length |= ((size_t)c) << 48;
313 #endif
314 		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_6;
315 		break;
316 
317 	case LWS_RXPS_04_FRAME_HDR_LEN64_6:
318 #if defined __LP64__
319 		wsi->ws->rx_packet_length |= ((size_t)c) << 40;
320 #endif
321 		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_5;
322 		break;
323 
324 	case LWS_RXPS_04_FRAME_HDR_LEN64_5:
325 #if defined __LP64__
326 		wsi->ws->rx_packet_length |= ((size_t)c) << 32;
327 #endif
328 		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_4;
329 		break;
330 
331 	case LWS_RXPS_04_FRAME_HDR_LEN64_4:
332 		wsi->ws->rx_packet_length |= ((size_t)c) << 24;
333 		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_3;
334 		break;
335 
336 	case LWS_RXPS_04_FRAME_HDR_LEN64_3:
337 		wsi->ws->rx_packet_length |= ((size_t)c) << 16;
338 		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_2;
339 		break;
340 
341 	case LWS_RXPS_04_FRAME_HDR_LEN64_2:
342 		wsi->ws->rx_packet_length |= ((size_t)c) << 8;
343 		wsi->lws_rx_parse_state = LWS_RXPS_04_FRAME_HDR_LEN64_1;
344 		break;
345 
346 	case LWS_RXPS_04_FRAME_HDR_LEN64_1:
347 		wsi->ws->rx_packet_length |= ((size_t)c);
348 		if (wsi->ws->this_frame_masked)
349 			wsi->lws_rx_parse_state =
350 					LWS_RXPS_07_COLLECT_FRAME_KEY_1;
351 		else
352 			wsi->lws_rx_parse_state = LWS_RXPS_WS_FRAME_PAYLOAD;
353 		break;
354 
355 	case LWS_RXPS_07_COLLECT_FRAME_KEY_1:
356 		wsi->ws->mask[0] = c;
357 		if (c)
358 			wsi->ws->all_zero_nonce = 0;
359 		wsi->lws_rx_parse_state = LWS_RXPS_07_COLLECT_FRAME_KEY_2;
360 		break;
361 
362 	case LWS_RXPS_07_COLLECT_FRAME_KEY_2:
363 		wsi->ws->mask[1] = c;
364 		if (c)
365 			wsi->ws->all_zero_nonce = 0;
366 		wsi->lws_rx_parse_state = LWS_RXPS_07_COLLECT_FRAME_KEY_3;
367 		break;
368 
369 	case LWS_RXPS_07_COLLECT_FRAME_KEY_3:
370 		wsi->ws->mask[2] = c;
371 		if (c)
372 			wsi->ws->all_zero_nonce = 0;
373 		wsi->lws_rx_parse_state = LWS_RXPS_07_COLLECT_FRAME_KEY_4;
374 		break;
375 
376 	case LWS_RXPS_07_COLLECT_FRAME_KEY_4:
377 		wsi->ws->mask[3] = c;
378 		if (c)
379 			wsi->ws->all_zero_nonce = 0;
380 		wsi->lws_rx_parse_state = LWS_RXPS_WS_FRAME_PAYLOAD;
381 		wsi->ws->mask_idx = 0;
382 		if (wsi->ws->rx_packet_length == 0) {
383 			wsi->lws_rx_parse_state = LWS_RXPS_NEW;
384 			goto spill;
385 		}
386 		break;
387 
388 
389 	case LWS_RXPS_WS_FRAME_PAYLOAD:
390 		assert(wsi->ws->rx_ubuf);
391 
392 		if (wsi->ws->rx_ubuf_head + LWS_PRE >= wsi->ws->rx_ubuf_alloc) {
393 			lwsl_err("Attempted overflow \n");
394 			return -1;
395 		}
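		/* unmask with the rotating 4-byte key unless the key is all zeros */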
396 		if (!(already_processed & ALREADY_PROCESSED_IGNORE_CHAR)) {
397 			if (wsi->ws->all_zero_nonce)
398 				wsi->ws->rx_ubuf[LWS_PRE +
399 				                 (wsi->ws->rx_ubuf_head++)] = c;
400 			else
401 				wsi->ws->rx_ubuf[LWS_PRE +
402 				                 (wsi->ws->rx_ubuf_head++)] =
403 				   c ^ wsi->ws->mask[(wsi->ws->mask_idx++) & 3];
404 
405 			--wsi->ws->rx_packet_length;
406 		}
407 
408 		if (!wsi->ws->rx_packet_length) {
409 			lwsl_debug("%s: ws fragment length exhausted\n",
410 				   __func__);
411 			/* spill because we have the whole frame */
412 			wsi->lws_rx_parse_state = LWS_RXPS_NEW;
413 			goto spill;
414 		}
415 #if !defined(LWS_WITHOUT_EXTENSIONS)
416 		if (wsi->ws->rx_draining_ext) {
417 			lwsl_debug("%s: UNTIL_EXHAUSTED draining\n", __func__);
418 			goto drain_extension;
419 		}
420 #endif
421 		/*
422 		 * if there's no protocol max frame size given, we are
423 		 * supposed to default to context->pt_serv_buf_size
424 		 */
425 		if (!wsi->protocol->rx_buffer_size &&
426 		    wsi->ws->rx_ubuf_head != wsi->context->pt_serv_buf_size)
427 			break;
428 
429 		if (wsi->protocol->rx_buffer_size &&
430 		    wsi->ws->rx_ubuf_head != wsi->protocol->rx_buffer_size)
431 			break;
432 
433 		/* spill because we filled our rx buffer */
434 spill:
435 		/*
436 		 * is this frame a control packet we should take care of at this
437 		 * layer?  If so service it and hide it from the user callback
438 		 */
439 
440 		lwsl_parser("spill on %s\n", wsi->protocol->name);
441 
442 		switch (wsi->ws->opcode) {
443 		case LWSWSOPC_CLOSE:
444 
445 			if (wsi->ws->peer_has_sent_close)
446 				break;
447 
448 			wsi->ws->peer_has_sent_close = 1;
449 
450 			pp = &wsi->ws->rx_ubuf[LWS_PRE];
451 			if (lws_check_opt(wsi->context->options,
452 					  LWS_SERVER_OPTION_VALIDATE_UTF8) &&
453 			    wsi->ws->rx_ubuf_head > 2 &&
454 			    lws_check_utf8(&wsi->ws->utf8, pp + 2,
455 					   wsi->ws->rx_ubuf_head - 2))
456 				goto utf8_fail;
457 
458 			/* is this an acknowledgment of our close? */
459 			if (lwsi_state(wsi) == LRS_AWAITING_CLOSE_ACK) {
460 				/*
461 				 * fine he has told us he is closing too, let's
462 				 * finish our close
463 				 */
464 				lwsl_parser("seen client close ack\n");
465 				return -1;
466 			}
467 			if (lwsi_state(wsi) == LRS_RETURNED_CLOSE)
468 				/* if he sends us 2 CLOSE, kill him */
469 				return -1;
470 
471 			if (lws_partial_buffered(wsi)) {
472 				/*
473 				 * if we're in the middle of something,
474 				 * we can't do a normal close response and
475 				 * have to just close our end.
476 				 */
477 				wsi->socket_is_permanently_unusable = 1;
478 				lwsl_parser("Closing on peer close "
479 					    "due to pending tx\n");
480 				return -1;
481 			}
482 
483 			if (wsi->ws->rx_ubuf_head >= 2) {
484 				close_code = (pp[0] << 8) | pp[1];
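				/*
				 * reserved / unassigned close codes get
				 * coerced to 1002 (protocol error) before
				 * being reported to user code
				 */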
485 				if (close_code < 1000 ||
486 				    close_code == 1004 ||
487 				    close_code == 1005 ||
488 				    close_code == 1006 ||
489 				    close_code == 1012 ||
490 				    close_code == 1013 ||
491 				    close_code == 1014 ||
492 				    close_code == 1015 ||
493 				    (close_code >= 1016 && close_code < 3000)
494 				) {
495 					pp[0] = (LWS_CLOSE_STATUS_PROTOCOL_ERR >> 8) & 0xff;
496 					pp[1] = LWS_CLOSE_STATUS_PROTOCOL_ERR & 0xff;
497 				}
498 			}
499 
500 			if (user_callback_handle_rxflow(
501 					wsi->protocol->callback, wsi,
502 					LWS_CALLBACK_WS_PEER_INITIATED_CLOSE,
503 					wsi->user_space,
504 					&wsi->ws->rx_ubuf[LWS_PRE],
505 					wsi->ws->rx_ubuf_head))
506 				return -1;
507 
508 			lwsl_parser("server sees client close packet\n");
509 			lwsi_set_state(wsi, LRS_RETURNED_CLOSE);
510 			/* deal with the close packet contents as a PONG */
511 			wsi->ws->payload_is_close = 1;
512 			goto process_as_ping;
513 
514 		case LWSWSOPC_PING:
515 			lwsl_info("received %d byte ping, sending pong\n",
516 						 (int)wsi->ws->rx_ubuf_head);
517 
518 			if (wsi->ws->ping_pending_flag) {
519 				/*
520 				 * there is already a pending ping payload
521 				 * we should just log and drop
522 				 */
523 				lwsl_parser("DROP PING since one pending\n");
524 				goto ping_drop;
525 			}
526 process_as_ping:
527 			/* control packets can only be < 128 bytes long */
528 			if (wsi->ws->rx_ubuf_head > 128 - 3) {
529 				lwsl_parser("DROP PING payload too large\n");
530 				goto ping_drop;
531 			}
532 
533 			/* stash the pong payload */
534 			memcpy(wsi->ws->ping_payload_buf + LWS_PRE,
535 			       &wsi->ws->rx_ubuf[LWS_PRE],
536 				wsi->ws->rx_ubuf_head);
537 
538 			wsi->ws->ping_payload_len = wsi->ws->rx_ubuf_head;
539 			wsi->ws->ping_pending_flag = 1;
540 
541 			/* get it sent as soon as possible */
542 			lws_callback_on_writable(wsi);
543 ping_drop:
544 			wsi->ws->rx_ubuf_head = 0;
545 			return 0;
546 
547 		case LWSWSOPC_PONG:
548 			lwsl_info("received pong\n");
549 			lwsl_hexdump(&wsi->ws->rx_ubuf[LWS_PRE],
550 			             wsi->ws->rx_ubuf_head);
551 
552 			lws_validity_confirmed(wsi);
553 
554 			/* issue it */
555 			callback_action = LWS_CALLBACK_RECEIVE_PONG;
556 			break;
557 
558 		case LWSWSOPC_TEXT_FRAME:
559 		case LWSWSOPC_BINARY_FRAME:
560 		case LWSWSOPC_CONTINUATION:
561 			break;
562 
563 		default:
564 			lwsl_parser("unknown opc %x\n", wsi->ws->opcode);
565 
566 			return -1;
567 		}
568 
569 		/*
570 		 * No, it's real payload, pass it up to the user callback.
571 		 *
572 		 * We have been statefully collecting it in the
573 		 * LWS_RXPS_WS_FRAME_PAYLOAD clause above.
574 		 *
575 		 * It's nicely buffered with the pre-padding taken care of
576 		 * so it can be sent straight out again using lws_write.
577 		 *
578 		 * However, now we have a chunk of it, we want to deal with it
579 		 * all here.  Since this may be input to permessage-deflate and
580 		 * there are block limits on that for input and output, we may
581 		 * need to iterate.
582 		 */
583 
584 		pmdrx.eb_in.token = &wsi->ws->rx_ubuf[LWS_PRE];
585 		pmdrx.eb_in.len = wsi->ws->rx_ubuf_head;
586 
587 		/* for the non-pm-deflate case */
588 
589 		pmdrx.eb_out = pmdrx.eb_in;
590 
591 		if (wsi->ws->opcode == LWSWSOPC_PONG && !pmdrx.eb_in.len)
592 			goto already_done;
593 #if !defined(LWS_WITHOUT_EXTENSIONS)
594 drain_extension:
595 #endif
596 
597 		do {
598 
599 //			lwsl_notice("%s: pmdrx.eb_in.len: %d\n", __func__,
600 //					(int)pmdrx.eb_in.len);
601 
602 			if (lwsi_state(wsi) == LRS_RETURNED_CLOSE ||
603 			    lwsi_state(wsi) == LRS_AWAITING_CLOSE_ACK)
604 				goto already_done;
605 
606 			n = PMDR_DID_NOTHING;
607 
608 #if !defined(LWS_WITHOUT_EXTENSIONS)
609 			lin = pmdrx.eb_in.len;
610 			//if (lin)
611 			//	lwsl_hexdump_notice(ebuf.token, ebuf.len);
612 			lwsl_ext("%s: +++ passing %d %p to ext\n", __func__,
613 					pmdrx.eb_in.len, pmdrx.eb_in.token);
614 
615 			n = lws_ext_cb_active(wsi, LWS_EXT_CB_PAYLOAD_RX, &pmdrx, 0);
616 			lwsl_debug("%s: ext says %d / ebuf.len %d\n", __func__,
617 				   n, pmdrx.eb_out.len);
618 			if (wsi->ws->rx_draining_ext)
619 				already_processed &= ~ALREADY_PROCESSED_NO_CB;
620 #endif
621 
622 			/*
623 			 * ebuf may be pointing somewhere completely different
624 			 * now, it's the output
625 			 */
626 #if !defined(LWS_WITHOUT_EXTENSIONS)
627 			if (n < 0) {
628 				/*
629 				 * we may rely on this to get RX, just drop
630 				 * connection
631 				 */
632 				wsi->socket_is_permanently_unusable = 1;
633 				return -1;
634 			}
635 			if (n == PMDR_DID_NOTHING)
636 				break;
637 #endif
638 			lwsl_debug("%s: post ext ret %d, ebuf in %d / out %d\n",
639 				    __func__, n, pmdrx.eb_in.len,
640 				    pmdrx.eb_out.len);
641 
642 #if !defined(LWS_WITHOUT_EXTENSIONS)
643 			if (rx_draining_ext && !pmdrx.eb_out.len) {
644 				lwsl_debug("   --- ending drain on 0 read\n");
645 				goto already_done;
646 			}
647 
648 			if (n == PMDR_HAS_PENDING)
649 				/*
650 				 * extension had more...
651 				 * main loop will come back
652 				 */
653 				lws_add_wsi_to_draining_ext_list(wsi);
654 			else
655 				lws_remove_wsi_from_draining_ext_list(wsi);
656 
657 			rx_draining_ext = wsi->ws->rx_draining_ext;
658 #endif
659 
660 			if (pmdrx.eb_out.len &&
661 			    wsi->ws->check_utf8 && !wsi->ws->defeat_check_utf8) {
662 				if (lws_check_utf8(&wsi->ws->utf8,
663 						   pmdrx.eb_out.token,
664 						   pmdrx.eb_out.len)) {
665 					lws_close_reason(wsi,
666 						LWS_CLOSE_STATUS_INVALID_PAYLOAD,
667 						(uint8_t *)"bad utf8", 8);
668 					goto utf8_fail;
669 				}
670 
671 				/* we are ending partway through utf-8 character? */
672 				if (!wsi->ws->rx_packet_length &&
673 				    wsi->ws->final && wsi->ws->utf8
674 #if !defined(LWS_WITHOUT_EXTENSIONS)
675 				    /* if ext not negotiated, going to be UNKNOWN */
676 				    && (n == PMDR_EMPTY_FINAL || n == PMDR_UNKNOWN)
677 #endif
678 				) {
679 					lwsl_info("FINAL utf8 error\n");
680 					lws_close_reason(wsi,
681 						LWS_CLOSE_STATUS_INVALID_PAYLOAD,
682 						(uint8_t *)"partial utf8", 12);
683 utf8_fail:
684 					lwsl_notice("utf8 error\n");
685 					lwsl_hexdump_notice(pmdrx.eb_out.token,
686 							    pmdrx.eb_out.len);
687 
688 					return -1;
689 				}
690 			}
691 
692 			/* if pmd not enabled, in == out */
693 
694 			if (n == PMDR_DID_NOTHING
695 #if !defined(LWS_WITHOUT_EXTENSIONS)
696 				       	||
697 			    n == PMDR_UNKNOWN
698 #endif
699 			    )
700 				pmdrx.eb_in.len -= pmdrx.eb_out.len;
701 
702 			if (!wsi->wsistate_pre_close &&
703 			    (pmdrx.eb_out.len >= 0 ||
704 			     callback_action == LWS_CALLBACK_RECEIVE_PONG ||
705 						       n == PMDR_EMPTY_FINAL)) {
706 				if (pmdrx.eb_out.len)
707 					pmdrx.eb_out.token[pmdrx.eb_out.len] = '\0';
708 
709 				if (wsi->protocol->callback &&
710 				    !(already_processed & ALREADY_PROCESSED_NO_CB)) {
711 					if (callback_action ==
712 						      LWS_CALLBACK_RECEIVE_PONG)
713 						lwsl_info("Doing pong callback\n");
714 
715 					ret = user_callback_handle_rxflow(
716 						wsi->protocol->callback, wsi,
717 						(enum lws_callback_reasons)
718 							     callback_action,
719 						wsi->user_space,
720 						pmdrx.eb_out.token,
721 						pmdrx.eb_out.len);
722 				}
723 				wsi->ws->first_fragment = 0;
724 			}
725 
726 #if !defined(LWS_WITHOUT_EXTENSIONS)
727 			if (!lin)
728 				break;
729 #endif
730 
731 		} while (pmdrx.eb_in.len
732 #if !defined(LWS_WITHOUT_EXTENSIONS)
733 				|| rx_draining_ext
734 #endif
735 		);
736 
737 already_done:
738 		wsi->ws->rx_ubuf_head = 0;
739 		break;
740 	}
741 
742 	return ret;
743 
744 illegal_ctl_length:
745 
746 	lwsl_warn("Control frame with xtended length is illegal\n");
747 	/* kill the connection */
748 	return -1;
749 }
750 
751 
752 size_t
753 lws_remaining_packet_payload(struct lws *wsi)
754 {
755 	return wsi->ws->rx_packet_length;
756 }
757 
758 int lws_frame_is_binary(struct lws *wsi)
759 {
760 	return wsi->ws->frame_is_binary;
761 }
762 
763 void
764 lws_add_wsi_to_draining_ext_list(struct lws *wsi)
765 {
766 #if !defined(LWS_WITHOUT_EXTENSIONS)
767 	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
768 
769 	if (wsi->ws->rx_draining_ext)
770 		return;
771 
772 	lwsl_debug("%s: RX EXT DRAINING: Adding to list\n", __func__);
773 
774 	wsi->ws->rx_draining_ext = 1;
775 	wsi->ws->rx_draining_ext_list = pt->ws.rx_draining_ext_list;
776 	pt->ws.rx_draining_ext_list = wsi;
777 #endif
778 }
779 
780 void
781 lws_remove_wsi_from_draining_ext_list(struct lws *wsi)
782 {
783 #if !defined(LWS_WITHOUT_EXTENSIONS)
784 	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
785 	struct lws **w = &pt->ws.rx_draining_ext_list;
786 
787 	if (!wsi->ws->rx_draining_ext)
788 		return;
789 
790 	lwsl_debug("%s: RX EXT DRAINING: Removing from list\n", __func__);
791 
792 	wsi->ws->rx_draining_ext = 0;
793 
794 	/* remove us from context draining ext list */
795 	while (*w) {
796 		if (*w == wsi) {
797 			/* if us, point it instead to who we were pointing to */
798 			*w = wsi->ws->rx_draining_ext_list;
799 			break;
800 		}
801 		w = &((*w)->ws->rx_draining_ext_list);
802 	}
803 	wsi->ws->rx_draining_ext_list = NULL;
804 #endif
805 }
806 
807 static int
808 lws_0405_frame_mask_generate(struct lws *wsi)
809 {
810 	size_t n;
811 	/* fetch the per-frame nonce */
812 
813 	n = lws_get_random(lws_get_context(wsi), wsi->ws->mask, 4);
814 	if (n != 4) {
815 		lwsl_parser("Unable to read from random device %s %d\n",
816 			    SYSTEM_RANDOM_FILEPATH, (int)n);
817 		return 1;
818 	}
819 
820 	/* start masking from first byte of masking key buffer */
821 	wsi->ws->mask_idx = 0;
822 
823 	return 0;
824 }
825 
826 int
827 lws_server_init_wsi_for_ws(struct lws *wsi)
828 {
829 	int n;
830 
831 	lwsi_set_state(wsi, LRS_ESTABLISHED);
832 
833 	/*
834 	 * create the frame buffer for this connection according to the
835 	 * size mentioned in the protocol definition.  If 0 there, use
836 	 * a big default for compatibility
837 	 */
838 
839 	n = (int)wsi->protocol->rx_buffer_size;
840 	if (!n)
841 		n = wsi->context->pt_serv_buf_size;
842 	n += LWS_PRE;
843 	wsi->ws->rx_ubuf = lws_malloc(n + 4 /* 0x0000ffff zlib */, "rx_ubuf");
844 	if (!wsi->ws->rx_ubuf) {
845 		lwsl_err("Out of Mem allocating rx buffer %d\n", n);
846 		return 1;
847 	}
848 	wsi->ws->rx_ubuf_alloc = n;
849 
850 	/* notify user code that we're ready to roll */
851 
852 	if (wsi->protocol->callback)
853 		if (wsi->protocol->callback(wsi, LWS_CALLBACK_ESTABLISHED,
854 					    wsi->user_space,
855 #ifdef LWS_WITH_TLS
856 					    wsi->tls.ssl,
857 #else
858 					    NULL,
859 #endif
860 					    wsi->h2_stream_carries_ws))
861 			return 1;
862 
863 	lws_validity_confirmed(wsi);
864 	lwsl_debug("ws established\n");
865 
866 	return 0;
867 }
868 
869 
870 
871 int
872 lws_is_final_fragment(struct lws *wsi)
873 {
874 #if !defined(LWS_WITHOUT_EXTENSIONS)
875 	lwsl_debug("%s: final %d, rx pk length %ld, draining %ld\n", __func__,
876 		   wsi->ws->final, (long)wsi->ws->rx_packet_length,
877 		   (long)wsi->ws->rx_draining_ext);
878 	return wsi->ws->final && !wsi->ws->rx_packet_length &&
879 	       !wsi->ws->rx_draining_ext;
880 #else
881 	return wsi->ws->final && !wsi->ws->rx_packet_length;
882 #endif
883 }
884 
885 int
886 lws_is_first_fragment(struct lws *wsi)
887 {
888 	return wsi->ws->first_fragment;
889 }
890 
891 unsigned char
892 lws_get_reserved_bits(struct lws *wsi)
893 {
894 	return wsi->ws->rsv;
895 }
896 
897 int
898 lws_get_close_length(struct lws *wsi)
899 {
900 	return wsi->ws->close_in_ping_buffer_len;
901 }
902 
903 unsigned char *
904 lws_get_close_payload(struct lws *wsi)
905 {
906 	return &wsi->ws->ping_payload_buf[LWS_PRE];
907 }
908 
909 void
910 lws_close_reason(struct lws *wsi, enum lws_close_status status,
911 		 unsigned char *buf, size_t len)
912 {
913 	unsigned char *p, *start;
914 	int budget = sizeof(wsi->ws->ping_payload_buf) - LWS_PRE;
915 
916 	assert(lwsi_role_ws(wsi));
917 
918 	start = p = &wsi->ws->ping_payload_buf[LWS_PRE];
919 
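	/* 2-byte status code in network byte order, then as much reason text as fits in the buffer */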
920 	*p++ = (((int)status) >> 8) & 0xff;
921 	*p++ = ((int)status) & 0xff;
922 
923 	if (buf)
924 		while (len-- && p < start + budget)
925 			*p++ = *buf++;
926 
927 	wsi->ws->close_in_ping_buffer_len = lws_ptr_diff(p, start);
928 }
929 
930 static int
931 lws_is_ws_with_ext(struct lws *wsi)
932 {
933 #if defined(LWS_WITHOUT_EXTENSIONS)
934 	return 0;
935 #else
936 	return lwsi_role_ws(wsi) && !!wsi->ws->count_act_ext;
937 #endif
938 }
939 
940 static int
941 rops_handle_POLLIN_ws(struct lws_context_per_thread *pt, struct lws *wsi,
942 		       struct lws_pollfd *pollfd)
943 {
944 	unsigned int pending = 0;
945 	struct lws_tokens ebuf;
946 	char buffered = 0;
947 	int n = 0, m;
948 #if defined(LWS_WITH_HTTP2)
949 	struct lws *wsi1;
950 #endif
951 
952 	if (!wsi->ws) {
953 		lwsl_err("ws role wsi with no ws\n");
954 		return LWS_HPI_RET_PLEASE_CLOSE_ME;
955 	}
956 
957 	// lwsl_notice("%s: %s\n", __func__, wsi->protocol->name);
958 
959 	//lwsl_info("%s: wsistate 0x%x, pollout %d\n", __func__,
960 	//	   wsi->wsistate, pollfd->revents & LWS_POLLOUT);
961 
962 	/*
963 	 * something went wrong with parsing the handshake, and
964 	 * we ended up back in the event loop without completing it
965 	 */
966 	if (lwsi_state(wsi) == LRS_PRE_WS_SERVING_ACCEPT) {
967 		wsi->socket_is_permanently_unusable = 1;
968 		return LWS_HPI_RET_PLEASE_CLOSE_ME;
969 	}
970 
971 	ebuf.token = NULL;
972 	ebuf.len = 0;
973 
974 	if (lwsi_state(wsi) == LRS_WAITING_CONNECT) {
975 #if defined(LWS_WITH_CLIENT)
976 		if ((pollfd->revents & LWS_POLLOUT) &&
977 		    lws_handle_POLLOUT_event(wsi, pollfd)) {
978 			lwsl_debug("POLLOUT event closed it\n");
979 			return LWS_HPI_RET_PLEASE_CLOSE_ME;
980 		}
981 
982 		n = lws_client_socket_service(wsi, pollfd);
983 		if (n)
984 			return LWS_HPI_RET_WSI_ALREADY_DIED;
985 #endif
986 		return LWS_HPI_RET_HANDLED;
987 	}
988 
989 	/* 1: something requested a callback when it was OK to write */
990 
991 	if ((pollfd->revents & LWS_POLLOUT) &&
992 	    lwsi_state_can_handle_POLLOUT(wsi) &&
993 	    lws_handle_POLLOUT_event(wsi, pollfd)) {
994 		if (lwsi_state(wsi) == LRS_RETURNED_CLOSE)
995 			lwsi_set_state(wsi, LRS_FLUSHING_BEFORE_CLOSE);
996 
997 		return LWS_HPI_RET_PLEASE_CLOSE_ME;
998 	}
999 
1000 	if (lwsi_state(wsi) == LRS_RETURNED_CLOSE ||
1001 	    lwsi_state(wsi) == LRS_WAITING_TO_SEND_CLOSE) {
1002 		/*
1003 		 * we stopped caring about anything except control
1004 		 * packets.  Force flow control off, defeat tx
1005 		 * draining.
1006 		 */
1007 		lws_rx_flow_control(wsi, 1);
1008 #if !defined(LWS_WITHOUT_EXTENSIONS)
1009 		if (wsi->ws)
1010 			wsi->ws->tx_draining_ext = 0;
1011 #endif
1012 	}
1013 #if !defined(LWS_WITHOUT_EXTENSIONS)
1014 	if (wsi->ws->tx_draining_ext) {
1015 		lws_handle_POLLOUT_event(wsi, pollfd);
1016 		//lwsl_notice("%s: tx drain\n", __func__);
1017 		/*
1018 		 * We cannot deal with new RX until the TX ext path has
1019 		 * been drained.  It's because new rx will, eg, crap on
1020 		 * the wsi rx buf that may be needed to retain state.
1021 		 *
1022 		 * TX ext drain path MUST go through event loop to avoid
1023 		 * blocking.
1024 		 */
1025 		lws_callback_on_writable(wsi);
1026 		return LWS_HPI_RET_HANDLED;
1027 	}
1028 #endif
1029 	if ((pollfd->revents & LWS_POLLIN) && lws_is_flowcontrolled(wsi)) {
1030 		/* We cannot deal with any kind of new RX because we are
1031 		 * RX-flowcontrolled.
1032 		 */
1033 		lwsl_info("%s: flowcontrolled, ignoring rx\n", __func__);
1034 
1035 		if (__lws_change_pollfd(wsi, LWS_POLLIN, 0))
1036 			return -1;
1037 
1038 		return LWS_HPI_RET_HANDLED;
1039 	}
1040 
1041 	if (lws_is_flowcontrolled(wsi))
1042 		return LWS_HPI_RET_HANDLED;
1043 
1044 #if defined(LWS_WITH_HTTP2)
1045 	if (wsi->mux_substream || wsi->upgraded_to_http2) {
1046 		wsi1 = lws_get_network_wsi(wsi);
1047 		if (wsi1 && lws_has_buffered_out(wsi1))
1048 			/* We cannot deal with any kind of new RX
1049 			 * because we are dealing with a partial send
1050 			 * (new RX may trigger new http_action() that
1051 			 * expect to be able to send)
1052 			 */
1053 			return LWS_HPI_RET_HANDLED;
1054 	}
1055 #endif
1056 
1057 #if !defined(LWS_WITHOUT_EXTENSIONS)
1058 	/* 2: RX Extension needs to be drained
1059 	 */
1060 
1061 	if (wsi->ws->rx_draining_ext) {
1062 
1063 		lwsl_debug("%s: RX EXT DRAINING: Service\n", __func__);
1064 #if defined(LWS_WITH_CLIENT)
1065 		if (lwsi_role_client(wsi)) {
1066 			n = lws_ws_client_rx_sm(wsi, 0);
1067 			if (n < 0)
1068 				/* we closed wsi */
1069 				return LWS_HPI_RET_PLEASE_CLOSE_ME;
1070 		} else
1071 #endif
1072 			n = lws_ws_rx_sm(wsi, ALREADY_PROCESSED_IGNORE_CHAR, 0);
1073 
1074 		return LWS_HPI_RET_HANDLED;
1075 	}
1076 
1077 	if (wsi->ws->rx_draining_ext)
1078 		/*
1079 		 * We have RX EXT content to drain, but can't do it
1080 		 * right now.  That means we cannot do anything lower
1081 		 * priority either.
1082 		 */
1083 		return LWS_HPI_RET_HANDLED;
1084 #endif
1085 
1086 	/* 3: buflist needs to be drained
1087 	 */
1088 read:
1089 	//lws_buflist_describe(&wsi->buflist, wsi, __func__);
1090 	ebuf.len = (int)lws_buflist_next_segment_len(&wsi->buflist,
1091 						     &ebuf.token);
1092 	if (ebuf.len) {
1093 		lwsl_info("draining buflist (len %d)\n", ebuf.len);
1094 		buffered = 1;
1095 		goto drain;
1096 	}
1097 
1098 	if (!(pollfd->revents & pollfd->events & LWS_POLLIN) && !wsi->http.ah)
1099 		return LWS_HPI_RET_HANDLED;
1100 
1101 	if (lws_is_flowcontrolled(wsi)) {
1102 		lwsl_info("%s: %p should be rxflow (bm 0x%x)..\n",
1103 			    __func__, wsi, wsi->rxflow_bitmap);
1104 		return LWS_HPI_RET_HANDLED;
1105 	}
1106 
1107 	if (!(lwsi_role_client(wsi) &&
1108 	      (lwsi_state(wsi) != LRS_ESTABLISHED &&
1109 	       lwsi_state(wsi) != LRS_AWAITING_CLOSE_ACK &&
1110 	       lwsi_state(wsi) != LRS_H2_WAITING_TO_SEND_HEADERS))) {
1111 		/*
1112 		 * In case we are going to react to this rx by scheduling
1113 		 * writes, we need to restrict the amount of rx to the size
1114 		 * the protocol reported for rx buffer.
1115 		 *
1116 		 * Otherwise we get a situation we have to absorb possibly a
1117 		 * lot of reads before we get a chance to drain them by writing
1118 		 * them, eg, with echo type tests in autobahn.
1119 		 */
1120 
1121 		buffered = 0;
1122 		ebuf.token = pt->serv_buf;
1123 		if (lwsi_role_ws(wsi))
1124 			ebuf.len = wsi->ws->rx_ubuf_alloc;
1125 		else
1126 			ebuf.len = wsi->context->pt_serv_buf_size;
1127 
1128 		if ((unsigned int)ebuf.len > wsi->context->pt_serv_buf_size)
1129 			ebuf.len = wsi->context->pt_serv_buf_size;
1130 
1131 		if ((int)pending > ebuf.len)
1132 			pending = ebuf.len;
1133 
1134 		ebuf.len = lws_ssl_capable_read(wsi, ebuf.token,
1135 						pending ? (int)pending :
1136 						ebuf.len);
1137 		switch (ebuf.len) {
1138 		case 0:
1139 			lwsl_info("%s: zero length read\n",
1140 				  __func__);
1141 			return LWS_HPI_RET_PLEASE_CLOSE_ME;
1142 		case LWS_SSL_CAPABLE_MORE_SERVICE:
1143 			lwsl_info("SSL Capable more service\n");
1144 			return LWS_HPI_RET_HANDLED;
1145 		case LWS_SSL_CAPABLE_ERROR:
1146 			lwsl_info("%s: LWS_SSL_CAPABLE_ERROR\n",
1147 					__func__);
1148 			return LWS_HPI_RET_PLEASE_CLOSE_ME;
1149 		}
1150 
1151 		/*
1152 		 * coverity thinks ssl_capable_read() may read over
1153 		 * 2GB.  Dissuade it...
1154 		 */
1155 		ebuf.len &= 0x7fffffff;
1156 	}
1157 
1158 drain:
1159 
1160 	/*
1161 	 * give any active extensions a chance to munge the buffer
1162 	 * before parse.  We pass in a pointer to an lws_tokens struct
1163 	 * prepared with the default buffer and content length that's in
1164 	 * there.  Rather than rewrite the default buffer, extensions
1165 	 * that expect to grow the buffer can adapt .token to
1166 	 * point to their own per-connection buffer in the extension
1167 	 * user allocation.  By default with no extensions or no
1168 	 * extension callback handling, just the normal input buffer is
1169 	 * used then so it is efficient.
1170 	 */
1171 	m = 0;
1172 	do {
1173 
1174 		/* service incoming data */
1175 		//lws_buflist_describe(&wsi->buflist, wsi, __func__);
1176 		if (ebuf.len) {
1177 #if defined(LWS_ROLE_H2)
1178 			if (lwsi_role_h2(wsi) && lwsi_state(wsi) != LRS_BODY &&
1179 			    lwsi_state(wsi) != LRS_DISCARD_BODY)
1180 				n = lws_read_h2(wsi, ebuf.token,
1181 					     ebuf.len);
1182 			else
1183 #endif
1184 				n = lws_read_h1(wsi, ebuf.token,
1185 					     ebuf.len);
1186 
1187 			if (n < 0) {
1188 				/* we closed wsi */
1189 				return LWS_HPI_RET_WSI_ALREADY_DIED;
1190 			}
1191 			//lws_buflist_describe(&wsi->buflist, wsi, __func__);
1192 			//lwsl_notice("%s: consuming %d / %d\n", __func__, n, ebuf.len);
1193 			if (lws_buflist_aware_finished_consuming(wsi, &ebuf, n,
1194 							buffered, __func__))
1195 				return LWS_HPI_RET_PLEASE_CLOSE_ME;
1196 		}
1197 
1198 		ebuf.token = NULL;
1199 		ebuf.len = 0;
1200 	} while (m);
1201 
1202 	if (wsi->http.ah
1203 #if defined(LWS_WITH_CLIENT)
1204 			&& !wsi->client_h2_alpn
1205 #endif
1206 			) {
1207 		lwsl_info("%s: %p: detaching ah\n", __func__, wsi);
1208 		lws_header_table_detach(wsi, 0);
1209 	}
1210 
1211 	pending = lws_ssl_pending(wsi);
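	/* tls may hold more decrypted rx than we read: clamp to our buffer size and go around again */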
1212 	if (pending) {
1213 		if (lws_is_ws_with_ext(wsi))
1214 			pending = pending > wsi->ws->rx_ubuf_alloc ?
1215 				wsi->ws->rx_ubuf_alloc : pending;
1216 		else
1217 			pending = pending > wsi->context->pt_serv_buf_size ?
1218 				wsi->context->pt_serv_buf_size : pending;
1219 		goto read;
1220 	}
1221 
1222 	if (buffered && /* were draining, now nothing left */
1223 	    !lws_buflist_next_segment_len(&wsi->buflist, NULL)) {
1224 		lwsl_info("%s: %p flow buf: drained\n", __func__, wsi);
1225 		/* having drained the rxflow buffer, can rearm POLLIN */
1226 #if !defined(LWS_WITH_SERVER)
1227 		n =
1228 #endif
1229 		__lws_rx_flow_control(wsi);
1230 		/* n ignored, needed for NO_SERVER case */
1231 	}
1232 
1233 	/* n = 0 */
1234 	return LWS_HPI_RET_HANDLED;
1235 }
1236 
1237 
1238 int rops_handle_POLLOUT_ws(struct lws *wsi)
1239 {
1240 	int write_type = LWS_WRITE_PONG;
1241 #if !defined(LWS_WITHOUT_EXTENSIONS)
1242 	struct lws_ext_pm_deflate_rx_ebufs pmdrx;
1243 	int ret, m;
1244 #endif
1245 	int n;
1246 
1247 #if !defined(LWS_WITHOUT_EXTENSIONS)
1248 	lwsl_debug("%s: %s: wsi->ws->tx_draining_ext %d\n", __func__,
1249 			wsi->protocol->name, wsi->ws->tx_draining_ext);
1250 #endif
1251 
1252 	/* Priority 3: pending control packets (pong or close)
1253 	 *
1254 	 * 3a: close notification packet requested from close api
1255 	 */
1256 
1257 	if (lwsi_state(wsi) == LRS_WAITING_TO_SEND_CLOSE) {
1258 		lwsl_debug("sending close packet\n");
1259 		lwsl_hexdump_debug(&wsi->ws->ping_payload_buf[LWS_PRE],
1260 				   wsi->ws->close_in_ping_buffer_len);
1261 		wsi->waiting_to_send_close_frame = 0;
1262 		n = lws_write(wsi, &wsi->ws->ping_payload_buf[LWS_PRE],
1263 			      wsi->ws->close_in_ping_buffer_len,
1264 			      LWS_WRITE_CLOSE);
1265 		if (n >= 0) {
1266 			if (wsi->close_needs_ack) {
1267 				lwsi_set_state(wsi, LRS_AWAITING_CLOSE_ACK);
1268 				lws_set_timeout(wsi, PENDING_TIMEOUT_CLOSE_ACK,
1269 						5);
1270 				lwsl_debug("sent close, await ack\n");
1271 
1272 				return LWS_HP_RET_BAIL_OK;
1273 			}
1274 			wsi->close_needs_ack = 0;
1275 			lwsi_set_state(wsi, LRS_RETURNED_CLOSE);
1276 		}
1277 
1278 		return LWS_HP_RET_BAIL_DIE;
1279 	}
1280 
1281 	/* else, the send failed and we should just hang up */
1282 
1283 	if ((lwsi_role_ws(wsi) && wsi->ws->ping_pending_flag) ||
1284 	    (lwsi_state(wsi) == LRS_RETURNED_CLOSE &&
1285 	     wsi->ws->payload_is_close)) {
1286 
1287 		if (wsi->ws->payload_is_close)
1288 			write_type = LWS_WRITE_CLOSE;
1289 		else {
1290 			if (wsi->wsistate_pre_close) {
1291 				/* we started close flow, forget pong */
1292 				wsi->ws->ping_pending_flag = 0;
1293 				return LWS_HP_RET_BAIL_OK;
1294 			}
1295 			lwsl_info("issuing pong %d on wsi %p\n",
1296 				  wsi->ws->ping_payload_len, wsi);
1297 		}
1298 
1299 		n = lws_write(wsi, &wsi->ws->ping_payload_buf[LWS_PRE],
1300 			      wsi->ws->ping_payload_len, write_type);
1301 		if (n < 0)
1302 			return LWS_HP_RET_BAIL_DIE;
1303 
1304 		/* well he is sent, mark him done */
1305 		wsi->ws->ping_pending_flag = 0;
1306 		if (wsi->ws->payload_is_close) {
1307 			// assert(0);
1308 			/* oh... a close frame was it... then we are done */
1309 			return LWS_HP_RET_BAIL_DIE;
1310 		}
1311 
1312 		/* otherwise for PING, leave POLLOUT active either way */
1313 		return LWS_HP_RET_BAIL_OK;
1314 	}
1315 
1316 	if (!wsi->socket_is_permanently_unusable &&
1317 	    wsi->ws->send_check_ping) {
1318 
1319 		lwsl_info("%s: issuing ping on wsi %p: %s %s h2: %d\n", __func__, wsi,
1320 				wsi->role_ops->name, wsi->protocol->name,
1321 				wsi->mux_substream);
1322 		wsi->ws->send_check_ping = 0;
1323 		n = lws_write(wsi, &wsi->ws->ping_payload_buf[LWS_PRE],
1324 			      0, LWS_WRITE_PING);
1325 		if (n < 0)
1326 			return LWS_HP_RET_BAIL_DIE;
1327 
1328 		return LWS_HP_RET_BAIL_OK;
1329 	}
1330 
1331 	/* Priority 4: if we are closing, not allowed to send more data frags
1332 	 *	       which means user callback or tx ext flush banned now
1333 	 */
1334 	if (lwsi_state(wsi) == LRS_RETURNED_CLOSE)
1335 		return LWS_HP_RET_USER_SERVICE;
1336 
1337 #if !defined(LWS_WITHOUT_EXTENSIONS)
1338 	/* Priority 5: Tx path extension with more to send
1339 	 *
1340 	 *	       These are handled as new fragments each time around
1341 	 *	       So we must block the new writeable callback to enforce
1342 	 *	       payload ordering, but since they are always complete
1343 	 *	       fragments, control packets can interleave OK.
1344 	 */
1345 	if (wsi->ws->tx_draining_ext) {
1346 		lwsl_ext("SERVICING TX EXT DRAINING\n");
1347 		if (lws_write(wsi, NULL, 0, LWS_WRITE_CONTINUATION) < 0)
1348 			return LWS_HP_RET_BAIL_DIE;
1349 		/* leave POLLOUT active */
1350 		return LWS_HP_RET_BAIL_OK;
1351 	}
1352 
1353 	/* Priority 6: extensions
1354 	 */
1355 	if (!wsi->ws->extension_data_pending && !wsi->ws->tx_draining_ext) {
1356 		lwsl_ext("%s: !wsi->ws->extension_data_pending\n", __func__);
1357 		return LWS_HP_RET_USER_SERVICE;
1358 	}
1359 
1360 	/*
1361 	 * Check in on the active extensions, see if they had pending stuff to
1362 	 * spill... they need to get the first look-in otherwise sequence will
1363 	 * be disordered.
1364 	 *
1365 	 * coming here with a NULL, zero-length ebuf means just spill pending
1366 	 */
1367 
1368 	ret = 1;
1369 	if (wsi->role_ops == &role_ops_raw_skt
1370 #if defined(LWS_ROLE_RAW_FILE)
1371 		|| wsi->role_ops == &role_ops_raw_file
1372 #endif
1373 	    )
1374 		ret = 0;
1375 
1376 	while (ret == 1) {
1377 
1378 		/* default to nobody has more to spill */
1379 
1380 		ret = 0;
1381 		pmdrx.eb_in.token = NULL;
1382 		pmdrx.eb_in.len = 0;
1383 
1384 		/* give every extension a chance to spill */
1385 
1386 		m = lws_ext_cb_active(wsi, LWS_EXT_CB_PACKET_TX_PRESEND,
1387 				      &pmdrx, 0);
1388 		if (m < 0) {
1389 			lwsl_err("ext reports fatal error\n");
1390 			return LWS_HP_RET_BAIL_DIE;
1391 		}
1392 		if (m)
1393 			/*
1394 			 * at least one extension told us he has more
1395 			 * to spill, so we will go around again after
1396 			 */
1397 			ret = 1;
1398 
1399 		/* assuming they gave us something to send, send it */
1400 
1401 		if (pmdrx.eb_in.len) {
1402 			n = lws_issue_raw(wsi, (unsigned char *)pmdrx.eb_in.token,
1403 					pmdrx.eb_in.len);
1404 			if (n < 0) {
1405 				lwsl_info("closing from POLLOUT spill\n");
1406 				return LWS_HP_RET_BAIL_DIE;
1407 			}
1408 			/*
1409 			 * Keep amount spilled small to minimize chance of this
1410 			 */
1411 			if (n != pmdrx.eb_in.len) {
1412 				lwsl_err("Unable to spill ext %d vs %d\n",
1413 						pmdrx.eb_in.len, n);
1414 				return LWS_HP_RET_BAIL_DIE;
1415 			}
1416 		} else
1417 			continue;
1418 
1419 		/* no extension has more to spill */
1420 
1421 		if (!ret)
1422 			continue;
1423 
1424 		/*
1425 		 * There's more to spill from an extension, but we just sent
1426 		 * something... did that leave the pipe choked?
1427 		 */
1428 
1429 		if (!lws_send_pipe_choked(wsi))
1430 			/* no we could add more */
1431 			continue;
1432 
1433 		lwsl_info("choked in POLLOUT service\n");
1434 
1435 		/*
1436 		 * Yes, he's choked.  Leave the POLLOUT masked on so we will
1437 		 * come back here when he is unchoked.  Don't call the user
1438 		 * callback to enforce ordering of spilling, he'll get called
1439 		 * when we come back here and there's nothing more to spill.
1440 		 */
1441 
1442 		return LWS_HP_RET_BAIL_OK;
1443 	}
1444 
1445 	wsi->ws->extension_data_pending = 0;
1446 #endif
1447 
1448 	return LWS_HP_RET_USER_SERVICE;
1449 }
1450 
1451 static int
1452 rops_service_flag_pending_ws(struct lws_context *context, int tsi)
1453 {
1454 #if !defined(LWS_WITHOUT_EXTENSIONS)
1455 	struct lws_context_per_thread *pt = &context->pt[tsi];
1456 	struct lws *wsi;
1457 	int forced = 0;
1458 
1459 	/* POLLIN faking (the pt lock is taken by the parent) */
1460 
1461 	/*
1462 	 * 1) For all guys with already-available ext data to drain, if they are
1463 	 * not flowcontrolled, fake their POLLIN status
1464 	 */
1465 	wsi = pt->ws.rx_draining_ext_list;
1466 	while (wsi && wsi->position_in_fds_table != LWS_NO_FDS_POS) {
1467 		pt->fds[wsi->position_in_fds_table].revents |=
1468 			pt->fds[wsi->position_in_fds_table].events & LWS_POLLIN;
1469 		if (pt->fds[wsi->position_in_fds_table].revents & LWS_POLLIN)
1470 			forced = 1;
1471 
1472 		wsi = wsi->ws->rx_draining_ext_list;
1473 	}
1474 
1475 	return forced;
1476 #else
1477 	return 0;
1478 #endif
1479 }
1480 
1481 static int
1482 rops_close_via_role_protocol_ws(struct lws *wsi, enum lws_close_status reason)
1483 {
1484 	if (!wsi->ws)
1485 		return 0;
1486 
1487 	if (!wsi->ws->close_in_ping_buffer_len && /* already a reason */
1488 	     (reason == LWS_CLOSE_STATUS_NOSTATUS ||
1489 	      reason == LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY))
1490 		return 0;
1491 
1492 	lwsl_debug("%s: sending close indication...\n", __func__);
1493 
1494 	/* if no prepared close reason, use 1000 and no aux data */
1495 
1496 	if (!wsi->ws->close_in_ping_buffer_len) {
1497 		wsi->ws->close_in_ping_buffer_len = 2;
1498 		wsi->ws->ping_payload_buf[LWS_PRE] = (reason >> 8) & 0xff;
1499 		wsi->ws->ping_payload_buf[LWS_PRE + 1] = reason & 0xff;
1500 	}
1501 
1502 	wsi->waiting_to_send_close_frame = 1;
1503 	wsi->close_needs_ack = 1;
1504 	lwsi_set_state(wsi, LRS_WAITING_TO_SEND_CLOSE);
1505 	__lws_set_timeout(wsi, PENDING_TIMEOUT_CLOSE_SEND, 5);
1506 
1507 	lws_callback_on_writable(wsi);
1508 
1509 	return 1;
1510 }
1511 
1512 static int
1513 rops_close_role_ws(struct lws_context_per_thread *pt, struct lws *wsi)
1514 {
1515 	if (!wsi->ws)
1516 		return 0;
1517 
1518 #if !defined(LWS_WITHOUT_EXTENSIONS)
1519 
1520 	if (wsi->ws->rx_draining_ext) {
1521 		struct lws **w = &pt->ws.rx_draining_ext_list;
1522 
1523 		wsi->ws->rx_draining_ext = 0;
1524 		/* remove us from context draining ext list */
1525 		while (*w) {
1526 			if (*w == wsi) {
1527 				*w = wsi->ws->rx_draining_ext_list;
1528 				break;
1529 			}
1530 			w = &((*w)->ws->rx_draining_ext_list);
1531 		}
1532 		wsi->ws->rx_draining_ext_list = NULL;
1533 	}
1534 
1535 	if (wsi->ws->tx_draining_ext) {
1536 		struct lws **w = &pt->ws.tx_draining_ext_list;
1537 		lwsl_ext("%s: CLEARING tx_draining_ext\n", __func__);
1538 		wsi->ws->tx_draining_ext = 0;
1539 		/* remove us from context draining ext list */
1540 		while (*w) {
1541 			if (*w == wsi) {
1542 				*w = wsi->ws->tx_draining_ext_list;
1543 				break;
1544 			}
1545 			w = &((*w)->ws->tx_draining_ext_list);
1546 		}
1547 		wsi->ws->tx_draining_ext_list = NULL;
1548 	}
1549 #endif
1550 	lws_free_set_NULL(wsi->ws->rx_ubuf);
1551 
1552 	wsi->ws->ping_payload_len = 0;
1553 	wsi->ws->ping_pending_flag = 0;
1554 
1555 	/* deallocate any active extension contexts */
1556 
1557 	if (lws_ext_cb_active(wsi, LWS_EXT_CB_DESTROY, NULL, 0) < 0)
1558 		lwsl_warn("extension destruction failed\n");
1559 
1560 	return 0;
1561 }
1562 
1563 static int
1564 rops_write_role_protocol_ws(struct lws *wsi, unsigned char *buf, size_t len,
1565 			    enum lws_write_protocol *wp)
1566 {
1567 #if !defined(LWS_WITHOUT_EXTENSIONS)
1568 	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
1569 	enum lws_write_protocol wpt;
1570 #endif
1571 	struct lws_ext_pm_deflate_rx_ebufs pmdrx;
1572 	int masked7 = lwsi_role_client(wsi);
1573 	unsigned char is_masked_bit = 0;
1574 	unsigned char *dropmask = NULL;
1575 	size_t orig_len = len;
1576 	int pre = 0, n = 0;
1577 
1578 	// lwsl_err("%s: wp 0x%x len %d\n", __func__, *wp, (int)len);
1579 #if !defined(LWS_WITHOUT_EXTENSIONS)
1580 	if (wsi->ws->tx_draining_ext) {
1581 		/* remove us from the list */
1582 		struct lws **w = &pt->ws.tx_draining_ext_list;
1583 
1584 		lwsl_ext("%s: CLEARING tx_draining_ext\n", __func__);
1585 		wsi->ws->tx_draining_ext = 0;
1586 		/* remove us from context draining ext list */
1587 		while (*w) {
1588 			if (*w == wsi) {
1589 				*w = wsi->ws->tx_draining_ext_list;
1590 				break;
1591 			}
1592 			w = &((*w)->ws->tx_draining_ext_list);
1593 		}
1594 		wsi->ws->tx_draining_ext_list = NULL;
1595 
1596 		wpt = *wp;
1597 		*wp = (wsi->ws->tx_draining_stashed_wp & 0xc0) |
1598 				LWS_WRITE_CONTINUATION;
1599 
1600 		/*
1601 		 * When we are just flushing (len == 0), we can trust the
1602 		 * stashed wp info completely.  Otherwise adjust it to the
1603 		 * FIN status of the incoming packet.
1604 		 */
1605 
1606 		if (!(wpt & LWS_WRITE_NO_FIN) && len)
1607 			*wp &= ~LWS_WRITE_NO_FIN;
1608 
1609 		lwsl_ext("FORCED draining wp to 0x%02X "
1610 			 "(stashed 0x%02X, incoming 0x%02X)\n", *wp,
1611 			 wsi->ws->tx_draining_stashed_wp, wpt);
1612 		// assert(0);
1613 	}
1614 #endif
1615 
1616 	if (((*wp) & 0x1f) == LWS_WRITE_HTTP ||
1617 	    ((*wp) & 0x1f) == LWS_WRITE_HTTP_FINAL ||
1618 	    ((*wp) & 0x1f) == LWS_WRITE_HTTP_HEADERS_CONTINUATION ||
1619 	    ((*wp) & 0x1f) == LWS_WRITE_HTTP_HEADERS)
1620 		goto send_raw;
1621 
1622 
1623 
1624 	/* if we are continuing a frame that already had its header done */
1625 
1626 	if (wsi->ws->inside_frame) {
1627 		lwsl_debug("INSIDE FRAME\n");
1628 		goto do_more_inside_frame;
1629 	}
1630 
1631 	wsi->ws->clean_buffer = 1;
1632 
1633 	/*
1634 	 * give a chance to the extensions to modify payload
1635 	 * the extension may decide to produce unlimited payload erratically
1636 	 * (eg, compression extension), so we require only that if he produces
1637 	 * something, it will be a complete fragment of the length known at
1638 	 * the time (just the fragment length known), and if he has
1639 	 * more we will come back next time he is writeable and allow him to
1640 	 * produce more fragments until he's drained.
1641 	 *
1642 	 * This allows what is sent each time it is writeable to be limited to
1643 	 * a size that can be sent without partial sends or blocking, allows
1644 	 * interleaving of control frames and other connection service.
1645 	 */
1646 
1647 	pmdrx.eb_in.token = buf;
1648 	pmdrx.eb_in.len = (int)len;
1649 
1650 	/* for the non-pm-deflate case */
1651 
1652 	pmdrx.eb_out = pmdrx.eb_in;
1653 
1654 	switch ((int)*wp) {
1655 	case LWS_WRITE_PING:
1656 	case LWS_WRITE_PONG:
1657 	case LWS_WRITE_CLOSE:
1658 		break;
1659 	default:
1660 #if !defined(LWS_WITHOUT_EXTENSIONS)
1661 		n = lws_ext_cb_active(wsi, LWS_EXT_CB_PAYLOAD_TX, &pmdrx, *wp);
1662 		if (n < 0)
1663 			return -1;
1664 		lwsl_ext("%s: defl ext ret %d, ext in remaining %d, "
1665 			    "out %d compressed (wp 0x%x)\n", __func__, n,
1666 			    (int)pmdrx.eb_in.len, (int)pmdrx.eb_out.len, *wp);
1667 
1668 		if (n == PMDR_HAS_PENDING) {
1669 			lwsl_ext("%s: HAS PENDING: write drain len %d "
1670 				    "(wp 0x%x) SETTING tx_draining_ext "
1671 				    "(remaining in %d)\n", __func__,
1672 				    (int)pmdrx.eb_out.len, *wp,
1673 				    (int)pmdrx.eb_in.len);
1674 			/* extension requires further draining */
1675 			wsi->ws->tx_draining_ext = 1;
1676 			wsi->ws->tx_draining_ext_list =
1677 					pt->ws.tx_draining_ext_list;
1678 			pt->ws.tx_draining_ext_list = wsi;
1679 			/* we must come back to do more */
1680 			lws_callback_on_writable(wsi);
1681 			/*
1682 			 * keep a copy of the write type for the overall
1683 			 * action that has provoked generation of these
1684 			 * fragments, so the last guy can use its FIN state.
1685 			 */
1686 			wsi->ws->tx_draining_stashed_wp = *wp;
1687 			/*
1688 			 * Despite what we may have thought, this is definitely
1689 			 * NOT the last fragment, because the extension asserted
1690 			 * he has more coming.  For example, the extension may
1691 			 * be compressing, and has saved up everything until the
1692 			 * end, where the output is larger than one chunk.
1693 			 *
1694 			 * Make sure this intermediate one doesn't actually
1695 			 * go out with a FIN.
1696 			 */
1697 			*wp |= LWS_WRITE_NO_FIN;
1698 		}
1699 #endif
1700 		if (pmdrx.eb_out.len && wsi->ws->stashed_write_pending) {
1701 			wsi->ws->stashed_write_pending = 0;
1702 			*wp = ((*wp) & 0xc0) | (int)wsi->ws->stashed_write_type;
1703 		}
1704 	}
1705 
1706 	/*
1707 	 * an extension did something we need to keep... for example, if
1708 	 * compression extension, it has already updated its state according
1709 	 * to this being issued
1710 	 */
1711 	if (buf != pmdrx.eb_out.token) {
1712 		/*
1713 		 * ext might eat it, but not have anything to issue yet.
1714 		 * In that case we have to follow his lead, but stash and
1715 		 * replace the write type that was lost here the first time.
1716 		 */
1717 		if (len && !pmdrx.eb_out.len) {
1718 			if (!wsi->ws->stashed_write_pending)
1719 				wsi->ws->stashed_write_type =
1720 						(char)(*wp) & 0x3f;
1721 			wsi->ws->stashed_write_pending = 1;
1722 			return (int)len;
1723 		}
1724 		/*
1725 		 * extension recreated it:
1726 		 * need to buffer this if not all sent
1727 		 */
1728 		wsi->ws->clean_buffer = 0;
1729 	}
1730 
1731 	buf = pmdrx.eb_out.token;
1732 	len = pmdrx.eb_out.len;
1733 
1734 	if (!buf) {
1735 		lwsl_err("null buf (%d)\n", (int)len);
1736 		return -1;
1737 	}
1738 
1739 	switch (wsi->ws->ietf_spec_revision) {
1740 	case 13:
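		/* client->server frames must be masked: leave 4 bytes in front for the masking key */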
1741 		if (masked7) {
1742 			pre += 4;
1743 			dropmask = &buf[0 - pre];
1744 			is_masked_bit = 0x80;
1745 		}
1746 
1747 		switch ((*wp) & 0xf) {
1748 		case LWS_WRITE_TEXT:
1749 			n = LWSWSOPC_TEXT_FRAME;
1750 			break;
1751 		case LWS_WRITE_BINARY:
1752 			n = LWSWSOPC_BINARY_FRAME;
1753 			break;
1754 		case LWS_WRITE_CONTINUATION:
1755 			n = LWSWSOPC_CONTINUATION;
1756 			break;
1757 
1758 		case LWS_WRITE_CLOSE:
1759 			n = LWSWSOPC_CLOSE;
1760 			break;
1761 		case LWS_WRITE_PING:
1762 			n = LWSWSOPC_PING;
1763 			break;
1764 		case LWS_WRITE_PONG:
1765 			n = LWSWSOPC_PONG;
1766 			break;
1767 		default:
1768 			lwsl_warn("lws_write: unknown write opc / wp\n");
1769 			return -1;
1770 		}
1771 
1772 		if (!((*wp) & LWS_WRITE_NO_FIN))
1773 			n |= 1 << 7;
1774 
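		/* encode the payload length: < 126 in one byte, < 64KiB as 126 + 16 bits, else 127 + 64 bits */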
1775 		if (len < 126) {
1776 			pre += 2;
1777 			buf[-pre] = n;
1778 			buf[-pre + 1] = (unsigned char)(len | is_masked_bit);
1779 		} else {
1780 			if (len < 65536) {
1781 				pre += 4;
1782 				buf[-pre] = n;
1783 				buf[-pre + 1] = 126 | is_masked_bit;
1784 				buf[-pre + 2] = (unsigned char)(len >> 8);
1785 				buf[-pre + 3] = (unsigned char)len;
1786 			} else {
1787 				pre += 10;
1788 				buf[-pre] = n;
1789 				buf[-pre + 1] = 127 | is_masked_bit;
1790 #if defined __LP64__
1791 					buf[-pre + 2] = (len >> 56) & 0x7f;
1792 					buf[-pre + 3] = len >> 48;
1793 					buf[-pre + 4] = len >> 40;
1794 					buf[-pre + 5] = len >> 32;
1795 #else
1796 					buf[-pre + 2] = 0;
1797 					buf[-pre + 3] = 0;
1798 					buf[-pre + 4] = 0;
1799 					buf[-pre + 5] = 0;
1800 #endif
1801 				buf[-pre + 6] = (unsigned char)(len >> 24);
1802 				buf[-pre + 7] = (unsigned char)(len >> 16);
1803 				buf[-pre + 8] = (unsigned char)(len >> 8);
1804 				buf[-pre + 9] = (unsigned char)len;
1805 			}
1806 		}
1807 		break;
1808 	}
1809 
1810 do_more_inside_frame:
1811 
1812 	/*
1813 	 * Deal with masking if we are in client -> server direction and
1814 	 * the wp demands it
1815 	 */
1816 
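	/*
	 * RFC 6455 requires client-originated frames to be masked: a 4-byte
	 * key is sent in clear immediately before the payload, and every
	 * payload byte is XORed with key[i % 4].  The masking is applied in
	 * place, directly on the caller's buffer.
	 */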
	if (masked7) {
		if (!wsi->ws->inside_frame)
			if (lws_0405_frame_mask_generate(wsi)) {
				lwsl_err("frame mask generation failed\n");
				return -1;
			}

		/*
		 * in v7, just mask the payload
		 */
		if (dropmask) { /* never set if already inside frame */
			for (n = 4; n < (int)len + 4; n++)
				dropmask[n] = dropmask[n] ^ wsi->ws->mask[
					(wsi->ws->mask_idx++) & 3];

			/* copy the frame nonce into place */
			memcpy(dropmask, wsi->ws->mask, 4);
		}
	}

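	/*
	 * If this ws connection is actually carried inside an h2 stream
	 * (Extended CONNECT, RFC 8441), hand the fully framed ws data to the
	 * role ops of the network wsi so it goes out as h2 DATA on the
	 * underlying connection.
	 */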
	if (lwsi_role_h2_ENCAPSULATION(wsi)) {
		struct lws *encap = lws_get_network_wsi(wsi);

		assert(encap != wsi);
		return encap->role_ops->write_role_protocol(wsi, buf - pre,
							    len + pre, wp);
	}

	switch ((*wp) & 0x1f) {
	case LWS_WRITE_TEXT:
	case LWS_WRITE_BINARY:
	case LWS_WRITE_CONTINUATION:
		if (!wsi->h2_stream_carries_ws) {

			/*
			 * give any active extensions a chance to munge the
			 * buffer before send.  We pass in a pointer to an
			 * lws_tokens struct prepared with the default buffer
			 * and its content length.  Rather than rewrite the
			 * default buffer, extensions that expect to grow the
			 * buffer can point .token at their own per-connection
			 * buffer in the extension user allocation.  With no
			 * extensions, or no extension callback handling, the
			 * normal input buffer is used unchanged, so it stays
			 * efficient.
			 *
			 * The callback returns 1 in case it wants to spill
			 * more buffers.
			 *
			 * This takes care of holding the buffer if the send is
			 * incomplete, ie, if wsi->ws->clean_buffer is 0
			 * (meaning an extension meddled with the buffer).  If
			 * wsi->ws->clean_buffer is 1, it will instead return
			 * to the user code how much OF THE USER BUFFER was
			 * consumed.
			 */

			n = lws_issue_raw_ext_access(wsi, buf - pre, len + pre);
			wsi->ws->inside_frame = 1;
			if (n <= 0)
				return n;

			if (n == (int)len + pre) {
				/* everything in the buffer was handled
				 * (or rebuffered...) */
				wsi->ws->inside_frame = 0;
				return (int)orig_len;
			}

			/*
			 * Otherwise n - pre is how many bytes of the user
			 * buffer got sent... it may be < orig_len, in which
			 * case a callback-when-writable has already been
			 * arranged and user code can call lws_write() again
			 * with the rest later.
			 */

			return n - pre;
		}
		break;
	default:
		break;
	}
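	/*
	 * Control frames (CLOSE / PING / PONG), and data when the ws stream
	 * is carried over h2, skip the extension spill path above and are
	 * written directly to the wire here.
	 */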
send_raw:
	return lws_issue_raw(wsi, (unsigned char *)buf - pre, len + pre);
}

static int
rops_close_kill_connection_ws(struct lws *wsi, enum lws_close_status reason)
{
	/* deal with ws encapsulation in h2 */
#if defined(LWS_WITH_HTTP2)
	if (wsi->mux_substream && wsi->h2_stream_carries_ws)
		return role_ops_h2.close_kill_connection(wsi, reason);

	return 0;
#else
	return 0;
#endif
}

static int
rops_callback_on_writable_ws(struct lws *wsi)
{
#if defined(LWS_WITH_HTTP2)
	if (lwsi_role_h2_ENCAPSULATION(wsi)) {
		/* we know then that it has an h2 parent */
		struct lws *enc = role_ops_h2.encapsulation_parent(wsi);

		assert(enc);
		if (enc->role_ops->callback_on_writable(wsi))
			return 1;
	}
#endif
	return 0;
}

static int
rops_init_vhost_ws(struct lws_vhost *vh,
		   const struct lws_context_creation_info *info)
{
#if !defined(LWS_WITHOUT_EXTENSIONS)
#ifdef LWS_WITH_PLUGINS
	struct lws_plugin *plugin;
	int m;

	if (vh->context->plugin_extension_count) {

		m = 0;
		while (info->extensions && info->extensions[m].callback)
			m++;

		/*
		 * give the vhost a unified list of extensions including the
		 * ones that came from plugins
		 */
		vh->ws.extensions = lws_zalloc(sizeof(struct lws_extension) *
				     (m + vh->context->plugin_extension_count + 1),
				     "extensions");
		if (!vh->ws.extensions)
			return 1;

		memcpy((struct lws_extension *)vh->ws.extensions, info->extensions,
		       sizeof(struct lws_extension) * m);
		plugin = vh->context->plugin_list;
		while (plugin) {
			memcpy((struct lws_extension *)&vh->ws.extensions[m],
				plugin->caps.extensions,
			       sizeof(struct lws_extension) *
			       plugin->caps.count_extensions);
			m += plugin->caps.count_extensions;
			plugin = plugin->list;
		}
	} else
#endif
		vh->ws.extensions = info->extensions;
#endif

	return 0;
}

static int
rops_destroy_vhost_ws(struct lws_vhost *vh)
{
#ifdef LWS_WITH_PLUGINS
#if !defined(LWS_WITHOUT_EXTENSIONS)
	if (vh->context->plugin_extension_count)
		lws_free((void *)vh->ws.extensions);
#endif
#endif

	return 0;
}

#if defined(LWS_WITH_HTTP_PROXY)
static int
ws_destroy_proxy_buf(struct lws_dll2 *d, void *user)
{
	lws_free(d);

	return 0;
}
#endif

static int
rops_destroy_role_ws(struct lws *wsi)
{
#if defined(LWS_WITH_HTTP_PROXY)
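	/*
	 * free any frames still buffered on this wsi's ws proxy list before
	 * dropping the ws role allocation
	 */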
	lws_dll2_foreach_safe(&wsi->ws->proxy_owner, NULL, ws_destroy_proxy_buf);
#endif

	lws_free_set_NULL(wsi->ws);

	return 0;
}

static int
rops_issue_keepalive_ws(struct lws *wsi, int isvalid)
{
	uint64_t us;

#if defined(LWS_WITH_HTTP2)
	if (lwsi_role_h2_ENCAPSULATION(wsi)) {
		/* we know then that it has an h2 parent */
		struct lws *enc = role_ops_h2.encapsulation_parent(wsi);

		assert(enc);
		if (enc->role_ops->issue_keepalive(enc, isvalid))
			return 1;
	}
#endif

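	/*
	 * For plain ws, validity checking sends a PING whose payload is the
	 * current time in microseconds; the PONG that echoes it back is what
	 * confirms the connection is still live.
	 */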
	if (isvalid)
		_lws_validity_confirmed_role(wsi);
	else {
		us = lws_now_usecs();
		memcpy(&wsi->ws->ping_payload_buf[LWS_PRE], &us, 8);
		wsi->ws->send_check_ping = 1;
		lws_callback_on_writable(wsi);
	}

	return 0;
}

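/*
 * The ws role is wired into the core through this ops struct; entries left
 * NULL are simply not used for this role.
 */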
const struct lws_role_ops role_ops_ws = {
	/* role name */			"ws",
	/* alpn id */			NULL,
	/* check_upgrades */		NULL,
	/* pt_init_destroy */		NULL,
	/* init_vhost */		rops_init_vhost_ws,
	/* destroy_vhost */		rops_destroy_vhost_ws,
	/* service_flag_pending */	rops_service_flag_pending_ws,
	/* handle_POLLIN */		rops_handle_POLLIN_ws,
	/* handle_POLLOUT */		rops_handle_POLLOUT_ws,
	/* perform_user_POLLOUT */	NULL,
	/* callback_on_writable */	rops_callback_on_writable_ws,
	/* tx_credit */			NULL,
	/* write_role_protocol */	rops_write_role_protocol_ws,
	/* encapsulation_parent */	NULL,
	/* alpn_negotiated */		NULL,
	/* close_via_role_protocol */	rops_close_via_role_protocol_ws,
	/* close_role */		rops_close_role_ws,
	/* close_kill_connection */	rops_close_kill_connection_ws,
	/* destroy_role */		rops_destroy_role_ws,
	/* adoption_bind */		NULL,
	/* client_bind */		NULL,
	/* issue_keepalive */		rops_issue_keepalive_ws,
	/* adoption_cb clnt, srv */	{ LWS_CALLBACK_SERVER_NEW_CLIENT_INSTANTIATED,
					  LWS_CALLBACK_SERVER_NEW_CLIENT_INSTANTIATED },
	/* rx_cb clnt, srv */		{ LWS_CALLBACK_CLIENT_RECEIVE,
					  LWS_CALLBACK_RECEIVE },
	/* writeable cb clnt, srv */	{ LWS_CALLBACK_CLIENT_WRITEABLE,
					  LWS_CALLBACK_SERVER_WRITEABLE },
	/* close cb clnt, srv */	{ LWS_CALLBACK_CLIENT_CLOSED,
					  LWS_CALLBACK_CLOSED },
	/* protocol_bind cb c, srv */	{ LWS_CALLBACK_WS_CLIENT_BIND_PROTOCOL,
					  LWS_CALLBACK_WS_SERVER_BIND_PROTOCOL },
	/* protocol_unbind cb c, srv */	{ LWS_CALLBACK_WS_CLIENT_DROP_PROTOCOL,
					  LWS_CALLBACK_WS_SERVER_DROP_PROTOCOL },
	/* file handles */		0
};