1 /***************************************************************************
2  *                                  _   _ ____  _
3  *  Project                     ___| | | |  _ \| |
4  *                             / __| | | | |_) | |
5  *                            | (__| |_| |  _ <| |___
6  *                             \___|\___/|_| \_\_____|
7  *
8  * Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
9  *
10  * This software is licensed as described in the file COPYING, which
11  * you should have received as part of this distribution. The terms
12  * are also available at https://curl.haxx.se/docs/copyright.html.
13  *
14  * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15  * copies of the Software, and permit persons to whom the Software is
16  * furnished to do so, under the terms of the COPYING file.
17  *
18  * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19  * KIND, either express or implied.
20  *
21  ***************************************************************************/
22 
23 #include "curl_setup.h"
24 #include "strtoofft.h"
25 
26 #ifdef HAVE_NETINET_IN_H
27 #include <netinet/in.h>
28 #endif
29 #ifdef HAVE_NETDB_H
30 #include <netdb.h>
31 #endif
32 #ifdef HAVE_ARPA_INET_H
33 #include <arpa/inet.h>
34 #endif
35 #ifdef HAVE_NET_IF_H
36 #include <net/if.h>
37 #endif
38 #ifdef HAVE_SYS_IOCTL_H
39 #include <sys/ioctl.h>
40 #endif
41 #ifdef HAVE_SIGNAL_H
42 #include <signal.h>
43 #endif
44 
45 #ifdef HAVE_SYS_PARAM_H
46 #include <sys/param.h>
47 #endif
48 
49 #ifdef HAVE_SYS_SELECT_H
50 #include <sys/select.h>
51 #endif
52 
53 #ifndef HAVE_SOCKET
54 #error "We can't compile without socket() support!"
55 #endif
56 
57 #include "urldata.h"
58 #include <curl/curl.h>
59 #include "netrc.h"
60 
61 #include "content_encoding.h"
62 #include "hostip.h"
63 #include "transfer.h"
64 #include "sendf.h"
65 #include "speedcheck.h"
66 #include "progress.h"
67 #include "http.h"
68 #include "url.h"
69 #include "getinfo.h"
70 #include "vtls/vtls.h"
71 #include "select.h"
72 #include "multiif.h"
73 #include "connect.h"
74 #include "non-ascii.h"
75 #include "http2.h"
76 #include "mime.h"
77 #include "strcase.h"
78 
79 /* The last 3 #include files should be in this order */
80 #include "curl_printf.h"
81 #include "curl_memory.h"
82 #include "memdebug.h"
83 
84 #if !defined(CURL_DISABLE_HTTP) || !defined(CURL_DISABLE_SMTP) || \
85     !defined(CURL_DISABLE_IMAP)
86 /*
87  * checkheaders() checks the linked list of custom headers for a
88  * particular header (prefix).
89  *
90  * Returns a pointer to the first matching header or NULL if none matched.
91  */
92 char *Curl_checkheaders(const struct connectdata *conn,
93                         const char *thisheader)
94 {
95   struct curl_slist *head;
96   size_t thislen = strlen(thisheader);
97   struct Curl_easy *data = conn->data;
98 
99   for(head = data->set.headers; head; head = head->next) {
100     if(strncasecompare(head->data, thisheader, thislen))
101       return head->data;
102   }
103 
104   return NULL;
105 }
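/* Illustrative usage sketch (not a literal call site from this file): a
   protocol module that wants to add a default header only when the
   application has not supplied its own could do

     if(!Curl_checkheaders(conn, "Expect"))
       (add the library default header here)

   The match is a case-insensitive prefix match against the raw
   "Name: value" strings in data->set.headers, so passing just the header
   name without the colon is enough. */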
106 #endif
107 
108 /*
109  * This function will call the read callback to fill our buffer with data
110  * to upload.
111  */
112 CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
113 {
114   struct Curl_easy *data = conn->data;
115   size_t buffersize = (size_t)bytes;
116   int nread;
117 #ifdef CURL_DOES_CONVERSIONS
118   bool sending_http_headers = FALSE;
119 
120   if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
121     const struct HTTP *http = data->req.protop;
122 
123     if(http->sending == HTTPSEND_REQUEST)
124       /* We're sending the HTTP request headers, not the data.
125          Remember that so we don't re-translate them into garbage. */
126       sending_http_headers = TRUE;
127   }
128 #endif
129 
130   if(data->req.upload_chunky) {
131     /* if chunked Transfer-Encoding */
132     buffersize -= (8 + 2 + 2);   /* 32bit hex + CRLF + CRLF */
133     data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
134   }
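  /* Buffer layout after the adjustment above (chunked uploads only): room is
     reserved in front of upload_fromhere for the hex chunk-size prefix and
     its CRLF, and behind the data for the trailing CRLF, i.e.

       [ <hex size> CRLF ][ data filled in by the read callback ][ CRLF ]
                           ^ upload_fromhere points here

     which is why the read callback below is offered at most
     'buffersize' (= bytes - 12) bytes. */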
135 
136   /* this function returns a size_t, so we typecast to int to prevent warnings
137      with picky compilers */
138   nread = (int)data->state.fread_func(data->req.upload_fromhere, 1,
139                                       buffersize, data->state.in);
140 
141   if(nread == CURL_READFUNC_ABORT) {
142     failf(data, "operation aborted by callback");
143     *nreadp = 0;
144     return CURLE_ABORTED_BY_CALLBACK;
145   }
146   if(nread == CURL_READFUNC_PAUSE) {
147     struct SingleRequest *k = &data->req;
148 
149     if(conn->handler->flags & PROTOPT_NONETWORK) {
150       /* protocols that work without network cannot be paused. This is
151          actually only FILE:// just now, and it can't pause since the transfer
152          isn't done using the "normal" procedure. */
153       failf(data, "Read callback asked for PAUSE when not supported!");
154       return CURLE_READ_ERROR;
155     }
156 
157     /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
158     k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
159     if(data->req.upload_chunky) {
160       /* Back out the preallocation done above */
161       data->req.upload_fromhere -= (8 + 2);
162     }
163     *nreadp = 0;
164 
165     return CURLE_OK; /* nothing was read */
166   }
167   else if((size_t)nread > buffersize) {
168     /* the read function returned a too large value */
169     *nreadp = 0;
170     failf(data, "read function returned funny value");
171     return CURLE_READ_ERROR;
172   }
173 
174   if(!data->req.forbidchunk && data->req.upload_chunky) {
175     /* if chunked Transfer-Encoding
176      *    build chunk:
177      *
178      *        <HEX SIZE> CRLF
179      *        <DATA> CRLF
180      */
181     /* On non-ASCII platforms the <DATA> may or may not be
182        translated based on set.prefer_ascii while the protocol
183        portion must always be translated to the network encoding.
184        To further complicate matters, line end conversion might be
185        done later on, so we need to prevent CRLFs from becoming
186        CRCRLFs if that's the case.  To do this we use bare LFs
187        here, knowing they'll become CRLFs later on.
188      */
189 
190     char hexbuffer[11];
191     const char *endofline_native;
192     const char *endofline_network;
193     int hexlen;
194 
195     if(
196 #ifdef CURL_DO_LINEEND_CONV
197        (data->set.prefer_ascii) ||
198 #endif
199        (data->set.crlf)) {
200       /* \n will become \r\n later on */
201       endofline_native  = "\n";
202       endofline_network = "\x0a";
203     }
204     else {
205       endofline_native  = "\r\n";
206       endofline_network = "\x0d\x0a";
207     }
208     hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
209                       "%x%s", nread, endofline_native);
210 
211     /* move buffer pointer */
212     data->req.upload_fromhere -= hexlen;
213     nread += hexlen;
214 
215     /* copy the prefix to the buffer, leaving out the NUL */
216     memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
217 
218     /* always append ASCII CRLF to the data */
219     memcpy(data->req.upload_fromhere + nread,
220            endofline_network,
221            strlen(endofline_network));
222 
223 #ifdef CURL_DOES_CONVERSIONS
224     {
225       CURLcode result;
226       int length;
227       if(data->set.prefer_ascii)
228         /* translate the protocol and data */
229         length = nread;
230       else
231         /* just translate the protocol portion */
232         length = (int)strlen(hexbuffer);
233       result = Curl_convert_to_network(data, data->req.upload_fromhere,
234                                        length);
235       /* Curl_convert_to_network calls failf if unsuccessful */
236       if(result)
237         return result;
238     }
239 #endif /* CURL_DOES_CONVERSIONS */
240 
241     if((nread - hexlen) == 0) {
242       /* mark this as done once this chunk is transferred */
243       data->req.upload_done = TRUE;
244       infof(data, "Signaling end of chunked upload via terminating chunk.\n");
245     }
246 
247     nread += (int)strlen(endofline_native); /* for the added end of line */
248   }
249 #ifdef CURL_DOES_CONVERSIONS
250   else if((data->set.prefer_ascii) && (!sending_http_headers)) {
251     CURLcode result;
252     result = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
253     /* Curl_convert_to_network calls failf if unsuccessful */
254     if(result)
255       return result;
256   }
257 #endif /* CURL_DOES_CONVERSIONS */
258 
259   *nreadp = nread;
260 
261   return CURLE_OK;
262 }
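/* Example of the chunk framing built above (illustrative sketch): if the
   read callback returns 5 bytes of payload, "hello", and no line-end
   conversion is in effect, the buffer handed to the sender becomes

     "5\r\nhello\r\n"

   and *nreadp is set to 10 (hex size + CRLF + data + trailing CRLF). A
   zero-byte read produces the terminating "0\r\n\r\n" frame and flips
   data->req.upload_done to TRUE. */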
263 
264 
265 /*
266  * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
267  * POST/PUT with multi-pass authentication when a sending was denied and a
268  * resend is necessary.
269  */
270 CURLcode Curl_readrewind(struct connectdata *conn)
271 {
272   struct Curl_easy *data = conn->data;
273   curl_mimepart *mimepart = &data->set.mimepost;
274 
275   conn->bits.rewindaftersend = FALSE; /* we rewind now */
276 
277   /* explicitly switch off sending data on this connection now since we are
278      about to restart a new transfer and thus we want to avoid inadvertently
279      sending more data on the existing connection until the next transfer
280      starts */
281   data->req.keepon &= ~KEEP_SEND;
282 
283   /* We have sent away data. If not using CURLOPT_POSTFIELDS or
284      CURLOPT_HTTPPOST, call app to rewind
285   */
286   if(conn->handler->protocol & PROTO_FAMILY_HTTP) {
287     struct HTTP *http = data->req.protop;
288 
289     if(http->sendit)
290       mimepart = http->sendit;
291   }
292   if(data->set.postfields)
293     ; /* do nothing */
294   else if(data->set.httpreq == HTTPREQ_POST_MIME ||
295           data->set.httpreq == HTTPREQ_POST_FORM) {
296     if(Curl_mime_rewind(mimepart)) {
297       failf(data, "Cannot rewind mime/post data");
298       return CURLE_SEND_FAIL_REWIND;
299     }
300   }
301   else {
302     if(data->set.seek_func) {
303       int err;
304 
305       err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
306       if(err) {
307         failf(data, "seek callback returned error %d", (int)err);
308         return CURLE_SEND_FAIL_REWIND;
309       }
310     }
311     else if(data->set.ioctl_func) {
312       curlioerr err;
313 
314       err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
315                                    data->set.ioctl_client);
316       infof(data, "the ioctl callback returned %d\n", (int)err);
317 
318       if(err) {
319         /* FIXME: convert to a human readable error message */
320         failf(data, "ioctl callback returned error %d", (int)err);
321         return CURLE_SEND_FAIL_REWIND;
322       }
323     }
324     else {
325       /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
326          given FILE * stream and we can actually attempt to rewind that
327          ourselves with fseek() */
328       if(data->state.fread_func == (curl_read_callback)fread) {
329         if(-1 != fseek(data->state.in, 0, SEEK_SET))
330           /* successful rewind */
331           return CURLE_OK;
332       }
333 
334       /* no callback set or failure above, makes us fail at once */
335       failf(data, "necessary data rewind wasn't possible");
336       return CURLE_SEND_FAIL_REWIND;
337     }
338   }
339   return CURLE_OK;
340 }
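/* Illustrative scenario (sketch): when an upload is denied in the middle of
   multi-pass authentication (e.g. NTLM or Digest), the request body must be
   re-sent. Other parts of libcurl then set conn->bits.rewindaftersend, and
   once the first request body has been sent off, done_sending() below ends
   up in Curl_readrewind() so the retried request can read the body from the
   start again. The order of preference implemented above is:
     1) nothing to do for CURLOPT_POSTFIELDS data (it is held in memory),
     2) Curl_mime_rewind() for mime/form posts,
     3) the CURLOPT_SEEKFUNCTION callback,
     4) the CURLOPT_IOCTLFUNCTION callback with CURLIOCMD_RESTARTREAD,
     5) fseek() when the default fread() read callback is in use,
   and failing all of those the transfer fails with CURLE_SEND_FAIL_REWIND. */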
341 
342 static int data_pending(const struct connectdata *conn)
343 {
344   /* in the case of libssh2, we can never be really sure that we have emptied
345      its internal buffers so we MUST always try until we get EAGAIN back */
346   return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
347 #if defined(USE_NGHTTP2)
348     Curl_ssl_data_pending(conn, FIRSTSOCKET) ||
349     /* For HTTP/2, we may read up everything including the response body
350        with header fields in Curl_http_readwrite_headers. If no
351        content-length is provided, curl waits for the connection
352        close, which we emulate using conn->proto.httpc.closed =
353        TRUE. The thing is, if we read everything, then http2_recv won't
354        be called and we cannot signal that the HTTP/2 stream has closed. As
355        a workaround, we return nonzero here to call http2_recv. */
356     ((conn->handler->protocol&PROTO_FAMILY_HTTP) && conn->httpversion == 20);
357 #else
358     Curl_ssl_data_pending(conn, FIRSTSOCKET);
359 #endif
360 }
361 
362 static void read_rewind(struct connectdata *conn,
363                         size_t thismuch)
364 {
365   DEBUGASSERT(conn->read_pos >= thismuch);
366 
367   conn->read_pos -= thismuch;
368   conn->bits.stream_was_rewound = TRUE;
369 
370 #ifdef DEBUGBUILD
371   {
372     char buf[512 + 1];
373     size_t show;
374 
375     show = CURLMIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
376     if(conn->master_buffer) {
377       memcpy(buf, conn->master_buffer + conn->read_pos, show);
378       buf[show] = '\0';
379     }
380     else {
381       buf[0] = '\0';
382     }
383 
384     DEBUGF(infof(conn->data,
385                  "Buffer after stream rewind (read_pos = %zu): [%s]\n",
386                  conn->read_pos, buf));
387   }
388 #endif
389 }
390 
391 /*
392  * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
393  * remote document with the time provided by CURLOPT_TIMEVAL
394  */
395 bool Curl_meets_timecondition(struct Curl_easy *data, time_t timeofdoc)
396 {
397   if((timeofdoc == 0) || (data->set.timevalue == 0))
398     return TRUE;
399 
400   switch(data->set.timecondition) {
401   case CURL_TIMECOND_IFMODSINCE:
402   default:
403     if(timeofdoc <= data->set.timevalue) {
404       infof(data,
405             "The requested document is not new enough\n");
406       data->info.timecond = TRUE;
407       return FALSE;
408     }
409     break;
410   case CURL_TIMECOND_IFUNMODSINCE:
411     if(timeofdoc >= data->set.timevalue) {
412       infof(data,
413             "The requested document is not old enough\n");
414       data->info.timecond = TRUE;
415       return FALSE;
416     }
417     break;
418   }
419 
420   return TRUE;
421 }
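/* Usage sketch (hypothetical option values): after something like

     curl_easy_setopt(easy, CURLOPT_TIMECONDITION,
                      (long)CURL_TIMECOND_IFMODSINCE);
     curl_easy_setopt(easy, CURLOPT_TIMEVALUE, (long)1483228800);

   Curl_meets_timecondition(data, timeofdoc) returns FALSE (and sets
   data->info.timecond) when the remote document is not newer than the given
   time stamp; readwrite_data() below then turns that into a simulated
   HTTP 304 response. */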
422 
423 /*
424  * Go ahead and do a read if we have a readable socket or if
425  * the stream was rewound (in which case we have data in a
426  * buffer)
427  *
428  * return '*comeback' TRUE if we didn't properly drain the socket so this
429  * function should get called again without select() or similar in between!
430  */
431 static CURLcode readwrite_data(struct Curl_easy *data,
432                                struct connectdata *conn,
433                                struct SingleRequest *k,
434                                int *didwhat, bool *done,
435                                bool *comeback)
436 {
437   CURLcode result = CURLE_OK;
438   ssize_t nread; /* number of bytes read */
439   size_t excess = 0; /* excess bytes read */
440   bool is_empty_data = FALSE;
441   bool readmore = FALSE; /* used by RTP to signal for more data */
442   int maxloops = 100;
443 
444   *done = FALSE;
445   *comeback = FALSE;
446 
447   /* This is where we loop until we have read everything there is to
448      read or we get a CURLE_AGAIN */
449   do {
450     size_t buffersize = data->set.buffer_size;
451     size_t bytestoread = buffersize;
452 
453     if(
454 #if defined(USE_NGHTTP2)
455        /* For HTTP/2, read data without caring about the content
456           length. This is safe because body in HTTP/2 is always
457           segmented thanks to its framing layer. Meanwhile, we have to
458           call Curl_read to ensure that http2_handle_stream_close is
459           called when we read all incoming bytes for a particular
460           stream. */
461        !((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
462          conn->httpversion == 20) &&
463 #endif
464        k->size != -1 && !k->header) {
465       /* make sure we don't read "too much" if we can help it since we
466          might be pipelining and then someone else might want to read what
467          follows! */
468       curl_off_t totalleft = k->size - k->bytecount;
469       if(totalleft < (curl_off_t)bytestoread)
470         bytestoread = (size_t)totalleft;
471     }
472 
473     if(bytestoread) {
474       /* receive data from the network! */
475       result = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);
476 
477       /* read would've blocked */
478       if(CURLE_AGAIN == result)
479         break; /* get out of loop */
480 
481       if(result>0)
482         return result;
483     }
484     else {
485       /* read nothing but since we wanted nothing we consider this an OK
486          situation to proceed from */
487       DEBUGF(infof(data, "readwrite_data: we're done!\n"));
488       nread = 0;
489     }
490 
491     if((k->bytecount == 0) && (k->writebytecount == 0)) {
492       Curl_pgrsTime(data, TIMER_STARTTRANSFER);
493       if(k->exp100 > EXP100_SEND_DATA)
494         /* set time stamp to compare with when waiting for the 100 */
495         k->start100 = Curl_now();
496     }
497 
498     *didwhat |= KEEP_RECV;
499     /* indicates data of zero size, i.e. empty file */
500     is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;
501 
502     /* NUL terminate, allowing string ops to be used */
503     if(0 < nread || is_empty_data) {
504       k->buf[nread] = 0;
505     }
506     else if(0 >= nread) {
507       /* if we receive 0 or less here, the server closed the connection
508          and we bail out from this! */
509       DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
510       k->keepon &= ~KEEP_RECV;
511       break;
512     }
513 
514     /* Default buffer to use when we write the buffer, it may be changed
515        in the flow below before the actual storing is done. */
516     k->str = k->buf;
517 
518     if(conn->handler->readwrite) {
519       result = conn->handler->readwrite(data, conn, &nread, &readmore);
520       if(result)
521         return result;
522       if(readmore)
523         break;
524     }
525 
526 #ifndef CURL_DISABLE_HTTP
527     /* Since this is a two-state thing, we check if we are parsing
528        headers at the moment or not. */
529     if(k->header) {
530       /* we are in parse-the-header-mode */
531       bool stop_reading = FALSE;
532       result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
533       if(result)
534         return result;
535 
536       if(conn->handler->readwrite &&
537          (k->maxdownload <= 0 && nread > 0)) {
538         result = conn->handler->readwrite(data, conn, &nread, &readmore);
539         if(result)
540           return result;
541         if(readmore)
542           break;
543       }
544 
545       if(stop_reading) {
546         /* We've stopped dealing with input, get out of the do-while loop */
547 
548         if(nread > 0) {
549           if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
550             infof(data,
551                   "Rewinding stream by : %zd"
552                   " bytes on url %s (zero-length body)\n",
553                   nread, data->state.path);
554             read_rewind(conn, (size_t)nread);
555           }
556           else {
557             infof(data,
558                   "Excess found in a non pipelined read:"
559                   " excess = %zd"
560                   " url = %s (zero-length body)\n",
561                   nread, data->state.path);
562           }
563         }
564 
565         break;
566       }
567     }
568 #endif /* CURL_DISABLE_HTTP */
569 
570 
571     /* This is not an 'else if' since it may be a rest from the header
572        parsing, where the beginning of the buffer is headers and the end
573        is non-headers. */
574     if(k->str && !k->header && (nread > 0 || is_empty_data)) {
575 
576       if(data->set.opt_no_body) {
577         /* data arrives although we want none, bail out */
578         streamclose(conn, "ignoring body");
579         *done = TRUE;
580         return CURLE_WEIRD_SERVER_REPLY;
581       }
582 
583 #ifndef CURL_DISABLE_HTTP
584       if(0 == k->bodywrites && !is_empty_data) {
585         /* These checks are only made the first time we are about to
586            write a piece of the body */
587         if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
588           /* HTTP-only checks */
589 
590           if(data->req.newurl) {
591             if(conn->bits.close) {
592               /* Abort after the headers if "follow Location" is set
593                  and we're set to close anyway. */
594               k->keepon &= ~KEEP_RECV;
595               *done = TRUE;
596               return CURLE_OK;
597             }
598             /* We have a new url to load, but since we want to be able
599                to re-use this connection properly, we read the full
600                response and just ignore its body */
601             k->ignorebody = TRUE;
602             infof(data, "Ignoring the response-body\n");
603           }
604           if(data->state.resume_from && !k->content_range &&
605              (data->set.httpreq == HTTPREQ_GET) &&
606              !k->ignorebody) {
607 
608             if(k->size == data->state.resume_from) {
609               /* The resume point is at the end of file, consider this fine
610                  even if it doesn't allow resume from here. */
611               infof(data, "The entire document is already downloaded");
612               connclose(conn, "already downloaded");
613               /* Abort download */
614               k->keepon &= ~KEEP_RECV;
615               *done = TRUE;
616               return CURLE_OK;
617             }
618 
619             /* we wanted to resume a download, although the server doesn't
620              * seem to support this and we did this with a GET (if it
621              * wasn't a GET we did a POST or PUT resume) */
622             failf(data, "HTTP server doesn't seem to support "
623                   "byte ranges. Cannot resume.");
624             return CURLE_RANGE_ERROR;
625           }
626 
627           if(data->set.timecondition && !data->state.range) {
628             /* A time condition has been set AND no ranges have been
629                requested. This seems to be what chapter 13.3.4 of
630                RFC 2616 defines to be the correct action for an
631                HTTP/1.1 client */
632 
633             if(!Curl_meets_timecondition(data, k->timeofdoc)) {
634               *done = TRUE;
635               /* We're simulating a http 304 from server so we return
636                  what should have been returned from the server */
637               data->info.httpcode = 304;
638               infof(data, "Simulate a HTTP 304 response!\n");
639               /* we abort the transfer before it is completed == we ruin the
640                  re-use ability. Close the connection */
641               connclose(conn, "Simulated 304 handling");
642               return CURLE_OK;
643             }
644           } /* we have a time condition */
645 
646         } /* this is HTTP or RTSP */
647       } /* this is the first time we write a body part */
648 #endif /* CURL_DISABLE_HTTP */
649 
650       k->bodywrites++;
651 
652       /* pass data to the debug function before it gets "dechunked" */
653       if(data->set.verbose) {
654         if(k->badheader) {
655           Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
656                      (size_t)k->hbuflen, conn);
657           if(k->badheader == HEADER_PARTHEADER)
658             Curl_debug(data, CURLINFO_DATA_IN,
659                        k->str, (size_t)nread, conn);
660         }
661         else
662           Curl_debug(data, CURLINFO_DATA_IN,
663                      k->str, (size_t)nread, conn);
664       }
665 
666 #ifndef CURL_DISABLE_HTTP
667       if(k->chunk) {
668         /*
669          * Here comes a chunked transfer flying and we need to decode this
670          * properly.  While the name says read, this function both reads
671          * and writes away the data. The returned 'nread' holds the number
672          * of actual data it wrote to the client.
673          */
674 
675         CHUNKcode res =
676           Curl_httpchunk_read(conn, k->str, nread, &nread);
677 
678         if(CHUNKE_OK < res) {
679           if(CHUNKE_WRITE_ERROR == res) {
680             failf(data, "Failed writing data");
681             return CURLE_WRITE_ERROR;
682           }
683           failf(data, "%s in chunked-encoding", Curl_chunked_strerror(res));
684           return CURLE_RECV_ERROR;
685         }
686         if(CHUNKE_STOP == res) {
687           size_t dataleft;
688           /* we're done reading chunks! */
689           k->keepon &= ~KEEP_RECV; /* read no more */
690 
691           /* There are now possibly N number of bytes at the end of the
692              str buffer that weren't written to the client.
693 
694              We DO care about this data if we are pipelining.
695              Push it back to be read on the next pass. */
696 
697           dataleft = conn->chunk.dataleft;
698           if(dataleft != 0) {
699             infof(conn->data, "Leftovers after chunking: %zu bytes\n",
700                   dataleft);
701             if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
702               /* only attempt the rewind if we truly are pipelining */
703               infof(conn->data, "Rewinding %zu bytes\n",dataleft);
704               read_rewind(conn, dataleft);
705             }
706           }
707         }
708         /* If it returned OK, we just keep going */
709       }
710 #endif   /* CURL_DISABLE_HTTP */
711 
712       /* Account for body content stored in the header buffer */
713       if(k->badheader && !k->ignorebody) {
714         DEBUGF(infof(data, "Increasing bytecount by %zu from hbuflen\n",
715                      k->hbuflen));
716         k->bytecount += k->hbuflen;
717       }
718 
719       if((-1 != k->maxdownload) &&
720          (k->bytecount + nread >= k->maxdownload)) {
721 
722         excess = (size_t)(k->bytecount + nread - k->maxdownload);
723         if(excess > 0 && !k->ignorebody) {
724           if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
725             infof(data,
726                   "Rewinding stream by : %zu"
727                   " bytes on url %s (size = %" CURL_FORMAT_CURL_OFF_T
728                   ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
729                   ", bytecount = %" CURL_FORMAT_CURL_OFF_T ", nread = %zd)\n",
730                   excess, data->state.path,
731                   k->size, k->maxdownload, k->bytecount, nread);
732             read_rewind(conn, excess);
733           }
734           else {
735             infof(data,
736                   "Excess found in a non pipelined read:"
737                   " excess = %zu"
738                   ", size = %" CURL_FORMAT_CURL_OFF_T
739                   ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
740                   ", bytecount = %" CURL_FORMAT_CURL_OFF_T "\n",
741                   excess, k->size, k->maxdownload, k->bytecount);
742           }
743         }
744 
745         nread = (ssize_t) (k->maxdownload - k->bytecount);
746         if(nread < 0) /* this should be unusual */
747           nread = 0;
748 
749         k->keepon &= ~KEEP_RECV; /* we're done reading */
750       }
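      /* Numeric sketch of the clamping above: with maxdownload = 100,
         bytecount = 90 and a read of nread = 25, 'excess' becomes 15, nread
         is clamped down to 10 and KEEP_RECV is cleared since the body is now
         complete. The excess bytes are either rewound for a pipelined
         connection, or logged and left for a protocol-specific readwrite
         handler further down to consume. */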
751 
752       k->bytecount += nread;
753 
754       Curl_pgrsSetDownloadCounter(data, k->bytecount);
755 
756       if(!k->chunk && (nread || k->badheader || is_empty_data)) {
757         /* If this is chunky transfer, it was already written */
758 
759         if(k->badheader && !k->ignorebody) {
760           /* we parsed a piece of data wrongly assuming it was a header
761              and now we output it as body instead */
762 
763           /* Don't let excess data pollute body writes */
764           if(k->maxdownload == -1 || (curl_off_t)k->hbuflen <= k->maxdownload)
765             result = Curl_client_write(conn, CLIENTWRITE_BODY,
766                                        data->state.headerbuff,
767                                        k->hbuflen);
768           else
769             result = Curl_client_write(conn, CLIENTWRITE_BODY,
770                                        data->state.headerbuff,
771                                        (size_t)k->maxdownload);
772 
773           if(result)
774             return result;
775         }
776         if(k->badheader < HEADER_ALLBAD) {
777           /* This switch handles various content encodings. If there's an
778              error here, be sure to check over the almost identical code
779              in http_chunks.c.
780              Make sure that ALL_CONTENT_ENCODINGS contains all the
781              encodings handled here. */
782           if(conn->data->set.http_ce_skip || !k->writer_stack) {
783             if(!k->ignorebody) {
784 #ifndef CURL_DISABLE_POP3
785               if(conn->handler->protocol & PROTO_FAMILY_POP3)
786                 result = Curl_pop3_write(conn, k->str, nread);
787               else
788 #endif /* CURL_DISABLE_POP3 */
789                 result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
790                                            nread);
791             }
792           }
793           else
794             result = Curl_unencode_write(conn, k->writer_stack, k->str, nread);
795         }
796         k->badheader = HEADER_NORMAL; /* taken care of now */
797 
798         if(result)
799           return result;
800       }
801 
802     } /* if(!header and data to read) */
803 
804     if(conn->handler->readwrite &&
805        (excess > 0 && !conn->bits.stream_was_rewound)) {
806       /* Parse the excess data */
807       k->str += nread;
808       nread = (ssize_t)excess;
809 
810       result = conn->handler->readwrite(data, conn, &nread, &readmore);
811       if(result)
812         return result;
813 
814       if(readmore)
815         k->keepon |= KEEP_RECV; /* we're not done reading */
816       break;
817     }
818 
819     if(is_empty_data) {
820       /* if we received nothing, the server closed the connection and we
821          are done */
822       k->keepon &= ~KEEP_RECV;
823     }
824 
825   } while(data_pending(conn) && maxloops--);
826 
827   if(maxloops <= 0) {
828     /* we mark it as read-again-please */
829     conn->cselect_bits = CURL_CSELECT_IN;
830     *comeback = TRUE;
831   }
832 
833   if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
834      conn->bits.close) {
835     /* When we've read the entire thing and the close bit is set, the server
836        may now close the connection. If there's now any kind of sending going
837        on from our side, we need to stop that immediately. */
838     infof(data, "we are done reading and this is set to close, stop send\n");
839     k->keepon &= ~KEEP_SEND; /* no writing anymore either */
840   }
841 
842   return CURLE_OK;
843 }
844 
845 static CURLcode done_sending(struct connectdata *conn,
846                              struct SingleRequest *k)
847 {
848   k->keepon &= ~KEEP_SEND; /* we're done writing */
849 
850   Curl_http2_done_sending(conn);
851 
852   if(conn->bits.rewindaftersend) {
853     CURLcode result = Curl_readrewind(conn);
854     if(result)
855       return result;
856   }
857   return CURLE_OK;
858 }
859 
860 
861 /*
862  * Send data to upload to the server, when the socket is writable.
863  */
864 static CURLcode readwrite_upload(struct Curl_easy *data,
865                                  struct connectdata *conn,
866                                  int *didwhat)
867 {
868   ssize_t i, si;
869   ssize_t bytes_written;
870   CURLcode result;
871   ssize_t nread; /* number of bytes read */
872   bool sending_http_headers = FALSE;
873   struct SingleRequest *k = &data->req;
874 
875   if((k->bytecount == 0) && (k->writebytecount == 0))
876     Curl_pgrsTime(data, TIMER_STARTTRANSFER);
877 
878   *didwhat |= KEEP_SEND;
879 
880   do {
881 
882     /* only read more data if there's no upload data already
883        present in the upload buffer */
884     if(0 == k->upload_present) {
885       /* init the "upload from here" pointer */
886       k->upload_fromhere = data->state.uploadbuffer;
887 
888       if(!k->upload_done) {
889         /* HTTP pollution, this should be written nicer to become more
890            protocol agnostic. */
891         int fillcount;
892         struct HTTP *http = k->protop;
893 
894         if((k->exp100 == EXP100_SENDING_REQUEST) &&
895            (http->sending == HTTPSEND_BODY)) {
896           /* If this call is to send body data, we must take some action:
897              We have sent off the full HTTP 1.1 request, and we shall now
898              go into the Expect: 100 state and await such a header */
899           k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
900           k->keepon &= ~KEEP_SEND;         /* disable writing */
901           k->start100 = Curl_now();       /* timeout count starts now */
902           *didwhat &= ~KEEP_SEND;  /* we didn't write anything actually */
903 
904           /* set a timeout for the multi interface */
905           Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
906           break;
907         }
908 
909         if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
910           if(http->sending == HTTPSEND_REQUEST)
911             /* We're sending the HTTP request headers, not the data.
912                Remember that so we don't change the line endings. */
913             sending_http_headers = TRUE;
914           else
915             sending_http_headers = FALSE;
916         }
917 
918         result = Curl_fillreadbuffer(conn, UPLOAD_BUFSIZE, &fillcount);
919         if(result)
920           return result;
921 
922         nread = (ssize_t)fillcount;
923       }
924       else
925         nread = 0; /* we're done uploading/reading */
926 
927       if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
928         /* this is a paused transfer */
929         break;
930       }
931       if(nread <= 0) {
932         result = done_sending(conn, k);
933         if(result)
934           return result;
935         break;
936       }
937 
938       /* store number of bytes available for upload */
939       k->upload_present = nread;
940 
941       /* convert LF to CRLF if so asked */
942       if((!sending_http_headers) && (
943 #ifdef CURL_DO_LINEEND_CONV
944          /* always convert if we're FTPing in ASCII mode */
945          (data->set.prefer_ascii) ||
946 #endif
947          (data->set.crlf))) {
948         /* Do we need to allocate a scratch buffer? */
949         if(!data->state.scratch) {
950           data->state.scratch = malloc(2 * data->set.buffer_size);
951           if(!data->state.scratch) {
952             failf(data, "Failed to alloc scratch buffer!");
953 
954             return CURLE_OUT_OF_MEMORY;
955           }
956         }
957 
958         /*
959          * ASCII/EBCDIC Note: This is presumably a text (not binary)
960          * transfer so the data should already be in ASCII.
961          * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
962          * must be used instead of the escape sequences \r & \n.
963          */
964         for(i = 0, si = 0; i < nread; i++, si++) {
965           if(k->upload_fromhere[i] == 0x0a) {
966             data->state.scratch[si++] = 0x0d;
967             data->state.scratch[si] = 0x0a;
968             if(!data->set.crlf) {
969               /* we're here only because FTP is in ASCII mode...
970                  bump infilesize for the LF we just added */
971               if(data->state.infilesize != -1)
972                 data->state.infilesize++;
973             }
974           }
975           else
976             data->state.scratch[si] = k->upload_fromhere[i];
977         }
978 
979         if(si != nread) {
980           /* only perform the special operation if we really did replace
981              anything */
982           nread = si;
983 
984           /* upload from the new (replaced) buffer instead */
985           k->upload_fromhere = data->state.scratch;
986 
987           /* set the new amount too */
988           k->upload_present = nread;
989         }
990       }
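      /* Example of the conversion above (sketch): with CURLOPT_CRLF enabled,
         an upload buffer holding "a\nb\n" (4 bytes) is expanded into
         data->state.scratch as "a\r\nb\r\n" (6 bytes), upload_fromhere is
         repointed at the scratch buffer and upload_present grows to 6. The
         scratch buffer is sized at twice the upload buffer because, in the
         worst case, every input byte is an LF that doubles into CRLF. */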
991 
992 #ifndef CURL_DISABLE_SMTP
993       if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
994         result = Curl_smtp_escape_eob(conn, nread);
995         if(result)
996           return result;
997       }
998 #endif /* CURL_DISABLE_SMTP */
999     } /* if 0 == k->upload_present */
1000     else {
1001       /* We have a partial buffer left from a previous "round". Use
1002          that instead of reading more data */
1003     }
1004 
1005     /* write to socket (send away data) */
1006     result = Curl_write(conn,
1007                         conn->writesockfd,  /* socket to send to */
1008                         k->upload_fromhere, /* buffer pointer */
1009                         k->upload_present,  /* buffer size */
1010                         &bytes_written);    /* actually sent */
1011 
1012     if(result)
1013       return result;
1014 
1015     if(data->set.verbose)
1016       /* show the data before we change the pointer upload_fromhere */
1017       Curl_debug(data, CURLINFO_DATA_OUT, k->upload_fromhere,
1018                  (size_t)bytes_written, conn);
1019 
1020     k->writebytecount += bytes_written;
1021 
1022     if((!k->upload_chunky || k->forbidchunk) &&
1023        (k->writebytecount == data->state.infilesize)) {
1024       /* we have sent all data we were supposed to */
1025       k->upload_done = TRUE;
1026       infof(data, "We are completely uploaded and fine\n");
1027     }
1028 
1029     if(k->upload_present != bytes_written) {
1030       /* we only wrote a part of the buffer (if anything), deal with it! */
1031 
1032       /* store the amount of bytes left in the buffer to write */
1033       k->upload_present -= bytes_written;
1034 
1035       /* advance the pointer where to find the buffer when the next send
1036          is to happen */
1037       k->upload_fromhere += bytes_written;
1038     }
1039     else {
1040       /* we've uploaded that buffer now */
1041       k->upload_fromhere = data->state.uploadbuffer;
1042       k->upload_present = 0; /* no more bytes left */
1043 
1044       if(k->upload_done) {
1045         result = done_sending(conn, k);
1046         if(result)
1047           return result;
1048       }
1049     }
1050 
1051     Curl_pgrsSetUploadCounter(data, k->writebytecount);
1052 
1053   } WHILE_FALSE; /* just to break out from! */
1054 
1055   return CURLE_OK;
1056 }
1057 
1058 /*
1059  * Curl_readwrite() is the low-level function to be called when data is to
1060  * be read and written to/from the connection.
1061  *
1062  * return '*comeback' TRUE if we didn't properly drain the socket so this
1063  * function should get called again without select() or similar in between!
1064  */
1065 CURLcode Curl_readwrite(struct connectdata *conn,
1066                         struct Curl_easy *data,
1067                         bool *done,
1068                         bool *comeback)
1069 {
1070   struct SingleRequest *k = &data->req;
1071   CURLcode result;
1072   int didwhat = 0;
1073 
1074   curl_socket_t fd_read;
1075   curl_socket_t fd_write;
1076   int select_res = conn->cselect_bits;
1077 
1078   conn->cselect_bits = 0;
1079 
1080   /* only use the proper socket if the *_HOLD bit is not set simultaneously as
1081      then we are in rate limiting state in that transfer direction */
1082 
1083   if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
1084     fd_read = conn->sockfd;
1085   else
1086     fd_read = CURL_SOCKET_BAD;
1087 
1088   if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
1089     fd_write = conn->writesockfd;
1090   else
1091     fd_write = CURL_SOCKET_BAD;
1092 
1093   if(conn->data->state.drain) {
1094     select_res |= CURL_CSELECT_IN;
1095     DEBUGF(infof(data, "Curl_readwrite: forcibly told to drain data\n"));
1096   }
1097 
1098   if(!select_res) /* Call for select()/poll() only if read/write/error
1099                      status is not known. */
1100     select_res = Curl_socket_check(fd_read, CURL_SOCKET_BAD, fd_write, 0);
1101 
1102   if(select_res == CURL_CSELECT_ERR) {
1103     failf(data, "select/poll returned error");
1104     return CURLE_SEND_ERROR;
1105   }
1106 
1107   /* We go ahead and do a read if we have a readable socket or if
1108      the stream was rewound (in which case we have data in a
1109      buffer) */
1110   if((k->keepon & KEEP_RECV) &&
1111      ((select_res & CURL_CSELECT_IN) || conn->bits.stream_was_rewound)) {
1112 
1113     result = readwrite_data(data, conn, k, &didwhat, done, comeback);
1114     if(result || *done)
1115       return result;
1116   }
1117 
1118   /* If we still have writing to do, we check if we have a writable socket. */
1119   if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
1120     /* write */
1121 
1122     result = readwrite_upload(data, conn, &didwhat);
1123     if(result)
1124       return result;
1125   }
1126 
1127   k->now = Curl_now();
1128   if(didwhat) {
1129     /* Update read/write counters */
1130     if(k->bytecountp)
1131       *k->bytecountp = k->bytecount; /* read count */
1132     if(k->writebytecountp)
1133       *k->writebytecountp = k->writebytecount; /* write count */
1134   }
1135   else {
1136     /* no read no write, this is a timeout? */
1137     if(k->exp100 == EXP100_AWAITING_CONTINUE) {
1138       /* This should allow some time for the header to arrive, but only a
1139          very short time as otherwise it'll be too much wasted time too
1140          often. */
1141 
1142       /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1143 
1144          Therefore, when a client sends this header field to an origin server
1145          (possibly via a proxy) from which it has never seen a 100 (Continue)
1146          status, the client SHOULD NOT wait for an indefinite period before
1147          sending the request body.
1148 
1149       */
1150 
1151       timediff_t ms = Curl_timediff(k->now, k->start100);
1152       if(ms >= data->set.expect_100_timeout) {
1153         /* we've waited long enough, continue anyway */
1154         k->exp100 = EXP100_SEND_DATA;
1155         k->keepon |= KEEP_SEND;
1156         Curl_expire_done(data, EXPIRE_100_TIMEOUT);
1157         infof(data, "Done waiting for 100-continue\n");
1158       }
1159     }
1160   }
1161 
1162   if(Curl_pgrsUpdate(conn))
1163     result = CURLE_ABORTED_BY_CALLBACK;
1164   else
1165     result = Curl_speedcheck(data, k->now);
1166   if(result)
1167     return result;
1168 
1169   if(k->keepon) {
1170     if(0 > Curl_timeleft(data, &k->now, FALSE)) {
1171       if(k->size != -1) {
1172         failf(data, "Operation timed out after %ld milliseconds with %"
1173               CURL_FORMAT_CURL_OFF_T " out of %"
1174               CURL_FORMAT_CURL_OFF_T " bytes received",
1175               Curl_timediff(k->now, data->progress.t_startsingle),
1176               k->bytecount, k->size);
1177       }
1178       else {
1179         failf(data, "Operation timed out after %ld milliseconds with %"
1180               CURL_FORMAT_CURL_OFF_T " bytes received",
1181               Curl_timediff(k->now, data->progress.t_startsingle),
1182               k->bytecount);
1183       }
1184       return CURLE_OPERATION_TIMEDOUT;
1185     }
1186   }
1187   else {
1188     /*
1189      * The transfer has been performed. Just make some general checks before
1190      * returning.
1191      */
1192 
1193     if(!(data->set.opt_no_body) && (k->size != -1) &&
1194        (k->bytecount != k->size) &&
1195 #ifdef CURL_DO_LINEEND_CONV
1196        /* Most FTP servers don't adjust their file SIZE response for CRLFs,
1197           so we'll check to see if the discrepancy can be explained
1198           by the number of CRLFs we've changed to LFs.
1199        */
1200        (k->bytecount != (k->size + data->state.crlf_conversions)) &&
1201 #endif /* CURL_DO_LINEEND_CONV */
1202        !k->newurl) {
1203       failf(data, "transfer closed with %" CURL_FORMAT_CURL_OFF_T
1204             " bytes remaining to read", k->size - k->bytecount);
1205       return CURLE_PARTIAL_FILE;
1206     }
1207     if(!(data->set.opt_no_body) && k->chunk &&
1208        (conn->chunk.state != CHUNK_STOP)) {
1209       /*
1210        * In chunked mode, return an error if the connection is closed prior to
1211        * the empty (terminating) chunk is read.
1212        *
1213        * The condition above used to check for
1214        * conn->proto.http->chunk.datasize != 0 which is true after reading
1215        * *any* chunk, not just the empty chunk.
1216        *
1217        */
1218       failf(data, "transfer closed with outstanding read data remaining");
1219       return CURLE_PARTIAL_FILE;
1220     }
1221     if(Curl_pgrsUpdate(conn))
1222       return CURLE_ABORTED_BY_CALLBACK;
1223   }
1224 
1225   /* Now update the "done" boolean we return */
1226   *done = (0 == (k->keepon&(KEEP_RECV|KEEP_SEND|
1227                             KEEP_RECV_PAUSE|KEEP_SEND_PAUSE))) ? TRUE : FALSE;
1228 
1229   return CURLE_OK;
1230 }
1231 
1232 /*
1233  * Curl_single_getsock() gets called by the multi interface code when the app
1234  * has requested to get the sockets for the current connection. This function
1235  * will then be called once for every connection that the multi interface
1236  * keeps track of. This function will only be called for connections that are
1237  * in the proper state to have this information available.
1238  */
1239 int Curl_single_getsock(const struct connectdata *conn,
1240                         curl_socket_t *sock, /* points to numsocks number
1241                                                 of sockets */
1242                         int numsocks)
1243 {
1244   const struct Curl_easy *data = conn->data;
1245   int bitmap = GETSOCK_BLANK;
1246   unsigned sockindex = 0;
1247 
1248   if(conn->handler->perform_getsock)
1249     return conn->handler->perform_getsock(conn, sock, numsocks);
1250 
1251   if(numsocks < 2)
1252     /* simple check but we might need two slots */
1253     return GETSOCK_BLANK;
1254 
1255   /* don't include HOLD and PAUSE connections */
1256   if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {
1257 
1258     DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);
1259 
1260     bitmap |= GETSOCK_READSOCK(sockindex);
1261     sock[sockindex] = conn->sockfd;
1262   }
1263 
1264   /* don't include HOLD and PAUSE connections */
1265   if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {
1266 
1267     if((conn->sockfd != conn->writesockfd) ||
1268        bitmap == GETSOCK_BLANK) {
1269       /* only if they are not the same socket and we have a readable
1270          one, we increase index */
1271       if(bitmap != GETSOCK_BLANK)
1272         sockindex++; /* increase index if we need two entries */
1273 
1274       DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);
1275 
1276       sock[sockindex] = conn->writesockfd;
1277     }
1278 
1279     bitmap |= GETSOCK_WRITESOCK(sockindex);
1280   }
1281 
1282   return bitmap;
1283 }
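/* Illustrative result (sketch): for a transfer currently both receiving and
   sending on one and the same socket, sock[0] is set to that socket and the
   returned bitmap is GETSOCK_READSOCK(0) | GETSOCK_WRITESOCK(0); with two
   distinct sockets, sock[0] gets the read socket, sock[1] the write socket
   and the bitmap is GETSOCK_READSOCK(0) | GETSOCK_WRITESOCK(1). */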
1284 
1285 /* Curl_init_CONNECT() gets called each time the handle switches to CONNECT
1286    which means this gets called once for each subsequent redirect etc */
1287 void Curl_init_CONNECT(struct Curl_easy *data)
1288 {
1289   data->state.fread_func = data->set.fread_func_set;
1290   data->state.in = data->set.in_set;
1291 }
1292 
1293 /*
1294  * Curl_pretransfer() is called immediately before a transfer starts, and only
1295  * once for one transfer no matter if it has redirects or do multi-pass
1296  * authentication etc.
1297  */
1298 CURLcode Curl_pretransfer(struct Curl_easy *data)
1299 {
1300   CURLcode result;
1301   if(!data->change.url) {
1302     /* we can't do anything without URL */
1303     failf(data, "No URL set!");
1304     return CURLE_URL_MALFORMAT;
1305   }
1306   /* since the URL may have been redirected in a previous use of this handle */
1307   if(data->change.url_alloc) {
1308     /* the already set URL is allocated, free it first! */
1309     Curl_safefree(data->change.url);
1310     data->change.url_alloc = FALSE;
1311   }
1312   data->change.url = data->set.str[STRING_SET_URL];
1313 
1314   /* Init the SSL session ID cache here. We do it here since we want to do it
1315      after the *_setopt() calls (that could specify the size of the cache) but
1316      before any transfer takes place. */
1317   result = Curl_ssl_initsessions(data, data->set.general_ssl.max_ssl_sessions);
1318   if(result)
1319     return result;
1320 
1321   data->state.wildcardmatch = data->set.wildcard_enabled;
1322   data->set.followlocation = 0; /* reset the location-follow counter */
1323   data->state.this_is_a_follow = FALSE; /* reset this */
1324   data->state.errorbuf = FALSE; /* no error has occurred */
1325   data->state.httpversion = 0; /* don't assume any particular server version */
1326 
1327   data->state.authproblem = FALSE;
1328   data->state.authhost.want = data->set.httpauth;
1329   data->state.authproxy.want = data->set.proxyauth;
1330   Curl_safefree(data->info.wouldredirect);
1331   data->info.wouldredirect = NULL;
1332 
1333   if(data->set.httpreq == HTTPREQ_PUT)
1334     data->state.infilesize = data->set.filesize;
1335   else {
1336     data->state.infilesize = data->set.postfieldsize;
1337     if(data->set.postfields && (data->state.infilesize == -1))
1338       data->state.infilesize = (curl_off_t)strlen(data->set.postfields);
1339   }
1340 
1341   /* If there is a list of cookie files to read, do it now! */
1342   if(data->change.cookielist)
1343     Curl_cookie_loadfiles(data);
1344 
1345   /* If there is a list of host pairs to deal with */
1346   if(data->change.resolve)
1347     result = Curl_loadhostpairs(data);
1348 
1349   if(!result) {
1350     /* Allow data->set.use_port to set which port to use. This needs to be
1351      * disabled for example when we follow Location: headers to URLs using
1352      * different ports! */
1353     data->state.allow_port = TRUE;
1354 
1355 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1356     /*************************************************************
1357      * Tell signal handler to ignore SIGPIPE
1358      *************************************************************/
1359     if(!data->set.no_signal)
1360       data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1361 #endif
1362 
1363     Curl_initinfo(data); /* reset session-specific information "variables" */
1364     Curl_pgrsResetTransferSizes(data);
1365     Curl_pgrsStartNow(data);
1366 
1367     if(data->set.timeout)
1368       Curl_expire(data, data->set.timeout, EXPIRE_TIMEOUT);
1369 
1370     if(data->set.connecttimeout)
1371       Curl_expire(data, data->set.connecttimeout, EXPIRE_CONNECTTIMEOUT);
1372 
1373     /* In case the handle is re-used and an authentication method was picked
1374        in the session we need to make sure we only use the one(s) we now
1375        consider to be fine */
1376     data->state.authhost.picked &= data->state.authhost.want;
1377     data->state.authproxy.picked &= data->state.authproxy.want;
1378 
1379     if(data->state.wildcardmatch) {
1380       struct WildcardData *wc = &data->wildcard;
1381       if(wc->state < CURLWC_INIT) {
1382         result = Curl_wildcard_init(wc); /* init wildcard structures */
1383         if(result)
1384           return CURLE_OUT_OF_MEMORY;
1385       }
1386     }
1387   }
1388 
1389   return result;
1390 }
1391 
1392 /*
1393  * Curl_posttransfer() is called immediately after a transfer ends
1394  */
1395 CURLcode Curl_posttransfer(struct Curl_easy *data)
1396 {
1397 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1398   /* restore the signal handler for SIGPIPE before we get back */
1399   if(!data->set.no_signal)
1400     signal(SIGPIPE, data->state.prev_signal);
1401 #else
1402   (void)data; /* unused parameter */
1403 #endif
1404 
1405   return CURLE_OK;
1406 }
1407 
1408 #ifndef CURL_DISABLE_HTTP
1409 /*
1410  * Find the separator at the end of the host name, or the '?' in cases like
1411  * http://www.url.com?id=2380
1412  */
1413 static const char *find_host_sep(const char *url)
1414 {
1415   const char *sep;
1416   const char *query;
1417 
1418   /* Find the start of the hostname */
1419   sep = strstr(url, "//");
1420   if(!sep)
1421     sep = url;
1422   else
1423     sep += 2;
1424 
1425   query = strchr(sep, '?');
1426   sep = strchr(sep, '/');
1427 
1428   if(!sep)
1429     sep = url + strlen(url);
1430 
1431   if(!query)
1432     query = url + strlen(url);
1433 
1434   return sep < query ? sep : query;
1435 }
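/* Examples (sketch): for "http://www.url.com/path?id=2380" this returns a
   pointer to the '/' before "path"; for "http://www.url.com?id=2380" (no
   slash after the host) it returns a pointer to the '?', and for
   "http://www.url.com" it points at the terminating zero byte. */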
1436 
1437 /*
1438  * strlen_url() returns the length of the given URL if the spaces within the
1439  * URL were properly URL encoded.
1440  * URL encoding should be skipped for host names, otherwise IDN resolution
1441  * will fail.
1442  */
1443 static size_t strlen_url(const char *url, bool relative)
1444 {
1445   const unsigned char *ptr;
1446   size_t newlen = 0;
1447   bool left = TRUE; /* left side of the ? */
1448   const unsigned char *host_sep = (const unsigned char *) url;
1449 
1450   if(!relative)
1451     host_sep = (const unsigned char *) find_host_sep(url);
1452 
1453   for(ptr = (unsigned char *)url; *ptr; ptr++) {
1454 
1455     if(ptr < host_sep) {
1456       ++newlen;
1457       continue;
1458     }
1459 
1460     switch(*ptr) {
1461     case '?':
1462       left = FALSE;
1463       /* fall through */
1464     default:
1465       if(*ptr >= 0x80)
1466         newlen += 2;
1467       newlen++;
1468       break;
1469     case ' ':
1470       if(left)
1471         newlen += 3;
1472       else
1473         newlen++;
1474       break;
1475     }
1476   }
1477   return newlen;
1478 }
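/* Example (sketch): for the relative input "/a b?q=c d", the space left of
   the '?' is counted as 3 bytes (it will be written as "%20") and the space
   right of it as 1 byte (later written as '+'), so strlen_url() reports 12
   for a 10-byte string and the caller can size its output buffer
   accordingly. */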
1479 
1480 /* strcpy_url() copies a url to a output buffer and URL-encodes the spaces in
1481  * the source URL accordingly.
1482  * URL encoding should be skipped for host names, otherwise IDN resolution
1483  * will fail.
1484  */
1485 static void strcpy_url(char *output, const char *url, bool relative)
1486 {
1487   /* we must add this with whitespace-replacing */
1488   bool left = TRUE;
1489   const unsigned char *iptr;
1490   char *optr = output;
1491   const unsigned char *host_sep = (const unsigned char *) url;
1492 
1493   if(!relative)
1494     host_sep = (const unsigned char *) find_host_sep(url);
1495 
1496   for(iptr = (unsigned char *)url;    /* read from here */
1497       *iptr;         /* until zero byte */
1498       iptr++) {
1499 
1500     if(iptr < host_sep) {
1501       *optr++ = *iptr;
1502       continue;
1503     }
1504 
1505     switch(*iptr) {
1506     case '?':
1507       left = FALSE;
1508       /* fall through */
1509     default:
1510       if(*iptr >= 0x80) {
1511         snprintf(optr, 4, "%%%02x", *iptr);
1512         optr += 3;
1513       }
1514       else
1515         *optr++=*iptr;
1516       break;
1517     case ' ':
1518       if(left) {
1519         *optr++='%'; /* add a '%' */
1520         *optr++='2'; /* add a '2' */
1521         *optr++='0'; /* add a '0' */
1522       }
1523       else
1524         *optr++='+'; /* add a '+' here */
1525       break;
1526     }
1527   }
1528   *optr = 0; /* zero terminate output buffer */
1529 
1530 }
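/* Continuing the sketch above: strcpy_url() writes "/a b?q=c d" as
   "/a%20b?q=c+d": spaces left of the '?' become "%20", spaces right of it
   become '+', and any byte >= 0x80 becomes a three-byte "%xx" escape, which
   is exactly what strlen_url() accounted for. */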
1531 
1532 /*
1533  * Returns true if the given URL is absolute (as opposed to relative)
1534  */
1535 static bool is_absolute_url(const char *url)
1536 {
1537   char prot[16]; /* URL protocol string storage */
1538   char letter;   /* used for a silly sscanf */
1539 
1540   return (2 == sscanf(url, "%15[^?&/:]://%c", prot, &letter)) ? TRUE : FALSE;
1541 }
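
/*
 * A few illustrative inputs for is_absolute_url(); the sscanf() pattern
 * above matches "<scheme>://<at least one more character>". The URLs and the
 * helper name are made up for illustration only.
 */
#if 0 /* illustrative sketch, not compiled */
static void is_absolute_url_demo(void)
{
  DEBUGASSERT(is_absolute_url("http://example.com/") == TRUE);
  DEBUGASSERT(is_absolute_url("ftp://example.com") == TRUE);
  DEBUGASSERT(is_absolute_url("/moo/index.html") == FALSE); /* no scheme */
  DEBUGASSERT(is_absolute_url("../one/two.html") == FALSE); /* relative */
}
#endif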
1542 
1543 /*
1544  * Concatenate a relative URL to a base URL making it absolute.
1545  * URL-encodes any spaces.
1546  * The returned pointer must be freed by the caller unless NULL
1547  * (returns NULL on out of memory).
1548  */
1549 static char *concat_url(const char *base, const char *relurl)
1550 {
1551   /***
1552    TRY to append this new path to the old URL
1553    to the right of the host part. Oh crap, this is doomed to cause
1554    problems in the future...
1555   */
1556   char *newest;
1557   char *protsep;
1558   char *pathsep;
1559   size_t newlen;
1560   bool host_changed = FALSE;
1561 
1562   const char *useurl = relurl;
1563   size_t urllen;
1564 
1565   /* we must make our own copy of the URL to play with, as it may
1566      point to read-only data */
1567   char *url_clone = strdup(base);
1568 
1569   if(!url_clone)
1570     return NULL; /* skip out of this NOW */
1571 
1572   /* protsep points to the start of the host name */
1573   protsep = strstr(url_clone, "//");
1574   if(!protsep)
1575     protsep = url_clone;
1576   else
1577     protsep += 2; /* pass the slashes */
1578 
1579   if('/' != relurl[0]) {
1580     int level = 0;
1581 
1582     /* First we need to find out if there's a '?' in the URL,
1583        and cut it and everything to the right of it off */
1584     pathsep = strchr(protsep, '?');
1585     if(pathsep)
1586       *pathsep = 0;
1587 
1588     /* we have a relative path to append after the last slash if there is
1589        one available, but if the new URL is just a query string (starts
1590        with a '?') we append it at the end of the entire URL worked out
1591        so far */
1592     if(useurl[0] != '?') {
1593       pathsep = strrchr(protsep, '/');
1594       if(pathsep)
1595         *pathsep = 0;
1596     }
1597 
1598     /* Check if there's any slash after the host name, and if so, remember
1599        that position instead */
1600     pathsep = strchr(protsep, '/');
1601     if(pathsep)
1602       protsep = pathsep + 1;
1603     else
1604       protsep = NULL;
1605 
1606     /* now deal with one "./" or any amount of "../" in the newurl
1607        and act accordingly */
1608 
1609     if((useurl[0] == '.') && (useurl[1] == '/'))
1610       useurl += 2; /* just skip the "./" */
1611 
1612     while((useurl[0] == '.') &&
1613           (useurl[1] == '.') &&
1614           (useurl[2] == '/')) {
1615       level++;
1616       useurl += 3; /* pass the "../" */
1617     }
1618 
1619     if(protsep) {
1620       while(level--) {
1621         /* cut off one more level from the right of the original URL */
1622         pathsep = strrchr(protsep, '/');
1623         if(pathsep)
1624           *pathsep = 0;
1625         else {
1626           *protsep = 0;
1627           break;
1628         }
1629       }
1630     }
1631   }
1632   else {
1633     /* We got a new absolute path for this server */
1634 
1635     if((relurl[0] == '/') && (relurl[1] == '/')) {
1636       /* the new URL starts with //, just keep the protocol part from the
1637          original one */
1638       *protsep = 0;
1639       useurl = &relurl[2]; /* we keep the slashes from the original, so we
1640                               skip the new ones */
1641       host_changed = TRUE;
1642     }
1643     else {
1644       /* cut off the original URL from the first slash, or deal with URLs
1645          without slash */
1646       pathsep = strchr(protsep, '/');
1647       if(pathsep) {
1648         /* When people use badly formatted URLs, such as
1649            "http://www.url.com?dir=/home/daniel" we must not use the first
1650            slash, if there's a ?-letter before it! */
1651         char *sep = strchr(protsep, '?');
1652         if(sep && (sep < pathsep))
1653           pathsep = sep;
1654         *pathsep = 0;
1655       }
1656       else {
1657         /* There was no slash. Now, since we might be operating on a badly
1658            formatted URL, such as "http://www.url.com?id=2380" which doesn't
1659            use a slash separator as it is supposed to, we need to check for a
1660            ?-letter as well! */
1661         pathsep = strchr(protsep, '?');
1662         if(pathsep)
1663           *pathsep = 0;
1664       }
1665     }
1666   }
1667 
1668   /* If the new part contains a space, this is a mighty stupid redirect
1669      but we still make an effort to do "right". To the left of a '?'
1670      letter we replace each space with %20 while it is replaced with '+'
1671      on the right side of the '?' letter.
1672   */
1673   newlen = strlen_url(useurl, !host_changed);
1674 
1675   urllen = strlen(url_clone);
1676 
1677   newest = malloc(urllen + 1 + /* possible slash */
1678                   newlen + 1 /* zero byte */);
1679 
1680   if(!newest) {
1681     free(url_clone); /* don't leak this */
1682     return NULL;
1683   }
1684 
1685   /* copy over the root url part */
1686   memcpy(newest, url_clone, urllen);
1687 
1688   /* check if we need to append a slash */
1689   if(('/' == useurl[0]) || (protsep && !*protsep) || ('?' == useurl[0]))
1690     ;
1691   else
1692     newest[urllen++]='/';
1693 
1694   /* then append the new piece on the right side */
1695   strcpy_url(&newest[urllen], useurl, !host_changed);
1696 
1697   free(url_clone);
1698 
1699   return newest;
1700 }
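
/*
 * A minimal sketch of concat_url() behaviour with made-up URLs, matching the
 * path-walking rules above; the returned strings must be free()d. The helper
 * name is invented for illustration.
 */
#if 0 /* illustrative sketch, not compiled */
static void concat_url_demo(void)
{
  /* relative path: replaces the last path segment */
  char *a = concat_url("http://example.com/dir/page.html", "other.html");
  /* a == "http://example.com/dir/other.html" */

  /* "../" steps one directory level up */
  char *b = concat_url("http://example.com/dir/sub/page.html", "../top.html");
  /* b == "http://example.com/dir/top.html" */

  /* leading '/' means an absolute path on the same server */
  char *c = concat_url("http://example.com/dir/page.html", "/root.html");
  /* c == "http://example.com/root.html" */

  free(a);
  free(b);
  free(c);
}
#endif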
1701 #endif /* CURL_DISABLE_HTTP */
1702 
1703 /*
1704  * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
1705  * as given by the remote server and set up the new URL to request.
1706  */
Curl_follow(struct Curl_easy * data,char * newurl,followtype type)1707 CURLcode Curl_follow(struct Curl_easy *data,
1708                      char *newurl,    /* the Location: string */
1709                      followtype type) /* see transfer.h */
1710 {
1711 #ifdef CURL_DISABLE_HTTP
1712   (void)data;
1713   (void)newurl;
1714   (void)type;
1715   /* Location: following will not happen when HTTP is disabled */
1716   return CURLE_TOO_MANY_REDIRECTS;
1717 #else
1718 
1719   /* Location: redirect */
1720   bool disallowport = FALSE;
1721   bool reachedmax = FALSE;
1722 
1723   if(type == FOLLOW_REDIR) {
1724     if((data->set.maxredirs != -1) &&
1725        (data->set.followlocation >= data->set.maxredirs)) {
1726       reachedmax = TRUE;
1727       type = FOLLOW_FAKE; /* switch to fake so that we only store the
1728                              would-be-redirected-to URL */
1729     }
1730     else {
1731       /* mark the next request as a followed location: */
1732       data->state.this_is_a_follow = TRUE;
1733 
1734       data->set.followlocation++; /* count location-followers */
1735 
1736       if(data->set.http_auto_referer) {
1737         /* We are asked to automatically set the previous URL as the referer
1738            when we get the next URL. We pick the ->url field, which may or may
1739            not be 100% correct */
1740 
1741         if(data->change.referer_alloc) {
1742           Curl_safefree(data->change.referer);
1743           data->change.referer_alloc = FALSE;
1744         }
1745 
1746         data->change.referer = strdup(data->change.url);
1747         if(!data->change.referer)
1748           return CURLE_OUT_OF_MEMORY;
1749         data->change.referer_alloc = TRUE; /* yes, free this later */
1750       }
1751     }
1752   }
1753 
1754   if(!is_absolute_url(newurl)) {
1755     /***
1756      *DANG* this is an RFC 2068 violation. The URL is supposed
1757      to be absolute and this doesn't seem to be that!
1758      */
1759     char *absolute = concat_url(data->change.url, newurl);
1760     if(!absolute)
1761       return CURLE_OUT_OF_MEMORY;
1762     newurl = absolute;
1763   }
1764   else {
1765     /* The new URL MAY contain spaces or high-byte values; that means a
1766        mighty stupid redirect URL but we still make an effort to do "right". */
1767     char *newest;
1768     size_t newlen = strlen_url(newurl, FALSE);
1769 
1770     /* This is an absolute URL, don't allow the custom port number */
1771     disallowport = TRUE;
1772 
1773     newest = malloc(newlen + 1); /* get memory for this */
1774     if(!newest)
1775       return CURLE_OUT_OF_MEMORY;
1776 
1777     strcpy_url(newest, newurl, FALSE); /* create a space-free URL */
1778     newurl = newest; /* use this instead now */
1779 
1780   }
1781 
1782   if(type == FOLLOW_FAKE) {
1783     /* we're only figuring out the new URL as if we would've followed the
1784        location, but now we're done so we can get out! */
1785     data->info.wouldredirect = newurl;
1786 
1787     if(reachedmax) {
1788       failf(data, "Maximum (%ld) redirects followed", data->set.maxredirs);
1789       return CURLE_TOO_MANY_REDIRECTS;
1790     }
1791     return CURLE_OK;
1792   }
1793 
1794   if(disallowport)
1795     data->state.allow_port = FALSE;
1796 
1797   if(data->change.url_alloc) {
1798     Curl_safefree(data->change.url);
1799     data->change.url_alloc = FALSE;
1800   }
1801 
1802   data->change.url = newurl;
1803   data->change.url_alloc = TRUE;
1804 
1805   infof(data, "Issue another request to this URL: '%s'\n", data->change.url);
1806 
1807   /*
1808    * We get here when the HTTP code is 300-399 (and 401). We need to act
1809    * differently based on exactly which return code we got.
1810    *
1811    * News from 7.10.6: we can also get here on a 401 or 407, in case we act
1812    * on an HTTP (proxy-) authentication scheme other than Basic.
1813    */
1814   switch(data->info.httpcode) {
1815     /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
1816        Authorization: XXXX header in the HTTP request code snippet */
1817     /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
1818        Proxy-Authorization: XXXX header in the HTTP request code snippet */
1819     /* 300 - Multiple Choices */
1820     /* 306 - Not used */
1821     /* 307 - Temporary Redirect */
1822   default:  /* for all above (and the unknown ones) */
1823     /* Some codes are explicitly mentioned since I've checked RFC2616 and they
1824      * seem to be OK to POST to.
1825      */
1826     break;
1827   case 301: /* Moved Permanently */
1828     /* (quote from RFC7231, section 6.4.2)
1829      *
1830      * Note: For historical reasons, a user agent MAY change the request
1831      * method from POST to GET for the subsequent request.  If this
1832      * behavior is undesired, the 307 (Temporary Redirect) status code
1833      * can be used instead.
1834      *
1835      * ----
1836      *
1837      * Many webservers expect this, so these servers often answer a POST
1838      * request with an error page. To be sure that libcurl gets the page that
1839      * most user agents would get, libcurl has to force GET.
1840      *
1841      * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
1842      * can be overridden with CURLOPT_POSTREDIR.
1843      */
1844     if((data->set.httpreq == HTTPREQ_POST
1845         || data->set.httpreq == HTTPREQ_POST_FORM
1846         || data->set.httpreq == HTTPREQ_POST_MIME)
1847        && !(data->set.keep_post & CURL_REDIR_POST_301)) {
1848       infof(data, "Switch from POST to GET\n");
1849       data->set.httpreq = HTTPREQ_GET;
1850     }
1851     break;
1852   case 302: /* Found */
1853     /* (quote from RFC7231, section 6.4.3)
1854      *
1855      * Note: For historical reasons, a user agent MAY change the request
1856      * method from POST to GET for the subsequent request.  If this
1857      * behavior is undesired, the 307 (Temporary Redirect) status code
1858      * can be used instead.
1859      *
1860      * ----
1861      *
1862      * Many webservers expect this, so these servers often answer a POST
1863      * request with an error page. To be sure that libcurl gets the page that
1864      * most user agents would get, libcurl has to force GET.
1865      *
1866      * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
1867      * can be overridden with CURLOPT_POSTREDIR.
1868      */
1869     if((data->set.httpreq == HTTPREQ_POST
1870         || data->set.httpreq == HTTPREQ_POST_FORM
1871         || data->set.httpreq == HTTPREQ_POST_MIME)
1872        && !(data->set.keep_post & CURL_REDIR_POST_302)) {
1873       infof(data, "Switch from POST to GET\n");
1874       data->set.httpreq = HTTPREQ_GET;
1875     }
1876     break;
1877 
1878   case 303: /* See Other */
1879     /* Disable both types of POSTs, unless the user explicitly
1880        asks for POST after POST */
1881     if(data->set.httpreq != HTTPREQ_GET
1882       && !(data->set.keep_post & CURL_REDIR_POST_303)) {
1883       data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
1884       infof(data, "Disables POST, goes with %s\n",
1885             data->set.opt_no_body?"HEAD":"GET");
1886     }
1887     break;
1888   case 304: /* Not Modified */
1889     /* 304 means we did a conditional request and it was "Not modified".
1890      * We shouldn't get any Location: header in this response!
1891      */
1892     break;
1893   case 305: /* Use Proxy */
1894     /* (quote from RFC2616, section 10.3.6):
1895      * "The requested resource MUST be accessed through the proxy given
1896      * by the Location field. The Location field gives the URI of the
1897      * proxy.  The recipient is expected to repeat this single request
1898      * via the proxy. 305 responses MUST only be generated by origin
1899      * servers."
1900      */
1901     break;
1902   }
1903   Curl_pgrsTime(data, TIMER_REDIRECT);
1904   Curl_pgrsResetTransferSizes(data);
1905 
1906   return CURLE_OK;
1907 #endif /* CURL_DISABLE_HTTP */
1908 }
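
/*
 * How this redirect logic is steered from the application side: a hedged
 * sketch using the public easy API. CURLOPT_FOLLOWLOCATION lets Curl_follow()
 * act on Location: headers, CURLOPT_MAXREDIRS caps the redirect count checked
 * above, and CURLOPT_POSTREDIR sets the CURL_REDIR_POST_* bits that keep the
 * method as POST across 301/302/303. The URL, form body and function name
 * below are made up for illustration.
 */
#if 0 /* illustrative sketch, not compiled */
static CURLcode follow_redirects_demo(void)
{
  CURLcode result;
  CURL *easy = curl_easy_init();
  if(!easy)
    return CURLE_OUT_OF_MEMORY;

  curl_easy_setopt(easy, CURLOPT_URL, "http://example.com/form");
  curl_easy_setopt(easy, CURLOPT_POSTFIELDS, "name=daniel");

  curl_easy_setopt(easy, CURLOPT_FOLLOWLOCATION, 1L); /* follow Location: */
  curl_easy_setopt(easy, CURLOPT_MAXREDIRS, 5L);      /* 'reachedmax' at 5 */

  /* keep the method as POST even after 301 and 302 responses */
  curl_easy_setopt(easy, CURLOPT_POSTREDIR,
                   (long)(CURL_REDIR_POST_301 | CURL_REDIR_POST_302));

  result = curl_easy_perform(easy);
  curl_easy_cleanup(easy);
  return result;
}
#endif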
1909 
1910 /* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.
1911 
1912    NOTE: that the *url is malloc()ed. */
Curl_retry_request(struct connectdata * conn,char ** url)1913 CURLcode Curl_retry_request(struct connectdata *conn,
1914                             char **url)
1915 {
1916   struct Curl_easy *data = conn->data;
1917 
1918   *url = NULL;
1919 
1920   /* if we're uploading, we can't do the checks below, unless the protocol
1921      is HTTP, since an upload over HTTP will still get us a response */
1922   if(data->set.upload &&
1923      !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
1924     return CURLE_OK;
1925 
1926   if((data->req.bytecount + data->req.headerbytecount == 0) &&
1927       conn->bits.reuse &&
1928       (!data->set.opt_no_body
1929         || (conn->handler->protocol & PROTO_FAMILY_HTTP)) &&
1930       (data->set.rtspreq != RTSPREQ_RECEIVE)) {
1931     /* We got no data and we attempted to re-use a connection. For HTTP this
1932        can be a retry, so we try again regardless of whether we expected a
1933        body. For other protocols we try again only if we expected a body.
1934 
1935        This might happen if the connection was left alive when we were
1936        done using it before, but that was closed when we wanted to read from
1937        it again. Bad luck. Retry the same request on a fresh connect! */
1938     infof(conn->data, "Connection died, retrying a fresh connect\n");
1939     *url = strdup(conn->data->change.url);
1940     if(!*url)
1941       return CURLE_OUT_OF_MEMORY;
1942 
1943     connclose(conn, "retry"); /* close this connection */
1944     conn->bits.retry = TRUE; /* mark this as a connection we're about
1945                                 to retry. Marking it this way should
1946                                 prevent e.g. HTTP transfers from
1947                                 returning an error just because nothing
1948                                 has been transferred! */
1949 
1950 
1951     if(conn->handler->protocol&PROTO_FAMILY_HTTP) {
1952       struct HTTP *http = data->req.protop;
1953       if(http->writebytecount)
1954         return Curl_readrewind(conn);
1955     }
1956   }
1957   return CURLE_OK;
1958 }
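
/*
 * How a caller is meant to use the contract described above: a hedged,
 * made-up sketch (the real libcurl caller does more than this). A non-NULL
 * *url means "retry this request on a fresh connection" and ownership of the
 * malloc()ed string passes to the caller.
 */
#if 0 /* illustrative sketch, not compiled */
static CURLcode maybe_retry_demo(struct connectdata *conn)
{
  char *newurl = NULL;
  CURLcode result = Curl_retry_request(conn, &newurl);
  if(result)
    return result;            /* e.g. CURLE_OUT_OF_MEMORY */

  if(newurl) {
    /* a retry is wanted: hand the URL over to whatever re-issues the
       request, or free it if we decide not to */
    free(newurl);
  }
  return CURLE_OK;
}
#endif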
1959 
1960 /*
1961  * Curl_setup_transfer() is called to setup some basic properties for the
1962  * upcoming transfer.
1963  */
1964 void
Curl_setup_transfer(struct connectdata * conn,int sockindex,curl_off_t size,bool getheader,curl_off_t * bytecountp,int writesockindex,curl_off_t * writecountp)1965 Curl_setup_transfer(
1966   struct connectdata *conn, /* connection data */
1967   int sockindex,            /* socket index to read from or -1 */
1968   curl_off_t size,          /* -1 if unknown at this point */
1969   bool getheader,           /* TRUE if header parsing is wanted */
1970   curl_off_t *bytecountp,   /* return number of bytes read or NULL */
1971   int writesockindex,       /* socket index to write to, it may very well be
1972                                the same we read from. -1 disables */
1973   curl_off_t *writecountp   /* return number of bytes written or NULL */
1974   )
1975 {
1976   struct Curl_easy *data;
1977   struct SingleRequest *k;
1978 
1979   DEBUGASSERT(conn != NULL);
1980 
1981   data = conn->data;
1982   k = &data->req;
1983 
1984   DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));
1985 
1986   /* now copy all input parameters */
1987   conn->sockfd = sockindex == -1 ?
1988       CURL_SOCKET_BAD : conn->sock[sockindex];
1989   conn->writesockfd = writesockindex == -1 ?
1990       CURL_SOCKET_BAD:conn->sock[writesockindex];
1991   k->getheader = getheader;
1992 
1993   k->size = size;
1994   k->bytecountp = bytecountp;
1995   k->writebytecountp = writecountp;
1996 
1997   /* The code sequence below is placed in this function just because all
1998      the necessary input is not always known in do_complete(), as this
1999      function may be called after it */
2000 
2001   if(!k->getheader) {
2002     k->header = FALSE;
2003     if(size > 0)
2004       Curl_pgrsSetDownloadSize(data, size);
2005   }
2006   /* we want header and/or body, if neither then don't do this! */
2007   if(k->getheader || !data->set.opt_no_body) {
2008 
2009     if(conn->sockfd != CURL_SOCKET_BAD)
2010       k->keepon |= KEEP_RECV;
2011 
2012     if(conn->writesockfd != CURL_SOCKET_BAD) {
2013       struct HTTP *http = data->req.protop;
2014       /* HTTP 1.1 magic:
2015 
2016          Even if we require a 100-continue response before uploading data, we
2017          might need to write data before that since the request may not have
2018          been completely sent off just yet.
2019 
2020          Thus, we must check if the request has been sent before we set the
2021          state info where we wait for the 100-return code
2022       */
2023       if((data->state.expect100header) &&
2024          (conn->handler->protocol&PROTO_FAMILY_HTTP) &&
2025          (http->sending == HTTPSEND_BODY)) {
2026         /* wait with write until we either got 100-continue or a timeout */
2027         k->exp100 = EXP100_AWAITING_CONTINUE;
2028         k->start100 = Curl_now();
2029 
2030         /* Set a timeout for the multi interface. Add the inaccuracy margin so
2031            that we don't fire slightly too early and get denied a chance to run. */
2032         Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
2033       }
2034       else {
2035         if(data->state.expect100header)
2036           /* when we've sent off the rest of the headers, we must await a
2037              100-continue but first finish sending the request */
2038           k->exp100 = EXP100_SENDING_REQUEST;
2039 
2040         /* enable the write bit when we're not waiting for continue */
2041         k->keepon |= KEEP_SEND;
2042       }
2043     } /* if(conn->writesockfd != CURL_SOCKET_BAD) */
2044   } /* if(k->getheader || !data->set.opt_no_body) */
2045 
2046 }
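
/*
 * The 100-continue wait set up above expires after
 * data->set.expect_100_timeout milliseconds; from the application side that
 * value comes from CURLOPT_EXPECT_100_TIMEOUT_MS. A hedged sketch with a
 * made-up function name and timeout value:
 */
#if 0 /* illustrative sketch, not compiled */
static void expect100_demo(CURL *easy)
{
  /* send the request body after at most 500 ms even if no "100 Continue"
     response arrived, instead of the built-in default wait */
  curl_easy_setopt(easy, CURLOPT_EXPECT_100_TIMEOUT_MS, 500L);
}
#endif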
2047