transfer.c
Go to the documentation of this file.
00001 /***************************************************************************
00002  *                                  _   _ ____  _
00003  *  Project                     ___| | | |  _ \| |
00004  *                             / __| | | | |_) | |
00005  *                            | (__| |_| |  _ <| |___
00006  *                             \___|\___/|_| \_\_____|
00007  *
00008  * Copyright (C) 1998 - 2016, Daniel Stenberg, <daniel@haxx.se>, et al.
00009  *
00010  * This software is licensed as described in the file COPYING, which
00011  * you should have received as part of this distribution. The terms
00012  * are also available at https://curl.haxx.se/docs/copyright.html.
00013  *
00014  * You may opt to use, copy, modify, merge, publish, distribute and/or sell
00015  * copies of the Software, and permit persons to whom the Software is
00016  * furnished to do so, under the terms of the COPYING file.
00017  *
00018  * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
00019  * KIND, either express or implied.
00020  *
00021  ***************************************************************************/
00022 
00023 #include "curl_setup.h"
00024 #include "strtoofft.h"
00025 
00026 #ifdef HAVE_NETINET_IN_H
00027 #include <netinet/in.h>
00028 #endif
00029 #ifdef HAVE_NETDB_H
00030 #include <netdb.h>
00031 #endif
00032 #ifdef HAVE_ARPA_INET_H
00033 #include <arpa/inet.h>
00034 #endif
00035 #ifdef HAVE_NET_IF_H
00036 #include <net/if.h>
00037 #endif
00038 #ifdef HAVE_SYS_IOCTL_H
00039 #include <sys/ioctl.h>
00040 #endif
00041 #ifdef HAVE_SIGNAL_H
00042 #include <signal.h>
00043 #endif
00044 
00045 #ifdef HAVE_SYS_PARAM_H
00046 #include <sys/param.h>
00047 #endif
00048 
00049 #ifdef HAVE_SYS_SELECT_H
00050 #include <sys/select.h>
00051 #endif
00052 
00053 #ifndef HAVE_SOCKET
00054 #error "We can't compile without socket() support!"
00055 #endif
00056 
00057 #include "urldata.h"
00058 #include <curl/curl.h>
00059 #include "netrc.h"
00060 
00061 #include "content_encoding.h"
00062 #include "hostip.h"
00063 #include "transfer.h"
00064 #include "sendf.h"
00065 #include "speedcheck.h"
00066 #include "progress.h"
00067 #include "http.h"
00068 #include "url.h"
00069 #include "getinfo.h"
00070 #include "vtls/vtls.h"
00071 #include "select.h"
00072 #include "multiif.h"
00073 #include "connect.h"
00074 #include "non-ascii.h"
00075 #include "http2.h"
00076 
00077 /* The last 3 #include files should be in this order */
00078 #include "curl_printf.h"
00079 #include "curl_memory.h"
00080 #include "memdebug.h"
00081 
/*
 * This function will call the read callback to fill our buffer with data
 * to upload.
 *
 * 'bytes' is the maximum number of bytes to produce. On success *nreadp
 * holds the number of bytes stored at data->req.upload_fromhere, including
 * any chunked-encoding framing added here. Returns CURLE_OK, or
 * CURLE_ABORTED_BY_CALLBACK / CURLE_READ_ERROR on callback failure.
 */
CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
{
  struct Curl_easy *data = conn->data;
  size_t buffersize = (size_t)bytes;
  int nread;
#ifdef CURL_DOES_CONVERSIONS
  bool sending_http_headers = FALSE;

  if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
    const struct HTTP *http = data->req.protop;

    if(http->sending == HTTPSEND_REQUEST)
      /* We're sending the HTTP request headers, not the data.
         Remember that so we don't re-translate them into garbage. */
      sending_http_headers = TRUE;
  }
#endif

  if(data->req.upload_chunky) {
    /* if chunked Transfer-Encoding: reserve room in front of the buffer for
       the chunk-size prefix and room at the end for the trailing CRLF */
    buffersize -= (8 + 2 + 2);   /* 32bit hex + CRLF + CRLF */
    data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
  }

  /* this function returns a size_t, so we typecast to int to prevent warnings
     with picky compilers */
  nread = (int)data->state.fread_func(data->req.upload_fromhere, 1,
                                      buffersize, data->state.in);

  if(nread == CURL_READFUNC_ABORT) {
    failf(data, "operation aborted by callback");
    *nreadp = 0;
    return CURLE_ABORTED_BY_CALLBACK;
  }
  else if(nread == CURL_READFUNC_PAUSE) {

    if(conn->handler->flags & PROTOPT_NONETWORK) {
      /* protocols that work without network cannot be paused. This is
         actually only FILE:// just now, and it can't pause since the transfer
         isn't done using the "normal" procedure. */
      failf(data, "Read callback asked for PAUSE when not supported!");
      return CURLE_READ_ERROR;
    }
    else {
      struct SingleRequest *k = &data->req;
      /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
      k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
      if(data->req.upload_chunky) {
        /* Back out the preallocation done above */
        data->req.upload_fromhere -= (8 + 2);
      }
      *nreadp = 0;
    }
    return CURLE_OK; /* nothing was read */
  }
  else if((size_t)nread > buffersize) {
    /* the read function returned a too large value */
    *nreadp = 0;
    failf(data, "read function returned funny value");
    return CURLE_READ_ERROR;
  }

  if(!data->req.forbidchunk && data->req.upload_chunky) {
    /* if chunked Transfer-Encoding
     *    build chunk:
     *
     *        <HEX SIZE> CRLF
     *        <DATA> CRLF
     */
    /* On non-ASCII platforms the <DATA> may or may not be
       translated based on set.prefer_ascii while the protocol
       portion must always be translated to the network encoding.
       To further complicate matters, line end conversion might be
       done later on, so we need to prevent CRLFs from becoming
       CRCRLFs if that's the case.  To do this we use bare LFs
       here, knowing they'll become CRLFs later on.
     */

    /* 8 hex digits + up to 2 end-of-line chars + NUL */
    char hexbuffer[11];
    const char *endofline_native;
    const char *endofline_network;
    int hexlen;

    if(
#ifdef CURL_DO_LINEEND_CONV
       (data->set.prefer_ascii) ||
#endif
       (data->set.crlf)) {
      /* \n will become \r\n later on */
      endofline_native  = "\n";
      endofline_network = "\x0a";
    }
    else {
      endofline_native  = "\r\n";
      endofline_network = "\x0d\x0a";
    }
    hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
                      "%x%s", nread, endofline_native);

    /* move buffer pointer back over the space reserved above so the size
       prefix sits directly in front of the data */
    data->req.upload_fromhere -= hexlen;
    nread += hexlen;

    /* copy the prefix to the buffer, leaving out the NUL */
    memcpy(data->req.upload_fromhere, hexbuffer, hexlen);

    /* always append ASCII CRLF to the data */
    memcpy(data->req.upload_fromhere + nread,
           endofline_network,
           strlen(endofline_network));

#ifdef CURL_DOES_CONVERSIONS
    CURLcode result;
    int length;
    if(data->set.prefer_ascii) {
      /* translate the protocol and data */
      length = nread;
    }
    else {
      /* just translate the protocol portion */
      length = strlen(hexbuffer);
    }
    result = Curl_convert_to_network(data, data->req.upload_fromhere, length);
    /* Curl_convert_to_network calls failf if unsuccessful */
    if(result)
      return result;
#endif /* CURL_DOES_CONVERSIONS */

    if((nread - hexlen) == 0)
      /* the callback returned 0 bytes: this zero-length chunk terminates the
         chunked body, so mark the upload done once it is transferred */
      data->req.upload_done = TRUE;

    nread+=(int)strlen(endofline_native); /* for the added end of line */
  }
#ifdef CURL_DOES_CONVERSIONS
  else if((data->set.prefer_ascii) && (!sending_http_headers)) {
    CURLcode result;
    result = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
    /* Curl_convert_to_network calls failf if unsuccessful */
    if(result)
      return result;
  }
#endif /* CURL_DOES_CONVERSIONS */

  *nreadp = nread;

  return CURLE_OK;
}
00234 
00235 
00236 /*
00237  * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
00238  * POST/PUT with multi-pass authentication when a sending was denied and a
00239  * resend is necessary.
00240  */
00241 CURLcode Curl_readrewind(struct connectdata *conn)
00242 {
00243   struct Curl_easy *data = conn->data;
00244 
00245   conn->bits.rewindaftersend = FALSE; /* we rewind now */
00246 
00247   /* explicitly switch off sending data on this connection now since we are
00248      about to restart a new transfer and thus we want to avoid inadvertently
00249      sending more data on the existing connection until the next transfer
00250      starts */
00251   data->req.keepon &= ~KEEP_SEND;
00252 
00253   /* We have sent away data. If not using CURLOPT_POSTFIELDS or
00254      CURLOPT_HTTPPOST, call app to rewind
00255   */
00256   if(data->set.postfields ||
00257      (data->set.httpreq == HTTPREQ_POST_FORM))
00258     ; /* do nothing */
00259   else {
00260     if(data->set.seek_func) {
00261       int err;
00262 
00263       err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
00264       if(err) {
00265         failf(data, "seek callback returned error %d", (int)err);
00266         return CURLE_SEND_FAIL_REWIND;
00267       }
00268     }
00269     else if(data->set.ioctl_func) {
00270       curlioerr err;
00271 
00272       err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
00273                                    data->set.ioctl_client);
00274       infof(data, "the ioctl callback returned %d\n", (int)err);
00275 
00276       if(err) {
00277         /* FIXME: convert to a human readable error message */
00278         failf(data, "ioctl callback returned error %d", (int)err);
00279         return CURLE_SEND_FAIL_REWIND;
00280       }
00281     }
00282     else {
00283       /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
00284          given FILE * stream and we can actually attempt to rewind that
00285          ourselves with fseek() */
00286       if(data->state.fread_func == (curl_read_callback)fread) {
00287         if(-1 != fseek(data->state.in, 0, SEEK_SET))
00288           /* successful rewind */
00289           return CURLE_OK;
00290       }
00291 
00292       /* no callback set or failure above, makes us fail at once */
00293       failf(data, "necessary data rewind wasn't possible");
00294       return CURLE_SEND_FAIL_REWIND;
00295     }
00296   }
00297   return CURLE_OK;
00298 }
00299 
/* Return nonzero when the connection may still hold buffered, unread data
   so the caller's read loop should keep draining it. */
static int data_pending(const struct connectdata *conn)
{
  /* in the case of libssh2, we can never be really sure that we have emptied
     its internal buffers so we MUST always try until we get EAGAIN back */
  return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
#if defined(USE_NGHTTP2)
    Curl_ssl_data_pending(conn, FIRSTSOCKET) ||
    /* For HTTP/2, we may read up everything including response body
       with header fields in Curl_http_readwrite_headers. If no
       content-length is provided, curl waits for the connection
       close, which we emulate it using conn->proto.httpc.closed =
       TRUE. The thing is if we read everything, then http2_recv won't
       be called and we cannot signal the HTTP/2 stream has closed. As
       a workaround, we return nonzero here to call http2_recv. */
    ((conn->handler->protocol&PROTO_FAMILY_HTTP) && conn->httpversion == 20);
#else
    Curl_ssl_data_pending(conn, FIRSTSOCKET);
#endif
}
00319 
00320 static void read_rewind(struct connectdata *conn,
00321                         size_t thismuch)
00322 {
00323   DEBUGASSERT(conn->read_pos >= thismuch);
00324 
00325   conn->read_pos -= thismuch;
00326   conn->bits.stream_was_rewound = TRUE;
00327 
00328 #ifdef DEBUGBUILD
00329   {
00330     char buf[512 + 1];
00331     size_t show;
00332 
00333     show = CURLMIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
00334     if(conn->master_buffer) {
00335       memcpy(buf, conn->master_buffer + conn->read_pos, show);
00336       buf[show] = '\0';
00337     }
00338     else {
00339       buf[0] = '\0';
00340     }
00341 
00342     DEBUGF(infof(conn->data,
00343                  "Buffer after stream rewind (read_pos = %zu): [%s]\n",
00344                  conn->read_pos, buf));
00345   }
00346 #endif
00347 }
00348 
00349 /*
00350  * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
00351  * remote document with the time provided by CURLOPT_TIMEVAL
00352  */
00353 bool Curl_meets_timecondition(struct Curl_easy *data, time_t timeofdoc)
00354 {
00355   if((timeofdoc == 0) || (data->set.timevalue == 0))
00356     return TRUE;
00357 
00358   switch(data->set.timecondition) {
00359   case CURL_TIMECOND_IFMODSINCE:
00360   default:
00361     if(timeofdoc <= data->set.timevalue) {
00362       infof(data,
00363             "The requested document is not new enough\n");
00364       data->info.timecond = TRUE;
00365       return FALSE;
00366     }
00367     break;
00368   case CURL_TIMECOND_IFUNMODSINCE:
00369     if(timeofdoc >= data->set.timevalue) {
00370       infof(data,
00371             "The requested document is not old enough\n");
00372       data->info.timecond = TRUE;
00373       return FALSE;
00374     }
00375     break;
00376   }
00377 
00378   return TRUE;
00379 }
00380 
/*
 * Go ahead and do a read if we have a readable socket or if
 * the stream was rewound (in which case we have data in a
 * buffer)
 *
 * return '*comeback' TRUE if we didn't properly drain the socket so this
 * function should get called again without select() or similar in between!
 */
static CURLcode readwrite_data(struct Curl_easy *data,
                               struct connectdata *conn,
                               struct SingleRequest *k,
                               int *didwhat, bool *done,
                               bool *comeback)
{
  CURLcode result = CURLE_OK;
  ssize_t nread; /* number of bytes read */
  size_t excess = 0; /* excess bytes read */
  bool is_empty_data = FALSE;
  bool readmore = FALSE; /* used by RTP to signal for more data */
  int maxloops = 100; /* cap the drain loop so one connection can't starve
                         the rest; see the *comeback handling below */

  *done = FALSE;
  *comeback = FALSE;

  /* This is where we loop until we have read everything there is to
     read or we get a CURLE_AGAIN */
  do {
    size_t buffersize = data->set.buffer_size?
      data->set.buffer_size : BUFSIZE;
    size_t bytestoread = buffersize;

    if(
#if defined(USE_NGHTTP2)
       /* For HTTP/2, read data without caring about the content
          length. This is safe because body in HTTP/2 is always
          segmented thanks to its framing layer. Meanwhile, we have to
          call Curl_read to ensure that http2_handle_stream_close is
          called when we read all incoming bytes for a particular
          stream. */
       !((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
         conn->httpversion == 20) &&
#endif
       k->size != -1 && !k->header) {
      /* make sure we don't read "too much" if we can help it since we
         might be pipelining and then someone else might want to read what
         follows! */
      curl_off_t totalleft = k->size - k->bytecount;
      if(totalleft < (curl_off_t)bytestoread)
        bytestoread = (size_t)totalleft;
    }

    if(bytestoread) {
      /* receive data from the network! */
      result = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);

      /* read would've blocked */
      if(CURLE_AGAIN == result)
        break; /* get out of loop */

      /* any other nonzero result is a real error (CURLE_OK is 0) */
      if(result>0)
        return result;
    }
    else {
      /* read nothing but since we wanted nothing we consider this an OK
         situation to proceed from */
      DEBUGF(infof(data, "readwrite_data: we're done!\n"));
      nread = 0;
    }

    if((k->bytecount == 0) && (k->writebytecount == 0)) {
      /* first bytes of the transfer: start the transfer timer */
      Curl_pgrsTime(data, TIMER_STARTTRANSFER);
      if(k->exp100 > EXP100_SEND_DATA)
        /* set time stamp to compare with when waiting for the 100 */
        k->start100 = Curl_tvnow();
    }

    *didwhat |= KEEP_RECV;
    /* indicates data of zero size, i.e. empty file */
    is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;

    /* NUL terminate, allowing string ops to be used */
    if(0 < nread || is_empty_data) {
      k->buf[nread] = 0;
    }
    else if(0 >= nread) {
      /* if we receive 0 or less here, the server closed the connection
         and we bail out from this! */
      DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
      k->keepon &= ~KEEP_RECV;
      break;
    }

    /* Default buffer to use when we write the buffer, it may be changed
       in the flow below before the actual storing is done. */
    k->str = k->buf;

    /* give protocol handlers (e.g. RTSP) first crack at the raw data */
    if(conn->handler->readwrite) {
      result = conn->handler->readwrite(data, conn, &nread, &readmore);
      if(result)
        return result;
      if(readmore)
        break;
    }

#ifndef CURL_DISABLE_HTTP
    /* Since this is a two-state thing, we check if we are parsing
       headers at the moment or not. */
    if(k->header) {
      /* we are in parse-the-header-mode */
      bool stop_reading = FALSE;
      result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
      if(result)
        return result;

      if(conn->handler->readwrite &&
         (k->maxdownload <= 0 && nread > 0)) {
        /* the headers are done; let the handler see the leftover bytes */
        result = conn->handler->readwrite(data, conn, &nread, &readmore);
        if(result)
          return result;
        if(readmore)
          break;
      }

      if(stop_reading) {
        /* We've stopped dealing with input, get out of the do-while loop */

        if(nread > 0) {
          if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
            /* pipelining: hand the surplus back so the next request on this
               connection can read it */
            infof(data,
                  "Rewinding stream by : %zd"
                  " bytes on url %s (zero-length body)\n",
                  nread, data->state.path);
            read_rewind(conn, (size_t)nread);
          }
          else {
            infof(data,
                  "Excess found in a non pipelined read:"
                  " excess = %zd"
                  " url = %s (zero-length body)\n",
                  nread, data->state.path);
          }
        }

        break;
      }
    }
#endif /* CURL_DISABLE_HTTP */


    /* This is not an 'else if' since it may be a rest from the header
       parsing, where the beginning of the buffer is headers and the end
       is non-headers. */
    if(k->str && !k->header && (nread > 0 || is_empty_data)) {

      if(data->set.opt_no_body) {
        /* data arrives although we want none, bail out */
        streamclose(conn, "ignoring body");
        *done = TRUE;
        return CURLE_WEIRD_SERVER_REPLY;
      }

#ifndef CURL_DISABLE_HTTP
      if(0 == k->bodywrites && !is_empty_data) {
        /* These checks are only made the first time we are about to
           write a piece of the body */
        if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
          /* HTTP-only checks */

          if(data->req.newurl) {
            if(conn->bits.close) {
              /* Abort after the headers if "follow Location" is set
                 and we're set to close anyway. */
              k->keepon &= ~KEEP_RECV;
              *done = TRUE;
              return CURLE_OK;
            }
            /* We have a new url to load, but since we want to be able
               to re-use this connection properly, we read the full
               response in "ignore more" */
            k->ignorebody = TRUE;
            infof(data, "Ignoring the response-body\n");
          }
          if(data->state.resume_from && !k->content_range &&
             (data->set.httpreq==HTTPREQ_GET) &&
             !k->ignorebody) {

            if(k->size == data->state.resume_from) {
              /* The resume point is at the end of file, consider this fine
                 even if it doesn't allow resume from here. */
              infof(data, "The entire document is already downloaded");
              connclose(conn, "already downloaded");
              /* Abort download */
              k->keepon &= ~KEEP_RECV;
              *done = TRUE;
              return CURLE_OK;
            }

            /* we wanted to resume a download, although the server doesn't
             * seem to support this and we did this with a GET (if it
             * wasn't a GET we did a POST or PUT resume) */
            failf(data, "HTTP server doesn't seem to support "
                  "byte ranges. Cannot resume.");
            return CURLE_RANGE_ERROR;
          }

          if(data->set.timecondition && !data->state.range) {
            /* A time condition has been set AND no ranges have been
               requested. This seems to be what chapter 13.3.4 of
               RFC 2616 defines to be the correct action for a
               HTTP/1.1 client */

            if(!Curl_meets_timecondition(data, k->timeofdoc)) {
              *done = TRUE;
              /* We're simulating a http 304 from server so we return
                 what should have been returned from the server */
              data->info.httpcode = 304;
              infof(data, "Simulate a HTTP 304 response!\n");
              /* we abort the transfer before it is completed == we ruin the
                 re-use ability. Close the connection */
              connclose(conn, "Simulated 304 handling");
              return CURLE_OK;
            }
          } /* we have a time condition */

        } /* this is HTTP or RTSP */
      } /* this is the first time we write a body part */
#endif /* CURL_DISABLE_HTTP */

      k->bodywrites++;

      /* pass data to the debug function before it gets "dechunked" */
      if(data->set.verbose) {
        if(k->badheader) {
          Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
                     (size_t)k->hbuflen, conn);
          if(k->badheader == HEADER_PARTHEADER)
            Curl_debug(data, CURLINFO_DATA_IN,
                       k->str, (size_t)nread, conn);
        }
        else
          Curl_debug(data, CURLINFO_DATA_IN,
                     k->str, (size_t)nread, conn);
      }

#ifndef CURL_DISABLE_HTTP
      if(k->chunk) {
        /*
         * Here comes a chunked transfer flying and we need to decode this
         * properly.  While the name says read, this function both reads
         * and writes away the data. The returned 'nread' holds the number
         * of actual data it wrote to the client.
         */

        CHUNKcode res =
          Curl_httpchunk_read(conn, k->str, nread, &nread);

        if(CHUNKE_OK < res) {
          if(CHUNKE_WRITE_ERROR == res) {
            failf(data, "Failed writing data");
            return CURLE_WRITE_ERROR;
          }
          failf(data, "%s in chunked-encoding", Curl_chunked_strerror(res));
          return CURLE_RECV_ERROR;
        }
        else if(CHUNKE_STOP == res) {
          size_t dataleft;
          /* we're done reading chunks! */
          k->keepon &= ~KEEP_RECV; /* read no more */

          /* There are now possibly N number of bytes at the end of the
             str buffer that weren't written to the client.

             We DO care about this data if we are pipelining.
             Push it back to be read on the next pass. */

          dataleft = conn->chunk.dataleft;
          if(dataleft != 0) {
            infof(conn->data, "Leftovers after chunking: %zu bytes\n",
                  dataleft);
            if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
              /* only attempt the rewind if we truly are pipelining */
              infof(conn->data, "Rewinding %zu bytes\n",dataleft);
              read_rewind(conn, dataleft);
            }
          }
        }
        /* If it returned OK, we just keep going */
      }
#endif   /* CURL_DISABLE_HTTP */

      /* Account for body content stored in the header buffer */
      if(k->badheader && !k->ignorebody) {
        DEBUGF(infof(data, "Increasing bytecount by %zu from hbuflen\n",
                     k->hbuflen));
        k->bytecount += k->hbuflen;
      }

      if((-1 != k->maxdownload) &&
         (k->bytecount + nread >= k->maxdownload)) {

        /* we got (at least) up to maxdownload; any surplus belongs to a
           later response on this connection */
        excess = (size_t)(k->bytecount + nread - k->maxdownload);
        if(excess > 0 && !k->ignorebody) {
          if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
            /* The 'excess' amount below can't be more than BUFSIZE which
               always will fit in a size_t */
            infof(data,
                  "Rewinding stream by : %zu"
                  " bytes on url %s (size = %" CURL_FORMAT_CURL_OFF_T
                  ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
                  ", bytecount = %" CURL_FORMAT_CURL_OFF_T ", nread = %zd)\n",
                  excess, data->state.path,
                  k->size, k->maxdownload, k->bytecount, nread);
            read_rewind(conn, excess);
          }
          else {
            infof(data,
                  "Excess found in a non pipelined read:"
                  " excess = %zu"
                  ", size = %" CURL_FORMAT_CURL_OFF_T
                  ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
                  ", bytecount = %" CURL_FORMAT_CURL_OFF_T "\n",
                  excess, k->size, k->maxdownload, k->bytecount);
          }
        }

        /* clamp nread so the body write below stops at maxdownload */
        nread = (ssize_t) (k->maxdownload - k->bytecount);
        if(nread < 0) /* this should be unusual */
          nread = 0;

        k->keepon &= ~KEEP_RECV; /* we're done reading */
      }

      k->bytecount += nread;

      Curl_pgrsSetDownloadCounter(data, k->bytecount);

      if(!k->chunk && (nread || k->badheader || is_empty_data)) {
        /* If this is chunky transfer, it was already written */

        if(k->badheader && !k->ignorebody) {
          /* we parsed a piece of data wrongly assuming it was a header
             and now we output it as body instead */

          /* Don't let excess data pollute body writes */
          if(k->maxdownload == -1 || (curl_off_t)k->hbuflen <= k->maxdownload)
            result = Curl_client_write(conn, CLIENTWRITE_BODY,
                                       data->state.headerbuff,
                                       k->hbuflen);
          else
            result = Curl_client_write(conn, CLIENTWRITE_BODY,
                                       data->state.headerbuff,
                                       (size_t)k->maxdownload);

          if(result)
            return result;
        }
        if(k->badheader < HEADER_ALLBAD) {
          /* This switch handles various content encodings. If there's an
             error here, be sure to check over the almost identical code
             in http_chunks.c.
             Make sure that ALL_CONTENT_ENCODINGS contains all the
             encodings handled here. */
#ifdef HAVE_LIBZ
          switch(conn->data->set.http_ce_skip ?
                 IDENTITY : k->auto_decoding) {
          case IDENTITY:
#endif
            /* This is the default when the server sends no
               Content-Encoding header. See Curl_readwrite_init; the
               memset() call initializes k->auto_decoding to zero. */
            if(!k->ignorebody) {

#ifndef CURL_DISABLE_POP3
              if(conn->handler->protocol&PROTO_FAMILY_POP3)
                result = Curl_pop3_write(conn, k->str, nread);
              else
#endif /* CURL_DISABLE_POP3 */

                result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
                                           nread);
            }
#ifdef HAVE_LIBZ
            break;

          case DEFLATE:
            /* Assume CLIENTWRITE_BODY; headers are not encoded. */
            if(!k->ignorebody)
              result = Curl_unencode_deflate_write(conn, k, nread);
            break;

          case GZIP:
            /* Assume CLIENTWRITE_BODY; headers are not encoded. */
            if(!k->ignorebody)
              result = Curl_unencode_gzip_write(conn, k, nread);
            break;

          default:
            failf(data, "Unrecognized content encoding type. "
                  "libcurl understands `identity', `deflate' and `gzip' "
                  "content encodings.");
            result = CURLE_BAD_CONTENT_ENCODING;
            break;
          }
#endif
        }
        k->badheader = HEADER_NORMAL; /* taken care of now */

        if(result)
          return result;
      }

    } /* if(!header and data to read) */

    if(conn->handler->readwrite &&
       (excess > 0 && !conn->bits.stream_was_rewound)) {
      /* Parse the excess data */
      k->str += nread;
      nread = (ssize_t)excess;

      result = conn->handler->readwrite(data, conn, &nread, &readmore);
      if(result)
        return result;

      if(readmore)
        k->keepon |= KEEP_RECV; /* we're not done reading */
      break;
    }

    if(is_empty_data) {
      /* if we received nothing, the server closed the connection and we
         are done */
      k->keepon &= ~KEEP_RECV;
    }

  } while(data_pending(conn) && maxloops--);

  if(maxloops <= 0) {
    /* we hit the loop cap with data still pending: mark it as
       read-again-please */
    conn->cselect_bits = CURL_CSELECT_IN;
    *comeback = TRUE;
  }

  if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
     conn->bits.close) {
    /* When we've read the entire thing and the close bit is set, the server
       may now close the connection. If there's now any kind of sending going
       on from our side, we need to stop that immediately. */
    infof(data, "we are done reading and this is set to close, stop send\n");
    k->keepon &= ~KEEP_SEND; /* no writing anymore either */
  }

  return CURLE_OK;
}
00834 
00835 static CURLcode done_sending(struct connectdata *conn,
00836                              struct SingleRequest *k)
00837 {
00838   k->keepon &= ~KEEP_SEND; /* we're done writing */
00839 
00840   Curl_http2_done_sending(conn);
00841 
00842   if(conn->bits.rewindaftersend) {
00843     CURLcode result = Curl_readrewind(conn);
00844     if(result)
00845       return result;
00846   }
00847   return CURLE_OK;
00848 }
00849 
00850 
/*
 * Send data to upload to the server, when the socket is writable.
 *
 * When the upload buffer is empty this refills it from the read callback /
 * protocol layer, optionally rewrites the data (LF->CRLF conversion, SMTP
 * end-of-body escaping), then writes as much as the socket accepts and
 * updates the progress counters. Leftover bytes are kept in the buffer for
 * the next invocation.
 */
static CURLcode readwrite_upload(struct Curl_easy *data,
                                 struct connectdata *conn,
                                 struct SingleRequest *k,
                                 int *didwhat)
{
  ssize_t i, si;
  ssize_t bytes_written;
  CURLcode result;
  ssize_t nread; /* number of bytes read */
  bool sending_http_headers = FALSE;

  /* nothing transferred yet in either direction: start the transfer timer */
  if((k->bytecount == 0) && (k->writebytecount == 0))
    Curl_pgrsTime(data, TIMER_STARTTRANSFER);

  *didwhat |= KEEP_SEND;

  do {

    /* only read more data if there's no upload data already
       present in the upload buffer */
    if(0 == data->req.upload_present) {
      /* init the "upload from here" pointer */
      data->req.upload_fromhere = k->uploadbuf;

      if(!k->upload_done) {
        /* HTTP pollution, this should be written nicer to become more
           protocol agnostic. */
        int fillcount;
        struct HTTP *http = data->req.protop;

        if((k->exp100 == EXP100_SENDING_REQUEST) &&
           (http->sending == HTTPSEND_BODY)) {
          /* If this call is to send body data, we must take some action:
             We have sent off the full HTTP 1.1 request, and we shall now
             go into the Expect: 100 state and await such a header */
          k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
          k->keepon &= ~KEEP_SEND;         /* disable writing */
          k->start100 = Curl_tvnow();       /* timeout count starts now */
          *didwhat &= ~KEEP_SEND;  /* we didn't write anything actually */

          /* set a timeout for the multi interface */
          Curl_expire(data, data->set.expect_100_timeout);
          break;
        }

        if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
          if(http->sending == HTTPSEND_REQUEST)
            /* We're sending the HTTP request headers, not the data.
               Remember that so we don't change the line endings. */
            sending_http_headers = TRUE;
          else
            sending_http_headers = FALSE;
        }

        /* ask the read callback / protocol layer for up to BUFSIZE bytes */
        result = Curl_fillreadbuffer(conn, BUFSIZE, &fillcount);
        if(result)
          return result;

        nread = (ssize_t)fillcount;
      }
      else
        nread = 0; /* we're done uploading/reading */

      if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
        /* this is a paused transfer */
        break;
      }
      else if(nread<=0) {
        /* nothing left to read: finish off the sending side */
        result = done_sending(conn, k);
        if(result)
          return result;
        break;
      }

      /* store number of bytes available for upload */
      data->req.upload_present = nread;

      /* convert LF to CRLF if so asked */
      if((!sending_http_headers) && (
#ifdef CURL_DO_LINEEND_CONV
         /* always convert if we're FTPing in ASCII mode */
         (data->set.prefer_ascii) ||
#endif
         (data->set.crlf))) {
        /* Do we need to allocate a scratch buffer? */
        if(!data->state.scratch) {
          /* worst case every input byte is an LF that expands to CRLF, so
             twice the read-buffer size is always sufficient */
          data->state.scratch = malloc(2 * BUFSIZE);
          if(!data->state.scratch) {
            failf(data, "Failed to alloc scratch buffer!");

            return CURLE_OUT_OF_MEMORY;
          }
        }

        /*
         * ASCII/EBCDIC Note: This is presumably a text (not binary)
         * transfer so the data should already be in ASCII.
         * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
         * must be used instead of the escape sequences \r & \n.
         */
        for(i = 0, si = 0; i < nread; i++, si++) {
          if(data->req.upload_fromhere[i] == 0x0a) {
            /* insert a CR before each LF */
            data->state.scratch[si++] = 0x0d;
            data->state.scratch[si] = 0x0a;
            if(!data->set.crlf) {
              /* we're here only because FTP is in ASCII mode...
                 bump infilesize for the LF we just added */
              if(data->state.infilesize != -1)
                data->state.infilesize++;
            }
          }
          else
            data->state.scratch[si] = data->req.upload_fromhere[i];
        }

        if(si != nread) {
          /* only perform the special operation if we really did replace
             anything */
          nread = si;

          /* upload from the new (replaced) buffer instead */
          data->req.upload_fromhere = data->state.scratch;

          /* set the new amount too */
          data->req.upload_present = nread;
        }
      }

#ifndef CURL_DISABLE_SMTP
      if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
        /* escape the message body for SMTP — exact semantics live in
           Curl_smtp_escape_eob (NOTE(review): confirm there) */
        result = Curl_smtp_escape_eob(conn, nread);
        if(result)
          return result;
      }
#endif /* CURL_DISABLE_SMTP */
    } /* if 0 == data->req.upload_present */
    else {
      /* We have a partial buffer left from a previous "round". Use
         that instead of reading more data */
    }

    /* write to socket (send away data) */
    result = Curl_write(conn,
                        conn->writesockfd,     /* socket to send to */
                        data->req.upload_fromhere, /* buffer pointer */
                        data->req.upload_present,  /* buffer size */
                        &bytes_written);           /* actually sent */

    if(result)
      return result;

    if(data->set.verbose)
      /* show the data before we change the pointer upload_fromhere */
      Curl_debug(data, CURLINFO_DATA_OUT, data->req.upload_fromhere,
                 (size_t)bytes_written, conn);

    k->writebytecount += bytes_written;

    if(k->writebytecount == data->state.infilesize) {
      /* we have sent all data we were supposed to */
      k->upload_done = TRUE;
      infof(data, "We are completely uploaded and fine\n");
    }

    if(data->req.upload_present != bytes_written) {
      /* we only wrote a part of the buffer (if anything), deal with it! */

      /* store the amount of bytes left in the buffer to write */
      data->req.upload_present -= bytes_written;

      /* advance the pointer where to find the buffer when the next send
         is to happen */
      data->req.upload_fromhere += bytes_written;
    }
    else {
      /* we've uploaded that buffer now */
      data->req.upload_fromhere = k->uploadbuf;
      data->req.upload_present = 0; /* no more bytes left */

      if(k->upload_done) {
        result = done_sending(conn, k);
        if(result)
          return result;
      }
    }

    Curl_pgrsSetUploadCounter(data, k->writebytecount);

  } WHILE_FALSE; /* just to break out from! */

  return CURLE_OK;
}
01046 
/*
 * Curl_readwrite() is the low-level function to be called when data is to
 * be read and written to/from the connection.
 *
 * return '*comeback' TRUE if we didn't properly drain the socket so this
 * function should get called again without select() or similar in between!
 *
 * Sets '*done' TRUE when the transfer has no RECV/SEND (or pause) work left.
 */
CURLcode Curl_readwrite(struct connectdata *conn,
                        struct Curl_easy *data,
                        bool *done,
                        bool *comeback)
{
  struct SingleRequest *k = &data->req;
  CURLcode result;
  int didwhat=0;

  curl_socket_t fd_read;
  curl_socket_t fd_write;
  int select_res = conn->cselect_bits;

  /* consume the stored select bits so they are acted upon only once */
  conn->cselect_bits = 0;

  /* only use the proper socket if the *_HOLD bit is not set simultaneously as
     then we are in rate limiting state in that transfer direction */

  if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
    fd_read = conn->sockfd;
  else
    fd_read = CURL_SOCKET_BAD;

  if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
    fd_write = conn->writesockfd;
  else
    fd_write = CURL_SOCKET_BAD;

  if(conn->data->state.drain) {
    /* buffered data is pending; force the read path to run */
    select_res |= CURL_CSELECT_IN;
    DEBUGF(infof(data, "Curl_readwrite: forcibly told to drain data\n"));
  }

  if(!select_res) /* Call for select()/poll() only, if read/write/error
                     status is not known. */
    select_res = Curl_socket_check(fd_read, CURL_SOCKET_BAD, fd_write, 0);

  if(select_res == CURL_CSELECT_ERR) {
    failf(data, "select/poll returned error");
    return CURLE_SEND_ERROR;
  }

  /* We go ahead and do a read if we have a readable socket or if
     the stream was rewound (in which case we have data in a
     buffer) */
  if((k->keepon & KEEP_RECV) &&
     ((select_res & CURL_CSELECT_IN) || conn->bits.stream_was_rewound)) {

    result = readwrite_data(data, conn, k, &didwhat, done, comeback);
    if(result || *done)
      return result;
  }

  /* If we still have writing to do, we check if we have a writable socket. */
  if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
    /* write */

    result = readwrite_upload(data, conn, k, &didwhat);
    if(result)
      return result;
  }

  k->now = Curl_tvnow();
  if(didwhat) {
    /* Update read/write counters */
    if(k->bytecountp)
      *k->bytecountp = k->bytecount; /* read count */
    if(k->writebytecountp)
      *k->writebytecountp = k->writebytecount; /* write count */
  }
  else {
    /* no read no write, this is a timeout? */
    if(k->exp100 == EXP100_AWAITING_CONTINUE) {
      /* This should allow some time for the header to arrive, but only a
         very short time as otherwise it'll be too much wasted time too
         often. */

      /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":

         Therefore, when a client sends this header field to an origin server
         (possibly via a proxy) from which it has never seen a 100 (Continue)
         status, the client SHOULD NOT wait for an indefinite period before
         sending the request body.

      */

      time_t ms = Curl_tvdiff(k->now, k->start100);
      if(ms >= data->set.expect_100_timeout) {
        /* we've waited long enough, continue anyway */
        k->exp100 = EXP100_SEND_DATA;
        k->keepon |= KEEP_SEND;
        infof(data, "Done waiting for 100-continue\n");
      }
    }
  }

  /* progress callback may abort; otherwise enforce the speed limits */
  if(Curl_pgrsUpdate(conn))
    result = CURLE_ABORTED_BY_CALLBACK;
  else
    result = Curl_speedcheck(data, k->now);
  if(result)
    return result;

  if(k->keepon) {
    /* transfer still in progress: check the overall operation timeout */
    if(0 > Curl_timeleft(data, &k->now, FALSE)) {
      if(k->size != -1) {
        failf(data, "Operation timed out after %ld milliseconds with %"
              CURL_FORMAT_CURL_OFF_T " out of %"
              CURL_FORMAT_CURL_OFF_T " bytes received",
              Curl_tvdiff(k->now, data->progress.t_startsingle), k->bytecount,
              k->size);
      }
      else {
        failf(data, "Operation timed out after %ld milliseconds with %"
              CURL_FORMAT_CURL_OFF_T " bytes received",
              Curl_tvdiff(k->now, data->progress.t_startsingle), k->bytecount);
      }
      return CURLE_OPERATION_TIMEDOUT;
    }
  }
  else {
    /*
     * The transfer has been performed. Just make some general checks before
     * returning.
     */

    if(!(data->set.opt_no_body) && (k->size != -1) &&
       (k->bytecount != k->size) &&
#ifdef CURL_DO_LINEEND_CONV
       /* Most FTP servers don't adjust their file SIZE response for CRLFs,
          so we'll check to see if the discrepancy can be explained
          by the number of CRLFs we've changed to LFs.
       */
       (k->bytecount != (k->size + data->state.crlf_conversions)) &&
#endif /* CURL_DO_LINEEND_CONV */
       !data->req.newurl) {
      failf(data, "transfer closed with %" CURL_FORMAT_CURL_OFF_T
            " bytes remaining to read",
            k->size - k->bytecount);
      return CURLE_PARTIAL_FILE;
    }
    else if(!(data->set.opt_no_body) &&
            k->chunk &&
            (conn->chunk.state != CHUNK_STOP)) {
      /*
       * In chunked mode, return an error if the connection is closed prior to
       * the empty (terminating) chunk is read.
       *
       * The condition above used to check for
       * conn->proto.http->chunk.datasize != 0 which is true after reading
       * *any* chunk, not just the empty chunk.
       *
       */
      failf(data, "transfer closed with outstanding read data remaining");
      return CURLE_PARTIAL_FILE;
    }
    if(Curl_pgrsUpdate(conn))
      return CURLE_ABORTED_BY_CALLBACK;
  }

  /* Now update the "done" boolean we return */
  *done = (0 == (k->keepon&(KEEP_RECV|KEEP_SEND|
                            KEEP_RECV_PAUSE|KEEP_SEND_PAUSE))) ? TRUE : FALSE;

  return CURLE_OK;
}
01220 
01221 /*
01222  * Curl_single_getsock() gets called by the multi interface code when the app
01223  * has requested to get the sockets for the current connection. This function
01224  * will then be called once for every connection that the multi interface
01225  * keeps track of. This function will only be called for connections that are
01226  * in the proper state to have this information available.
01227  */
01228 int Curl_single_getsock(const struct connectdata *conn,
01229                         curl_socket_t *sock, /* points to numsocks number
01230                                                 of sockets */
01231                         int numsocks)
01232 {
01233   const struct Curl_easy *data = conn->data;
01234   int bitmap = GETSOCK_BLANK;
01235   unsigned sockindex = 0;
01236 
01237   if(conn->handler->perform_getsock)
01238     return conn->handler->perform_getsock(conn, sock, numsocks);
01239 
01240   if(numsocks < 2)
01241     /* simple check but we might need two slots */
01242     return GETSOCK_BLANK;
01243 
01244   /* don't include HOLD and PAUSE connections */
01245   if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {
01246 
01247     DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);
01248 
01249     bitmap |= GETSOCK_READSOCK(sockindex);
01250     sock[sockindex] = conn->sockfd;
01251   }
01252 
01253   /* don't include HOLD and PAUSE connections */
01254   if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {
01255 
01256     if((conn->sockfd != conn->writesockfd) ||
01257        bitmap == GETSOCK_BLANK) {
01258       /* only if they are not the same socket and we have a readable
01259          one, we increase index */
01260       if(bitmap != GETSOCK_BLANK)
01261         sockindex++; /* increase index if we need two entries */
01262 
01263       DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);
01264 
01265       sock[sockindex] = conn->writesockfd;
01266     }
01267 
01268     bitmap |= GETSOCK_WRITESOCK(sockindex);
01269   }
01270 
01271   return bitmap;
01272 }
01273 
01274 /* Curl_init_CONNECT() gets called each time the handle switches to CONNECT
01275    which means this gets called once for each subsequent redirect etc */
01276 void Curl_init_CONNECT(struct Curl_easy *data)
01277 {
01278   data->state.fread_func = data->set.fread_func_set;
01279   data->state.in = data->set.in_set;
01280 }
01281 
/*
 * Curl_pretransfer() is called immediately before a transfer starts, and only
 * once for one transfer no matter if it has redirects or do multi-pass
 * authentication etc.
 *
 * Resets per-transfer state, loads cookies/resolve overrides, installs the
 * SIGPIPE handling and starts the progress machinery.
 */
CURLcode Curl_pretransfer(struct Curl_easy *data)
{
  CURLcode result;
  if(!data->change.url) {
    /* we can't do anything without URL */
    failf(data, "No URL set!");
    return CURLE_URL_MALFORMAT;
  }

  /* Init the SSL session ID cache here. We do it here since we want to do it
     after the *_setopt() calls (that could specify the size of the cache) but
     before any transfer takes place. */
  result = Curl_ssl_initsessions(data, data->set.general_ssl.max_ssl_sessions);
  if(result)
    return result;

  data->set.followlocation=0; /* reset the location-follow counter */
  data->state.this_is_a_follow = FALSE; /* reset this */
  data->state.errorbuf = FALSE; /* no error has occurred */
  data->state.httpversion = 0; /* don't assume any particular server version */

  data->state.authproblem = FALSE;
  data->state.authhost.want = data->set.httpauth;
  data->state.authproxy.want = data->set.proxyauth;
  Curl_safefree(data->info.wouldredirect);
  data->info.wouldredirect = NULL;

  /* expected upload size: PUT takes the explicit file size, other methods
     the POST field size */
  if(data->set.httpreq == HTTPREQ_PUT)
    data->state.infilesize = data->set.filesize;
  else
    data->state.infilesize = data->set.postfieldsize;

  /* If there is a list of cookie files to read, do it now! */
  if(data->change.cookielist)
    Curl_cookie_loadfiles(data);

  /* If there is a list of host pairs to deal with */
  if(data->change.resolve)
    result = Curl_loadhostpairs(data);

  if(!result) {
    /* Allow data->set.use_port to set which port to use. This needs to be
     * disabled for example when we follow Location: headers to URLs using
     * different ports! */
    data->state.allow_port = TRUE;

#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
    /*************************************************************
     * Tell signal handler to ignore SIGPIPE
     *************************************************************/
    if(!data->set.no_signal)
      data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
#endif

    Curl_initinfo(data); /* reset session-specific information "variables" */
    Curl_pgrsResetTimesSizes(data);
    Curl_pgrsStartNow(data);

    if(data->set.timeout)
      Curl_expire(data, data->set.timeout);

    if(data->set.connecttimeout)
      Curl_expire(data, data->set.connecttimeout);

    /* In case the handle is re-used and an authentication method was picked
       in the session we need to make sure we only use the one(s) we now
       consider to be fine */
    data->state.authhost.picked &= data->state.authhost.want;
    data->state.authproxy.picked &= data->state.authproxy.want;

    if(data->set.wildcardmatch) {
      struct WildcardData *wc = &data->wildcard;
      if(!wc->filelist) {
        result = Curl_wildcard_init(wc); /* init wildcard structures */
        if(result)
          /* NOTE(review): any init failure is reported as OOM here; the
             real 'result' code is discarded — consider returning it */
          return CURLE_OUT_OF_MEMORY;
      }
    }

  }

  return result;
}
01370 
/*
 * Curl_posttransfer() is called immediately after a transfer ends
 *
 * Undoes the SIGPIPE handler installed by Curl_pretransfer(); always
 * returns CURLE_OK.
 */
CURLcode Curl_posttransfer(struct Curl_easy *data)
{
#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
  /* restore the signal handler for SIGPIPE before we get back */
  if(!data->set.no_signal)
    signal(SIGPIPE, data->state.prev_signal);
#else
  (void)data; /* unused parameter */
#endif

  return CURLE_OK;
}
01386 
01387 #ifndef CURL_DISABLE_HTTP
/*
 * strlen_url() returns the length of the given URL if the spaces within the
 * URL were properly URL encoded: "%20" (3 bytes) left of the '?' separator,
 * '+' (1 byte) right of it; every byte >= 0x80 expands to a 3-byte "%xx".
 */
static size_t strlen_url(const char *url)
{
  const unsigned char *p;
  size_t len = 0;
  int in_query = 0; /* nonzero once a '?' has been passed */

  for(p = (const unsigned char *)url; *p; p++) {
    unsigned char c = *p;

    if(c == ' ') {
      /* space encodes as "%20" before the query part, '+' inside it */
      len += in_query ? 1 : 3;
      continue;
    }

    if(c == '?')
      in_query = 1;

    /* high-bit bytes become a three-character %xx sequence */
    len += (c >= 0x80) ? 3 : 1;
  }
  return len;
}
01418 
/* strcpy_url() copies a url to a output buffer and URL-encodes the spaces in
 * the source URL accordingly: "%20" left of the '?' separator, '+' right of
 * it; bytes >= 0x80 are written as "%xx". The caller must supply a buffer of
 * at least strlen_url(url)+1 bytes.
 */
static void strcpy_url(char *output, const char *url)
{
  const unsigned char *in;
  char *out = output;
  int in_query = 0; /* set once the '?' separator has been copied */

  for(in = (const unsigned char *)url; *in; in++) {
    unsigned char c = *in;

    if(c == ' ') {
      if(in_query)
        *out++ = '+';  /* '+' stands for space inside the query part */
      else {
        *out++ = '%';  /* percent-encode the space before the query */
        *out++ = '2';
        *out++ = '0';
      }
      continue;
    }

    if(c == '?')
      in_query = 1;

    if(c >= 0x80) {
      /* high-bit byte: emit "%xx" (three characters) */
      snprintf(out, 4, "%%%02x", c);
      out += 3;
    }
    else
      *out++ = (char)c;
  }
  *out = 0; /* zero terminate output buffer */

}
01457 
/*
 * Returns true if the given URL is absolute (as opposed to relative):
 * i.e. it starts with "scheme://" where the scheme is 1-15 bytes long and
 * contains none of '?', '&', '/' or ':', and at least one byte follows.
 */
static bool is_absolute_url(const char *url)
{
  char scheme[16]; /* URL protocol string storage */
  char first;      /* first byte after the "://" separator */

  /* both conversions must succeed: the scheme AND a following byte */
  return 2 == sscanf(url, "%15[^?&/:]://%c", scheme, &first);
}
01468 
/*
 * Concatenate a relative URL to a base URL making it absolute.
 * URL-encodes any spaces.
 * The returned pointer must be freed by the caller unless NULL
 * (returns NULL on out of memory).
 *
 * Works by trimming a writable clone of 'base' down to the right directory
 * (honoring "./", "../", "//" and query-only relatives) and appending the
 * space-encoded relative part.
 */
static char *concat_url(const char *base, const char *relurl)
{
  /***
   TRY to append this new path to the old URL
   to the right of the host part. Oh crap, this is doomed to cause
   problems in the future...
  */
  char *newest;
  char *protsep;
  char *pathsep;
  size_t newlen;

  const char *useurl = relurl;
  size_t urllen;

  /* we must make our own copy of the URL to play with, as it may
     point to read-only data */
  char *url_clone=strdup(base);

  if(!url_clone)
    return NULL; /* skip out of this NOW */

  /* protsep points to the start of the host name */
  protsep=strstr(url_clone, "//");
  if(!protsep)
    protsep=url_clone;
  else
    protsep+=2; /* pass the slashes */

  if('/' != relurl[0]) {
    /* relative path (or query-only): splice onto the base's directory */
    int level=0;

    /* First we need to find out if there's a ?-letter in the URL,
       and cut it and the right-side of that off */
    pathsep = strchr(protsep, '?');
    if(pathsep)
      *pathsep=0;

    /* we have a relative path to append to the last slash if there's one
       available, or if the new URL is just a query string (starts with a
       '?')  we append the new one at the end of the entire currently worked
       out URL */
    if(useurl[0] != '?') {
      pathsep = strrchr(protsep, '/');
      if(pathsep)
        *pathsep=0;
    }

    /* Check if there's any slash after the host name, and if so, remember
       that position instead */
    pathsep = strchr(protsep, '/');
    if(pathsep)
      protsep = pathsep+1;
    else
      protsep = NULL;

    /* now deal with one "./" or any amount of "../" in the newurl
       and act accordingly */

    if((useurl[0] == '.') && (useurl[1] == '/'))
      useurl+=2; /* just skip the "./" */

    while((useurl[0] == '.') &&
          (useurl[1] == '.') &&
          (useurl[2] == '/')) {
      level++;
      useurl+=3; /* pass the "../" */
    }

    if(protsep) {
      /* drop one trailing path component per counted "../" */
      while(level--) {
        /* cut off one more level from the right of the original URL */
        pathsep = strrchr(protsep, '/');
        if(pathsep)
          *pathsep=0;
        else {
          /* ran out of components: truncate right after the host */
          *protsep=0;
          break;
        }
      }
    }
  }
  else {
    /* We got a new absolute path for this server */

    if((relurl[0] == '/') && (relurl[1] == '/')) {
      /* the new URL starts with //, just keep the protocol part from the
         original one */
      *protsep=0;
      useurl = &relurl[2]; /* we keep the slashes from the original, so we
                              skip the new ones */
    }
    else {
      /* cut off the original URL from the first slash, or deal with URLs
         without slash */
      pathsep = strchr(protsep, '/');
      if(pathsep) {
        /* When people use badly formatted URLs, such as
           "http://www.url.com?dir=/home/daniel" we must not use the first
           slash, if there's a ?-letter before it! */
        char *sep = strchr(protsep, '?');
        if(sep && (sep < pathsep))
          pathsep = sep;
        *pathsep=0;
      }
      else {
        /* There was no slash. Now, since we might be operating on a badly
           formatted URL, such as "http://www.url.com?id=2380" which doesn't
           use a slash separator as it is supposed to, we need to check for a
           ?-letter as well! */
        pathsep = strchr(protsep, '?');
        if(pathsep)
          *pathsep=0;
      }
    }
  }

  /* If the new part contains a space, this is a mighty stupid redirect
     but we still make an effort to do "right". To the left of a '?'
     letter we replace each space with %20 while it is replaced with '+'
     on the right side of the '?' letter.
  */
  newlen = strlen_url(useurl);

  urllen = strlen(url_clone);

  /* room for the trimmed base, an optional joining slash, the encoded
     relative part and the terminator */
  newest = malloc(urllen + 1 + /* possible slash */
                  newlen + 1 /* zero byte */);

  if(!newest) {
    free(url_clone); /* don't leak this */
    return NULL;
  }

  /* copy over the root url part */
  memcpy(newest, url_clone, urllen);

  /* check if we need to append a slash */
  if(('/' == useurl[0]) || (protsep && !*protsep) || ('?' == useurl[0]))
    ;
  else
    newest[urllen++]='/';

  /* then append the new piece on the right side */
  strcpy_url(&newest[urllen], useurl);

  free(url_clone);

  return newest;
}
01625 #endif /* CURL_DISABLE_HTTP */
01626 
01627 /*
01628  * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
01629  * as given by the remote server and set up the new URL to request.
01630  */
CURLcode Curl_follow(struct Curl_easy *data,
                     char *newurl, /* this 'newurl' is the Location: string,
                                      and it must be malloc()ed before passed
                                      here */
                     followtype type) /* see transfer.h */
{
#ifdef CURL_DISABLE_HTTP
  (void)data;
  (void)newurl;
  (void)type;
  /* Location: following will not happen when HTTP is disabled */
  return CURLE_TOO_MANY_REDIRECTS;
#else

  /* Location: redirect */
  bool disallowport = FALSE; /* set TRUE when the redirect target is an
                                absolute URL, so that a user-set custom port
                                must not be re-applied to the new host */

  if(type == FOLLOW_REDIR) {
    /* enforce CURLOPT_MAXREDIRS; set.followlocation holds the number of
       redirects already followed in this transfer */
    if((data->set.maxredirs != -1) &&
        (data->set.followlocation >= data->set.maxredirs)) {
      failf(data, "Maximum (%ld) redirects followed", data->set.maxredirs);
      return CURLE_TOO_MANY_REDIRECTS;
    }

    /* mark the next request as a followed location: */
    data->state.this_is_a_follow = TRUE;

    data->set.followlocation++; /* count location-followers */

    if(data->set.http_auto_referer) {
      /* We are asked to automatically set the previous URL as the referer
         when we get the next URL. We pick the ->url field, which may or may
         not be 100% correct */

      if(data->change.referer_alloc) {
        Curl_safefree(data->change.referer);
        data->change.referer_alloc = FALSE;
      }

      data->change.referer = strdup(data->change.url);
      if(!data->change.referer)
        return CURLE_OUT_OF_MEMORY;
      data->change.referer_alloc = TRUE; /* yes, free this later */
    }
  }

  /* NOTE(review): on the early error returns below, 'newurl' is not freed
     here — presumably the caller retains ownership until this function
     succeeds; confirm against the call sites. */
  if(!is_absolute_url(newurl))  {
    /***
     *DANG* this is an RFC 2068 violation. The URL is supposed
     to be absolute and this doesn't seem to be that!
     */
    /* resolve the relative Location: against the current URL */
    char *absolute = concat_url(data->change.url, newurl);
    if(!absolute)
      return CURLE_OUT_OF_MEMORY;
    free(newurl);
    newurl = absolute;
  }
  else {
    /* The new URL MAY contain space or high byte values, that means a mighty
       stupid redirect URL but we still make an effort to do "right". */
    char *newest;
    size_t newlen = strlen_url(newurl); /* length after escaping */

    /* This is an absolute URL, don't allow the custom port number */
    disallowport = TRUE;

    newest = malloc(newlen+1); /* get memory for this */
    if(!newest)
      return CURLE_OUT_OF_MEMORY;
    strcpy_url(newest, newurl); /* create a space-free URL */

    free(newurl); /* that was no good */
    newurl = newest; /* use this instead now */

  }

  if(type == FOLLOW_FAKE) {
    /* we're only figuring out the new url if we would've followed locations
       but now we're done so we can get out! */
    /* ownership of 'newurl' transfers to data->info here */
    data->info.wouldredirect = newurl;
    return CURLE_OK;
  }

  if(disallowport)
    data->state.allow_port = FALSE;

  if(data->change.url_alloc) {
    Curl_safefree(data->change.url);
    data->change.url_alloc = FALSE;
  }

  /* install the new URL; ownership of 'newurl' transfers to data->change */
  data->change.url = newurl;
  data->change.url_alloc = TRUE;
  newurl = NULL; /* don't free! */

  infof(data, "Issue another request to this URL: '%s'\n", data->change.url);

  /*
   * We get here when the HTTP code is 300-399 (and 401). We need to perform
   * differently based on exactly what return code there was.
   *
   * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
   * a HTTP (proxy-) authentication scheme other than Basic.
   */
  switch(data->info.httpcode) {
    /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
       Authorization: XXXX header in the HTTP request code snippet */
    /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
       Proxy-Authorization: XXXX header in the HTTP request code snippet */
    /* 300 - Multiple Choices */
    /* 306 - Not used */
    /* 307 - Temporary Redirect */
  default:  /* for all above (and the unknown ones) */
    /* Some codes are explicitly mentioned since I've checked RFC2616 and they
     * seem to be OK to POST to.
     */
    break;
  case 301: /* Moved Permanently */
    /* (quote from RFC7231, section 6.4.2)
     *
     * Note: For historical reasons, a user agent MAY change the request
     * method from POST to GET for the subsequent request.  If this
     * behavior is undesired, the 307 (Temporary Redirect) status code
     * can be used instead.
     *
     * ----
     *
     * Many webservers expect this, so these servers often answers to a POST
     * request with an error page. To be sure that libcurl gets the page that
     * most user agents would get, libcurl has to force GET.
     *
     * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
     * can be overridden with CURLOPT_POSTREDIR.
     */
    if((data->set.httpreq == HTTPREQ_POST
        || data->set.httpreq == HTTPREQ_POST_FORM)
       && !(data->set.keep_post & CURL_REDIR_POST_301)) {
      infof(data, "Switch from POST to GET\n");
      data->set.httpreq = HTTPREQ_GET;
    }
    break;
  case 302: /* Found */
    /* (quote from RFC7231, section 6.4.3)
     *
     * Note: For historical reasons, a user agent MAY change the request
     * method from POST to GET for the subsequent request.  If this
     * behavior is undesired, the 307 (Temporary Redirect) status code
     * can be used instead.
     *
     * ----
     *
     * Many webservers expect this, so these servers often answers to a POST
     * request with an error page. To be sure that libcurl gets the page that
     * most user agents would get, libcurl has to force GET.
     *
     * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
     * can be overridden with CURLOPT_POSTREDIR.
     */
    if((data->set.httpreq == HTTPREQ_POST
        || data->set.httpreq == HTTPREQ_POST_FORM)
       && !(data->set.keep_post & CURL_REDIR_POST_302)) {
      infof(data, "Switch from POST to GET\n");
      data->set.httpreq = HTTPREQ_GET;
    }
    break;

  case 303: /* See Other */
    /* Disable both types of POSTs, unless the user explicitely
       asks for POST after POST */
    if(data->set.httpreq != HTTPREQ_GET
      && !(data->set.keep_post & CURL_REDIR_POST_303)) {
      data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
      infof(data, "Disables POST, goes with %s\n",
            data->set.opt_no_body?"HEAD":"GET");
    }
    break;
  case 304: /* Not Modified */
    /* 304 means we did a conditional request and it was "Not modified".
     * We shouldn't get any Location: header in this response!
     */
    break;
  case 305: /* Use Proxy */
    /* (quote from RFC2616, section 10.3.6):
     * "The requested resource MUST be accessed through the proxy given
     * by the Location field. The Location field gives the URI of the
     * proxy.  The recipient is expected to repeat this single request
     * via the proxy. 305 responses MUST only be generated by origin
     * servers."
     */
    break;
  }
  /* restart the progress/timing bookkeeping for the follow-up request */
  Curl_pgrsTime(data, TIMER_REDIRECT);
  Curl_pgrsResetTimesSizes(data);

  return CURLE_OK;
#endif /* CURL_DISABLE_HTTP */
}
01828 
01829 /* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.
01830 
01831    NOTE: that the *url is malloc()ed. */
01832 CURLcode Curl_retry_request(struct connectdata *conn,
01833                             char **url)
01834 {
01835   struct Curl_easy *data = conn->data;
01836 
01837   *url = NULL;
01838 
01839   /* if we're talking upload, we can't do the checks below, unless the protocol
01840      is HTTP as when uploading over HTTP we will still get a response */
01841   if(data->set.upload &&
01842      !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
01843     return CURLE_OK;
01844 
01845   if((data->req.bytecount + data->req.headerbytecount == 0) &&
01846      conn->bits.reuse &&
01847      (data->set.rtspreq != RTSPREQ_RECEIVE)) {
01848     /* We didn't get a single byte when we attempted to re-use a
01849        connection. This might happen if the connection was left alive when we
01850        were done using it before, but that was closed when we wanted to use it
01851        again. Bad luck. Retry the same request on a fresh connect! */
01852     infof(conn->data, "Connection died, retrying a fresh connect\n");
01853     *url = strdup(conn->data->change.url);
01854     if(!*url)
01855       return CURLE_OUT_OF_MEMORY;
01856 
01857     connclose(conn, "retry"); /* close this connection */
01858     conn->bits.retry = TRUE; /* mark this as a connection we're about
01859                                 to retry. Marking it this way should
01860                                 prevent i.e HTTP transfers to return
01861                                 error just because nothing has been
01862                                 transferred! */
01863 
01864 
01865     if(conn->handler->protocol&PROTO_FAMILY_HTTP) {
01866       struct HTTP *http = data->req.protop;
01867       if(http->writebytecount)
01868         return Curl_readrewind(conn);
01869     }
01870   }
01871   return CURLE_OK;
01872 }
01873 
01874 /*
01875  * Curl_setup_transfer() is called to setup some basic properties for the
01876  * upcoming transfer.
01877  */
01878 void
01879 Curl_setup_transfer(
01880   struct connectdata *conn, /* connection data */
01881   int sockindex,            /* socket index to read from or -1 */
01882   curl_off_t size,          /* -1 if unknown at this point */
01883   bool getheader,           /* TRUE if header parsing is wanted */
01884   curl_off_t *bytecountp,   /* return number of bytes read or NULL */
01885   int writesockindex,       /* socket index to write to, it may very well be
01886                                the same we read from. -1 disables */
01887   curl_off_t *writecountp   /* return number of bytes written or NULL */
01888   )
01889 {
01890   struct Curl_easy *data;
01891   struct SingleRequest *k;
01892 
01893   DEBUGASSERT(conn != NULL);
01894 
01895   data = conn->data;
01896   k = &data->req;
01897 
01898   DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));
01899 
01900   /* now copy all input parameters */
01901   conn->sockfd = sockindex == -1 ?
01902       CURL_SOCKET_BAD : conn->sock[sockindex];
01903   conn->writesockfd = writesockindex == -1 ?
01904       CURL_SOCKET_BAD:conn->sock[writesockindex];
01905   k->getheader = getheader;
01906 
01907   k->size = size;
01908   k->bytecountp = bytecountp;
01909   k->writebytecountp = writecountp;
01910 
01911   /* The code sequence below is placed in this function just because all
01912      necessary input is not always known in do_complete() as this function may
01913      be called after that */
01914 
01915   if(!k->getheader) {
01916     k->header = FALSE;
01917     if(size > 0)
01918       Curl_pgrsSetDownloadSize(data, size);
01919   }
01920   /* we want header and/or body, if neither then don't do this! */
01921   if(k->getheader || !data->set.opt_no_body) {
01922 
01923     if(conn->sockfd != CURL_SOCKET_BAD)
01924       k->keepon |= KEEP_RECV;
01925 
01926     if(conn->writesockfd != CURL_SOCKET_BAD) {
01927       struct HTTP *http = data->req.protop;
01928       /* HTTP 1.1 magic:
01929 
01930          Even if we require a 100-return code before uploading data, we might
01931          need to write data before that since the REQUEST may not have been
01932          finished sent off just yet.
01933 
01934          Thus, we must check if the request has been sent before we set the
01935          state info where we wait for the 100-return code
01936       */
01937       if((data->state.expect100header) &&
01938          (conn->handler->protocol&PROTO_FAMILY_HTTP) &&
01939          (http->sending == HTTPSEND_BODY)) {
01940         /* wait with write until we either got 100-continue or a timeout */
01941         k->exp100 = EXP100_AWAITING_CONTINUE;
01942         k->start100 = Curl_tvnow();
01943 
01944         /* Set a timeout for the multi interface. Add the inaccuracy margin so
01945            that we don't fire slightly too early and get denied to run. */
01946         Curl_expire(data, data->set.expect_100_timeout);
01947       }
01948       else {
01949         if(data->state.expect100header)
01950           /* when we've sent off the rest of the headers, we must await a
01951              100-continue but first finish sending the request */
01952           k->exp100 = EXP100_SENDING_REQUEST;
01953 
01954         /* enable the write bit when we're not waiting for continue */
01955         k->keepon |= KEEP_SEND;
01956       }
01957     } /* if(conn->writesockfd != CURL_SOCKET_BAD) */
01958   } /* if(k->getheader || !data->set.opt_no_body) */
01959 
01960 }


rc_visard_driver
Author(s): Heiko Hirschmueller, Christian Emmerich, Felix Ruess
autogenerated on Thu Jun 6 2019 20:43:07