transfer.c
1 /***************************************************************************
2  *                                  _   _ ____  _
3  *  Project                     ___| | | |  _ \| |
4  *                             / __| | | | |_) | |
5  *                            | (__| |_| |  _ <| |___
6  *                              \___|\___/|_| \_\_____|
7  *
8  * Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
9  *
10  * This software is licensed as described in the file COPYING, which
11  * you should have received as part of this distribution. The terms
12  * are also available at https://curl.haxx.se/docs/copyright.html.
13  *
14  * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15  * copies of the Software, and permit persons to whom the Software is
16  * furnished to do so, under the terms of the COPYING file.
17  *
18  * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19  * KIND, either express or implied.
20  *
21  ***************************************************************************/
22 
23 #include "curl_setup.h"
24 #include "strtoofft.h"
25 
26 #ifdef HAVE_NETINET_IN_H
27 #include <netinet/in.h>
28 #endif
29 #ifdef HAVE_NETDB_H
30 #include <netdb.h>
31 #endif
32 #ifdef HAVE_ARPA_INET_H
33 #include <arpa/inet.h>
34 #endif
35 #ifdef HAVE_NET_IF_H
36 #include <net/if.h>
37 #endif
38 #ifdef HAVE_SYS_IOCTL_H
39 #include <sys/ioctl.h>
40 #endif
41 #ifdef HAVE_SIGNAL_H
42 #include <signal.h>
43 #endif
44 
45 #ifdef HAVE_SYS_PARAM_H
46 #include <sys/param.h>
47 #endif
48 
49 #ifdef HAVE_SYS_SELECT_H
50 #include <sys/select.h>
51 #endif
52 
53 #ifndef HAVE_SOCKET
54 #error "We can't compile without socket() support!"
55 #endif
56 
57 #include "urldata.h"
58 #include <curl/curl.h>
59 #include "netrc.h"
60 
61 #include "content_encoding.h"
62 #include "hostip.h"
63 #include "transfer.h"
64 #include "sendf.h"
65 #include "speedcheck.h"
66 #include "progress.h"
67 #include "http.h"
68 #include "url.h"
69 #include "getinfo.h"
70 #include "vtls/vtls.h"
71 #include "select.h"
72 #include "multiif.h"
73 #include "connect.h"
74 #include "non-ascii.h"
75 #include "http2.h"
76 #include "mime.h"
77 #include "strcase.h"
78 
79 /* The last 3 #include files should be in this order */
80 #include "curl_printf.h"
81 #include "curl_memory.h"
82 #include "memdebug.h"
83 
84 #if !defined(CURL_DISABLE_HTTP) || !defined(CURL_DISABLE_SMTP) || \
85  !defined(CURL_DISABLE_IMAP)
86 /*
87  * checkheaders() checks the linked list of custom headers for a
88  * particular header (prefix).
89  *
90  * Returns a pointer to the first matching header or NULL if none matched.
91  */
92 char *Curl_checkheaders(const struct connectdata *conn,
93  const char *thisheader)
94 {
95  struct curl_slist *head;
96  size_t thislen = strlen(thisheader);
97  struct Curl_easy *data = conn->data;
98 
99  for(head = data->set.headers; head; head = head->next) {
100  if(strncasecompare(head->data, thisheader, thislen))
101  return head->data;
102  }
103 
104  return NULL;
105 }
106 #endif
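/* A typical use (illustrative): the HTTP request builder calls
   Curl_checkheaders(conn, "Accept:") to see whether the application supplied
   its own Accept header before adding the default one. Note that the match is
   on the header name prefix only, not on its value. */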
107 
108 /*
109  * This function will call the read callback to fill our buffer with data
110  * to upload.
111  */
112 CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
113 {
114  struct Curl_easy *data = conn->data;
115  size_t buffersize = (size_t)bytes;
116  int nread;
117 #ifdef CURL_DOES_CONVERSIONS
118  bool sending_http_headers = FALSE;
119 
120  if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
121  const struct HTTP *http = data->req.protop;
122 
123  if(http->sending == HTTPSEND_REQUEST)
124  /* We're sending the HTTP request headers, not the data.
125  Remember that so we don't re-translate them into garbage. */
126  sending_http_headers = TRUE;
127  }
128 #endif
129 
130  if(data->req.upload_chunky) {
131  /* if chunked Transfer-Encoding */
132  buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
133  data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
134  }
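/* With chunked uploading the buffer ends up laid out as
   [hex length][CRLF][data][CRLF]: the 8 + 2 bytes skipped above reserve room
   for the largest 32-bit hex length plus its CRLF, which is written in front
   of the data further down once the actual read size is known. */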
135 
136  /* this function returns a size_t, so we typecast to int to prevent warnings
137  with picky compilers */
138  nread = (int)data->state.fread_func(data->req.upload_fromhere, 1,
139  buffersize, data->state.in);
140 
141  if(nread == CURL_READFUNC_ABORT) {
142  failf(data, "operation aborted by callback");
143  *nreadp = 0;
144  return CURLE_ABORTED_BY_CALLBACK;
145  }
146  if(nread == CURL_READFUNC_PAUSE) {
147  struct SingleRequest *k = &data->req;
148 
149  if(conn->handler->flags & PROTOPT_NONETWORK) {
150  /* protocols that work without network cannot be paused. This is
151  actually only FILE:// just now, and it can't pause since the transfer
152  isn't done using the "normal" procedure. */
153  failf(data, "Read callback asked for PAUSE when not supported!");
154  return CURLE_READ_ERROR;
155  }
156 
157  /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
158  k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
159  if(data->req.upload_chunky) {
160  /* Back out the preallocation done above */
161  data->req.upload_fromhere -= (8 + 2);
162  }
163  *nreadp = 0;
164 
165  return CURLE_OK; /* nothing was read */
166  }
167  else if((size_t)nread > buffersize) {
168  /* the read function returned a too large value */
169  *nreadp = 0;
170  failf(data, "read function returned funny value");
171  return CURLE_READ_ERROR;
172  }
173 
174  if(!data->req.forbidchunk && data->req.upload_chunky) {
175  /* if chunked Transfer-Encoding
176  * build chunk:
177  *
178  * <HEX SIZE> CRLF
179  * <DATA> CRLF
180  */
181  /* On non-ASCII platforms the <DATA> may or may not be
182  translated based on set.prefer_ascii while the protocol
183  portion must always be translated to the network encoding.
184  To further complicate matters, line end conversion might be
185  done later on, so we need to prevent CRLFs from becoming
186  CRCRLFs if that's the case. To do this we use bare LFs
187  here, knowing they'll become CRLFs later on.
188  */
189 
190  char hexbuffer[11];
191  const char *endofline_native;
192  const char *endofline_network;
193  int hexlen;
194 
195  if(
196 #ifdef CURL_DOES_CONVERSIONS
197  (data->set.prefer_ascii) ||
198 #endif
199  (data->set.crlf)) {
200  /* \n will become \r\n later on */
201  endofline_native = "\n";
202  endofline_network = "\x0a";
203  }
204  else {
205  endofline_native = "\r\n";
206  endofline_network = "\x0d\x0a";
207  }
208  hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
209  "%x%s", nread, endofline_native);
210 
211  /* move buffer pointer */
212  data->req.upload_fromhere -= hexlen;
213  nread += hexlen;
214 
215  /* copy the prefix to the buffer, leaving out the NUL */
216  memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
217 
218  /* always append ASCII CRLF to the data */
219  memcpy(data->req.upload_fromhere + nread,
220  endofline_network,
221  strlen(endofline_network));
222 
223 #ifdef CURL_DOES_CONVERSIONS
224  {
225  CURLcode result;
226  int length;
227  if(data->set.prefer_ascii)
228  /* translate the protocol and data */
229  length = nread;
230  else
231  /* just translate the protocol portion */
232  length = (int)strlen(hexbuffer);
233  result = Curl_convert_to_network(data, data->req.upload_fromhere,
234  length);
235  /* Curl_convert_to_network calls failf if unsuccessful */
236  if(result)
237  return result;
238  }
239 #endif /* CURL_DOES_CONVERSIONS */
240 
241  if((nread - hexlen) == 0)
242  /* mark this as done once this chunk is transferred */
243  data->req.upload_done = TRUE;
244 
245  nread += (int)strlen(endofline_native); /* for the added end of line */
246  }
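/* Illustrative example: a 5-byte read of "hello" leaves "5\r\nhello\r\n" in
   the buffer (bare LFs if line-end conversion happens later), while a
   zero-byte read produces the terminating "0\r\n\r\n" chunk and flags
   upload_done. */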
247 #ifdef CURL_DOES_CONVERSIONS
248  else if((data->set.prefer_ascii) && (!sending_http_headers)) {
249  CURLcode result;
250  result = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
251  /* Curl_convert_to_network calls failf if unsuccessful */
252  if(result)
253  return result;
254  }
255 #endif /* CURL_DOES_CONVERSIONS */
256 
257  *nreadp = nread;
258 
259  return CURLE_OK;
260 }
261 
262 
263 /*
264  * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
265  * POST/PUT with multi-pass authentication when a sending was denied and a
266  * resend is necessary.
267  */
268 CURLcode Curl_readrewind(struct connectdata *conn)
269 {
270  struct Curl_easy *data = conn->data;
271  curl_mimepart *mimepart = &data->set.mimepost;
272 
273  conn->bits.rewindaftersend = FALSE; /* we rewind now */
274 
275  /* explicitly switch off sending data on this connection now since we are
276  about to restart a new transfer and thus we want to avoid inadvertently
277  sending more data on the existing connection until the next transfer
278  starts */
279  data->req.keepon &= ~KEEP_SEND;
280 
281  /* We have sent away data. If not using CURLOPT_POSTFIELDS or
282  CURLOPT_HTTPPOST, call app to rewind
283  */
284  if(conn->handler->protocol & PROTO_FAMILY_HTTP) {
285  struct HTTP *http = data->req.protop;
286 
287  if(http->sendit)
288  mimepart = http->sendit;
289  }
290  if(data->set.postfields)
291  ; /* do nothing */
292  else if(data->set.httpreq == HTTPREQ_POST_MIME ||
293  data->set.httpreq == HTTPREQ_POST_FORM) {
294  if(Curl_mime_rewind(mimepart)) {
295  failf(data, "Cannot rewind mime/post data");
296  return CURLE_SEND_FAIL_REWIND;
297  }
298  }
299  else {
300  if(data->set.seek_func) {
301  int err;
302 
303  err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
304  if(err) {
305  failf(data, "seek callback returned error %d", (int)err);
306  return CURLE_SEND_FAIL_REWIND;
307  }
308  }
309  else if(data->set.ioctl_func) {
310  curlioerr err;
311 
312  err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
313  data->set.ioctl_client);
314  infof(data, "the ioctl callback returned %d\n", (int)err);
315 
316  if(err) {
317  /* FIXME: convert to a human readable error message */
318  failf(data, "ioctl callback returned error %d", (int)err);
319  return CURLE_SEND_FAIL_REWIND;
320  }
321  }
322  else {
323  /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
324  given FILE * stream and we can actually attempt to rewind that
325  ourselves with fseek() */
326  if(data->state.fread_func == (curl_read_callback)fread) {
327  if(-1 != fseek(data->state.in, 0, SEEK_SET))
328  /* successful rewind */
329  return CURLE_OK;
330  }
331 
332  /* no callback set or failure above, makes us fail at once */
333  failf(data, "necessary data rewind wasn't possible");
334  return CURLE_SEND_FAIL_REWIND;
335  }
336  }
337  return CURLE_OK;
338 }
339 
340 static int data_pending(const struct connectdata *conn)
341 {
342  /* in the case of libssh2, we can never be really sure that we have emptied
343  its internal buffers so we MUST always try until we get EAGAIN back */
344  return conn->handler->protocol&(CURLPROTO_SCP|CURLPROTO_SFTP) ||
345 #if defined(USE_NGHTTP2)
346  Curl_ssl_data_pending(conn, FIRSTSOCKET) ||
347  /* For HTTP/2, we may read up everything including response body
348  with header fields in Curl_http_readwrite_headers. If no
349  content-length is provided, curl waits for the connection
350  close, which we emulate it using conn->proto.httpc.closed =
351  TRUE. The thing is if we read everything, then http2_recv won't
352  be called and we cannot signal the HTTP/2 stream has closed. As
353  a workaround, we return nonzero here to call http2_recv. */
354  ((conn->handler->protocol&PROTO_FAMILY_HTTP) && conn->httpversion == 20);
355 #else
356  Curl_ssl_data_pending(conn, FIRSTSOCKET);
357 #endif
358 }
359 
360 static void read_rewind(struct connectdata *conn,
361  size_t thismuch)
362 {
363  DEBUGASSERT(conn->read_pos >= thismuch);
364 
365  conn->read_pos -= thismuch;
366  conn->bits.stream_was_rewound = TRUE;
367 
368 #ifdef DEBUGBUILD
369  {
370  char buf[512 + 1];
371  size_t show;
372 
373  show = CURLMIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
374  if(conn->master_buffer) {
375  memcpy(buf, conn->master_buffer + conn->read_pos, show);
376  buf[show] = '\0';
377  }
378  else {
379  buf[0] = '\0';
380  }
381 
382  DEBUGF(infof(conn->data,
383  "Buffer after stream rewind (read_pos = %zu): [%s]\n",
384  conn->read_pos, buf));
385  }
386 #endif
387 }
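/* After a rewind, the bytes pushed back into the connection's master_buffer
   are handed out again by the next read on this connection before any new
   data is pulled off the socket. */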
388 
389 /*
390  * Check to see if CURLOPT_TIMECONDITION was met by comparing the time of the
391  * remote document with the time provided by CURLOPT_TIMEVAL
392  */
393 bool Curl_meets_timecondition(struct Curl_easy *data, time_t timeofdoc)
394 {
395  if((timeofdoc == 0) || (data->set.timevalue == 0))
396  return TRUE;
397 
398  switch(data->set.timecondition) {
399  case CURL_TIMECOND_IFMODSINCE:
400  default:
401  if(timeofdoc <= data->set.timevalue) {
402  infof(data,
403  "The requested document is not new enough\n");
404  data->info.timecond = TRUE;
405  return FALSE;
406  }
407  break;
408  case CURL_TIMECOND_IFUNMODSINCE:
409  if(timeofdoc >= data->set.timevalue) {
410  infof(data,
411  "The requested document is not old enough\n");
412  data->info.timecond = TRUE;
413  return FALSE;
414  }
415  break;
416  }
417 
418  return TRUE;
419 }
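/* For example, with CURLOPT_TIMECONDITION set to CURL_TIMECOND_IFMODSINCE and
   CURLOPT_TIMEVALUE set to yesterday, a document last modified a week ago
   makes this return FALSE; readwrite_data() then simulates a 304 and skips
   the body. */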
420 
421 /*
422  * Go ahead and do a read if we have a readable socket or if
423  * the stream was rewound (in which case we have data in a
424  * buffer)
425  *
426  * return '*comeback' TRUE if we didn't properly drain the socket so this
427  * function should get called again without select() or similar in between!
428  */
429 static CURLcode readwrite_data(struct Curl_easy *data,
430  struct connectdata *conn,
431  struct SingleRequest *k,
432  int *didwhat, bool *done,
433  bool *comeback)
434 {
435  CURLcode result = CURLE_OK;
436  ssize_t nread; /* number of bytes read */
437  size_t excess = 0; /* excess bytes read */
438  bool is_empty_data = FALSE;
439  bool readmore = FALSE; /* used by RTP to signal for more data */
440  int maxloops = 100;
441 
442  *done = FALSE;
443  *comeback = FALSE;
444 
445  /* This is where we loop until we have read everything there is to
446  read or we get a CURLE_AGAIN */
447  do {
448  size_t buffersize = data->set.buffer_size;
449  size_t bytestoread = buffersize;
450 
451  if(
452 #if defined(USE_NGHTTP2)
453  /* For HTTP/2, read data without caring about the content
454  length. This is safe because body in HTTP/2 is always
455  segmented thanks to its framing layer. Meanwhile, we have to
456  call Curl_read to ensure that http2_handle_stream_close is
457  called when we read all incoming bytes for a particular
458  stream. */
459  !((conn->handler->protocol & PROTO_FAMILY_HTTP) &&
460  conn->httpversion == 20) &&
461 #endif
462  k->size != -1 && !k->header) {
463  /* make sure we don't read "too much" if we can help it since we
464  might be pipelining and then someone else might want to read what
465  follows! */
466  curl_off_t totalleft = k->size - k->bytecount;
467  if(totalleft < (curl_off_t)bytestoread)
468  bytestoread = (size_t)totalleft;
469  }
470 
471  if(bytestoread) {
472  /* receive data from the network! */
473  result = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);
474 
475  /* read would've blocked */
476  if(CURLE_AGAIN == result)
477  break; /* get out of loop */
478 
479  if(result>0)
480  return result;
481  }
482  else {
483  /* read nothing but since we wanted nothing we consider this an OK
484  situation to proceed from */
485  DEBUGF(infof(data, "readwrite_data: we're done!\n"));
486  nread = 0;
487  }
488 
489  if((k->bytecount == 0) && (k->writebytecount == 0)) {
490  Curl_pgrsTime(data, TIMER_STARTTRANSFER);
491  if(k->exp100 > EXP100_SEND_DATA)
492  /* set time stamp to compare with when waiting for the 100 */
493  k->start100 = Curl_tvnow();
494  }
495 
496  *didwhat |= KEEP_RECV;
497  /* indicates data of zero size, i.e. empty file */
498  is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;
499 
500  /* NUL terminate, allowing string ops to be used */
501  if(0 < nread || is_empty_data) {
502  k->buf[nread] = 0;
503  }
504  else if(0 >= nread) {
505  /* if we receive 0 or less here, the server closed the connection
506  and we bail out from this! */
507  DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
508  k->keepon &= ~KEEP_RECV;
509  break;
510  }
511 
512  /* Default buffer to use when we write the buffer, it may be changed
513  in the flow below before the actual storing is done. */
514  k->str = k->buf;
515 
516  if(conn->handler->readwrite) {
517  result = conn->handler->readwrite(data, conn, &nread, &readmore);
518  if(result)
519  return result;
520  if(readmore)
521  break;
522  }
523 
524 #ifndef CURL_DISABLE_HTTP
525  /* Since this is a two-state thing, we check if we are parsing
526  headers at the moment or not. */
527  if(k->header) {
528  /* we are in parse-the-header-mode */
529  bool stop_reading = FALSE;
530  result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
531  if(result)
532  return result;
533 
534  if(conn->handler->readwrite &&
535  (k->maxdownload <= 0 && nread > 0)) {
536  result = conn->handler->readwrite(data, conn, &nread, &readmore);
537  if(result)
538  return result;
539  if(readmore)
540  break;
541  }
542 
543  if(stop_reading) {
544  /* We've stopped dealing with input, get out of the do-while loop */
545 
546  if(nread > 0) {
547  if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
548  infof(data,
549  "Rewinding stream by : %zd"
550  " bytes on url %s (zero-length body)\n",
551  nread, data->state.path);
552  read_rewind(conn, (size_t)nread);
553  }
554  else {
555  infof(data,
556  "Excess found in a non pipelined read:"
557  " excess = %zd"
558  " url = %s (zero-length body)\n",
559  nread, data->state.path);
560  }
561  }
562 
563  break;
564  }
565  }
566 #endif /* CURL_DISABLE_HTTP */
567 
568 
569  /* This is not an 'else if' since it may be a rest from the header
570  parsing, where the beginning of the buffer is headers and the end
571  is non-headers. */
572  if(k->str && !k->header && (nread > 0 || is_empty_data)) {
573 
574  if(data->set.opt_no_body) {
575  /* data arrives although we want none, bail out */
576  streamclose(conn, "ignoring body");
577  *done = TRUE;
578  return CURLE_OK;
579  }
580 
581 #ifndef CURL_DISABLE_HTTP
582  if(0 == k->bodywrites && !is_empty_data) {
583  /* These checks are only made the first time we are about to
584  write a piece of the body */
585  if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
586  /* HTTP-only checks */
587 
588  if(data->req.newurl) {
589  if(conn->bits.close) {
590  /* Abort after the headers if "follow Location" is set
591  and we're set to close anyway. */
592  k->keepon &= ~KEEP_RECV;
593  *done = TRUE;
594  return CURLE_OK;
595  }
596  /* We have a new url to load, but since we want to be able
597  to re-use this connection properly, we read the full
598  response in "ignore more" */
599  k->ignorebody = TRUE;
600  infof(data, "Ignoring the response-body\n");
601  }
602  if(data->state.resume_from && !k->content_range &&
603  (data->set.httpreq == HTTPREQ_GET) &&
604  !k->ignorebody) {
605 
606  if(k->size == data->state.resume_from) {
607  /* The resume point is at the end of file, consider this fine
608  even if it doesn't allow resume from here. */
609  infof(data, "The entire document is already downloaded");
610  connclose(conn, "already downloaded");
611  /* Abort download */
612  k->keepon &= ~KEEP_RECV;
613  *done = TRUE;
614  return CURLE_OK;
615  }
616 
617  /* we wanted to resume a download, although the server doesn't
618  * seem to support this and we did this with a GET (if it
619  * wasn't a GET we did a POST or PUT resume) */
620  failf(data, "HTTP server doesn't seem to support "
621  "byte ranges. Cannot resume.");
622  return CURLE_RANGE_ERROR;
623  }
624 
625  if(data->set.timecondition && !data->state.range) {
626  /* A time condition has been set AND no ranges have been
627  requested. This seems to be what chapter 13.3.4 of
628  RFC 2616 defines to be the correct action for a
629  HTTP/1.1 client */
630 
631  if(!Curl_meets_timecondition(data, k->timeofdoc)) {
632  *done = TRUE;
633  /* We're simulating a http 304 from server so we return
634  what should have been returned from the server */
635  data->info.httpcode = 304;
636  infof(data, "Simulate a HTTP 304 response!\n");
637  /* we abort the transfer before it is completed == we ruin the
638  re-use ability. Close the connection */
639  connclose(conn, "Simulated 304 handling");
640  return CURLE_OK;
641  }
642  } /* we have a time condition */
643 
644  } /* this is HTTP or RTSP */
645  } /* this is the first time we write a body part */
646 #endif /* CURL_DISABLE_HTTP */
647 
648  k->bodywrites++;
649 
650  /* pass data to the debug function before it gets "dechunked" */
651  if(data->set.verbose) {
652  if(k->badheader) {
653  Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
654  (size_t)k->hbuflen, conn);
655  if(k->badheader == HEADER_PARTHEADER)
656  Curl_debug(data, CURLINFO_DATA_IN,
657  k->str, (size_t)nread, conn);
658  }
659  else
660  Curl_debug(data, CURLINFO_DATA_IN,
661  k->str, (size_t)nread, conn);
662  }
663 
664 #ifndef CURL_DISABLE_HTTP
665  if(k->chunk) {
666  /*
667  * Here comes a chunked transfer flying and we need to decode this
668  * properly. While the name says read, this function both reads
669  * and writes away the data. The returned 'nread' holds the number
670  * of actual data it wrote to the client.
671  */
672 
673  CHUNKcode res =
674  Curl_httpchunk_read(conn, k->str, nread, &nread);
675 
676  if(CHUNKE_OK < res) {
677  if(CHUNKE_WRITE_ERROR == res) {
678  failf(data, "Failed writing data");
679  return CURLE_WRITE_ERROR;
680  }
681  failf(data, "%s in chunked-encoding", Curl_chunked_strerror(res));
682  return CURLE_RECV_ERROR;
683  }
684  if(CHUNKE_STOP == res) {
685  size_t dataleft;
686  /* we're done reading chunks! */
687  k->keepon &= ~KEEP_RECV; /* read no more */
688 
689  /* There are now possibly N number of bytes at the end of the
690  str buffer that weren't written to the client.
691 
692  We DO care about this data if we are pipelining.
693  Push it back to be read on the next pass. */
694 
695  dataleft = conn->chunk.dataleft;
696  if(dataleft != 0) {
697  infof(conn->data, "Leftovers after chunking: %zu bytes\n",
698  dataleft);
699  if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
700  /* only attempt the rewind if we truly are pipelining */
701  infof(conn->data, "Rewinding %zu bytes\n",dataleft);
702  read_rewind(conn, dataleft);
703  }
704  }
705  }
706  /* If it returned OK, we just keep going */
707  }
708 #endif /* CURL_DISABLE_HTTP */
709 
710  /* Account for body content stored in the header buffer */
711  if(k->badheader && !k->ignorebody) {
712  DEBUGF(infof(data, "Increasing bytecount by %zu from hbuflen\n",
713  k->hbuflen));
714  k->bytecount += k->hbuflen;
715  }
716 
717  if((-1 != k->maxdownload) &&
718  (k->bytecount + nread >= k->maxdownload)) {
719 
720  excess = (size_t)(k->bytecount + nread - k->maxdownload);
721  if(excess > 0 && !k->ignorebody) {
722  if(Curl_pipeline_wanted(conn->data->multi, CURLPIPE_HTTP1)) {
723  infof(data,
724  "Rewinding stream by : %zu"
725  " bytes on url %s (size = %" CURL_FORMAT_CURL_OFF_T
726  ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
727  ", bytecount = %" CURL_FORMAT_CURL_OFF_T ", nread = %zd)\n",
728  excess, data->state.path,
729  k->size, k->maxdownload, k->bytecount, nread);
730  read_rewind(conn, excess);
731  }
732  else {
733  infof(data,
734  "Excess found in a non pipelined read:"
735  " excess = %zu"
736  ", size = %" CURL_FORMAT_CURL_OFF_T
737  ", maxdownload = %" CURL_FORMAT_CURL_OFF_T
738  ", bytecount = %" CURL_FORMAT_CURL_OFF_T "\n",
739  excess, k->size, k->maxdownload, k->bytecount);
740  }
741  }
742 
743  nread = (ssize_t) (k->maxdownload - k->bytecount);
744  if(nread < 0) /* this should be unusual */
745  nread = 0;
746 
747  k->keepon &= ~KEEP_RECV; /* we're done reading */
748  }
749 
750  k->bytecount += nread;
751 
752  Curl_pgrsSetDownloadCounter(data, k->bytecount);
753 
754  if(!k->chunk && (nread || k->badheader || is_empty_data)) {
755  /* If this is chunky transfer, it was already written */
756 
757  if(k->badheader && !k->ignorebody) {
758  /* we parsed a piece of data wrongly assuming it was a header
759  and now we output it as body instead */
760 
761  /* Don't let excess data pollute body writes */
762  if(k->maxdownload == -1 || (curl_off_t)k->hbuflen <= k->maxdownload)
763  result = Curl_client_write(conn, CLIENTWRITE_BODY,
764  data->state.headerbuff,
765  k->hbuflen);
766  else
767  result = Curl_client_write(conn, CLIENTWRITE_BODY,
768  data->state.headerbuff,
769  (size_t)k->maxdownload);
770 
771  if(result)
772  return result;
773  }
774  if(k->badheader < HEADER_ALLBAD) {
775  /* This switch handles various content encodings. If there's an
776  error here, be sure to check over the almost identical code
777  in http_chunks.c.
778  Make sure that ALL_CONTENT_ENCODINGS contains all the
779  encodings handled here. */
780 #ifdef HAVE_LIBZ
781  switch(conn->data->set.http_ce_skip ?
782  IDENTITY : k->auto_decoding) {
783  case IDENTITY:
784 #endif
785  /* This is the default when the server sends no
786  Content-Encoding header. See Curl_readwrite_init; the
787  memset() call initializes k->auto_decoding to zero. */
788  if(!k->ignorebody) {
789 
790 #ifndef CURL_DISABLE_POP3
791  if(conn->handler->protocol & PROTO_FAMILY_POP3)
792  result = Curl_pop3_write(conn, k->str, nread);
793  else
794 #endif /* CURL_DISABLE_POP3 */
795 
796  result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
797  nread);
798  }
799 #ifdef HAVE_LIBZ
800  break;
801 
802  case DEFLATE:
803  /* Assume CLIENTWRITE_BODY; headers are not encoded. */
804  if(!k->ignorebody)
805  result = Curl_unencode_deflate_write(conn, k, nread);
806  break;
807 
808  case GZIP:
809  /* Assume CLIENTWRITE_BODY; headers are not encoded. */
810  if(!k->ignorebody)
811  result = Curl_unencode_gzip_write(conn, k, nread);
812  break;
813 
814  default:
815  failf(data, "Unrecognized content encoding type. "
816  "libcurl understands `identity', `deflate' and `gzip' "
817  "content encodings.");
818  result = CURLE_BAD_CONTENT_ENCODING;
819  break;
820  }
821 #endif
822  }
823  k->badheader = HEADER_NORMAL; /* taken care of now */
824 
825  if(result)
826  return result;
827  }
828 
829  } /* if(!header and data to read) */
830 
831  if(conn->handler->readwrite &&
832  (excess > 0 && !conn->bits.stream_was_rewound)) {
833  /* Parse the excess data */
834  k->str += nread;
835  nread = (ssize_t)excess;
836 
837  result = conn->handler->readwrite(data, conn, &nread, &readmore);
838  if(result)
839  return result;
840 
841  if(readmore)
842  k->keepon |= KEEP_RECV; /* we're not done reading */
843  break;
844  }
845 
846  if(is_empty_data) {
847  /* if we received nothing, the server closed the connection and we
848  are done */
849  k->keepon &= ~KEEP_RECV;
850  }
851 
852  } while(data_pending(conn) && maxloops--);
853 
854  if(maxloops <= 0) {
855  /* we mark it as read-again-please */
856  conn->cselect_bits = CURL_CSELECT_IN;
857  *comeback = TRUE;
858  }
859 
860  if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
861  conn->bits.close) {
862  /* When we've read the entire thing and the close bit is set, the server
863  may now close the connection. If there's now any kind of sending going
864  on from our side, we need to stop that immediately. */
865  infof(data, "we are done reading and this is set to close, stop send\n");
866  k->keepon &= ~KEEP_SEND; /* no writing anymore either */
867  }
868 
869  return CURLE_OK;
870 }
871 
872 static CURLcode done_sending(struct connectdata *conn,
873  struct SingleRequest *k)
874 {
875  k->keepon &= ~KEEP_SEND; /* we're done writing */
876 
877  Curl_http2_done_sending(conn);
878 
879  if(conn->bits.rewindaftersend) {
880  CURLcode result = Curl_readrewind(conn);
881  if(result)
882  return result;
883  }
884  return CURLE_OK;
885 }
886 
887 
888 /*
889  * Send data to upload to the server, when the socket is writable.
890  */
891 static CURLcode readwrite_upload(struct Curl_easy *data,
892  struct connectdata *conn,
893  int *didwhat)
894 {
895  ssize_t i, si;
896  ssize_t bytes_written;
897  CURLcode result;
898  ssize_t nread; /* number of bytes read */
899  bool sending_http_headers = FALSE;
900  struct SingleRequest *k = &data->req;
901 
902  if((k->bytecount == 0) && (k->writebytecount == 0))
903  Curl_pgrsTime(data, TIMER_STARTTRANSFER);
904 
905  *didwhat |= KEEP_SEND;
906 
907  do {
908 
909  /* only read more data if there's no upload data already
910  present in the upload buffer */
911  if(0 == k->upload_present) {
912  /* init the "upload from here" pointer */
913  k->upload_fromhere = data->state.uploadbuffer;
914 
915  if(!k->upload_done) {
916  /* HTTP pollution, this should be written nicer to become more
917  protocol agnostic. */
918  int fillcount;
919  struct HTTP *http = k->protop;
920 
921  if((k->exp100 == EXP100_SENDING_REQUEST) &&
922  (http->sending == HTTPSEND_BODY)) {
923  /* If this call is to send body data, we must take some action:
924  We have sent off the full HTTP 1.1 request, and we shall now
925  go into the Expect: 100 state and await such a header */
926  k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
927  k->keepon &= ~KEEP_SEND; /* disable writing */
928  k->start100 = Curl_tvnow(); /* timeout count starts now */
929  *didwhat &= ~KEEP_SEND; /* we didn't write anything actually */
930 
931  /* set a timeout for the multi interface */
932  Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
933  break;
934  }
935 
936  if(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)) {
937  if(http->sending == HTTPSEND_REQUEST)
938  /* We're sending the HTTP request headers, not the data.
939  Remember that so we don't change the line endings. */
940  sending_http_headers = TRUE;
941  else
942  sending_http_headers = FALSE;
943  }
944 
945  result = Curl_fillreadbuffer(conn, UPLOAD_BUFSIZE, &fillcount);
946  if(result)
947  return result;
948 
949  nread = (ssize_t)fillcount;
950  }
951  else
952  nread = 0; /* we're done uploading/reading */
953 
954  if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
955  /* this is a paused transfer */
956  break;
957  }
958  if(nread <= 0) {
959  result = done_sending(conn, k);
960  if(result)
961  return result;
962  break;
963  }
964 
965  /* store number of bytes available for upload */
966  k->upload_present = nread;
967 
968  /* convert LF to CRLF if so asked */
969  if((!sending_http_headers) && (
970 #ifdef CURL_DO_LINEEND_CONV
971  /* always convert if we're FTPing in ASCII mode */
972  (data->set.prefer_ascii) ||
973 #endif
974  (data->set.crlf))) {
975  /* Do we need to allocate a scratch buffer? */
976  if(!data->state.scratch) {
977  data->state.scratch = malloc(2 * data->set.buffer_size);
978  if(!data->state.scratch) {
979  failf(data, "Failed to alloc scratch buffer!");
980 
981  return CURLE_OUT_OF_MEMORY;
982  }
983  }
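/* The scratch buffer is allocated at twice the upload buffer size because the
   worst case, every byte being an LF, doubles the data when each LF turns
   into CRLF. */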
984 
985  /*
986  * ASCII/EBCDIC Note: This is presumably a text (not binary)
987  * transfer so the data should already be in ASCII.
988  * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
989  * must be used instead of the escape sequences \r & \n.
990  */
991  for(i = 0, si = 0; i < nread; i++, si++) {
992  if(k->upload_fromhere[i] == 0x0a) {
993  data->state.scratch[si++] = 0x0d;
994  data->state.scratch[si] = 0x0a;
995  if(!data->set.crlf) {
996  /* we're here only because FTP is in ASCII mode...
997  bump infilesize for the LF we just added */
998  if(data->state.infilesize != -1)
999  data->state.infilesize++;
1000  }
1001  }
1002  else
1003  data->state.scratch[si] = k->upload_fromhere[i];
1004  }
1005 
1006  if(si != nread) {
1007  /* only perform the special operation if we really did replace
1008  anything */
1009  nread = si;
1010 
1011  /* upload from the new (replaced) buffer instead */
1012  k->upload_fromhere = data->state.scratch;
1013 
1014  /* set the new amount too */
1015  k->upload_present = nread;
1016  }
1017  }
1018 
1019 #ifndef CURL_DISABLE_SMTP
1020  if(conn->handler->protocol & PROTO_FAMILY_SMTP) {
1021  result = Curl_smtp_escape_eob(conn, nread);
1022  if(result)
1023  return result;
1024  }
1025 #endif /* CURL_DISABLE_SMTP */
1026  } /* if 0 == k->upload_present */
1027  else {
1028  /* We have a partial buffer left from a previous "round". Use
1029  that instead of reading more data */
1030  }
1031 
1032  /* write to socket (send away data) */
1033  result = Curl_write(conn,
1034  conn->writesockfd, /* socket to send to */
1035  k->upload_fromhere, /* buffer pointer */
1036  k->upload_present, /* buffer size */
1037  &bytes_written); /* actually sent */
1038 
1039  if(result)
1040  return result;
1041 
1042  if(data->set.verbose)
1043  /* show the data before we change the pointer upload_fromhere */
1044  Curl_debug(data, CURLINFO_DATA_OUT, k->upload_fromhere,
1045  (size_t)bytes_written, conn);
1046 
1047  k->writebytecount += bytes_written;
1048 
1049  if(k->writebytecount == data->state.infilesize) {
1050  /* we have sent all data we were supposed to */
1051  k->upload_done = TRUE;
1052  infof(data, "We are completely uploaded and fine\n");
1053  }
1054 
1055  if(k->upload_present != bytes_written) {
1056  /* we only wrote a part of the buffer (if anything), deal with it! */
1057 
1058  /* store the amount of bytes left in the buffer to write */
1059  k->upload_present -= bytes_written;
1060 
1061  /* advance the pointer where to find the buffer when the next send
1062  is to happen */
1063  k->upload_fromhere += bytes_written;
1064  }
1065  else {
1066  /* we've uploaded that buffer now */
1067  k->upload_fromhere = data->state.uploadbuffer;
1068  k->upload_present = 0; /* no more bytes left */
1069 
1070  if(k->upload_done) {
1071  result = done_sending(conn, k);
1072  if(result)
1073  return result;
1074  }
1075  }
1076 
1077  Curl_pgrsSetUploadCounter(data, k->writebytecount);
1078 
1079  } WHILE_FALSE; /* just to break out from! */
1080 
1081  return CURLE_OK;
1082 }
1083 
1084 /*
1085  * Curl_readwrite() is the low-level function to be called when data is to
1086  * be read and written to/from the connection.
1087  *
1088  * return '*comeback' TRUE if we didn't properly drain the socket so this
1089  * function should get called again without select() or similar in between!
1090  */
1091 CURLcode Curl_readwrite(struct connectdata *conn,
1092  struct Curl_easy *data,
1093  bool *done,
1094  bool *comeback)
1095 {
1096  struct SingleRequest *k = &data->req;
1097  CURLcode result;
1098  int didwhat = 0;
1099 
1100  curl_socket_t fd_read;
1101  curl_socket_t fd_write;
1102  int select_res = conn->cselect_bits;
1103 
1104  conn->cselect_bits = 0;
1105 
1106  /* only use the proper socket if the *_HOLD bit is not set simultaneously as
1107  then we are in rate limiting state in that transfer direction */
1108 
1109  if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
1110  fd_read = conn->sockfd;
1111  else
1112  fd_read = CURL_SOCKET_BAD;
1113 
1114  if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
1115  fd_write = conn->writesockfd;
1116  else
1117  fd_write = CURL_SOCKET_BAD;
1118 
1119  if(conn->data->state.drain) {
1120  select_res |= CURL_CSELECT_IN;
1121  DEBUGF(infof(data, "Curl_readwrite: forcibly told to drain data\n"));
1122  }
1123 
1124  if(!select_res) /* Call for select()/poll() only, if read/write/error
1125  status is not known. */
1126  select_res = Curl_socket_check(fd_read, CURL_SOCKET_BAD, fd_write, 0);
1127 
1128  if(select_res == CURL_CSELECT_ERR) {
1129  failf(data, "select/poll returned error");
1130  return CURLE_SEND_ERROR;
1131  }
1132 
1133  /* We go ahead and do a read if we have a readable socket or if
1134  the stream was rewound (in which case we have data in a
1135  buffer) */
1136  if((k->keepon & KEEP_RECV) &&
1137  ((select_res & CURL_CSELECT_IN) || conn->bits.stream_was_rewound)) {
1138 
1139  result = readwrite_data(data, conn, k, &didwhat, done, comeback);
1140  if(result || *done)
1141  return result;
1142  }
1143 
1144  /* If we still have writing to do, we check if we have a writable socket. */
1145  if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
1146  /* write */
1147 
1148  result = readwrite_upload(data, conn, &didwhat);
1149  if(result)
1150  return result;
1151  }
1152 
1153  k->now = Curl_tvnow();
1154  if(didwhat) {
1155  /* Update read/write counters */
1156  if(k->bytecountp)
1157  *k->bytecountp = k->bytecount; /* read count */
1158  if(k->writebytecountp)
1159  *k->writebytecountp = k->writebytecount; /* write count */
1160  }
1161  else {
1162  /* no read no write, this is a timeout? */
1163  if(k->exp100 == EXP100_AWAITING_CONTINUE) {
1164  /* This should allow some time for the header to arrive, but only a
1165  very short time as otherwise it'll be too much wasted time too
1166  often. */
1167 
1168  /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1169 
1170  Therefore, when a client sends this header field to an origin server
1171  (possibly via a proxy) from which it has never seen a 100 (Continue)
1172  status, the client SHOULD NOT wait for an indefinite period before
1173  sending the request body.
1174 
1175  */
1176 
1177  time_t ms = Curl_tvdiff(k->now, k->start100);
1178  if(ms >= data->set.expect_100_timeout) {
1179  /* we've waited long enough, continue anyway */
1180  k->exp100 = EXP100_SEND_DATA;
1181  k->keepon |= KEEP_SEND;
1182  Curl_expire_done(data, EXPIRE_100_TIMEOUT);
1183  infof(data, "Done waiting for 100-continue\n");
1184  }
1185  }
1186  }
1187 
1188  if(Curl_pgrsUpdate(conn))
1189  result = CURLE_ABORTED_BY_CALLBACK;
1190  else
1191  result = Curl_speedcheck(data, k->now);
1192  if(result)
1193  return result;
1194 
1195  if(k->keepon) {
1196  if(0 > Curl_timeleft(data, &k->now, FALSE)) {
1197  if(k->size != -1) {
1198  failf(data, "Operation timed out after %ld milliseconds with %"
1199  CURL_FORMAT_CURL_OFF_T " out of %"
1200  CURL_FORMAT_CURL_OFF_T " bytes received",
1201  Curl_tvdiff(k->now, data->progress.t_startsingle), k->bytecount,
1202  k->size);
1203  }
1204  else {
1205  failf(data, "Operation timed out after %ld milliseconds with %"
1206  CURL_FORMAT_CURL_OFF_T " bytes received",
1207  Curl_tvdiff(k->now, data->progress.t_startsingle), k->bytecount);
1208  }
1209  return CURLE_OPERATION_TIMEDOUT;
1210  }
1211  }
1212  else {
1213  /*
1214  * The transfer has been performed. Just make some general checks before
1215  * returning.
1216  */
1217 
1218  if(!(data->set.opt_no_body) && (k->size != -1) &&
1219  (k->bytecount != k->size) &&
1220 #ifdef CURL_DO_LINEEND_CONV
1221  /* Most FTP servers don't adjust their file SIZE response for CRLFs,
1222  so we'll check to see if the discrepancy can be explained
1223  by the number of CRLFs we've changed to LFs.
1224  */
1225  (k->bytecount != (k->size + data->state.crlf_conversions)) &&
1226 #endif /* CURL_DO_LINEEND_CONV */
1227  !k->newurl) {
1228  failf(data, "transfer closed with %" CURL_FORMAT_CURL_OFF_T
1229  " bytes remaining to read", k->size - k->bytecount);
1230  return CURLE_PARTIAL_FILE;
1231  }
1232  if(!(data->set.opt_no_body) && k->chunk &&
1233  (conn->chunk.state != CHUNK_STOP)) {
1234  /*
1235  * In chunked mode, return an error if the connection is closed prior to
1236  * the empty (terminating) chunk is read.
1237  *
1238  * The condition above used to check for
1239  * conn->proto.http->chunk.datasize != 0 which is true after reading
1240  * *any* chunk, not just the empty chunk.
1241  *
1242  */
1243  failf(data, "transfer closed with outstanding read data remaining");
1244  return CURLE_PARTIAL_FILE;
1245  }
1246  if(Curl_pgrsUpdate(conn))
1247  result = CURLE_ABORTED_BY_CALLBACK;
1248  }
1249 
1250  /* Now update the "done" boolean we return */
1251  *done = (0 == (k->keepon&(KEEP_RECV|KEEP_SEND|
1252  KEEP_RECV_PAUSE|KEEP_SEND_PAUSE))) ? TRUE : FALSE;
1253 
1254  return CURLE_OK;
1255 }
1256 
1257 /*
1258  * Curl_single_getsock() gets called by the multi interface code when the app
1259  * has requested to get the sockets for the current connection. This function
1260  * will then be called once for every connection that the multi interface
1261  * keeps track of. This function will only be called for connections that are
1262  * in the proper state to have this information available.
1263  */
1264 int Curl_single_getsock(const struct connectdata *conn,
1265  curl_socket_t *sock, /* points to numsocks number
1266  of sockets */
1267  int numsocks)
1268 {
1269  const struct Curl_easy *data = conn->data;
1270  int bitmap = GETSOCK_BLANK;
1271  unsigned sockindex = 0;
1272 
1273  if(conn->handler->perform_getsock)
1274  return conn->handler->perform_getsock(conn, sock, numsocks);
1275 
1276  if(numsocks < 2)
1277  /* simple check but we might need two slots */
1278  return GETSOCK_BLANK;
1279 
1280  /* don't include HOLD and PAUSE connections */
1281  if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {
1282 
1283  DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);
1284 
1285  bitmap |= GETSOCK_READSOCK(sockindex);
1286  sock[sockindex] = conn->sockfd;
1287  }
1288 
1289  /* don't include HOLD and PAUSE connections */
1290  if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {
1291 
1292  if((conn->sockfd != conn->writesockfd) ||
1293  bitmap == GETSOCK_BLANK) {
1294  /* only if they are not the same socket and we have a readable
1295  one, we increase index */
1296  if(bitmap != GETSOCK_BLANK)
1297  sockindex++; /* increase index if we need two entries */
1298 
1299  DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);
1300 
1301  sock[sockindex] = conn->writesockfd;
1302  }
1303 
1304  bitmap |= GETSOCK_WRITESOCK(sockindex);
1305  }
1306 
1307  return bitmap;
1308 }
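/* A transfer reading and writing on one and the same socket reports
   GETSOCK_READSOCK(0)|GETSOCK_WRITESOCK(0) with sock[0] filled in once;
   distinct sockets end up in sock[0] and sock[1]. */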
1309 
1310 /* Curl_init_CONNECT() gets called each time the handle switches to CONNECT
1311  which means this gets called once for each subsequent redirect etc */
1312 void Curl_init_CONNECT(struct Curl_easy *data)
1313 {
1314  data->state.fread_func = data->set.fread_func_set;
1315  data->state.in = data->set.in_set;
1316 }
1317 
1318 /*
1319  * Curl_pretransfer() is called immediately before a transfer starts, and only
1320  * once for one transfer no matter if it has redirects or do multi-pass
1321  * authentication etc.
1322  */
1323 CURLcode Curl_pretransfer(struct Curl_easy *data)
1324 {
1325  CURLcode result;
1326  if(!data->change.url) {
1327  /* we can't do anything without URL */
1328  failf(data, "No URL set!");
1329  return CURLE_URL_MALFORMAT;
1330  }
1331  /* since the URL may have been redirected in a previous use of this handle */
1332  if(data->change.url_alloc) {
1333  /* the already set URL is allocated, free it first! */
1334  Curl_safefree(data->change.url);
1335  data->change.url_alloc = FALSE;
1336  }
1337  data->change.url = data->set.str[STRING_SET_URL];
1338 
1339  /* Init the SSL session ID cache here. We do it here since we want to do it
1340  after the *_setopt() calls (that could specify the size of the cache) but
1341  before any transfer takes place. */
1342  result = Curl_ssl_initsessions(data, data->set.general_ssl.max_ssl_sessions);
1343  if(result)
1344  return result;
1345 
1346  data->set.followlocation = 0; /* reset the location-follow counter */
1347  data->state.this_is_a_follow = FALSE; /* reset this */
1348  data->state.errorbuf = FALSE; /* no error has occurred */
1349  data->state.httpversion = 0; /* don't assume any particular server version */
1350 
1351  data->state.authproblem = FALSE;
1352  data->state.authhost.want = data->set.httpauth;
1353  data->state.authproxy.want = data->set.proxyauth;
1354  Curl_safefree(data->info.wouldredirect);
1355  data->info.wouldredirect = NULL;
1356 
1357  if(data->set.httpreq == HTTPREQ_PUT)
1358  data->state.infilesize = data->set.filesize;
1359  else {
1360  data->state.infilesize = data->set.postfieldsize;
1361  if(data->set.postfields && (data->state.infilesize == -1))
1362  data->state.infilesize = (curl_off_t)strlen(data->set.postfields);
1363  }
1364 
1365  /* If there is a list of cookie files to read, do it now! */
1366  if(data->change.cookielist)
1367  Curl_cookie_loadfiles(data);
1368 
1369  /* If there is a list of host pairs to deal with */
1370  if(data->change.resolve)
1371  result = Curl_loadhostpairs(data);
1372 
1373  if(!result) {
1374  /* Allow data->set.use_port to set which port to use. This needs to be
1375  * disabled for example when we follow Location: headers to URLs using
1376  * different ports! */
1377  data->state.allow_port = TRUE;
1378 
1379 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1380  /*************************************************************
1381  * Tell signal handler to ignore SIGPIPE
1382  *************************************************************/
1383  if(!data->set.no_signal)
1384  data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1385 #endif
1386 
1387  Curl_initinfo(data); /* reset session-specific information "variables" */
1388  Curl_pgrsResetTimesSizes(data);
1389  Curl_pgrsStartNow(data);
1390 
1391  if(data->set.timeout)
1392  Curl_expire(data, data->set.timeout, EXPIRE_TIMEOUT);
1393 
1394  if(data->set.connecttimeout)
1395  Curl_expire(data, data->set.connecttimeout, EXPIRE_CONNECTTIMEOUT);
1396 
1397  /* In case the handle is re-used and an authentication method was picked
1398  in the session we need to make sure we only use the one(s) we now
1399  consider to be fine */
1400  data->state.authhost.picked &= data->state.authhost.want;
1401  data->state.authproxy.picked &= data->state.authproxy.want;
1402 
1403  if(data->set.wildcardmatch) {
1404  struct WildcardData *wc = &data->wildcard;
1405  if(wc->state < CURLWC_INIT) {
1406  result = Curl_wildcard_init(wc); /* init wildcard structures */
1407  if(result)
1408  return CURLE_OUT_OF_MEMORY;
1409  }
1410  }
1411  }
1412 
1413  return result;
1414 }
1415 
1416 /*
1417  * Curl_posttransfer() is called immediately after a transfer ends
1418  */
1419 CURLcode Curl_posttransfer(struct Curl_easy *data)
1420 {
1421 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1422  /* restore the signal handler for SIGPIPE before we get back */
1423  if(!data->set.no_signal)
1424  signal(SIGPIPE, data->state.prev_signal);
1425 #else
1426  (void)data; /* unused parameter */
1427 #endif
1428 
1429  return CURLE_OK;
1430 }
1431 
1432 #ifndef CURL_DISABLE_HTTP
1433 /*
1434  * Find the separator at the end of the host name, or the '?' in cases like
1435  * http://www.url.com?id=2380
1436  */
1437 static const char *find_host_sep(const char *url)
1438 {
1439  const char *sep;
1440  const char *query;
1441 
1442  /* Find the start of the hostname */
1443  sep = strstr(url, "//");
1444  if(!sep)
1445  sep = url;
1446  else
1447  sep += 2;
1448 
1449  query = strchr(sep, '?');
1450  sep = strchr(sep, '/');
1451 
1452  if(!sep)
1453  sep = url + strlen(url);
1454 
1455  if(!query)
1456  query = url + strlen(url);
1457 
1458  return sep < query ? sep : query;
1459 }
1460 
1461 /*
1462  * strlen_url() returns the length of the given URL if the spaces within the
1463  * URL were properly URL encoded.
1464  * URL encoding should be skipped for host names, otherwise IDN resolution
1465  * will fail.
1466  */
1467 static size_t strlen_url(const char *url, bool relative)
1468 {
1469  const unsigned char *ptr;
1470  size_t newlen = 0;
1471  bool left = TRUE; /* left side of the ? */
1472  const unsigned char *host_sep = (const unsigned char *) url;
1473 
1474  if(!relative)
1475  host_sep = (const unsigned char *) find_host_sep(url);
1476 
1477  for(ptr = (unsigned char *)url; *ptr; ptr++) {
1478 
1479  if(ptr < host_sep) {
1480  ++newlen;
1481  continue;
1482  }
1483 
1484  switch(*ptr) {
1485  case '?':
1486  left = FALSE;
1487  /* fall through */
1488  default:
1489  if(*ptr >= 0x80)
1490  newlen += 2;
1491  newlen++;
1492  break;
1493  case ' ':
1494  if(left)
1495  newlen += 3;
1496  else
1497  newlen++;
1498  break;
1499  }
1500  }
1501  return newlen;
1502 }
1503 
1504 /* strcpy_url() copies a url to a output buffer and URL-encodes the spaces in
1505  * the source URL accordingly.
1506  * URL encoding should be skipped for host names, otherwise IDN resolution
1507  * will fail.
1508  */
1509 static void strcpy_url(char *output, const char *url, bool relative)
1510 {
1511  /* we must add this with whitespace-replacing */
1512  bool left = TRUE;
1513  const unsigned char *iptr;
1514  char *optr = output;
1515  const unsigned char *host_sep = (const unsigned char *) url;
1516 
1517  if(!relative)
1518  host_sep = (const unsigned char *) find_host_sep(url);
1519 
1520  for(iptr = (unsigned char *)url; /* read from here */
1521  *iptr; /* until zero byte */
1522  iptr++) {
1523 
1524  if(iptr < host_sep) {
1525  *optr++ = *iptr;
1526  continue;
1527  }
1528 
1529  switch(*iptr) {
1530  case '?':
1531  left = FALSE;
1532  /* fall through */
1533  default:
1534  if(*iptr >= 0x80) {
1535  snprintf(optr, 4, "%%%02x", *iptr);
1536  optr += 3;
1537  }
1538  else
1539  *optr++=*iptr;
1540  break;
1541  case ' ':
1542  if(left) {
1543  *optr++='%'; /* add a '%' */
1544  *optr++='2'; /* add a '2' */
1545  *optr++='0'; /* add a '0' */
1546  }
1547  else
1548  *optr++='+'; /* add a '+' here */
1549  break;
1550  }
1551  }
1552  *optr = 0; /* zero terminate output buffer */
1553 
1554 }
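/* Illustrative example: for "http://example.com/a b?c d" (not relative),
   strlen_url() returns 28 and strcpy_url() produces
   "http://example.com/a%20b?c+d": spaces left of the '?' become %20, spaces
   to the right become '+', and the host part is copied untouched. */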
1555 
1556 /*
1557  * Returns true if the given URL is absolute (as opposed to relative)
1558  */
1559 static bool is_absolute_url(const char *url)
1560 {
1561  char prot[16]; /* URL protocol string storage */
1562  char letter; /* used for a silly sscanf */
1563 
1564  return (2 == sscanf(url, "%15[^?&/:]://%c", prot, &letter)) ? TRUE : FALSE;
1565 }
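/* "http://host/x", "ftp://host/" and "custom-scheme://x" all count as
   absolute here, while "/x", "page.html" and "www.example.com/x" do not. */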
1566 
1567 /*
1568  * Concatenate a relative URL to a base URL making it absolute.
1569  * URL-encodes any spaces.
1570  * The returned pointer must be freed by the caller unless NULL
1571  * (returns NULL on out of memory).
1572  */
1573 static char *concat_url(const char *base, const char *relurl)
1574 {
1575  /***
1576  TRY to append this new path to the old URL
1577  to the right of the host part. Oh crap, this is doomed to cause
1578  problems in the future...
1579  */
1580  char *newest;
1581  char *protsep;
1582  char *pathsep;
1583  size_t newlen;
1584  bool host_changed = FALSE;
1585 
1586  const char *useurl = relurl;
1587  size_t urllen;
1588 
1589  /* we must make our own copy of the URL to play with, as it may
1590  point to read-only data */
1591  char *url_clone = strdup(base);
1592 
1593  if(!url_clone)
1594  return NULL; /* skip out of this NOW */
1595 
1596  /* protsep points to the start of the host name */
1597  protsep = strstr(url_clone, "//");
1598  if(!protsep)
1599  protsep = url_clone;
1600  else
1601  protsep += 2; /* pass the slashes */
1602 
1603  if('/' != relurl[0]) {
1604  int level = 0;
1605 
1606  /* First we need to find out if there's a ?-letter in the URL,
1607  and cut it and the right-side of that off */
1608  pathsep = strchr(protsep, '?');
1609  if(pathsep)
1610  *pathsep = 0;
1611 
1612  /* we have a relative path to append to the last slash if there's one
1613  available, or if the new URL is just a query string (starts with a
1614  '?') we append the new one at the end of the entire currently worked
1615  out URL */
1616  if(useurl[0] != '?') {
1617  pathsep = strrchr(protsep, '/');
1618  if(pathsep)
1619  *pathsep = 0;
1620  }
1621 
1622  /* Check if there's any slash after the host name, and if so, remember
1623  that position instead */
1624  pathsep = strchr(protsep, '/');
1625  if(pathsep)
1626  protsep = pathsep + 1;
1627  else
1628  protsep = NULL;
1629 
1630  /* now deal with one "./" or any amount of "../" in the newurl
1631  and act accordingly */
1632 
1633  if((useurl[0] == '.') && (useurl[1] == '/'))
1634  useurl += 2; /* just skip the "./" */
1635 
1636  while((useurl[0] == '.') &&
1637  (useurl[1] == '.') &&
1638  (useurl[2] == '/')) {
1639  level++;
1640  useurl += 3; /* pass the "../" */
1641  }
1642 
1643  if(protsep) {
1644  while(level--) {
1645  /* cut off one more level from the right of the original URL */
1646  pathsep = strrchr(protsep, '/');
1647  if(pathsep)
1648  *pathsep = 0;
1649  else {
1650  *protsep = 0;
1651  break;
1652  }
1653  }
1654  }
1655  }
1656  else {
1657  /* We got a new absolute path for this server */
1658 
1659  if((relurl[0] == '/') && (relurl[1] == '/')) {
1660  /* the new URL starts with //, just keep the protocol part from the
1661  original one */
1662  *protsep = 0;
1663  useurl = &relurl[2]; /* we keep the slashes from the original, so we
1664  skip the new ones */
1665  host_changed = TRUE;
1666  }
1667  else {
1668  /* cut off the original URL from the first slash, or deal with URLs
1669  without slash */
1670  pathsep = strchr(protsep, '/');
1671  if(pathsep) {
1672  /* When people use badly formatted URLs, such as
1673  "http://www.url.com?dir=/home/daniel" we must not use the first
1674  slash, if there's a ?-letter before it! */
1675  char *sep = strchr(protsep, '?');
1676  if(sep && (sep < pathsep))
1677  pathsep = sep;
1678  *pathsep = 0;
1679  }
1680  else {
1681  /* There was no slash. Now, since we might be operating on a badly
1682  formatted URL, such as "http://www.url.com?id=2380" which doesn't
1683  use a slash separator as it is supposed to, we need to check for a
1684  ?-letter as well! */
1685  pathsep = strchr(protsep, '?');
1686  if(pathsep)
1687  *pathsep = 0;
1688  }
1689  }
1690  }
1691 
1692  /* If the new part contains a space, this is a mighty stupid redirect
1693  but we still make an effort to do "right". To the left of a '?'
1694  letter we replace each space with %20 while it is replaced with '+'
1695  on the right side of the '?' letter.
1696  */
1697  newlen = strlen_url(useurl, !host_changed);
1698 
1699  urllen = strlen(url_clone);
1700 
1701  newest = malloc(urllen + 1 + /* possible slash */
1702  newlen + 1 /* zero byte */);
1703 
1704  if(!newest) {
1705  free(url_clone); /* don't leak this */
1706  return NULL;
1707  }
1708 
1709  /* copy over the root url part */
1710  memcpy(newest, url_clone, urllen);
1711 
1712  /* check if we need to append a slash */
1713  if(('/' == useurl[0]) || (protsep && !*protsep) || ('?' == useurl[0]))
1714  ;
1715  else
1716  newest[urllen++]='/';
1717 
1718  /* then append the new piece on the right side */
1719  strcpy_url(&newest[urllen], useurl, !host_changed);
1720 
1721  free(url_clone);
1722 
1723  return newest;
1724 }
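/* Illustrative examples: concat_url("http://example.com/dir/page.html",
   "../other.html") gives "http://example.com/other.html", while a relative
   "?id=1" is appended to the full existing URL. */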
1725 #endif /* CURL_DISABLE_HTTP */
1726 
1727 /*
1728  * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
1729  * as given by the remote server and set up the new URL to request.
1730  */
1731 CURLcode Curl_follow(struct Curl_easy *data,
1732  char *newurl, /* the Location: string */
1733  followtype type) /* see transfer.h */
1734 {
1735 #ifdef CURL_DISABLE_HTTP
1736  (void)data;
1737  (void)newurl;
1738  (void)type;
1739  /* Location: following will not happen when HTTP is disabled */
1740  return CURLE_TOO_MANY_REDIRECTS;
1741 #else
1742 
1743  /* Location: redirect */
1744  bool disallowport = FALSE;
1745  bool reachedmax = FALSE;
1746 
1747  if(type == FOLLOW_REDIR) {
1748  if((data->set.maxredirs != -1) &&
1749  (data->set.followlocation >= data->set.maxredirs)) {
1750  reachedmax = TRUE;
1751  type = FOLLOW_FAKE; /* switch to fake to store the would-be-redirected
1752  to URL */
1753  }
1754  else {
1755  /* mark the next request as a followed location: */
1756  data->state.this_is_a_follow = TRUE;
1757 
1758  data->set.followlocation++; /* count location-followers */
1759 
1760  if(data->set.http_auto_referer) {
1761  /* We are asked to automatically set the previous URL as the referer
1762  when we get the next URL. We pick the ->url field, which may or may
1763  not be 100% correct */
1764 
1765  if(data->change.referer_alloc) {
1766  Curl_safefree(data->change.referer);
1767  data->change.referer_alloc = FALSE;
1768  }
1769 
1770  data->change.referer = strdup(data->change.url);
1771  if(!data->change.referer)
1772  return CURLE_OUT_OF_MEMORY;
1773  data->change.referer_alloc = TRUE; /* yes, free this later */
1774  }
1775  }
1776  }
1777 
1778  if(!is_absolute_url(newurl)) {
1779  /***
1780  *DANG* this is an RFC 2068 violation. The URL is supposed
1781  to be absolute and this doesn't seem to be that!
1782  */
1783  char *absolute = concat_url(data->change.url, newurl);
1784  if(!absolute)
1785  return CURLE_OUT_OF_MEMORY;
1786  newurl = absolute;
1787  }
1788  else {
1789  /* The new URL MAY contain space or high byte values, that means a mighty
1790  stupid redirect URL but we still make an effort to do "right". */
1791  char *newest;
1792  size_t newlen = strlen_url(newurl, FALSE);
1793 
1794  /* This is an absolute URL, don't allow the custom port number */
1795  disallowport = TRUE;
1796 
1797  newest = malloc(newlen + 1); /* get memory for this */
1798  if(!newest)
1799  return CURLE_OUT_OF_MEMORY;
1800 
1801  strcpy_url(newest, newurl, FALSE); /* create a space-free URL */
1802  newurl = newest; /* use this instead now */
1803 
1804  }
1805 
1806  if(type == FOLLOW_FAKE) {
1807  /* we're only figuring out the new url if we would've followed locations
1808  but now we're done so we can get out! */
1809  data->info.wouldredirect = newurl;
1810 
1811  if(reachedmax) {
1812  failf(data, "Maximum (%ld) redirects followed", data->set.maxredirs);
1813  return CURLE_TOO_MANY_REDIRECTS;
1814  }
1815  return CURLE_OK;
1816  }
1817 
1818  if(disallowport)
1819  data->state.allow_port = FALSE;
1820 
1821  if(data->change.url_alloc) {
1822  Curl_safefree(data->change.url);
1823  data->change.url_alloc = FALSE;
1824  }
1825 
1826  data->change.url = newurl;
1827  data->change.url_alloc = TRUE;
1828 
1829  infof(data, "Issue another request to this URL: '%s'\n", data->change.url);
1830 
1831  /*
1832  * We get here when the HTTP code is 300-399 (and 401). We need to perform
1833  * differently based on exactly what return code there was.
1834  *
1835  * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
1836  * a HTTP (proxy-) authentication scheme other than Basic.
1837  */
1838  switch(data->info.httpcode) {
1839  /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
1840  Authorization: XXXX header in the HTTP request code snippet */
1841  /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
1842  Proxy-Authorization: XXXX header in the HTTP request code snippet */
1843  /* 300 - Multiple Choices */
1844  /* 306 - Not used */
1845  /* 307 - Temporary Redirect */
1846  default: /* for all above (and the unknown ones) */
1847  /* Some codes are explicitly mentioned since I've checked RFC2616 and they
1848  * seem to be OK to POST to.
1849  */
1850  break;
1851  case 301: /* Moved Permanently */
1852  /* (quote from RFC7231, section 6.4.2)
1853  *
1854  * Note: For historical reasons, a user agent MAY change the request
1855  * method from POST to GET for the subsequent request. If this
1856  * behavior is undesired, the 307 (Temporary Redirect) status code
1857  * can be used instead.
1858  *
1859  * ----
1860  *
1861  * Many webservers expect this, so these servers often answer a POST
1862  * request with an error page. To be sure that libcurl gets the page that
1863  * most user agents would get, libcurl has to force GET.
1864  *
1865  * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
1866  * can be overridden with CURLOPT_POSTREDIR.
1867  */
1868  if((data->set.httpreq == HTTPREQ_POST
1869  || data->set.httpreq == HTTPREQ_POST_FORM
1870  || data->set.httpreq == HTTPREQ_POST_MIME)
1871  && !(data->set.keep_post & CURL_REDIR_POST_301)) {
1872  infof(data, "Switch from POST to GET\n");
1873  data->set.httpreq = HTTPREQ_GET;
1874  }
1875  break;
1876  case 302: /* Found */
1877  /* (quote from RFC7231, section 6.4.3)
1878  *
1879  * Note: For historical reasons, a user agent MAY change the request
1880  * method from POST to GET for the subsequent request. If this
1881  * behavior is undesired, the 307 (Temporary Redirect) status code
1882  * can be used instead.
1883  *
1884  * ----
1885  *
1886  * Many webservers expect this, so these servers often answer a POST
1887  * request with an error page. To be sure that libcurl gets the page that
1888  * most user agents would get, libcurl has to force GET.
1889  *
1890  * This behaviour is forbidden by RFC1945 and the obsolete RFC2616, and
1891  * can be overridden with CURLOPT_POSTREDIR.
1892  */
1893  if((data->set.httpreq == HTTPREQ_POST
1894  || data->set.httpreq == HTTPREQ_POST_FORM
1895  || data->set.httpreq == HTTPREQ_POST_MIME)
1896  && !(data->set.keep_post & CURL_REDIR_POST_302)) {
1897  infof(data, "Switch from POST to GET\n");
1898  data->set.httpreq = HTTPREQ_GET;
1899  }
1900  break;
1901 
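/* A minimal sketch of the application-side override mentioned in the
   comments above: CURLOPT_POSTREDIR keeps the method as POST across the
   selected redirect codes instead of the historic POST-to-GET switch. The
   'easy' handle is assumed to be set up elsewhere. */
#if 0 /* illustration, not part of transfer.c */
  /* keep POST on 301 and 302; CURL_REDIR_POST_ALL also covers 303 */
  curl_easy_setopt(easy, CURLOPT_POSTREDIR,
                   (long)(CURL_REDIR_POST_301 | CURL_REDIR_POST_302));
#endif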
1902  case 303: /* See Other */
1903  /* Disable all types of POSTs, unless the user explicitly
1904  asks for POST after POST */
1905  if(data->set.httpreq != HTTPREQ_GET
1906  && !(data->set.keep_post & CURL_REDIR_POST_303)) {
1907  data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
1908  infof(data, "Disables POST, goes with %s\n",
1909  data->set.opt_no_body?"HEAD":"GET");
1910  }
1911  break;
1912  case 304: /* Not Modified */
1913  /* 304 means we did a conditional request and it was "Not modified".
1914  * We shouldn't get any Location: header in this response!
1915  */
1916  break;
1917  case 305: /* Use Proxy */
1918  /* (quote from RFC2616, section 10.3.6):
1919  * "The requested resource MUST be accessed through the proxy given
1920  * by the Location field. The Location field gives the URI of the
1921  * proxy. The recipient is expected to repeat this single request
1922  * via the proxy. 305 responses MUST only be generated by origin
1923  * servers."
1924  */
1925  break;
1926  }
1927  Curl_pgrsTime(data, TIMER_REDIRECT);
1928  Curl_pgrsResetTransferSizes(data);
1929 
1930  return CURLE_OK;
1931 #endif /* CURL_DISABLE_HTTP */
1932 }
1933 
1934 /* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.
1935 
1936  NOTE: the returned *url is malloc()ed. */
1937 CURLcode Curl_retry_request(struct connectdata *conn,
1938  char **url)
1939 {
1940  struct Curl_easy *data = conn->data;
1941 
1942  *url = NULL;
1943 
1944  /* if we're talking upload, we can't do the checks below, unless the protocol
1945  is HTTP, as when uploading over HTTP we will still get a response */
1946  if(data->set.upload &&
1947  !(conn->handler->protocol&(PROTO_FAMILY_HTTP|CURLPROTO_RTSP)))
1948  return CURLE_OK;
1949 
1950  if((data->req.bytecount + data->req.headerbytecount == 0) &&
1951  conn->bits.reuse &&
1952  (!data->set.opt_no_body
1953  || (conn->handler->protocol & PROTO_FAMILY_HTTP)) &&
1954  (data->set.rtspreq != RTSPREQ_RECEIVE)) {
1955  /* We got no data, we attempted to re-use a connection. For HTTP this
1956  can be a retry so we try again regardless of whether we expected a body.
1957  For other protocols we try again only if we expected a body.
1958 
1959  This might happen if the connection was left alive when we were
1960  done using it before, but that was closed when we wanted to read from
1961  it again. Bad luck. Retry the same request on a fresh connect! */
1962  infof(conn->data, "Connection died, retrying a fresh connect\n");
1963  *url = strdup(conn->data->change.url);
1964  if(!*url)
1965  return CURLE_OUT_OF_MEMORY;
1966 
1967  connclose(conn, "retry"); /* close this connection */
1968  conn->bits.retry = TRUE; /* mark this as a connection we're about
1969  to retry. Marking it this way should
1970  prevent e.g. HTTP transfers from returning
1971  an error just because nothing has been
1972  transferred! */
1973 
1974 
1975  if(conn->handler->protocol&PROTO_FAMILY_HTTP) {
1976  struct HTTP *http = data->req.protop;
1977  if(http->writebytecount)
1978  return Curl_readrewind(conn);
1979  }
1980  }
1981  return CURLE_OK;
1982 }
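/* When Curl_retry_request() decides to retry and part of the request body
   was already sent, Curl_readrewind() has to rewind the upload source. For
   an application feeding data through a read callback, one way to make that
   rewind succeed is a seek callback; a minimal sketch follows, with made-up
   names and a FILE* as the user pointer. */
#if 0 /* illustration, not part of transfer.c */
static int sketch_seek_cb(void *userp, curl_off_t offset, int origin)
{
  FILE *fp = (FILE *)userp;
  /* reposition the upload source so the body can be re-read from scratch */
  if(fseek(fp, (long)offset, origin) != 0)
    return CURL_SEEKFUNC_CANTSEEK;
  return CURL_SEEKFUNC_OK;
}

/* during handle setup: */
  curl_easy_setopt(easy, CURLOPT_SEEKFUNCTION, sketch_seek_cb);
  curl_easy_setopt(easy, CURLOPT_SEEKDATA, fp);
#endif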
1983 
1984 /*
1985  * Curl_setup_transfer() is called to setup some basic properties for the
1986  * upcoming transfer.
1987  */
1988 void
1989 Curl_setup_transfer(
1990  struct connectdata *conn, /* connection data */
1991  int sockindex, /* socket index to read from or -1 */
1992  curl_off_t size, /* -1 if unknown at this point */
1993  bool getheader, /* TRUE if header parsing is wanted */
1994  curl_off_t *bytecountp, /* return number of bytes read or NULL */
1995  int writesockindex, /* socket index to write to, it may very well be
1996  the same we read from. -1 disables */
1997  curl_off_t *writecountp /* return number of bytes written or NULL */
1998  )
1999 {
2000  struct Curl_easy *data;
2001  struct SingleRequest *k;
2002 
2003  DEBUGASSERT(conn != NULL);
2004 
2005  data = conn->data;
2006  k = &data->req;
2007 
2008  DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));
2009 
2010  /* now copy all input parameters */
2011  conn->sockfd = sockindex == -1 ?
2012  CURL_SOCKET_BAD : conn->sock[sockindex];
2013  conn->writesockfd = writesockindex == -1 ?
2014  CURL_SOCKET_BAD:conn->sock[writesockindex];
2015  k->getheader = getheader;
2016 
2017  k->size = size;
2018  k->bytecountp = bytecountp;
2019  k->writebytecountp = writecountp;
2020 
2021  /* The code sequence below is placed in this function just because all
2022  necessary input is not always known in do_complete(), as this function may
2023  be called after that */
2024 
2025  if(!k->getheader) {
2026  k->header = FALSE;
2027  if(size > 0)
2028  Curl_pgrsSetDownloadSize(data, size);
2029  }
2030  /* we want header and/or body, if neither then don't do this! */
2031  if(k->getheader || !data->set.opt_no_body) {
2032 
2033  if(conn->sockfd != CURL_SOCKET_BAD)
2034  k->keepon |= KEEP_RECV;
2035 
2036  if(conn->writesockfd != CURL_SOCKET_BAD) {
2037  struct HTTP *http = data->req.protop;
2038  /* HTTP 1.1 magic:
2039 
2040  Even if we require a 100-return code before uploading data, we might
2041  need to write data before that since the REQUEST may not have been
2042  completely sent off just yet.
2043 
2044  Thus, we must check if the request has been sent before we set the
2045  state info where we wait for the 100-return code
2046  */
2047  if((data->state.expect100header) &&
2048  (conn->handler->protocol&PROTO_FAMILY_HTTP) &&
2049  (http->sending == HTTPSEND_BODY)) {
2050  /* wait with write until we either got 100-continue or a timeout */
2051  k->exp100 = EXP100_AWAITING_CONTINUE;
2052  k->start100 = Curl_tvnow();
2053 
2054  /* Set a timeout for the multi interface. Add the inaccuracy margin so
2055  that we don't fire slightly too early and get denied to run. */
2056  Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
2057  }
2058  else {
2059  if(data->state.expect100header)
2060  /* when we've sent off the rest of the headers, we must await a
2061  100-continue but first finish sending the request */
2062  k->exp100 = EXP100_SENDING_REQUEST;
2063 
2064  /* enable the write bit when we're not waiting for continue */
2065  k->keepon |= KEEP_SEND;
2066  }
2067  } /* if(conn->writesockfd != CURL_SOCKET_BAD) */
2068  } /* if(k->getheader || !data->set.opt_no_body) */
2069 
2070 }
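/* A hedged sketch of a typical Curl_setup_transfer() call as a protocol
   handler might issue it for a download with a known size: read from
   FIRSTSOCKET, parse headers, count bytes into the request state, and leave
   the write side disabled. 'contentlength' is an assumed variable. The
   100-continue wait set up above is bounded on the application side by
   CURLOPT_EXPECT_100_TIMEOUT_MS. */
#if 0 /* illustration, not part of transfer.c */
  Curl_setup_transfer(conn, FIRSTSOCKET, contentlength, TRUE,
                      &data->req.bytecount, -1, NULL);

  /* application side: shorten the wait for a 100-continue reply to 2s */
  curl_easy_setopt(easy, CURLOPT_EXPECT_100_TIMEOUT_MS, 2000L);
#endif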