protobuf/src/google/protobuf/io/tokenizer_unittest.cc
1 // Protocol Buffers - Google's data interchange format
2 // Copyright 2008 Google Inc. All rights reserved.
3 // https://developers.google.com/protocol-buffers/
4 //
5 // Redistribution and use in source and binary forms, with or without
6 // modification, are permitted provided that the following conditions are
7 // met:
8 //
9 // * Redistributions of source code must retain the above copyright
10 // notice, this list of conditions and the following disclaimer.
11 // * Redistributions in binary form must reproduce the above
12 // copyright notice, this list of conditions and the following disclaimer
13 // in the documentation and/or other materials provided with the
14 // distribution.
15 // * Neither the name of Google Inc. nor the names of its
16 // contributors may be used to endorse or promote products derived from
17 // this software without specific prior written permission.
18 //
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 
31 // Author: kenton@google.com (Kenton Varda)
32 // Based on original Protocol Buffers design by
33 // Sanjay Ghemawat, Jeff Dean, and others.
34 
35 #include <google/protobuf/io/tokenizer.h>
36 
37 #include <limits.h>
38 #include <math.h>
39 
40 #include <vector>
41 
42 #include <google/protobuf/stubs/common.h>
43 #include <google/protobuf/stubs/logging.h>
44 #include <google/protobuf/stubs/strutil.h>
45 #include <google/protobuf/stubs/substitute.h>
46 #include <google/protobuf/io/zero_copy_stream_impl.h>
47 #include <google/protobuf/testing/googletest.h>
48 #include <gtest/gtest.h>
49 
50 namespace google {
51 namespace protobuf {
52 namespace io {
53 namespace {
54 
55 // ===================================================================
56 // Data-Driven Test Infrastructure
57 
58 // TODO(kenton): This is copied from coded_stream_unittest. This is
59 // temporary until these features are integrated into gTest itself.
60 
61 // TEST_1D and TEST_2D are macros I'd eventually like to see added to
62 // gTest. These macros can be used to declare tests which should be
63 // run multiple times, once for each item in some input array. TEST_1D
64 // tests all cases in a single input array. TEST_2D tests all
65 // combinations of cases from two arrays. The arrays must be statically
66 // defined such that the GOOGLE_ARRAYSIZE() macro works on them. Example:
67 //
68 // int kCases[] = {1, 2, 3, 4}
69 // TEST_1D(MyFixture, MyTest, kCases) {
70 // EXPECT_GT(kCases_case, 0);
71 // }
72 //
73 // This test iterates through the numbers 1, 2, 3, and 4 and tests that
74 // they are all greater than zero. In case of failure, the exact case
75 // which failed will be printed. The case type must be printable using
76 // ostream::operator<<.
77 
78 #define TEST_1D(FIXTURE, NAME, CASES) \
79  class FIXTURE##_##NAME##_DD : public FIXTURE { \
80  protected: \
81  template <typename CaseType> \
82  void DoSingleCase(const CaseType& CASES##_case); \
83  }; \
84  \
85  TEST_F(FIXTURE##_##NAME##_DD, NAME) { \
86  for (int i = 0; i < GOOGLE_ARRAYSIZE(CASES); i++) { \
87  SCOPED_TRACE(testing::Message() \
88  << #CASES " case #" << i << ": " << CASES[i]); \
89  DoSingleCase(CASES[i]); \
90  } \
91  } \
92  \
93  template <typename CaseType> \
94  void FIXTURE##_##NAME##_DD::DoSingleCase(const CaseType& CASES##_case)
95 
96 #define TEST_2D(FIXTURE, NAME, CASES1, CASES2) \
97  class FIXTURE##_##NAME##_DD : public FIXTURE { \
98  protected: \
99  template <typename CaseType1, typename CaseType2> \
100  void DoSingleCase(const CaseType1& CASES1##_case, \
101  const CaseType2& CASES2##_case); \
102  }; \
103  \
104  TEST_F(FIXTURE##_##NAME##_DD, NAME) { \
105  for (int i = 0; i < GOOGLE_ARRAYSIZE(CASES1); i++) { \
106  for (int j = 0; j < GOOGLE_ARRAYSIZE(CASES2); j++) { \
107  SCOPED_TRACE(testing::Message() \
108  << #CASES1 " case #" << i << ": " << CASES1[i] << ", " \
109  << #CASES2 " case #" << j << ": " << CASES2[j]); \
110  DoSingleCase(CASES1[i], CASES2[j]); \
111  } \
112  } \
113  } \
114  \
115  template <typename CaseType1, typename CaseType2> \
116  void FIXTURE##_##NAME##_DD::DoSingleCase(const CaseType1& CASES1##_case, \
117  const CaseType2& CASES2##_case)
118 
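// Editorial note (not part of the original file): TEST_2D follows the same
// pattern as TEST_1D but iterates over every pair drawn from two case arrays.
// A minimal hypothetical use, assuming a fixture MyFixture and arrays kValues
// and kNames, would be:
//
//   int kValues[] = {1, 2};
//   const char* kNames[] = {"a", "b"};
//   TEST_2D(MyFixture, MyTest, kValues, kNames) {
//     // Runs once for every (kValues[i], kNames[j]) combination; the current
//     // items are available as kValues_case and kNames_case.
//     EXPECT_GT(kValues_case, 0);
//     EXPECT_TRUE(kNames_case != nullptr);
//   }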
119 // -------------------------------------------------------------------
120 
121 // An input stream that is basically like an ArrayInputStream but sometimes
122 // returns empty buffers, just to throw us off.
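// Editorial note (not part of the original file): the ZeroCopyInputStream
// contract allows Next() to return true with a zero-size buffer as long as a
// non-empty buffer is eventually produced, so the tokenizer has to tolerate
// the empty buffers this stream injects.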
123 class TestInputStream : public ZeroCopyInputStream {
124  public:
125  TestInputStream(const void* data, int size, int block_size)
126  : array_stream_(data, size, block_size), counter_(0) {}
127  ~TestInputStream() {}
128 
129  // implements ZeroCopyInputStream ----------------------------------
130  bool Next(const void** data, int* size) override {
131  // We'll return empty buffers starting with the first buffer, and every
132  // 3 and 5 buffers after that.
133  if (counter_ % 3 == 0 || counter_ % 5 == 0) {
134  *data = NULL;
135  *size = 0;
136  ++counter_;
137  return true;
138  } else {
139  ++counter_;
140  return array_stream_.Next(data, size);
141  }
142  }
143 
144  void BackUp(int count) override { return array_stream_.BackUp(count); }
145  bool Skip(int count) override { return array_stream_.Skip(count); }
146  int64_t ByteCount() const override { return array_stream_.ByteCount(); }
147 
148  private:
149  ArrayInputStream array_stream_;
150  int counter_;
151 };
152 
153 // -------------------------------------------------------------------
154 
155 // An error collector which simply concatenates all its errors into a big
156 // block of text which can be checked.
157 class TestErrorCollector : public ErrorCollector {
158  public:
159  TestErrorCollector() {}
160  ~TestErrorCollector() {}
161 
162  std::string text_;
163
164  // implements ErrorCollector ---------------------------------------
165  void AddError(int line, int column, const std::string& message) {
166  strings::SubstituteAndAppend(&text_, "$0:$1: $2\n", line, column, message);
167  }
168 };
169 
170 // -------------------------------------------------------------------
171 
172 // We test each operation over a variety of block sizes to ensure that
173 // we test cases where reads cross buffer boundaries as well as cases
174 // where they don't. This is sort of a brute-force approach to this,
175 // but it's easy to write and easy to understand.
176 const int kBlockSizes[] = {1, 2, 3, 5, 7, 13, 32, 1024};
177 
178 class TokenizerTest : public testing::Test {
179  protected:
180  // For easy testing.
181  uint64 ParseInteger(const std::string& text) {
182  uint64 result;
183  EXPECT_TRUE(Tokenizer::ParseInteger(text, kuint64max, &result));
184  return result;
185  }
186 };
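// Editorial note (not part of the original file): the ParseInteger() helper
// above wraps the static Tokenizer::ParseInteger(text, max_value, output),
// asserting that parsing succeeds and returning the parsed value, which lets
// the tests below write e.g. EXPECT_EQ(123, ParseInteger("123")).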
187 
188 // ===================================================================
189 
190 // These tests cause gcc 3.3.5 (and earlier?) to give the cryptic error:
191 // "sorry, unimplemented: `method_call_expr' not supported by dump_expr"
192 #if !defined(__GNUC__) || __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 3)
193 
194 // In each test case, the entire input text should parse as a single token
195 // of the given type.
196 struct SimpleTokenCase {
197  std::string input;
198  Tokenizer::TokenType type;
199 };
200 
201 inline std::ostream& operator<<(std::ostream& out,
202  const SimpleTokenCase& test_case) {
203  return out << CEscape(test_case.input);
204 }
205 
206 SimpleTokenCase kSimpleTokenCases[] = {
207  // Test identifiers.
208  {"hello", Tokenizer::TYPE_IDENTIFIER},
209 
210  // Test integers.
211  {"123", Tokenizer::TYPE_INTEGER},
212  {"0xab6", Tokenizer::TYPE_INTEGER},
213  {"0XAB6", Tokenizer::TYPE_INTEGER},
214  {"0X1234567", Tokenizer::TYPE_INTEGER},
215  {"0x89abcdef", Tokenizer::TYPE_INTEGER},
216  {"0x89ABCDEF", Tokenizer::TYPE_INTEGER},
217  {"01234567", Tokenizer::TYPE_INTEGER},
218 
219  // Test floats.
220  {"123.45", Tokenizer::TYPE_FLOAT},
221  {"1.", Tokenizer::TYPE_FLOAT},
222  {"1e3", Tokenizer::TYPE_FLOAT},
223  {"1E3", Tokenizer::TYPE_FLOAT},
224  {"1e-3", Tokenizer::TYPE_FLOAT},
225  {"1e+3", Tokenizer::TYPE_FLOAT},
226  {"1.e3", Tokenizer::TYPE_FLOAT},
227  {"1.2e3", Tokenizer::TYPE_FLOAT},
228  {".1", Tokenizer::TYPE_FLOAT},
229  {".1e3", Tokenizer::TYPE_FLOAT},
230  {".1e-3", Tokenizer::TYPE_FLOAT},
231  {".1e+3", Tokenizer::TYPE_FLOAT},
232 
233  // Test strings.
234  {"'hello'", Tokenizer::TYPE_STRING},
235  {"\"foo\"", Tokenizer::TYPE_STRING},
236  {"'a\"b'", Tokenizer::TYPE_STRING},
237  {"\"a'b\"", Tokenizer::TYPE_STRING},
238  {"'a\\'b'", Tokenizer::TYPE_STRING},
239  {"\"a\\\"b\"", Tokenizer::TYPE_STRING},
240  {"'\\xf'", Tokenizer::TYPE_STRING},
241  {"'\\0'", Tokenizer::TYPE_STRING},
242 
243  // Test symbols.
244  {"+", Tokenizer::TYPE_SYMBOL},
245  {".", Tokenizer::TYPE_SYMBOL},
246 };
247 
248 TEST_2D(TokenizerTest, SimpleTokens, kSimpleTokenCases, kBlockSizes) {
249  // Set up the tokenizer.
250  TestInputStream input(kSimpleTokenCases_case.input.data(),
251  kSimpleTokenCases_case.input.size(), kBlockSizes_case);
252  TestErrorCollector error_collector;
253  Tokenizer tokenizer(&input, &error_collector);
254 
255  // Before Next() is called, the initial token should always be TYPE_START.
256  EXPECT_EQ(Tokenizer::TYPE_START, tokenizer.current().type);
257  EXPECT_EQ("", tokenizer.current().text);
258  EXPECT_EQ(0, tokenizer.current().line);
259  EXPECT_EQ(0, tokenizer.current().column);
260  EXPECT_EQ(0, tokenizer.current().end_column);
261 
262  // Parse the token.
263  ASSERT_TRUE(tokenizer.Next());
264 
265  // Check that it has the right type.
266  EXPECT_EQ(kSimpleTokenCases_case.type, tokenizer.current().type);
267  // Check that it contains the complete input text.
268  EXPECT_EQ(kSimpleTokenCases_case.input, tokenizer.current().text);
269  // Check that it is located at the beginning of the input
270  EXPECT_EQ(0, tokenizer.current().line);
271  EXPECT_EQ(0, tokenizer.current().column);
272  EXPECT_EQ(kSimpleTokenCases_case.input.size(),
273  tokenizer.current().end_column);
274 
275  // There should be no more input.
276  EXPECT_FALSE(tokenizer.Next());
277 
278  // After Next() returns false, the token should have type TYPE_END.
279  EXPECT_EQ(Tokenizer::TYPE_END, tokenizer.current().type);
280  EXPECT_EQ("", tokenizer.current().text);
281  EXPECT_EQ(0, tokenizer.current().line);
282  EXPECT_EQ(kSimpleTokenCases_case.input.size(), tokenizer.current().column);
283  EXPECT_EQ(kSimpleTokenCases_case.input.size(),
284  tokenizer.current().end_column);
285 
286  // There should be no errors.
287  EXPECT_TRUE(error_collector.text_.empty());
288 }
289 
290 TEST_1D(TokenizerTest, FloatSuffix, kBlockSizes) {
291  // Test the "allow_f_after_float" option.
292 
293  // Set up the tokenizer.
294  const char* text = "1f 2.5f 6e3f 7F";
295  TestInputStream input(text, strlen(text), kBlockSizes_case);
296  TestErrorCollector error_collector;
297  Tokenizer tokenizer(&input, &error_collector);
298  tokenizer.set_allow_f_after_float(true);
299 
300  // Advance through tokens and check that they are parsed as expected.
301  ASSERT_TRUE(tokenizer.Next());
302  EXPECT_EQ(tokenizer.current().text, "1f");
303  EXPECT_EQ(tokenizer.current().type, Tokenizer::TYPE_FLOAT);
304  ASSERT_TRUE(tokenizer.Next());
305  EXPECT_EQ(tokenizer.current().text, "2.5f");
306  EXPECT_EQ(tokenizer.current().type, Tokenizer::TYPE_FLOAT);
307  ASSERT_TRUE(tokenizer.Next());
308  EXPECT_EQ(tokenizer.current().text, "6e3f");
309  EXPECT_EQ(tokenizer.current().type, Tokenizer::TYPE_FLOAT);
310  ASSERT_TRUE(tokenizer.Next());
311  EXPECT_EQ(tokenizer.current().text, "7F");
312  EXPECT_EQ(tokenizer.current().type, Tokenizer::TYPE_FLOAT);
313 
314  // There should be no more input.
315  EXPECT_FALSE(tokenizer.Next());
316  // There should be no errors.
317  EXPECT_TRUE(error_collector.text_.empty());
318 }
319 
320 SimpleTokenCase kWhitespaceTokenCases[] = {
326  {"\v\t", Tokenizer::TYPE_WHITESPACE},
327  {" \t\r", Tokenizer::TYPE_WHITESPACE},
328  // Newlines:
329  {"\n", Tokenizer::TYPE_NEWLINE},
330 };
331 
332 TEST_2D(TokenizerTest, Whitespace, kWhitespaceTokenCases, kBlockSizes) {
333  {
334  TestInputStream input(kWhitespaceTokenCases_case.input.data(),
335  kWhitespaceTokenCases_case.input.size(),
336  kBlockSizes_case);
337  TestErrorCollector error_collector;
338  Tokenizer tokenizer(&input, &error_collector);
339 
340  EXPECT_FALSE(tokenizer.Next());
341  }
342  {
343  TestInputStream input(kWhitespaceTokenCases_case.input.data(),
344  kWhitespaceTokenCases_case.input.size(),
345  kBlockSizes_case);
346  TestErrorCollector error_collector;
347  Tokenizer tokenizer(&input, &error_collector);
348  tokenizer.set_report_whitespace(true);
349  tokenizer.set_report_newlines(true);
350 
351  ASSERT_TRUE(tokenizer.Next());
352  EXPECT_EQ(tokenizer.current().text, kWhitespaceTokenCases_case.input);
353  EXPECT_EQ(tokenizer.current().type, kWhitespaceTokenCases_case.type);
354 
355  EXPECT_FALSE(tokenizer.Next());
356  }
357 }
358 
359 #endif
360 
361 // -------------------------------------------------------------------
362 
363 // In each case, the input is parsed to produce a list of tokens. The
364 // last token in "output" must have type TYPE_END.
365 struct MultiTokenCase {
366  std::string input;
367  std::vector<Tokenizer::Token> output;
368 };
369 
370 inline std::ostream& operator<<(std::ostream& out,
371  const MultiTokenCase& test_case) {
372  return out << CEscape(test_case.input);
373 }
374 
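// Editorial note (not part of the original file): each expected token below is
// written as {type, text, line, column, end_column}, matching the field order
// of Tokenizer::Token.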
375 MultiTokenCase kMultiTokenCases[] = {
376  // Test empty input.
377  {"",
378  {
379  {Tokenizer::TYPE_END, "", 0, 0, 0},
380  }},
381 
382  // Test all token types at the same time.
383  {"foo 1 1.2 + 'bar'",
384  {
385  {Tokenizer::TYPE_IDENTIFIER, "foo", 0, 0, 3},
386  {Tokenizer::TYPE_INTEGER, "1", 0, 4, 5},
387  {Tokenizer::TYPE_FLOAT, "1.2", 0, 6, 9},
388  {Tokenizer::TYPE_SYMBOL, "+", 0, 10, 11},
389  {Tokenizer::TYPE_STRING, "'bar'", 0, 12, 17},
390  {Tokenizer::TYPE_END, "", 0, 17, 17},
391  }},
392 
393  // Test that consecutive symbols are parsed as separate tokens.
394  {"!@+%",
395  {
396  {Tokenizer::TYPE_SYMBOL, "!", 0, 0, 1},
397  {Tokenizer::TYPE_SYMBOL, "@", 0, 1, 2},
398  {Tokenizer::TYPE_SYMBOL, "+", 0, 2, 3},
399  {Tokenizer::TYPE_SYMBOL, "%", 0, 3, 4},
400  {Tokenizer::TYPE_END, "", 0, 4, 4},
401  }},
402 
403  // Test that newlines affect line numbers correctly.
404  {"foo bar\nrab oof",
405  {
406  {Tokenizer::TYPE_IDENTIFIER, "foo", 0, 0, 3},
407  {Tokenizer::TYPE_IDENTIFIER, "bar", 0, 4, 7},
408  {Tokenizer::TYPE_IDENTIFIER, "rab", 1, 0, 3},
409  {Tokenizer::TYPE_IDENTIFIER, "oof", 1, 4, 7},
410  {Tokenizer::TYPE_END, "", 1, 7, 7},
411  }},
412 
413  // Test that tabs affect column numbers correctly (each tab advances the
413  // column to the next multiple of 8).
414  {"foo\tbar \tbaz",
415  {
416  {Tokenizer::TYPE_IDENTIFIER, "foo", 0, 0, 3},
417  {Tokenizer::TYPE_IDENTIFIER, "bar", 0, 8, 11},
418  {Tokenizer::TYPE_IDENTIFIER, "baz", 0, 16, 19},
419  {Tokenizer::TYPE_END, "", 0, 19, 19},
420  }},
421 
422  // Test that tabs in string literals affect column numbers correctly.
423  {"\"foo\tbar\" baz",
424  {
425  {Tokenizer::TYPE_STRING, "\"foo\tbar\"", 0, 0, 12},
426  {Tokenizer::TYPE_IDENTIFIER, "baz", 0, 13, 16},
427  {Tokenizer::TYPE_END, "", 0, 16, 16},
428  }},
429 
430  // Test that line comments are ignored.
431  {"foo // This is a comment\n"
432  "bar // This is another comment",
433  {
434  {Tokenizer::TYPE_IDENTIFIER, "foo", 0, 0, 3},
435  {Tokenizer::TYPE_IDENTIFIER, "bar", 1, 0, 3},
436  {Tokenizer::TYPE_END, "", 1, 30, 30},
437  }},
438 
439  // Test that block comments are ignored.
440  {"foo /* This is a block comment */ bar",
441  {
442  {Tokenizer::TYPE_IDENTIFIER, "foo", 0, 0, 3},
443  {Tokenizer::TYPE_IDENTIFIER, "bar", 0, 34, 37},
444  {Tokenizer::TYPE_END, "", 0, 37, 37},
445  }},
446 
447  // Test that sh-style comments are not ignored by default.
448  {"foo # bar\n"
449  "baz",
450  {
451  {Tokenizer::TYPE_IDENTIFIER, "foo", 0, 0, 3},
452  {Tokenizer::TYPE_SYMBOL, "#", 0, 4, 5},
453  {Tokenizer::TYPE_IDENTIFIER, "bar", 0, 6, 9},
454  {Tokenizer::TYPE_IDENTIFIER, "baz", 1, 0, 3},
455  {Tokenizer::TYPE_END, "", 1, 3, 3},
456  }},
457 
458  // Test all whitespace chars
459  {"foo\n\t\r\v\fbar",
460  {
461  {Tokenizer::TYPE_IDENTIFIER, "foo", 0, 0, 3},
462  {Tokenizer::TYPE_IDENTIFIER, "bar", 1, 11, 14},
463  {Tokenizer::TYPE_END, "", 1, 14, 14},
464  }},
465 };
466 
467 TEST_2D(TokenizerTest, MultipleTokens, kMultiTokenCases, kBlockSizes) {
468  // Set up the tokenizer.
469  TestInputStream input(kMultiTokenCases_case.input.data(),
470  kMultiTokenCases_case.input.size(), kBlockSizes_case);
471  TestErrorCollector error_collector;
472  Tokenizer tokenizer(&input, &error_collector);
473 
474  // Before Next() is called, the initial token should always be TYPE_START.
475  EXPECT_EQ(Tokenizer::TYPE_START, tokenizer.current().type);
476  EXPECT_EQ("", tokenizer.current().text);
477  EXPECT_EQ(0, tokenizer.current().line);
478  EXPECT_EQ(0, tokenizer.current().column);
479  EXPECT_EQ(0, tokenizer.current().end_column);
480 
481  // Loop through all expected tokens.
482  int i = 0;
483  Tokenizer::Token token;
484  do {
485  token = kMultiTokenCases_case.output[i++];
486 
487  SCOPED_TRACE(testing::Message() << "Token #" << i << ": " << token.text);
488 
489  Tokenizer::Token previous = tokenizer.current();
490 
491  // Next() should only return false when it hits the end token.
492  if (token.type != Tokenizer::TYPE_END) {
493  ASSERT_TRUE(tokenizer.Next());
494  } else {
495  ASSERT_FALSE(tokenizer.Next());
496  }
497 
498  // Check that the previous token is set correctly.
499  EXPECT_EQ(previous.type, tokenizer.previous().type);
500  EXPECT_EQ(previous.text, tokenizer.previous().text);
501  EXPECT_EQ(previous.line, tokenizer.previous().line);
502  EXPECT_EQ(previous.column, tokenizer.previous().column);
503  EXPECT_EQ(previous.end_column, tokenizer.previous().end_column);
504 
505  // Check that the token matches the expected one.
506  EXPECT_EQ(token.type, tokenizer.current().type);
507  EXPECT_EQ(token.text, tokenizer.current().text);
508  EXPECT_EQ(token.line, tokenizer.current().line);
509  EXPECT_EQ(token.column, tokenizer.current().column);
510  EXPECT_EQ(token.end_column, tokenizer.current().end_column);
511 
512  } while (token.type != Tokenizer::TYPE_END);
513 
514  // There should be no errors.
515  EXPECT_TRUE(error_collector.text_.empty());
516 }
517 
518 MultiTokenCase kMultiWhitespaceTokenCases[] = {
519  // Test all token types at the same time.
520  {"foo 1 \t1.2 \n +\v'bar'",
521  {
522  {Tokenizer::TYPE_IDENTIFIER, "foo", 0, 0, 3},
523  {Tokenizer::TYPE_WHITESPACE, " ", 0, 3, 4},
524  {Tokenizer::TYPE_INTEGER, "1", 0, 4, 5},
525  {Tokenizer::TYPE_WHITESPACE, " \t", 0, 5, 8},
526  {Tokenizer::TYPE_FLOAT, "1.2", 0, 8, 11},
527  {Tokenizer::TYPE_WHITESPACE, " ", 0, 11, 13},
528  {Tokenizer::TYPE_NEWLINE, "\n", 0, 13, 0},
529  {Tokenizer::TYPE_WHITESPACE, " ", 1, 0, 3},
530  {Tokenizer::TYPE_SYMBOL, "+", 1, 3, 4},
531  {Tokenizer::TYPE_WHITESPACE, "\v", 1, 4, 5},
532  {Tokenizer::TYPE_STRING, "'bar'", 1, 5, 10},
533  {Tokenizer::TYPE_END, "", 1, 10, 10},
534  }},
535 
536 };
537 
538 TEST_2D(TokenizerTest, MultipleWhitespaceTokens, kMultiWhitespaceTokenCases,
539  kBlockSizes) {
540  // Set up the tokenizer.
541  TestInputStream input(kMultiWhitespaceTokenCases_case.input.data(),
542  kMultiWhitespaceTokenCases_case.input.size(),
543  kBlockSizes_case);
544  TestErrorCollector error_collector;
545  Tokenizer tokenizer(&input, &error_collector);
546  tokenizer.set_report_whitespace(true);
547  tokenizer.set_report_newlines(true);
548 
549  // Before Next() is called, the initial token should always be TYPE_START.
550  EXPECT_EQ(Tokenizer::TYPE_START, tokenizer.current().type);
551  EXPECT_EQ("", tokenizer.current().text);
552  EXPECT_EQ(0, tokenizer.current().line);
553  EXPECT_EQ(0, tokenizer.current().column);
554  EXPECT_EQ(0, tokenizer.current().end_column);
555 
556  // Loop through all expected tokens.
557  int i = 0;
558  Tokenizer::Token token;
559  do {
560  token = kMultiWhitespaceTokenCases_case.output[i++];
561 
562  SCOPED_TRACE(testing::Message() << "Token #" << i << ": " << token.text);
563 
564  Tokenizer::Token previous = tokenizer.current();
565 
566  // Next() should only return false when it hits the end token.
567  if (token.type != Tokenizer::TYPE_END) {
568  ASSERT_TRUE(tokenizer.Next());
569  } else {
570  ASSERT_FALSE(tokenizer.Next());
571  }
572 
573  // Check that the previous token is set correctly.
574  EXPECT_EQ(previous.type, tokenizer.previous().type);
575  EXPECT_EQ(previous.text, tokenizer.previous().text);
576  EXPECT_EQ(previous.line, tokenizer.previous().line);
577  EXPECT_EQ(previous.column, tokenizer.previous().column);
578  EXPECT_EQ(previous.end_column, tokenizer.previous().end_column);
579 
580  // Check that the token matches the expected one.
581  EXPECT_EQ(token.type, tokenizer.current().type);
582  EXPECT_EQ(token.text, tokenizer.current().text);
583  EXPECT_EQ(token.line, tokenizer.current().line);
584  EXPECT_EQ(token.column, tokenizer.current().column);
585  EXPECT_EQ(token.end_column, tokenizer.current().end_column);
586 
587  } while (token.type != Tokenizer::TYPE_END);
588 
589  // There should be no errors.
590  EXPECT_TRUE(error_collector.text_.empty());
591 }
592 
593 // This test causes gcc 3.3.5 (and earlier?) to give the cryptic error:
594 // "sorry, unimplemented: `method_call_expr' not supported by dump_expr"
595 #if !defined(__GNUC__) || __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 3)
596 
597 TEST_1D(TokenizerTest, ShCommentStyle, kBlockSizes) {
598  // Test the "comment_style" option.
599 
600  const char* text =
601  "foo # bar\n"
602  "baz // qux\n"
603  "corge /* grault */\n"
604  "garply";
605  const char* const kTokens[] = {"foo", // "# bar" is ignored
606  "baz", "/", "/", "qux", "corge", "/",
607  "*", "grault", "*", "/", "garply"};
608 
609  // Set up the tokenizer.
610  TestInputStream input(text, strlen(text), kBlockSizes_case);
611  TestErrorCollector error_collector;
612  Tokenizer tokenizer(&input, &error_collector);
613  tokenizer.set_comment_style(Tokenizer::SH_COMMENT_STYLE);
614 
615  // Advance through tokens and check that they are parsed as expected.
616  for (int i = 0; i < GOOGLE_ARRAYSIZE(kTokens); i++) {
617  EXPECT_TRUE(tokenizer.Next());
618  EXPECT_EQ(tokenizer.current().text, kTokens[i]);
619  }
620 
621  // There should be no more input.
622  EXPECT_FALSE(tokenizer.Next());
623  // There should be no errors.
624  EXPECT_TRUE(error_collector.text_.empty());
625 }
626 
627 #endif
628 
629 // -------------------------------------------------------------------
630 
631 // In each case, the input is expected to have two tokens named "prev" and
632 // "next" with comments in between.
633 struct DocCommentCase {
634  std::string input;
635
636  const char* prev_trailing_comments;
637  const char* detached_comments[10];
638  const char* next_leading_comments;
639 };
640 
641 inline std::ostream& operator<<(std::ostream& out,
642  const DocCommentCase& test_case) {
643  return out << CEscape(test_case.input);
644 }
645 
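// Editorial note (not part of the original file): each aggregate below lists
// the DocCommentCase fields in declaration order: input, prev_trailing_comments,
// detached_comments, next_leading_comments.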
646 DocCommentCase kDocCommentCases[] = {
647  {"prev next",
648 
649  "",
650  {},
651  ""},
652 
653  {"prev /* ignored */ next",
654 
655  "",
656  {},
657  ""},
658 
659  {"prev // trailing comment\n"
660  "next",
661 
662  " trailing comment\n",
663  {},
664  ""},
665 
666  {"prev\n"
667  "// leading comment\n"
668  "// line 2\n"
669  "next",
670 
671  "",
672  {},
673  " leading comment\n"
674  " line 2\n"},
675 
676  {"prev\n"
677  "// trailing comment\n"
678  "// line 2\n"
679  "\n"
680  "next",
681 
682  " trailing comment\n"
683  " line 2\n",
684  {},
685  ""},
686 
687  {"prev // trailing comment\n"
688  "// leading comment\n"
689  "// line 2\n"
690  "next",
691 
692  " trailing comment\n",
693  {},
694  " leading comment\n"
695  " line 2\n"},
696 
697  {"prev /* trailing block comment */\n"
698  "/* leading block comment\n"
699  " * line 2\n"
700  " * line 3 */"
701  "next",
702 
703  " trailing block comment ",
704  {},
705  " leading block comment\n"
706  " line 2\n"
707  " line 3 "},
708 
709  {"prev\n"
710  "/* trailing block comment\n"
711  " * line 2\n"
712  " * line 3\n"
713  " */\n"
714  "/* leading block comment\n"
715  " * line 2\n"
716  " * line 3 */"
717  "next",
718 
719  " trailing block comment\n"
720  " line 2\n"
721  " line 3\n",
722  {},
723  " leading block comment\n"
724  " line 2\n"
725  " line 3 "},
726 
727  {"prev\n"
728  "// trailing comment\n"
729  "\n"
730  "// detached comment\n"
731  "// line 2\n"
732  "\n"
733  "// second detached comment\n"
734  "/* third detached comment\n"
735  " * line 2 */\n"
736  "// leading comment\n"
737  "next",
738 
739  " trailing comment\n",
740  {" detached comment\n"
741  " line 2\n",
742  " second detached comment\n",
743  " third detached comment\n"
744  " line 2 "},
745  " leading comment\n"},
746 
747  {"prev /**/\n"
748  "\n"
749  "// detached comment\n"
750  "\n"
751  "// leading comment\n"
752  "next",
753 
754  "",
755  {" detached comment\n"},
756  " leading comment\n"},
757 
758  {"prev /**/\n"
759  "// leading comment\n"
760  "next",
761 
762  "",
763  {},
764  " leading comment\n"},
765 };
766 
767 TEST_2D(TokenizerTest, DocComments, kDocCommentCases, kBlockSizes) {
768  // Set up the tokenizer.
769  TestInputStream input(kDocCommentCases_case.input.data(),
770  kDocCommentCases_case.input.size(), kBlockSizes_case);
771  TestErrorCollector error_collector;
772  Tokenizer tokenizer(&input, &error_collector);
773 
774  // Set up a second tokenizer where we'll pass all NULLs to NextWithComments().
775  TestInputStream input2(kDocCommentCases_case.input.data(),
776  kDocCommentCases_case.input.size(), kBlockSizes_case);
777  Tokenizer tokenizer2(&input2, &error_collector);
778 
779  tokenizer.Next();
780  tokenizer2.Next();
781 
782  EXPECT_EQ("prev", tokenizer.current().text);
783  EXPECT_EQ("prev", tokenizer2.current().text);
784 
785  std::string prev_trailing_comments;
786  std::vector<std::string> detached_comments;
787  std::string next_leading_comments;
788  tokenizer.NextWithComments(&prev_trailing_comments, &detached_comments,
789  &next_leading_comments);
790  tokenizer2.NextWithComments(NULL, NULL, NULL);
791  EXPECT_EQ("next", tokenizer.current().text);
792  EXPECT_EQ("next", tokenizer2.current().text);
793 
794  EXPECT_EQ(kDocCommentCases_case.prev_trailing_comments,
795  prev_trailing_comments);
796 
797  for (int i = 0; i < detached_comments.size(); i++) {
798  ASSERT_LT(i, GOOGLE_ARRAYSIZE(kDocCommentCases));
799  ASSERT_TRUE(kDocCommentCases_case.detached_comments[i] != NULL);
800  EXPECT_EQ(kDocCommentCases_case.detached_comments[i], detached_comments[i]);
801  }
802 
803  // Verify that we matched all the detached comments.
804  EXPECT_EQ(NULL,
805  kDocCommentCases_case.detached_comments[detached_comments.size()]);
806 
807  EXPECT_EQ(kDocCommentCases_case.next_leading_comments, next_leading_comments);
808 }
809 
810 // -------------------------------------------------------------------
811 
812 // Test parse helpers. It's not really worth setting up a full data-driven
813 // test here.
814 TEST_F(TokenizerTest, ParseInteger) {
815  EXPECT_EQ(0, ParseInteger("0"));
816  EXPECT_EQ(123, ParseInteger("123"));
817  EXPECT_EQ(0xabcdef12u, ParseInteger("0xabcdef12"));
818  EXPECT_EQ(0xabcdef12u, ParseInteger("0xABCDEF12"));
819  EXPECT_EQ(kuint64max, ParseInteger("0xFFFFFFFFFFFFFFFF"));
820  EXPECT_EQ(01234567, ParseInteger("01234567"));
821  EXPECT_EQ(0X123, ParseInteger("0X123"));
822 
823  // Test invalid integers that may still be tokenized as integers.
824  EXPECT_EQ(0, ParseInteger("0x"));
825 
826  uint64 i;
827 
828  // Test invalid integers that will never be tokenized as integers.
834 
835  // Test overflows.
839  EXPECT_TRUE(Tokenizer::ParseInteger("12345", 12345, &i));
840  EXPECT_FALSE(Tokenizer::ParseInteger("12346", 12345, &i));
841  EXPECT_TRUE(Tokenizer::ParseInteger("0xFFFFFFFFFFFFFFFF", kuint64max, &i));
842  EXPECT_FALSE(Tokenizer::ParseInteger("0x10000000000000000", kuint64max, &i));
843 }
844 
845 TEST_F(TokenizerTest, ParseFloat) {
849  EXPECT_DOUBLE_EQ(1.5e3, Tokenizer::ParseFloat("1.5e3"));
853  EXPECT_DOUBLE_EQ(.25e3, Tokenizer::ParseFloat(".25e3"));
854  EXPECT_DOUBLE_EQ(.1e+3, Tokenizer::ParseFloat(".1e+3"));
855  EXPECT_DOUBLE_EQ(.1e-3, Tokenizer::ParseFloat(".1e-3"));
857  EXPECT_DOUBLE_EQ(6e-12, Tokenizer::ParseFloat("6e-12"));
860 
861  // Test invalid floats that may still be tokenized as floats.
865 
866  // Test 'f' suffix.
870 
871  // These should parse successfully even though they are out of range.
872  // Overflows become infinity and underflows become zero.
873  EXPECT_EQ(0.0, Tokenizer::ParseFloat("1e-9999999999999999999999999999"));
874  EXPECT_EQ(HUGE_VAL, Tokenizer::ParseFloat("1e+9999999999999999999999999999"));
875 
876 #ifdef PROTOBUF_HAS_DEATH_TEST // death tests do not work on Windows yet
877  // Test invalid floats that will never be tokenized as floats.
878  EXPECT_DEBUG_DEATH(
879  Tokenizer::ParseFloat("zxy"),
880  "passed text that could not have been tokenized as a float");
881  EXPECT_DEBUG_DEATH(
882  Tokenizer::ParseFloat("1-e0"),
883  "passed text that could not have been tokenized as a float");
884  EXPECT_DEBUG_DEATH(
885  Tokenizer::ParseFloat("-1.0"),
886  "passed text that could not have been tokenized as a float");
887 #endif // PROTOBUF_HAS_DEATH_TEST
888 }
889 
890 TEST_F(TokenizerTest, ParseString) {
891  std::string output;
892  Tokenizer::ParseString("'hello'", &output);
893  EXPECT_EQ("hello", output);
894  Tokenizer::ParseString("\"blah\\nblah2\"", &output);
895  EXPECT_EQ("blah\nblah2", output);
896  Tokenizer::ParseString("'\\1x\\1\\123\\739\\52\\334n\\3'", &output);
897  EXPECT_EQ("\1x\1\123\739\52\334n\3", output);
898  Tokenizer::ParseString("'\\x20\\x4'", &output);
899  EXPECT_EQ("\x20\x4", output);
900 
901  // Test invalid strings that may still be tokenized as strings.
902  Tokenizer::ParseString("\"\\a\\l\\v\\t", &output); // \l is invalid
903  EXPECT_EQ("\a?\v\t", output);
904  Tokenizer::ParseString("'", &output);
905  EXPECT_EQ("", output);
906  Tokenizer::ParseString("'\\", &output);
907  EXPECT_EQ("\\", output);
908 
909  // Experiment with Unicode escapes. Here are one-, two-, three- and four-byte
910  // Unicode characters.
911  Tokenizer::ParseString("'\\u0024\\u00a2\\u20ac\\U00024b62XX'", &output);
912  EXPECT_EQ("$¢€𤭢XX", output);
913  // Same thing encoded using UTF16.
914  Tokenizer::ParseString("'\\u0024\\u00a2\\u20ac\\ud852\\udf62XX'", &output);
915  EXPECT_EQ("$¢€𤭢XX", output);
916  // Here's some broken UTF16; there's a head surrogate with no tail surrogate.
917  // We just output this as if it were UTF8; it's not a defined code point, but
918  // it has a defined encoding.
919  Tokenizer::ParseString("'\\ud852XX'", &output);
920  EXPECT_EQ("\xed\xa1\x92XX", output);
921  // Malformed escape: Demons may fly out of the nose.
922  Tokenizer::ParseString("'\\u0'", &output);
923  EXPECT_EQ("u0", output);
924  // Beyond the range of valid UTF-32 code units.
925  Tokenizer::ParseString("'\\U00110000\\U00200000\\UFFFFFFFF'", &output);
926  EXPECT_EQ("\\U00110000\\U00200000\\Uffffffff", output);
927 
928  // Test invalid strings that will never be tokenized as strings.
929 #ifdef PROTOBUF_HAS_DEATH_TEST // death tests do not work on Windows yet
930  EXPECT_DEBUG_DEATH(
931  Tokenizer::ParseString("", &output),
932  "passed text that could not have been tokenized as a string");
933 #endif // PROTOBUF_HAS_DEATH_TEST
934 }
935 
936 TEST_F(TokenizerTest, ParseStringAppend) {
937  // Check that ParseString and ParseStringAppend differ.
938  std::string output("stuff+");
939  Tokenizer::ParseStringAppend("'hello'", &output);
940  EXPECT_EQ("stuff+hello", output);
941  Tokenizer::ParseString("'hello'", &output);
942  EXPECT_EQ("hello", output);
943 }
944 
945 // -------------------------------------------------------------------
946 
947 // Each case parses some input text, ignoring the tokens produced, and
948 // checks that the error output matches what is expected.
949 struct ErrorCase {
950  std::string input;
951  bool recoverable; // True if the tokenizer should be able to recover and
952  // parse more tokens after seeing this error. Cases
953  // for which this is true must end with "foo" as
954  // the last token, which the test will check for.
955  const char* errors;
956 };
957 
958 inline std::ostream& operator<<(std::ostream& out, const ErrorCase& test_case) {
959  return out << CEscape(test_case.input);
960 }
961 
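// Editorial note (not part of the original file): each entry below gives the
// input text, whether the tokenizer should recover afterwards, and the
// expected error output in the "line:column: message\n" format produced by
// TestErrorCollector above.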
962 ErrorCase kErrorCases[] = {
963  // String errors.
964  {"'\\l' foo", true, "0:2: Invalid escape sequence in string literal.\n"},
965  {"'\\X' foo", true, "0:2: Invalid escape sequence in string literal.\n"},
966  {"'\\x' foo", true, "0:3: Expected hex digits for escape sequence.\n"},
967  {"'foo", false, "0:4: Unexpected end of string.\n"},
968  {"'bar\nfoo", true, "0:4: String literals cannot cross line boundaries.\n"},
969  {"'\\u01' foo", true,
970  "0:5: Expected four hex digits for \\u escape sequence.\n"},
971  {"'\\u01' foo", true,
972  "0:5: Expected four hex digits for \\u escape sequence.\n"},
973  {"'\\uXYZ' foo", true,
974  "0:3: Expected four hex digits for \\u escape sequence.\n"},
975 
976  // Integer errors.
977  {"123foo", true, "0:3: Need space between number and identifier.\n"},
978 
979  // Hex/octal errors.
980  {"0x foo", true, "0:2: \"0x\" must be followed by hex digits.\n"},
981  {"0541823 foo", true,
982  "0:4: Numbers starting with leading zero must be in octal.\n"},
983  {"0x123z foo", true, "0:5: Need space between number and identifier.\n"},
984  {"0x123.4 foo", true, "0:5: Hex and octal numbers must be integers.\n"},
985  {"0123.4 foo", true, "0:4: Hex and octal numbers must be integers.\n"},
986 
987  // Float errors.
988  {"1e foo", true, "0:2: \"e\" must be followed by exponent.\n"},
989  {"1e- foo", true, "0:3: \"e\" must be followed by exponent.\n"},
990  {"1.2.3 foo", true,
991  "0:3: Already saw decimal point or exponent; can't have another one.\n"},
992  {"1e2.3 foo", true,
993  "0:3: Already saw decimal point or exponent; can't have another one.\n"},
994  {"a.1 foo", true,
995  "0:1: Need space between identifier and decimal point.\n"},
996  // allow_f_after_float not enabled, so this should be an error.
997  {"1.0f foo", true, "0:3: Need space between number and identifier.\n"},
998 
999  // Block comment errors.
1000  {"/*", false,
1001  "0:2: End-of-file inside block comment.\n"
1002  "0:0: Comment started here.\n"},
1003  {"/*/*/ foo", true,
1004  "0:3: \"/*\" inside block comment. Block comments cannot be nested.\n"},
1005 
1006  // Control characters. Multiple consecutive control characters should only
1007  // produce one error.
1008  {"\b foo", true, "0:0: Invalid control characters encountered in text.\n"},
1009  {"\b\b foo", true,
1010  "0:0: Invalid control characters encountered in text.\n"},
1011 
1012  // Check that control characters at end of input don't result in an
1013  // infinite loop.
1014  {"\b", false, "0:0: Invalid control characters encountered in text.\n"},
1015 
1016  // Check recovery from '\0'. We have to explicitly specify the length of
1017  // these strings because otherwise the string constructor will just call
1018  // strlen() which will see the first '\0' and think that is the end of the
1019  // string.
1020  {std::string("\0foo", 4), true,
1021  "0:0: Invalid control characters encountered in text.\n"},
1022  {std::string("\0\0foo", 5), true,
1023  "0:0: Invalid control characters encountered in text.\n"},
1024 
1025  // Check error from high order bits set
1026  {"\300foo", true, "0:0: Interpreting non ascii codepoint 192.\n"},
1027 };
1028 
1029 TEST_2D(TokenizerTest, Errors, kErrorCases, kBlockSizes) {
1030  // Set up the tokenizer.
1031  TestInputStream input(kErrorCases_case.input.data(),
1032  kErrorCases_case.input.size(), kBlockSizes_case);
1033  TestErrorCollector error_collector;
1034  Tokenizer tokenizer(&input, &error_collector);
1035 
1036  // Ignore all input, except remember if the last token was "foo".
1037  bool last_was_foo = false;
1038  while (tokenizer.Next()) {
1039  last_was_foo = tokenizer.current().text == "foo";
1040  }
1041 
1042  // Check that the errors match what was expected.
1043  EXPECT_EQ(kErrorCases_case.errors, error_collector.text_);
1044 
1045  // If the error was recoverable, make sure we saw "foo" after it.
1046  if (kErrorCases_case.recoverable) {
1047  EXPECT_TRUE(last_was_foo);
1048  }
1049 }
1050 
1051 // -------------------------------------------------------------------
1052 
1053 TEST_1D(TokenizerTest, BackUpOnDestruction, kBlockSizes) {
1054  std::string text = "foo bar";
1055  TestInputStream input(text.data(), text.size(), kBlockSizes_case);
1056 
1057  // Create a tokenizer, read one token, then destroy it.
1058  {
1059  TestErrorCollector error_collector;
1060  Tokenizer tokenizer(&input, &error_collector);
1061 
1062  tokenizer.Next();
1063  }
1064 
1065  // Only "foo" should have been read.
1066  EXPECT_EQ(strlen("foo"), input.ByteCount());
1067 }
1068 
1069 
1070 } // namespace
1071 } // namespace io
1072 } // namespace protobuf
1073 } // namespace google