诸暨麻将添加redis (Zhuji Mahjong: add Redis)
621 lines · 22 KiB

// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <google/protobuf/parse_context.h>

#include <google/protobuf/stubs/stringprintf.h>
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/io/zero_copy_stream.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/message_lite.h>
#include <google/protobuf/repeated_field.h>
#include <google/protobuf/wire_format_lite.h>
#include <google/protobuf/stubs/strutil.h>

#include <google/protobuf/port_def.inc>

namespace google {
namespace protobuf {
namespace internal {

namespace {

// Only call if at start of tag.
bool ParseEndsInSlopRegion(const char* begin, int overrun, int d) {
  constexpr int kSlopBytes = EpsCopyInputStream::kSlopBytes;
  GOOGLE_DCHECK(overrun >= 0);
  GOOGLE_DCHECK(overrun <= kSlopBytes);
  auto ptr = begin + overrun;
  auto end = begin + kSlopBytes;
  while (ptr < end) {
    uint32 tag;
    ptr = ReadTag(ptr, &tag);
    if (ptr == nullptr || ptr > end) return false;
    // ending on 0 tag is allowed and is the major reason for the necessity of
    // this function.
    if (tag == 0) return true;
    switch (tag & 7) {
      case 0: {  // Varint
        uint64 val;
        ptr = VarintParse(ptr, &val);
        if (ptr == nullptr) return false;
        break;
      }
      case 1: {  // fixed64
        ptr += 8;
        break;
      }
      case 2: {  // len delim
        int32 size = ReadSize(&ptr);
        if (ptr == nullptr || size > end - ptr) return false;
        ptr += size;
        break;
      }
      case 3: {  // start group
        d++;
        break;
      }
      case 4: {  // end group
        if (--d < 0) return true;  // We exit early
        break;
      }
      case 5: {  // fixed32
        ptr += 4;
        break;
      }
      default:
        return false;  // Unknown wireformat
    }
  }
  return false;
}

}  // namespace

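// Illustrative sketch, not part of the upstream file: ParseEndsInSlopRegion
// above switches on `tag & 7` because a wire-format tag packs the field
// number and a 3-bit wire type as (field_number << 3) | wire_type, where the
// wire types are 0 = varint, 1 = fixed64, 2 = length-delimited,
// 3 = start group, 4 = end group, 5 = fixed32. The hypothetical helpers
// below only restate that layout for reference.
constexpr uint32 ExampleFieldNumberOfTag(uint32 tag) { return tag >> 3; }
constexpr uint32 ExampleWireTypeOfTag(uint32 tag) { return tag & 7; }
static_assert(ExampleFieldNumberOfTag(0x0A) == 1,
              "tag 0x0A belongs to field 1");
static_assert(ExampleWireTypeOfTag(0x0A) == 2,
              "tag 0x0A is length-delimited (wire type 2)");
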
const char* EpsCopyInputStream::Next(int overrun, int d) {
  if (next_chunk_ == nullptr) return nullptr;  // We've reached end of stream.
  if (next_chunk_ != buffer_) {
    GOOGLE_DCHECK(size_ > kSlopBytes);
    // The chunk is large enough to be used directly
    buffer_end_ = next_chunk_ + size_ - kSlopBytes;
    auto res = next_chunk_;
    next_chunk_ = buffer_;
    if (aliasing_ == kOnPatch) aliasing_ = kNoDelta;
    return res;
  }
  // Move the slop bytes of previous buffer to start of the patch buffer.
  // Note we must use memmove because the previous buffer could be part of
  // buffer_.
  std::memmove(buffer_, buffer_end_, kSlopBytes);
  if (overall_limit_ > 0 &&
      (d < 0 || !ParseEndsInSlopRegion(buffer_, overrun, d))) {
    const void* data;
    // ZeroCopyInputStream indicates Next may return 0 size buffers. Hence
    // we loop.
    while (StreamNext(&data)) {
      if (size_ > kSlopBytes) {
        // We got a large chunk
        std::memcpy(buffer_ + kSlopBytes, data, kSlopBytes);
        next_chunk_ = static_cast<const char*>(data);
        buffer_end_ = buffer_ + kSlopBytes;
        if (aliasing_ >= kNoDelta) aliasing_ = kOnPatch;
        return buffer_;
      } else if (size_ > 0) {
        std::memcpy(buffer_ + kSlopBytes, data, size_);
        next_chunk_ = buffer_;
        buffer_end_ = buffer_ + size_;
        if (aliasing_ >= kNoDelta) aliasing_ = kOnPatch;
        return buffer_;
      }
      GOOGLE_DCHECK(size_ == 0) << size_;
    }
    overall_limit_ = 0;  // Next failed, no more need for Next.
  }
  // End of stream or array
  if (aliasing_ == kNoDelta) {
    // If there is no more block and aliasing is true, the previous block
    // is still valid and we can alias. We have users relying on string_view's
    // obtained from protos to outlive the proto, when the parse was from an
    // array. This guarantees string_view's are always aliased if parsed from
    // an array.
    aliasing_ = reinterpret_cast<std::uintptr_t>(buffer_end_) -
                reinterpret_cast<std::uintptr_t>(buffer_);
  }
  next_chunk_ = nullptr;
  buffer_end_ = buffer_ + kSlopBytes;
  size_ = 0;
  return buffer_;
}

std::pair<const char*, bool> EpsCopyInputStream::DoneFallback(const char* ptr,
                                                              int d) {
  GOOGLE_DCHECK(ptr >= limit_end_);
  int overrun = ptr - buffer_end_;
  GOOGLE_DCHECK(overrun <= kSlopBytes);  // Guaranteed by parse loop.
  // Did we exceed the limit (parse error)?
  if (PROTOBUF_PREDICT_FALSE(overrun > limit_)) return {nullptr, true};
  GOOGLE_DCHECK(overrun != limit_);  // Guaranteed by caller.
  GOOGLE_DCHECK(overrun < limit_);   // Follows from above
  // TODO(gerbens) Instead of this dcheck we could just assign, and remove
  // updating the limit_end from PopLimit, ie.
  // limit_end_ = buffer_end_ + (std::min)(0, limit_);
  // if (ptr < limit_end_) return {ptr, false};
  GOOGLE_DCHECK(limit_end_ == buffer_end_ + (std::min)(0, limit_));
  // At this point we know the following assertion holds.
  GOOGLE_DCHECK(limit_ > 0);
  GOOGLE_DCHECK(limit_end_ == buffer_end_);  // because limit_ > 0
  do {
    // We are past the end of buffer_end_, in the slop region.
    GOOGLE_DCHECK(overrun >= 0);
    auto p = Next(overrun, d);
    if (p == nullptr) {
      // We are at the end of the stream
      if (PROTOBUF_PREDICT_FALSE(overrun != 0)) return {nullptr, true};
      GOOGLE_DCHECK(limit_ > 0);
      limit_end_ = buffer_end_;
      // Distinguish ending on a pushed limit or ending on end-of-stream.
      SetEndOfStream();
      return {ptr, true};
    }
    limit_ -= buffer_end_ - p;  // Adjust limit_ relative to new anchor
    ptr = p + overrun;
    overrun = ptr - buffer_end_;
  } while (overrun >= 0);
  limit_end_ = buffer_end_ + std::min(0, limit_);
  return {ptr, false};
}

const char* EpsCopyInputStream::SkipFallback(const char* ptr, int size) {
  return AppendSize(ptr, size, [](const char* p, int s) {});
}

const char* EpsCopyInputStream::ReadStringFallback(const char* ptr, int size,
                                                   std::string* s) {
  s->clear();
  // TODO(gerbens) assess security. At the moment it's on par with
  // CodedInputStream, but it allows a payload to reserve large memory.
  if (PROTOBUF_PREDICT_TRUE(size <= buffer_end_ - ptr + limit_)) {
    s->reserve(size);
  }
  return AppendStringFallback(ptr, size, s);
}

const char* EpsCopyInputStream::AppendStringFallback(const char* ptr, int size,
                                                     std::string* str) {
  // TODO(gerbens) assess security. At the moment it's on par with
  // CodedInputStream, but it allows a payload to reserve large memory.
  if (PROTOBUF_PREDICT_TRUE(size <= buffer_end_ - ptr + limit_)) {
    str->reserve(size);
  }
  return AppendSize(ptr, size,
                    [str](const char* p, int s) { str->append(p, s); });
}

template <typename Tag, typename T>
const char* EpsCopyInputStream::ReadRepeatedFixed(const char* ptr,
                                                  Tag expected_tag,
                                                  RepeatedField<T>* out) {
  do {
    out->Add(UnalignedLoad<T>(ptr));
    ptr += sizeof(T);
    if (PROTOBUF_PREDICT_FALSE(ptr >= limit_end_)) return ptr;
  } while (UnalignedLoad<Tag>(ptr) == expected_tag && (ptr += sizeof(Tag)));
  return ptr;
}

template <int>
void byteswap(void* p);
template <>
void byteswap<1>(void* p) {}
template <>
void byteswap<4>(void* p) {
  *static_cast<uint32*>(p) = bswap_32(*static_cast<uint32*>(p));
}
template <>
void byteswap<8>(void* p) {
  *static_cast<uint64*>(p) = bswap_64(*static_cast<uint64*>(p));
}

template <typename T>
const char* EpsCopyInputStream::ReadPackedFixed(const char* ptr, int size,
                                                RepeatedField<T>* out) {
  int nbytes = buffer_end_ + kSlopBytes - ptr;
  while (size > nbytes) {
    int num = nbytes / sizeof(T);
    int old_entries = out->size();
    out->Reserve(old_entries + num);
    int block_size = num * sizeof(T);
    auto dst = out->AddNAlreadyReserved(num);
#ifdef PROTOBUF_LITTLE_ENDIAN
    std::memcpy(dst, ptr, block_size);
#else
    for (int i = 0; i < num; i++)
      dst[i] = UnalignedLoad<T>(ptr + i * sizeof(T));
#endif
    ptr += block_size;
    size -= block_size;
    if (DoneWithCheck(&ptr, -1)) return nullptr;
    nbytes = buffer_end_ + kSlopBytes - ptr;
  }
  int num = size / sizeof(T);
  int old_entries = out->size();
  out->Reserve(old_entries + num);
  int block_size = num * sizeof(T);
  auto dst = out->AddNAlreadyReserved(num);
#ifdef PROTOBUF_LITTLE_ENDIAN
  std::memcpy(dst, ptr, block_size);
#else
  for (int i = 0; i < num; i++) dst[i] = UnalignedLoad<T>(ptr + i * sizeof(T));
#endif
  ptr += block_size;
  if (size != block_size) return nullptr;
  return ptr;
}

const char* EpsCopyInputStream::InitFrom(io::ZeroCopyInputStream* zcis) {
  zcis_ = zcis;
  const void* data;
  int size;
  limit_ = INT_MAX;
  if (zcis->Next(&data, &size)) {
    overall_limit_ -= size;
    if (size > kSlopBytes) {
      auto ptr = static_cast<const char*>(data);
      limit_ -= size - kSlopBytes;
      limit_end_ = buffer_end_ = ptr + size - kSlopBytes;
      next_chunk_ = buffer_;
      if (aliasing_ == kOnPatch) aliasing_ = kNoDelta;
      return ptr;
    } else {
      limit_end_ = buffer_end_ = buffer_ + kSlopBytes;
      next_chunk_ = buffer_;
      auto ptr = buffer_ + 2 * kSlopBytes - size;
      std::memcpy(ptr, data, size);
      return ptr;
    }
  }
  overall_limit_ = 0;
  next_chunk_ = nullptr;
  size_ = 0;
  limit_end_ = buffer_end_ = buffer_;
  return buffer_;
}

const char* ParseContext::ParseMessage(MessageLite* msg, const char* ptr) {
  return ParseMessage<MessageLite>(msg, ptr);
}
const char* ParseContext::ParseMessage(Message* msg, const char* ptr) {
  // Use reinterpret_cast to prevent inclusion of the non-lite header.
  return ParseMessage(reinterpret_cast<MessageLite*>(msg), ptr);
}

inline void WriteVarint(uint64 val, std::string* s) {
  while (val >= 128) {
    uint8 c = val | 0x80;
    s->push_back(c);
    val >>= 7;
  }
  s->push_back(val);
}

void WriteVarint(uint32 num, uint64 val, std::string* s) {
  WriteVarint(num << 3, s);
  WriteVarint(val, s);
}

void WriteLengthDelimited(uint32 num, StringPiece val, std::string* s) {
  WriteVarint((num << 3) + 2, s);
  WriteVarint(val.size(), s);
  s->append(val.data(), val.size());
}

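// Illustrative sketch, not part of the upstream file: WriteVarint above emits
// base-128 varints, 7 payload bits per byte with the continuation bit (0x80)
// set on every byte except the last. For example, 300 encodes as the two
// bytes 0xAC 0x02, and VarintParse (already used elsewhere in this file)
// decodes them back. The function name below is made up for this example.
inline void ExampleVarintRoundTrip() {
  std::string s;
  WriteVarint(300, &s);  // 300 -> 0xAC 0x02
  GOOGLE_DCHECK(s.size() == 2);
  GOOGLE_DCHECK(static_cast<uint8>(s[0]) == 0xAC);
  GOOGLE_DCHECK(static_cast<uint8>(s[1]) == 0x02);
  uint64 decoded;
  const char* end = VarintParse(s.data(), &decoded);
  GOOGLE_DCHECK(end != nullptr && decoded == 300);
}
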
std::pair<const char*, uint32> VarintParseSlow32(const char* p, uint32 res) {
  for (std::uint32_t i = 2; i < 5; i++) {
    uint32 byte = static_cast<uint8>(p[i]);
    res += (byte - 1) << (7 * i);
    if (PROTOBUF_PREDICT_TRUE(byte < 128)) {
      return {p + i + 1, res};
    }
  }
  // Accept >5 bytes
  for (std::uint32_t i = 5; i < 10; i++) {
    uint32 byte = static_cast<uint8>(p[i]);
    if (PROTOBUF_PREDICT_TRUE(byte < 128)) {
      return {p + i + 1, res};
    }
  }
  return {nullptr, 0};
}

std::pair<const char*, uint64> VarintParseSlow64(const char* p, uint32 res32) {
  uint64 res = res32;
  for (std::uint32_t i = 2; i < 10; i++) {
    uint64 byte = static_cast<uint8>(p[i]);
    res += (byte - 1) << (7 * i);
    if (PROTOBUF_PREDICT_TRUE(byte < 128)) {
      return {p + i + 1, res};
    }
  }
  return {nullptr, 0};
}

std::pair<const char*, uint32> ReadTagFallback(const char* p, uint32 res) {
  for (std::uint32_t i = 2; i < 5; i++) {
    uint32 byte = static_cast<uint8>(p[i]);
    res += (byte - 1) << (7 * i);
    if (PROTOBUF_PREDICT_TRUE(byte < 128)) {
      return {p + i + 1, res};
    }
  }
  return {nullptr, 0};
}

std::pair<const char*, int32> ReadSizeFallback(const char* p, uint32 res) {
  for (std::uint32_t i = 1; i < 4; i++) {
    uint32 byte = static_cast<uint8>(p[i]);
    res += (byte - 1) << (7 * i);
    if (PROTOBUF_PREDICT_TRUE(byte < 128)) {
      return {p + i + 1, res};
    }
  }
  std::uint32_t byte = static_cast<uint8>(p[4]);
  if (PROTOBUF_PREDICT_FALSE(byte >= 8)) return {nullptr, 0};  // size >= 2gb
  res += (byte - 1) << 28;
  // Protect against signed integer overflow in PushLimit. Limits are relative
  // to buffer ends and ptr could potentially be kSlopBytes beyond a buffer
  // end. To protect against overflow we reject limits absurdly close to
  // INT_MAX.
  if (PROTOBUF_PREDICT_FALSE(res > INT_MAX - ParseContext::kSlopBytes)) {
    return {nullptr, 0};
  }
  return {p + 5, res};
}

const char* StringParser(const char* begin, const char* end, void* object,
                         ParseContext*) {
  auto str = static_cast<std::string*>(object);
  str->append(begin, end - begin);
  return end;
}

// Defined in wire_format_lite.cc
void PrintUTF8ErrorLog(const char* field_name, const char* operation_str,
                       bool emit_stacktrace);

bool VerifyUTF8(StringPiece str, const char* field_name) {
  if (!IsStructurallyValidUTF8(str)) {
    PrintUTF8ErrorLog(field_name, "parsing", false);
    return false;
  }
  return true;
}

const char* InlineGreedyStringParser(std::string* s, const char* ptr,
                                     ParseContext* ctx) {
  int size = ReadSize(&ptr);
  if (!ptr) return nullptr;
  return ctx->ReadString(ptr, size, s);
}

const char* InlineGreedyStringParserUTF8(std::string* s, const char* ptr,
                                         ParseContext* ctx,
                                         const char* field_name) {
  auto p = InlineGreedyStringParser(s, ptr, ctx);
  GOOGLE_PROTOBUF_PARSER_ASSERT(VerifyUTF8(*s, field_name));
  return p;
}

template <typename T, bool sign>
const char* VarintParser(void* object, const char* ptr, ParseContext* ctx) {
  return ctx->ReadPackedVarint(ptr, [object](uint64 varint) {
    T val;
    if (sign) {
      if (sizeof(T) == 8) {
        val = WireFormatLite::ZigZagDecode64(varint);
      } else {
        val = WireFormatLite::ZigZagDecode32(varint);
      }
    } else {
      val = varint;
    }
    static_cast<RepeatedField<T>*>(object)->Add(val);
  });
}

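// Illustrative sketch, not part of the upstream file: when `sign` is true the
// parser above undoes ZigZag encoding, which maps signed values onto small
// unsigned varints as 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ... The
// hypothetical helper below only spells out that mapping.
inline void ExampleZigZagDecode() {
  GOOGLE_DCHECK(WireFormatLite::ZigZagDecode64(0) == 0);
  GOOGLE_DCHECK(WireFormatLite::ZigZagDecode64(1) == -1);
  GOOGLE_DCHECK(WireFormatLite::ZigZagDecode64(2) == 1);
  GOOGLE_DCHECK(WireFormatLite::ZigZagDecode64(3) == -2);
}
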
const char* PackedInt32Parser(void* object, const char* ptr,
                              ParseContext* ctx) {
  return VarintParser<int32, false>(object, ptr, ctx);
}
const char* PackedUInt32Parser(void* object, const char* ptr,
                               ParseContext* ctx) {
  return VarintParser<uint32, false>(object, ptr, ctx);
}
const char* PackedInt64Parser(void* object, const char* ptr,
                              ParseContext* ctx) {
  return VarintParser<int64, false>(object, ptr, ctx);
}
const char* PackedUInt64Parser(void* object, const char* ptr,
                               ParseContext* ctx) {
  return VarintParser<uint64, false>(object, ptr, ctx);
}
const char* PackedSInt32Parser(void* object, const char* ptr,
                               ParseContext* ctx) {
  return VarintParser<int32, true>(object, ptr, ctx);
}
const char* PackedSInt64Parser(void* object, const char* ptr,
                               ParseContext* ctx) {
  return VarintParser<int64, true>(object, ptr, ctx);
}

const char* PackedEnumParser(void* object, const char* ptr, ParseContext* ctx) {
  return VarintParser<int, false>(object, ptr, ctx);
}

const char* PackedEnumParser(void* object, const char* ptr, ParseContext* ctx,
                             bool (*is_valid)(int),
                             InternalMetadataWithArenaLite* metadata,
                             int field_num) {
  return ctx->ReadPackedVarint(
      ptr, [object, is_valid, metadata, field_num](uint64 val) {
        if (is_valid(val)) {
          static_cast<RepeatedField<int>*>(object)->Add(val);
        } else {
          WriteVarint(field_num, val, metadata->mutable_unknown_fields());
        }
      });
}

const char* PackedEnumParserArg(void* object, const char* ptr,
                                ParseContext* ctx,
                                bool (*is_valid)(const void*, int),
                                const void* data,
                                InternalMetadataWithArenaLite* metadata,
                                int field_num) {
  return ctx->ReadPackedVarint(
      ptr, [object, is_valid, data, metadata, field_num](uint64 val) {
        if (is_valid(data, val)) {
          static_cast<RepeatedField<int>*>(object)->Add(val);
        } else {
          WriteVarint(field_num, val, metadata->mutable_unknown_fields());
        }
      });
}

const char* PackedBoolParser(void* object, const char* ptr, ParseContext* ctx) {
  return VarintParser<bool, false>(object, ptr, ctx);
}

template <typename T>
const char* FixedParser(void* object, const char* ptr, ParseContext* ctx) {
  int size = ReadSize(&ptr);
  GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
  return ctx->ReadPackedFixed(ptr, size,
                              static_cast<RepeatedField<T>*>(object));
}

const char* PackedFixed32Parser(void* object, const char* ptr,
                                ParseContext* ctx) {
  return FixedParser<uint32>(object, ptr, ctx);
}
const char* PackedSFixed32Parser(void* object, const char* ptr,
                                 ParseContext* ctx) {
  return FixedParser<int32>(object, ptr, ctx);
}
const char* PackedFixed64Parser(void* object, const char* ptr,
                                ParseContext* ctx) {
  return FixedParser<uint64>(object, ptr, ctx);
}
const char* PackedSFixed64Parser(void* object, const char* ptr,
                                 ParseContext* ctx) {
  return FixedParser<int64>(object, ptr, ctx);
}
const char* PackedFloatParser(void* object, const char* ptr,
                              ParseContext* ctx) {
  return FixedParser<float>(object, ptr, ctx);
}
const char* PackedDoubleParser(void* object, const char* ptr,
                               ParseContext* ctx) {
  return FixedParser<double>(object, ptr, ctx);
}

class UnknownFieldLiteParserHelper {
 public:
  explicit UnknownFieldLiteParserHelper(std::string* unknown)
      : unknown_(unknown) {}

  void AddVarint(uint32 num, uint64 value) {
    if (unknown_ == nullptr) return;
    WriteVarint(num * 8, unknown_);
    WriteVarint(value, unknown_);
  }
  void AddFixed64(uint32 num, uint64 value) {
    if (unknown_ == nullptr) return;
    WriteVarint(num * 8 + 1, unknown_);
    char buffer[8];
    io::CodedOutputStream::WriteLittleEndian64ToArray(
        value, reinterpret_cast<uint8*>(buffer));
    unknown_->append(buffer, 8);
  }
  const char* ParseLengthDelimited(uint32 num, const char* ptr,
                                   ParseContext* ctx) {
    int size = ReadSize(&ptr);
    GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
    if (unknown_ == nullptr) return ctx->Skip(ptr, size);
    WriteVarint(num * 8 + 2, unknown_);
    WriteVarint(size, unknown_);
    return ctx->AppendString(ptr, size, unknown_);
  }
  const char* ParseGroup(uint32 num, const char* ptr, ParseContext* ctx) {
    if (unknown_) WriteVarint(num * 8 + 3, unknown_);
    ptr = ctx->ParseGroup(this, ptr, num * 8 + 3);
    GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
    if (unknown_) WriteVarint(num * 8 + 4, unknown_);
    return ptr;
  }
  void AddFixed32(uint32 num, uint32 value) {
    if (unknown_ == nullptr) return;
    WriteVarint(num * 8 + 5, unknown_);
    char buffer[4];
    io::CodedOutputStream::WriteLittleEndian32ToArray(
        value, reinterpret_cast<uint8*>(buffer));
    unknown_->append(buffer, 4);
  }

  const char* _InternalParse(const char* ptr, ParseContext* ctx) {
    return WireFormatParser(*this, ptr, ctx);
  }

 private:
  std::string* unknown_;
};

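// Illustrative sketch, not part of the upstream file: the helper above
// re-serializes unknown fields in the standard wire format, so a varint field
// number 1 with value 150 becomes the three bytes 08 96 01 (tag = 1 * 8 + 0,
// followed by the varint 150). The function name below is made up for this
// example.
inline void ExampleUnknownVarintBytes() {
  std::string unknown;
  UnknownFieldLiteParserHelper helper(&unknown);
  helper.AddVarint(1, 150);
  GOOGLE_DCHECK(unknown.size() == 3);
  GOOGLE_DCHECK(static_cast<uint8>(unknown[0]) == 0x08);
  GOOGLE_DCHECK(static_cast<uint8>(unknown[1]) == 0x96);
  GOOGLE_DCHECK(static_cast<uint8>(unknown[2]) == 0x01);
}
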
const char* UnknownGroupLiteParse(std::string* unknown, const char* ptr,
                                  ParseContext* ctx) {
  UnknownFieldLiteParserHelper field_parser(unknown);
  return WireFormatParser(field_parser, ptr, ctx);
}

const char* UnknownFieldParse(uint32 tag, std::string* unknown, const char* ptr,
                              ParseContext* ctx) {
  UnknownFieldLiteParserHelper field_parser(unknown);
  return FieldParser(tag, field_parser, ptr, ctx);
}

const char* UnknownFieldParse(uint32 tag,
                              InternalMetadataWithArenaLite* metadata,
                              const char* ptr, ParseContext* ctx) {
  return UnknownFieldParse(tag, metadata->mutable_unknown_fields(), ptr, ctx);
}

}  // namespace internal
}  // namespace protobuf
}  // namespace google