诸暨麻将添加redis
Nie możesz wybrać więcej, niż 25 tematów Tematy muszą się zaczynać od litery lub cyfry, mogą zawierać myślniki ('-') i mogą mieć do 35 znaków.
 
 
 
 
 
 

788 wiersze
29 KiB

  1. // Protocol Buffers - Google's data interchange format
  2. // Copyright 2008 Google Inc. All rights reserved.
  3. // https://developers.google.com/protocol-buffers/
  4. //
  5. // Redistribution and use in source and binary forms, with or without
  6. // modification, are permitted provided that the following conditions are
  7. // met:
  8. //
  9. // * Redistributions of source code must retain the above copyright
  10. // notice, this list of conditions and the following disclaimer.
  11. // * Redistributions in binary form must reproduce the above
  12. // copyright notice, this list of conditions and the following disclaimer
  13. // in the documentation and/or other materials provided with the
  14. // distribution.
  15. // * Neither the name of Google Inc. nor the names of its
  16. // contributors may be used to endorse or promote products derived from
  17. // this software without specific prior written permission.
  18. //
  19. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  20. // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  21. // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  22. // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  23. // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  24. // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  25. // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  26. // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  27. // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  28. // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  29. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  30. #ifndef GOOGLE_PROTOBUF_PARSE_CONTEXT_H__
  31. #define GOOGLE_PROTOBUF_PARSE_CONTEXT_H__
  32. #include <cstdint>
  33. #include <cstring>
  34. #include <string>
  35. #include <google/protobuf/io/coded_stream.h>
  36. #include <google/protobuf/io/zero_copy_stream.h>
  37. #include <google/protobuf/arenastring.h>
  38. #include <google/protobuf/implicit_weak_message.h>
  39. #include <google/protobuf/metadata_lite.h>
  40. #include <google/protobuf/port.h>
  41. #include <google/protobuf/repeated_field.h>
  42. #include <google/protobuf/wire_format_lite.h>
  43. #include <google/protobuf/stubs/strutil.h>
  44. #include <google/protobuf/port_def.inc>
namespace google {
namespace protobuf {

class UnknownFieldSet;
class DescriptorPool;
class MessageFactory;

namespace internal {

// Template code below needs to know about the existence of these functions.
PROTOBUF_EXPORT void WriteVarint(uint32 num, uint64 val, std::string* s);
PROTOBUF_EXPORT void WriteLengthDelimited(uint32 num, StringPiece val,
                                          std::string* s);
// Inline because it is just forwarding to s->WriteVarint
inline void WriteVarint(uint32 num, uint64 val, UnknownFieldSet* s);
inline void WriteLengthDelimited(uint32 num, StringPiece val,
                                 UnknownFieldSet* s);
  59. // The basic abstraction the parser is designed for is a slight modification
  60. // of the ZeroCopyInputStream (ZCIS) abstraction. A ZCIS presents a serialized
  61. // stream as a series of buffers that concatenate to the full stream.
  62. // Pictorially a ZCIS presents a stream in chunks like so
  63. // [---------------------------------------------------------------]
  64. // [---------------------] chunk 1
  65. // [----------------------------] chunk 2
  66. // chunk 3 [--------------]
  67. //
  68. // Where the '-' represent the bytes which are vertically lined up with the
  69. // bytes of the stream. The proto parser requires its input to be presented
  70. // similarly with the extra
  71. // property that each chunk has kSlopBytes past its end that overlaps with the
  72. // first kSlopBytes of the next chunk, or if there is no next chunk at least its
  73. // still valid to read those bytes. Again, pictorially, we now have
  74. //
  75. // [---------------------------------------------------------------]
  76. // [-------------------....] chunk 1
  77. // [------------------------....] chunk 2
  78. // chunk 3 [------------------..**]
  79. // chunk 4 [--****]
  80. // Here '-' mean the bytes of the stream or chunk and '.' means bytes past the
  81. // chunk that match up with the start of the next chunk. Above each chunk has
  82. // 4 '.' after the chunk. In the case these 'overflow' bytes represent bytes
  83. // past the stream, indicated by '*' above, their values are unspecified. It is
  84. // still legal to read them (ie. should not segfault). Reading past the
  85. // end should be detected by the user and indicated as an error.
  86. //
  87. // The reason for this, admittedly, unconventional invariant is to ruthlessly
  88. // optimize the protobuf parser. Having an overlap helps in two important ways.
  89. // Firstly it alleviates having to perform bounds checks if a piece of code
  90. // is guaranteed to not read more than kSlopBytes. Secondly, and more
  91. // importantly, the protobuf wireformat is such that reading a key/value pair is
  92. // always less than 16 bytes. This removes the need to change to next buffer in
  93. // the middle of reading primitive values. Hence there is no need to store and
  94. // load the current position.
// EpsCopyInputStream presents the input as overlapping chunks (see the
// diagram above): every chunk has kSlopBytes readable past its end, which
// lets the hot parse loop read tag/value pairs without per-field bounds
// checks or buffer-seam handling.
class PROTOBUF_EXPORT EpsCopyInputStream {
 public:
  enum { kSlopBytes = 16, kMaxCordBytesToCopy = 512 };

  explicit EpsCopyInputStream(bool enable_aliasing)
      : aliasing_(enable_aliasing ? kOnPatch : kNoAliasing) {}

  // Returns the bytes from ptr onwards (not yet consumed) to the underlying
  // ZeroCopyInputStream.
  void BackUp(const char* ptr) {
    GOOGLE_DCHECK(ptr <= buffer_end_ + kSlopBytes);
    int count;
    if (next_chunk_ == buffer_) {
      // Tail of the stream was copied into the internal patch buffer; count
      // from its slop-extended end.
      count = static_cast<int>(buffer_end_ + kSlopBytes - ptr);
    } else {
      count = size_ + static_cast<int>(buffer_end_ - ptr);
    }
    if (count > 0) StreamBackUp(count);
  }

  // Pushes a nested length limit of `limit` bytes starting at ptr. Returns
  // the delta against the previous limit, to be handed back to PopLimit.
  // If return value is negative it's an error.
  PROTOBUF_MUST_USE_RESULT int PushLimit(const char* ptr, int limit) {
    GOOGLE_DCHECK(limit >= 0 && limit <= INT_MAX - kSlopBytes);
    // This add is safe due to the invariant above, because
    // ptr - buffer_end_ <= kSlopBytes.
    limit += static_cast<int>(ptr - buffer_end_);
    limit_end_ = buffer_end_ + (std::min)(0, limit);
    auto old_limit = limit_;
    limit_ = limit;
    return old_limit - limit;
  }

  // Restores the enclosing limit saved by the matching PushLimit. Returns
  // false if the nested parse did not end exactly on its limit.
  PROTOBUF_MUST_USE_RESULT bool PopLimit(int delta) {
    if (PROTOBUF_PREDICT_FALSE(!EndedAtLimit())) return false;
    limit_ = limit_ + delta;
    // TODO(gerbens) We could remove this line and hoist the code to
    // DoneFallback. Study the perf/bin-size effects.
    limit_end_ = buffer_end_ + (std::min)(0, limit_);
    return true;
  }

  // Skips `size` bytes; falls back when the skip crosses a buffer seam.
  PROTOBUF_MUST_USE_RESULT const char* Skip(const char* ptr, int size) {
    if (size <= buffer_end_ + kSlopBytes - ptr) {
      return ptr + size;
    }
    return SkipFallback(ptr, size);
  }

  // Reads `size` bytes into *s, replacing its contents.
  PROTOBUF_MUST_USE_RESULT const char* ReadString(const char* ptr, int size,
                                                  std::string* s) {
    if (size <= buffer_end_ + kSlopBytes - ptr) {
      s->assign(ptr, size);
      return ptr + size;
    }
    return ReadStringFallback(ptr, size, s);
  }

  // Appends `size` bytes to *s.
  PROTOBUF_MUST_USE_RESULT const char* AppendString(const char* ptr, int size,
                                                    std::string* s) {
    if (size <= buffer_end_ + kSlopBytes - ptr) {
      s->append(ptr, size);
      return ptr + size;
    }
    return AppendStringFallback(ptr, size, s);
  }

  template <typename Tag, typename T>
  PROTOBUF_MUST_USE_RESULT const char* ReadRepeatedFixed(const char* ptr,
                                                         Tag expected_tag,
                                                         RepeatedField<T>* out);
  template <typename T>
  PROTOBUF_MUST_USE_RESULT const char* ReadPackedFixed(const char* ptr,
                                                       int size,
                                                       RepeatedField<T>* out);
  template <typename Add>
  PROTOBUF_MUST_USE_RESULT const char* ReadPackedVarint(const char* ptr,
                                                        Add add);

  // Accessors for the parse-end state; see last_tag_minus_1_ below for the
  // encoding.
  uint32 LastTag() const { return last_tag_minus_1_ + 1; }
  bool ConsumeEndGroup(uint32 start_tag) {
    bool res = last_tag_minus_1_ == start_tag;
    last_tag_minus_1_ = 0;
    return res;
  }
  bool EndedAtLimit() const { return last_tag_minus_1_ == 0; }
  bool EndedAtEndOfStream() const { return last_tag_minus_1_ == 1; }
  void SetLastTag(uint32 tag) { last_tag_minus_1_ = tag - 1; }
  void SetEndOfStream() { last_tag_minus_1_ = 1; }
  bool IsExceedingLimit(const char* ptr) {
    return ptr > limit_end_ &&
           (next_chunk_ == nullptr || ptr - buffer_end_ > limit_);
  }
  int BytesUntilLimit(const char* ptr) const {
    return limit_ + static_cast<int>(buffer_end_ - ptr);
  }
  // Returns true if more data is available, if false is returned one has to
  // call Done for further checks.
  bool DataAvailable(const char* ptr) { return ptr < limit_end_; }

 protected:
  // Returns true is limit (either an explicit limit or end of stream) is
  // reached. It aligns *ptr across buffer seams.
  // If limit is exceeded it returns true and ptr is set to null.
  bool DoneWithCheck(const char** ptr, int d) {
    GOOGLE_DCHECK(*ptr);
    if (PROTOBUF_PREDICT_TRUE(*ptr < limit_end_)) return false;
    // No need to fetch buffer if we ended on a limit in the slop region
    if ((*ptr - buffer_end_) == limit_) return true;
    auto res = DoneFallback(*ptr, d);
    *ptr = res.first;
    return res.second;
  }

  // Initializes from a single flat buffer. Inputs of at most kSlopBytes are
  // copied into the internal patch buffer so the slop invariant still holds.
  const char* InitFrom(StringPiece flat) {
    overall_limit_ = 0;
    if (flat.size() > kSlopBytes) {
      limit_ = kSlopBytes;
      limit_end_ = buffer_end_ = flat.end() - kSlopBytes;
      next_chunk_ = buffer_;
      if (aliasing_ == kOnPatch) aliasing_ = kNoDelta;
      return flat.begin();
    } else {
      std::memcpy(buffer_, flat.begin(), flat.size());
      limit_ = 0;
      limit_end_ = buffer_end_ = buffer_ + flat.size();
      next_chunk_ = nullptr;
      if (aliasing_ == kOnPatch) {
        // Record the offset between the caller's data and the patch buffer so
        // aliased views can be translated back to the original memory.
        aliasing_ = reinterpret_cast<std::uintptr_t>(flat.data()) -
                    reinterpret_cast<std::uintptr_t>(buffer_);
      }
      return buffer_;
    }
  }

  const char* InitFrom(io::ZeroCopyInputStream* zcis);

  const char* InitFrom(io::ZeroCopyInputStream* zcis, int limit) {
    overall_limit_ = limit;
    auto res = InitFrom(zcis);
    limit_ = limit - static_cast<int>(buffer_end_ - res);
    limit_end_ = buffer_end_ + (std::min)(0, limit_);
    return res;
  }

 private:
  const char* limit_end_;  // buffer_end_ + min(limit_, 0)
  const char* buffer_end_;
  const char* next_chunk_;
  int size_;   // size of the last chunk obtained from zcis_
  int limit_;  // relative to buffer_end_;
  io::ZeroCopyInputStream* zcis_ = nullptr;
  // Patch buffer used to stitch the tail of one chunk to the head of the
  // next, preserving the kSlopBytes overlap invariant across seams.
  char buffer_[2 * kSlopBytes] = {};
  enum { kNoAliasing = 0, kOnPatch = 1, kNoDelta = 2 };
  std::uintptr_t aliasing_ = kNoAliasing;
  // This variable is used to communicate how the parse ended, in order to
  // completely verify the parsed data. A wire-format parse can end because of
  // one of the following conditions:
  // 1) A parse can end on a pushed limit.
  // 2) A parse can end on End Of Stream (EOS).
  // 3) A parse can end on 0 tag (only valid for toplevel message).
  // 4) A parse can end on an end-group tag.
  // This variable should always be set to 0, which indicates case 1. If the
  // parse terminated due to EOS (case 2), it's set to 1. In case the parse
  // ended due to a terminating tag (case 3 and 4) it's set to (tag - 1).
  // This var doesn't really belong in EpsCopyInputStream and should be part of
  // the ParseContext, but case 2 is most easily and optimally implemented in
  // DoneFallback.
  uint32 last_tag_minus_1_ = 0;
  int overall_limit_ = INT_MAX;  // Overall limit independent of pushed limits.

  std::pair<const char*, bool> DoneFallback(const char* ptr, int d);
  const char* Next(int overrun, int d);
  const char* SkipFallback(const char* ptr, int size);
  const char* AppendStringFallback(const char* ptr, int size, std::string* str);
  const char* ReadStringFallback(const char* ptr, int size, std::string* str);

  bool StreamNext(const void** data) {
    bool res = zcis_->Next(data, &size_);
    if (res) overall_limit_ -= size_;
    return res;
  }
  void StreamBackUp(int count) {
    zcis_->BackUp(count);
    overall_limit_ += count;
  }

  // Feeds `size` bytes to append(), crossing as many buffer seams as needed.
  template <typename A>
  const char* AppendSize(const char* ptr, int size, const A& append) {
    int chunk_size = buffer_end_ + kSlopBytes - ptr;
    do {
      GOOGLE_DCHECK(size > chunk_size);
      append(ptr, chunk_size);
      ptr += chunk_size;
      size -= chunk_size;
      // DoneFallBack asserts it isn't called when exactly on the limit. If this
      // happens we fail the parse, as we are at the limit and still more bytes
      // to read.
      if (limit_ == kSlopBytes) return nullptr;
      auto res = DoneFallback(ptr, -1);
      if (res.second) return nullptr;  // If done we passed the limit
      ptr = res.first;
      chunk_size = buffer_end_ + kSlopBytes - ptr;
    } while (size > chunk_size);
    append(ptr, size);
    return ptr + size;
  }

  // AppendUntilEnd appends data until a limit (either a PushLimit or end of
  // stream. Normal payloads are from length delimited fields which have an
  // explicit size. Reading until limit only comes when the string takes
  // the place of a protobuf, ie RawMessage/StringRawMessage, lazy fields and
  // implicit weak messages. We keep these methods private and friend them.
  template <typename A>
  const char* AppendUntilEnd(const char* ptr, const A& append) {
    while (!DoneWithCheck(&ptr, -1)) {
      append(ptr, limit_end_ - ptr);
      ptr = limit_end_;
    }
    return ptr;
  }

  PROTOBUF_MUST_USE_RESULT const char* AppendString(const char* ptr,
                                                    std::string* str) {
    return AppendUntilEnd(
        ptr, [str](const char* p, ptrdiff_t s) { str->append(p, s); });
  }
  friend class ImplicitWeakMessage;
};
// ParseContext holds all data that is global to the entire parse. Most
// importantly it contains the input stream, but also recursion depth and also
// stores the end group tag, in case a parser ended on a endgroup, to verify
// matching start/end group tags.
class PROTOBUF_EXPORT ParseContext : public EpsCopyInputStream {
 public:
  struct Data {
    const DescriptorPool* pool = nullptr;
    MessageFactory* factory = nullptr;
  };

  template <typename... T>
  ParseContext(int depth, bool aliasing, const char** start, T&&... args)
      : EpsCopyInputStream(aliasing), depth_(depth) {
    *start = InitFrom(std::forward<T>(args)...);
  }

  // Enables group-depth tracking so Done() can handle a parse that ends on a
  // 0 or end-group tag inside the slop region.
  void TrackCorrectEnding() { group_depth_ = 0; }

  bool Done(const char** ptr) { return DoneWithCheck(ptr, group_depth_); }
  bool DoneNoSlopCheck(const char** ptr) { return DoneWithCheck(ptr, -1); }

  int depth() const { return depth_; }

  Data& data() { return data_; }
  const Data& data() const { return data_; }

  // Parses a length-delimited sub-message field into msg.
  template <typename T>
  PROTOBUF_MUST_USE_RESULT const char* ParseMessage(T* msg, const char* ptr);
  // We outline when the type is generic and we go through a virtual
  const char* ParseMessage(MessageLite* msg, const char* ptr);
  const char* ParseMessage(Message* msg, const char* ptr);

  // Parses a group field. `tag` is the start-group tag whose matching
  // end-group tag must terminate the nested parse.
  template <typename T>
  PROTOBUF_MUST_USE_RESULT PROTOBUF_ALWAYS_INLINE const char* ParseGroup(
      T* msg, const char* ptr, uint32 tag) {
    if (--depth_ < 0) return nullptr;  // recursion-depth limit exceeded
    group_depth_++;
    ptr = msg->_InternalParse(ptr, this);
    group_depth_--;
    depth_++;
    if (PROTOBUF_PREDICT_FALSE(!ConsumeEndGroup(tag))) return nullptr;
    return ptr;
  }

 private:
  // The context keeps an internal stack to keep track of the recursive
  // part of the parse state.
  // Current depth of the active parser, depth counts down.
  // This is used to limit recursion depth (to prevent overflow on malicious
  // data), but is also used to index in stack_ to store the current state.
  int depth_;
  // Unfortunately necessary for the fringe case of ending on 0 or end-group tag
  // in the last kSlopBytes of a ZeroCopyInputStream chunk.
  int group_depth_ = INT_MIN;
  Data data_;
};
  351. template <uint32 tag>
  352. bool ExpectTag(const char* ptr) {
  353. if (tag < 128) {
  354. return *ptr == tag;
  355. } else {
  356. static_assert(tag < 128 * 128, "We only expect tags for 1 or 2 bytes");
  357. char buf[2] = {static_cast<char>(tag | 0x80), static_cast<char>(tag >> 7)};
  358. return std::memcmp(ptr, buf, 2) == 0;
  359. }
  360. }
// EndianHelper<N> loads an N-byte little-endian (wire order) value from a
// possibly-unaligned address, byte-swapping on big-endian hosts.
template <int>
struct EndianHelper;

template <>
struct EndianHelper<1> {
  static uint8 Load(const void* p) { return *static_cast<const uint8*>(p); }
};

template <>
struct EndianHelper<2> {
  static uint16 Load(const void* p) {
    uint16 tmp;
    std::memcpy(&tmp, p, 2);  // memcpy avoids UB from unaligned access
#ifndef PROTOBUF_LITTLE_ENDIAN
    tmp = bswap_16(tmp);
#endif
    return tmp;
  }
};

template <>
struct EndianHelper<4> {
  static uint32 Load(const void* p) {
    uint32 tmp;
    std::memcpy(&tmp, p, 4);
#ifndef PROTOBUF_LITTLE_ENDIAN
    tmp = bswap_32(tmp);
#endif
    return tmp;
  }
};

template <>
struct EndianHelper<8> {
  static uint64 Load(const void* p) {
    uint64 tmp;
    std::memcpy(&tmp, p, 8);
#ifndef PROTOBUF_LITTLE_ENDIAN
    tmp = bswap_64(tmp);
#endif
    return tmp;
  }
};
  400. template <typename T>
  401. T UnalignedLoad(const char* p) {
  402. auto tmp = EndianHelper<sizeof(T)>::Load(p);
  403. T res;
  404. memcpy(&res, &tmp, sizeof(T));
  405. return res;
  406. }
  407. PROTOBUF_EXPORT
  408. std::pair<const char*, uint32> VarintParseSlow32(const char* p, uint32 res);
  409. PROTOBUF_EXPORT
  410. std::pair<const char*, uint64> VarintParseSlow64(const char* p, uint32 res);
  411. inline const char* VarintParseSlow(const char* p, uint32 res, uint32* out) {
  412. auto tmp = VarintParseSlow32(p, res);
  413. *out = tmp.second;
  414. return tmp.first;
  415. }
  416. inline const char* VarintParseSlow(const char* p, uint32 res, uint64* out) {
  417. auto tmp = VarintParseSlow64(p, res);
  418. *out = tmp.second;
  419. return tmp.first;
  420. }
  421. template <typename T>
  422. PROTOBUF_MUST_USE_RESULT const char* VarintParse(const char* p, T* out) {
  423. auto ptr = reinterpret_cast<const uint8*>(p);
  424. uint32 res = ptr[0];
  425. if (!(res & 0x80)) {
  426. *out = res;
  427. return p + 1;
  428. }
  429. uint32 byte = ptr[1];
  430. res += (byte - 1) << 7;
  431. if (!(byte & 0x80)) {
  432. *out = res;
  433. return p + 2;
  434. }
  435. return VarintParseSlow(p, res, out);
  436. }
  437. // Used for tags, could read up to 5 bytes which must be available.
  438. // Caller must ensure its safe to call.
  439. PROTOBUF_EXPORT
  440. std::pair<const char*, uint32> ReadTagFallback(const char* p, uint32 res);
  441. // Same as ParseVarint but only accept 5 bytes at most.
  442. inline const char* ReadTag(const char* p, uint32* out, uint32 max_tag = 0) {
  443. uint32 res = static_cast<uint8>(p[0]);
  444. if (res < 128) {
  445. *out = res;
  446. return p + 1;
  447. }
  448. uint32 second = static_cast<uint8>(p[1]);
  449. res += (second - 1) << 7;
  450. if (second < 128) {
  451. *out = res;
  452. return p + 2;
  453. }
  454. auto tmp = ReadTagFallback(p, res);
  455. *out = tmp.second;
  456. return tmp.first;
  457. }
// Decode 2 consecutive bytes of a varint and returns the value, shifted left
// by 1. It simultaneous updates *ptr to *ptr + 1 or *ptr + 2 depending if the
// first byte's continuation bit is set.
// If bit 15 of return value is set (equivalent to the continuation bits of both
// bytes being set) the varint continues, otherwise the parse is done. On x86
// movsx eax, dil
// add edi, eax
// adc [rsi], 1
// add eax, eax
// and eax, edi
inline uint32 DecodeTwoBytes(const char** ptr) {
  uint32 value = UnalignedLoad<uint16>(*ptr);
  // Sign extend the low byte continuation bit
  uint32_t x = static_cast<int8_t>(value);
  // This add is an amazing operation, it cancels the low byte continuation bit
  // from y transferring it to the carry. Simultaneously it also shifts the 7
  // LSB left by one tightly against high byte varint bits. Hence value now
  // contains the unpacked value shifted left by 1.
  value += x;
  // Use the carry to update the ptr appropriately.
  *ptr += value < x ? 2 : 1;
  return value & (x + x);  // Mask out the high byte iff no continuation
}
// More efficient varint parsing for big varints. Decodes up to 10 bytes,
// two at a time, via DecodeTwoBytes. Returns the position past the varint
// with *out set, or nullptr if the input is not a valid (<=10 byte) varint.
inline const char* ParseBigVarint(const char* p, uint64* out) {
  auto pnew = p;
  auto tmp = DecodeTwoBytes(&pnew);
  uint64 res = tmp >> 1;
  if (PROTOBUF_PREDICT_TRUE(std::int16_t(tmp) >= 0)) {
    // Bit 15 clear: the varint ended within the first two bytes.
    *out = res;
    return pnew;
  }
  for (std::uint32_t i = 1; i < 5; i++) {
    pnew = p + 2 * i;
    tmp = DecodeTwoBytes(&pnew);
    // Each pair contributes 14 payload bits; the "- 2" cancels the
    // continuation bit carried in from the previous pair (value is
    // pre-shifted left by 1 by DecodeTwoBytes).
    res += (static_cast<std::uint64_t>(tmp) - 2) << (14 * i - 1);
    if (PROTOBUF_PREDICT_TRUE(std::int16_t(tmp) >= 0)) {
      *out = res;
      return pnew;
    }
  }
  // Malformed: continuation bits set beyond the 10-byte maximum.
  return nullptr;
}
  501. PROTOBUF_EXPORT
  502. std::pair<const char*, int32> ReadSizeFallback(const char* p, uint32 first);
  503. // Used for tags, could read up to 5 bytes which must be available. Additionally
  504. // it makes sure the unsigned value fits a int32, otherwise returns nullptr.
  505. // Caller must ensure its safe to call.
  506. inline uint32 ReadSize(const char** pp) {
  507. auto p = *pp;
  508. uint32 res = static_cast<uint8>(p[0]);
  509. if (res < 128) {
  510. *pp = p + 1;
  511. return res;
  512. }
  513. auto x = ReadSizeFallback(p, res);
  514. *pp = x.first;
  515. return x.second;
  516. }
  517. // Some convenience functions to simplify the generated parse loop code.
  518. // Returning the value and updating the buffer pointer allows for nicer
  519. // function composition. We rely on the compiler to inline this.
  520. // Also in debug compiles having local scoped variables tend to generated
  521. // stack frames that scale as O(num fields).
  522. inline uint64 ReadVarint(const char** p) {
  523. uint64 tmp;
  524. *p = VarintParse(*p, &tmp);
  525. return tmp;
  526. }
  527. inline int64 ReadVarintZigZag64(const char** p) {
  528. uint64 tmp;
  529. *p = VarintParse(*p, &tmp);
  530. return WireFormatLite::ZigZagDecode64(tmp);
  531. }
  532. inline int32 ReadVarintZigZag32(const char** p) {
  533. uint64 tmp;
  534. *p = VarintParse(*p, &tmp);
  535. return WireFormatLite::ZigZagDecode32(static_cast<uint32>(tmp));
  536. }
// Parses a length-delimited sub-message: reads the size prefix, pushes a
// limit for the payload, recurses into msg's parser, then pops the limit.
// Returns nullptr on malformed input or when the recursion limit is hit.
template <typename T>
PROTOBUF_MUST_USE_RESULT const char* ParseContext::ParseMessage(
    T* msg, const char* ptr) {
  int size = ReadSize(&ptr);
  if (!ptr) return nullptr;
  auto old = PushLimit(ptr, size);
  if (--depth_ < 0) return nullptr;  // recursion-depth limit exceeded
  ptr = msg->_InternalParse(ptr, this);
  if (PROTOBUF_PREDICT_FALSE(ptr == nullptr)) return nullptr;
  depth_++;
  // PopLimit fails if the nested parse did not stop exactly on its limit.
  if (!PopLimit(old)) return nullptr;
  return ptr;
}
// Parses a packed varint field: reads the payload size, then invokes add()
// for every varint inside the delimited region.
template <typename Add>
const char* EpsCopyInputStream::ReadPackedVarint(const char* ptr, Add add) {
  int size = ReadSize(&ptr);
  if (ptr == nullptr) return nullptr;
  auto old = PushLimit(ptr, size);
  if (old < 0) return nullptr;  // payload would overrun an enclosing limit
  while (!DoneWithCheck(&ptr, -1)) {
    uint64 varint;
    ptr = VarintParse(ptr, &varint);
    if (!ptr) return nullptr;
    add(varint);
  }
  if (!PopLimit(old)) return nullptr;
  return ptr;
}
// Helper for verification of utf8
PROTOBUF_EXPORT
bool VerifyUTF8(StringPiece s, const char* field_name);

// All the string parsers with or without UTF checking and for all CTypes.
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* InlineGreedyStringParser(
    std::string* s, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char*
InlineGreedyStringParserUTF8(std::string* s, const char* ptr, ParseContext* ctx,
                             const char* field_name);

// Inline because we don't want to pay the price of field_name in opt mode.
inline PROTOBUF_MUST_USE_RESULT const char* InlineGreedyStringParserUTF8Verify(
    std::string* s, const char* ptr, ParseContext* ctx,
    const char* field_name) {
  auto p = InlineGreedyStringParser(s, ptr, ctx);
#ifndef NDEBUG
  // Debug builds verify UTF-8 (logging the offending field); release builds
  // skip the check entirely.
  VerifyUTF8(*s, field_name);
#else   // !NDEBUG
  (void)field_name;
#endif  // !NDEBUG
  return p;
}
// Add any of the following lines to debug which parse function is failing.
// GOOGLE_PROTOBUF_ASSERT_RETURN(p, ret) bails out of the enclosing parser
// with `ret` when the predicate is false; the PARSER_ASSERT form returns
// nullptr, the error convention of all parse functions in this header.
#define GOOGLE_PROTOBUF_ASSERT_RETURN(predicate, ret) \
  if (!(predicate)) {                                 \
    /* ::raise(SIGINT); */                            \
    /* GOOGLE_LOG(ERROR) << "Parse failure"; */       \
    return ret;                                       \
  }

#define GOOGLE_PROTOBUF_PARSER_ASSERT(predicate) \
  GOOGLE_PROTOBUF_ASSERT_RETURN(predicate, nullptr)
// Dispatches a single field (already-read tag) to the matching Add*/Parse*
// callback on field_parser, based on the wire type in the tag's low 3 bits.
// Returns the position past the field's payload, or nullptr on error.
template <typename T>
PROTOBUF_MUST_USE_RESULT const char* FieldParser(uint64 tag, T& field_parser,
                                                 const char* ptr,
                                                 ParseContext* ctx) {
  uint32 number = tag >> 3;
  GOOGLE_PROTOBUF_PARSER_ASSERT(number != 0);
  using WireType = internal::WireFormatLite::WireType;
  switch (tag & 7) {
    case WireType::WIRETYPE_VARINT: {
      uint64 value;
      ptr = VarintParse(ptr, &value);
      GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
      field_parser.AddVarint(number, value);
      break;
    }
    case WireType::WIRETYPE_FIXED64: {
      // Reading 8 bytes unconditionally is safe: the kSlopBytes overlap
      // invariant guarantees they are readable.
      uint64 value = UnalignedLoad<uint64>(ptr);
      ptr += 8;
      field_parser.AddFixed64(number, value);
      break;
    }
    case WireType::WIRETYPE_LENGTH_DELIMITED: {
      ptr = field_parser.ParseLengthDelimited(number, ptr, ctx);
      GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
      break;
    }
    case WireType::WIRETYPE_START_GROUP: {
      ptr = field_parser.ParseGroup(number, ptr, ctx);
      GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
      break;
    }
    case WireType::WIRETYPE_END_GROUP: {
      // End-group tags are consumed by the caller (WireFormatParser) before
      // FieldParser is invoked.
      GOOGLE_LOG(FATAL) << "Can't happen";
      break;
    }
    case WireType::WIRETYPE_FIXED32: {
      uint32 value = UnalignedLoad<uint32>(ptr);
      ptr += 4;
      field_parser.AddFixed32(number, value);
      break;
    }
    default:
      return nullptr;  // wire types 6 and 7 are invalid
  }
  return ptr;
}
  641. template <typename T>
  642. PROTOBUF_MUST_USE_RESULT const char* WireFormatParser(T& field_parser,
  643. const char* ptr,
  644. ParseContext* ctx) {
  645. while (!ctx->Done(&ptr)) {
  646. uint32 tag;
  647. ptr = ReadTag(ptr, &tag);
  648. GOOGLE_PROTOBUF_PARSER_ASSERT(ptr != nullptr);
  649. if (tag == 0 || (tag & 7) == 4) {
  650. ctx->SetLastTag(tag);
  651. return ptr;
  652. }
  653. ptr = FieldParser(tag, field_parser, ptr, ctx);
  654. GOOGLE_PROTOBUF_PARSER_ASSERT(ptr != nullptr);
  655. }
  656. return ptr;
  657. }
// The packed parsers parse repeated numeric primitives directly into the
// corresponding field
// These are packed varints
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedInt32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedUInt32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedInt64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedUInt64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedSInt32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedSInt64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedEnumParser(
    void* object, const char* ptr, ParseContext* ctx);
// Enum variants that route unknown enum values into the unknown-field set
// via is_valid / metadata.
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedEnumParser(
    void* object, const char* ptr, ParseContext* ctx, bool (*is_valid)(int),
    InternalMetadataWithArenaLite* metadata, int field_num);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedEnumParserArg(
    void* object, const char* ptr, ParseContext* ctx,
    bool (*is_valid)(const void*, int), const void* data,
    InternalMetadataWithArenaLite* metadata, int field_num);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedBoolParser(
    void* object, const char* ptr, ParseContext* ctx);
// These are the packed fixed-width primitives.
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedFixed32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedSFixed32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedFixed64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedSFixed64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedFloatParser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* PackedDoubleParser(
    void* object, const char* ptr, ParseContext* ctx);

// This is the only recursive parser.
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* UnknownGroupLiteParse(
    std::string* unknown, const char* ptr, ParseContext* ctx);
// This is a helper to for the UnknownGroupLiteParse but is actually also
// useful in the generated code. It uses overload on std::string* vs
// UnknownFieldSet* to make the generated code isomorphic between full and lite.
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* UnknownFieldParse(
    uint32 tag, std::string* unknown, const char* ptr, ParseContext* ctx);
PROTOBUF_EXPORT PROTOBUF_MUST_USE_RESULT const char* UnknownFieldParse(
    uint32 tag, InternalMetadataWithArenaLite* metadata, const char* ptr,
    ParseContext* ctx);
  707. } // namespace internal
  708. } // namespace protobuf
  709. } // namespace google
  710. #include <google/protobuf/port_undef.inc>
  711. #endif // GOOGLE_PROTOBUF_PARSE_CONTEXT_H__