诸暨麻将添加redis (Zhuji Mahjong: add Redis)
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <google/protobuf/arena.h>

#include <algorithm>
#include <atomic>
#include <limits>

#include <google/protobuf/stubs/mutex.h>

#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif  // ADDRESS_SANITIZER

#include <google/protobuf/port_def.inc>

static const size_t kMinCleanupListElements = 8;
static const size_t kMaxCleanupListElements = 64;  // 1kB on 64-bit.

namespace google {
namespace protobuf {
namespace internal {

std::atomic<LifecycleId> ArenaImpl::lifecycle_id_generator_;

#if defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
ArenaImpl::ThreadCache& ArenaImpl::thread_cache() {
  static internal::ThreadLocalStorage<ThreadCache>* thread_cache_ =
      new internal::ThreadLocalStorage<ThreadCache>();
  return *thread_cache_->Get();
}
#elif defined(PROTOBUF_USE_DLLS)
ArenaImpl::ThreadCache& ArenaImpl::thread_cache() {
  static GOOGLE_THREAD_LOCAL ThreadCache thread_cache_ = {-1, NULL};
  return thread_cache_;
}
#else
GOOGLE_THREAD_LOCAL ArenaImpl::ThreadCache ArenaImpl::thread_cache_ = {-1, NULL};
#endif
void ArenaImpl::Init() {
  lifecycle_id_ =
      lifecycle_id_generator_.fetch_add(1, std::memory_order_relaxed);
  hint_.store(nullptr, std::memory_order_relaxed);
  threads_.store(nullptr, std::memory_order_relaxed);

  if (initial_block_) {
    // Thread which calls Init() owns the first block. This allows the
    // single-threaded case to allocate on the first block without having to
    // perform atomic operations.
    new (initial_block_) Block(options_.initial_block_size, NULL);
    SerialArena* serial =
        SerialArena::New(initial_block_, &thread_cache(), this);
    serial->set_next(NULL);
    threads_.store(serial, std::memory_order_relaxed);
    space_allocated_.store(options_.initial_block_size,
                           std::memory_order_relaxed);
    CacheSerialArena(serial);
  } else {
    space_allocated_.store(0, std::memory_order_relaxed);
  }
}

ArenaImpl::~ArenaImpl() {
  // Have to do this in a first pass, because some of the destructors might
  // refer to memory in other blocks.
  CleanupList();
  FreeBlocks();
}

uint64 ArenaImpl::Reset() {
  // Have to do this in a first pass, because some of the destructors might
  // refer to memory in other blocks.
  CleanupList();
  uint64 space_allocated = FreeBlocks();
  Init();
  return space_allocated;
}

ArenaImpl::Block* ArenaImpl::NewBlock(Block* last_block, size_t min_bytes) {
  size_t size;
  if (last_block) {
    // Double the current block size, up to a limit.
    size = std::min(2 * last_block->size(), options_.max_block_size);
  } else {
    size = options_.start_block_size;
  }
  // Verify that min_bytes + kBlockHeaderSize won't overflow.
  GOOGLE_CHECK_LE(min_bytes, std::numeric_limits<size_t>::max() - kBlockHeaderSize);
  size = std::max(size, kBlockHeaderSize + min_bytes);

  void* mem = options_.block_alloc(size);
  Block* b = new (mem) Block(size, last_block);
  space_allocated_.fetch_add(size, std::memory_order_relaxed);
  return b;
}

ArenaImpl::Block::Block(size_t size, Block* next)
    : next_(next), pos_(kBlockHeaderSize), size_(size) {}

PROTOBUF_NOINLINE
void ArenaImpl::SerialArena::AddCleanupFallback(void* elem,
                                                void (*cleanup)(void*)) {
  size_t size = cleanup_ ? cleanup_->size * 2 : kMinCleanupListElements;
  size = std::min(size, kMaxCleanupListElements);
  size_t bytes = internal::AlignUpTo8(CleanupChunk::SizeOf(size));
  CleanupChunk* list = reinterpret_cast<CleanupChunk*>(AllocateAligned(bytes));
  list->next = cleanup_;
  list->size = size;

  cleanup_ = list;
  cleanup_ptr_ = &list->nodes[0];
  cleanup_limit_ = &list->nodes[size];

  AddCleanup(elem, cleanup);
}
PROTOBUF_FUNC_ALIGN(32)
void* ArenaImpl::AllocateAligned(size_t n) {
  SerialArena* arena;
  if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
    return arena->AllocateAligned(n);
  } else {
    return AllocateAlignedFallback(n);
  }
}

void* ArenaImpl::AllocateAlignedAndAddCleanup(size_t n,
                                              void (*cleanup)(void*)) {
  SerialArena* arena;
  if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
    return arena->AllocateAlignedAndAddCleanup(n, cleanup);
  } else {
    return AllocateAlignedAndAddCleanupFallback(n, cleanup);
  }
}

void ArenaImpl::AddCleanup(void* elem, void (*cleanup)(void*)) {
  SerialArena* arena;
  if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
    arena->AddCleanup(elem, cleanup);
  } else {
    return AddCleanupFallback(elem, cleanup);
  }
}

PROTOBUF_NOINLINE
void* ArenaImpl::AllocateAlignedFallback(size_t n) {
  return GetSerialArena()->AllocateAligned(n);
}

PROTOBUF_NOINLINE
void* ArenaImpl::AllocateAlignedAndAddCleanupFallback(size_t n,
                                                      void (*cleanup)(void*)) {
  return GetSerialArena()->AllocateAlignedAndAddCleanup(n, cleanup);
}

PROTOBUF_NOINLINE
void ArenaImpl::AddCleanupFallback(void* elem, void (*cleanup)(void*)) {
  GetSerialArena()->AddCleanup(elem, cleanup);
}

inline PROTOBUF_ALWAYS_INLINE bool ArenaImpl::GetSerialArenaFast(
    ArenaImpl::SerialArena** arena) {
  // If this thread already owns a block in this arena then try to use that.
  // This fast path optimizes the case where multiple threads allocate from the
  // same arena.
  ThreadCache* tc = &thread_cache();
  if (PROTOBUF_PREDICT_TRUE(tc->last_lifecycle_id_seen == lifecycle_id_)) {
    *arena = tc->last_serial_arena;
    return true;
  }

  // Check whether we own the last accessed SerialArena on this arena. This
  // fast path optimizes the case where a single thread uses multiple arenas.
  SerialArena* serial = hint_.load(std::memory_order_acquire);
  if (PROTOBUF_PREDICT_TRUE(serial != NULL && serial->owner() == tc)) {
    *arena = serial;
    return true;
  }

  return false;
}

ArenaImpl::SerialArena* ArenaImpl::GetSerialArena() {
  SerialArena* arena;
  if (PROTOBUF_PREDICT_TRUE(GetSerialArenaFast(&arena))) {
    return arena;
  } else {
    return GetSerialArenaFallback(&thread_cache());
  }
}

PROTOBUF_NOINLINE
void* ArenaImpl::SerialArena::AllocateAlignedFallback(size_t n) {
  // Sync back to current's pos.
  head_->set_pos(head_->size() - (limit_ - ptr_));

  head_ = arena_->NewBlock(head_, n);
  ptr_ = head_->Pointer(head_->pos());
  limit_ = head_->Pointer(head_->size());

#ifdef ADDRESS_SANITIZER
  ASAN_POISON_MEMORY_REGION(ptr_, limit_ - ptr_);
#endif  // ADDRESS_SANITIZER

  return AllocateAligned(n);
}
uint64 ArenaImpl::SpaceAllocated() const {
  return space_allocated_.load(std::memory_order_relaxed);
}

uint64 ArenaImpl::SpaceUsed() const {
  SerialArena* serial = threads_.load(std::memory_order_acquire);
  uint64 space_used = 0;
  for (; serial; serial = serial->next()) {
    space_used += serial->SpaceUsed();
  }
  return space_used;
}

uint64 ArenaImpl::SerialArena::SpaceUsed() const {
  // Get current block's size from ptr_ (since we can't trust head_->pos()).
  uint64 space_used = ptr_ - head_->Pointer(kBlockHeaderSize);
  // Get subsequent block size from b->pos().
  for (Block* b = head_->next(); b; b = b->next()) {
    space_used += (b->pos() - kBlockHeaderSize);
  }
  // Remove the overhead of the SerialArena itself.
  space_used -= kSerialArenaSize;
  return space_used;
}
uint64 ArenaImpl::FreeBlocks() {
  uint64 space_allocated = 0;
  // By omitting an Acquire barrier we ensure that any user code that doesn't
  // properly synchronize Reset() or the destructor will throw a TSAN warning.
  SerialArena* serial = threads_.load(std::memory_order_relaxed);

  while (serial) {
    // This is inside a block we are freeing, so we need to read it now.
    SerialArena* next = serial->next();
    space_allocated += ArenaImpl::SerialArena::Free(serial, initial_block_,
                                                    options_.block_dealloc);
    // serial is dead now.
    serial = next;
  }

  return space_allocated;
}

uint64 ArenaImpl::SerialArena::Free(ArenaImpl::SerialArena* serial,
                                    Block* initial_block,
                                    void (*block_dealloc)(void*, size_t)) {
  uint64 space_allocated = 0;

  // We have to be careful in this function, since we will be freeing the Block
  // that contains this SerialArena. Be careful about accessing |serial|.
  for (Block* b = serial->head_; b;) {
    // This is inside the block we are freeing, so we need to read it now.
    Block* next_block = b->next();
    space_allocated += (b->size());

#ifdef ADDRESS_SANITIZER
    // This memory was provided by the underlying allocator as unpoisoned, so
    // return it in an unpoisoned state.
    ASAN_UNPOISON_MEMORY_REGION(b->Pointer(0), b->size());
#endif  // ADDRESS_SANITIZER

    if (b != initial_block) {
      block_dealloc(b, b->size());
    }

    b = next_block;
  }

  return space_allocated;
}

void ArenaImpl::CleanupList() {
  // By omitting an Acquire barrier we ensure that any user code that doesn't
  // properly synchronize Reset() or the destructor will throw a TSAN warning.
  SerialArena* serial = threads_.load(std::memory_order_relaxed);

  for (; serial; serial = serial->next()) {
    serial->CleanupList();
  }
}

void ArenaImpl::SerialArena::CleanupList() {
  if (cleanup_ != NULL) {
    CleanupListFallback();
  }
}

void ArenaImpl::SerialArena::CleanupListFallback() {
  // The first chunk might be only partially full, so calculate its size
  // from cleanup_ptr_. Subsequent chunks are always full, so use list->size.
  size_t n = cleanup_ptr_ - &cleanup_->nodes[0];
  CleanupChunk* list = cleanup_;
  while (true) {
    CleanupNode* node = &list->nodes[0];
    // Cleanup newest elements first (allocated last).
    for (size_t i = n; i > 0; i--) {
      node[i - 1].cleanup(node[i - 1].elem);
    }
    list = list->next;
    if (list == nullptr) {
      break;
    }
    // All but the first chunk are always full.
    n = list->size;
  }
}
ArenaImpl::SerialArena* ArenaImpl::SerialArena::New(Block* b, void* owner,
                                                    ArenaImpl* arena) {
  GOOGLE_DCHECK_EQ(b->pos(), kBlockHeaderSize);  // Should be a fresh block
  GOOGLE_DCHECK_LE(kBlockHeaderSize + kSerialArenaSize, b->size());
  SerialArena* serial =
      reinterpret_cast<SerialArena*>(b->Pointer(kBlockHeaderSize));
  b->set_pos(kBlockHeaderSize + kSerialArenaSize);
  serial->arena_ = arena;
  serial->owner_ = owner;
  serial->head_ = b;
  serial->ptr_ = b->Pointer(b->pos());
  serial->limit_ = b->Pointer(b->size());
  serial->cleanup_ = NULL;
  serial->cleanup_ptr_ = NULL;
  serial->cleanup_limit_ = NULL;
  return serial;
}

PROTOBUF_NOINLINE
ArenaImpl::SerialArena* ArenaImpl::GetSerialArenaFallback(void* me) {
  // Look for this SerialArena in our linked list.
  SerialArena* serial = threads_.load(std::memory_order_acquire);
  for (; serial; serial = serial->next()) {
    if (serial->owner() == me) {
      break;
    }
  }

  if (!serial) {
    // This thread doesn't have any SerialArena, which also means it doesn't
    // have any blocks yet. So we'll allocate its first block now.
    Block* b = NewBlock(NULL, kSerialArenaSize);
    serial = SerialArena::New(b, me, this);

    SerialArena* head = threads_.load(std::memory_order_relaxed);
    do {
      serial->set_next(head);
    } while (!threads_.compare_exchange_weak(
        head, serial, std::memory_order_release, std::memory_order_relaxed));
  }

  CacheSerialArena(serial);
  return serial;
}
}  // namespace internal

void Arena::CallDestructorHooks() {
  uint64 space_allocated = impl_.SpaceAllocated();
  // Call the reset hook
  if (on_arena_reset_ != NULL) {
    on_arena_reset_(this, hooks_cookie_, space_allocated);
  }

  // Call the destruction hook
  if (on_arena_destruction_ != NULL) {
    on_arena_destruction_(this, hooks_cookie_, space_allocated);
  }
}

void Arena::OnArenaAllocation(const std::type_info* allocated_type,
                              size_t n) const {
  if (on_arena_allocation_ != NULL) {
    on_arena_allocation_(allocated_type, n, hooks_cookie_);
  }
}

}  // namespace protobuf
}  // namespace google
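
For context, a minimal usage sketch of the public google::protobuf::Arena API that sits on top of the ArenaImpl code above. The ArenaOptions fields shown are the ones read by Init() and NewBlock(); the numeric block sizes are arbitrary example values, and MyMessage in the commented-out line is a hypothetical generated message type from your own .proto file.

#include <string>

#include <google/protobuf/arena.h>

using google::protobuf::Arena;
using google::protobuf::ArenaOptions;

void ArenaUsageSketch() {
  // Tune the block growth policy consumed by ArenaImpl::NewBlock().
  ArenaOptions options;
  options.start_block_size = 1024;     // size of the first heap block
  options.max_block_size = 64 * 1024;  // cap for the "double each block" policy

  Arena arena(options);

  // Objects created on the arena are carved out of its blocks via
  // AllocateAligned(); non-trivially-destructible types also get a cleanup
  // entry registered through AddCleanup(), and everything is destroyed in
  // bulk when `arena` goes out of scope (or Reset() is called).
  std::string* s = Arena::Create<std::string>(&arena, "hello arena");
  (void)s;

  // With a generated protobuf type (hypothetical MyMessage):
  // MyMessage* msg = Arena::CreateMessage<MyMessage>(&arena);
}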