// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found at
// https://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html)
// and Apache 2.0 License (found in licenses/APL.txt in the root
// of this repository).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the licences/LevelDB.txt file in in the root of this repository.
// See the LevelDB.AUTHORS file for names of contributors.
#pragma once

#include <algorithm>
#include <cassert>
#include <cstdint>  // uintptr_t (was previously pulled in only transitively)
#include <cstring>
#include <memory>
#include <utility>  // std::move

namespace rocksdb_utils {

// Needed parts from rocksdb/port/win/port_win.h
// VS < 2015
#if defined(OS_WIN) && defined(_MSC_VER) && (_MSC_VER < 1900)
#define ROCKSDB_NOEXCEPT
#else  // VS >= 2015 or MinGW
#define ROCKSDB_NOEXCEPT noexcept
#endif

// Rounds s down to the nearest multiple of page_size.
// page_size must be a power of two (the mask trick below relies on it).
inline size_t TruncateToPageBoundary(size_t page_size, size_t s) {
  s -= (s & (page_size - 1));
  assert((s % page_size) == 0);
  return s;
}

// Rounds x up to the nearest multiple of y.
inline size_t Roundup(size_t x, size_t y) { return ((x + y - 1) / y) * y; }

// This class is to manage an aligned user
// allocated buffer for direct I/O purposes
// though can be used for any purpose.
//
// Usage: set Alignment() first, then AllocateNewBuffer(); Append()/Read()
// operate on the aligned region starting at BufferStart(). Movable but not
// copyable.
class AlignedBuffer {
  size_t alignment_;             // required alignment; a power of two once set
  std::unique_ptr<char[]> buf_;  // owns the over-sized raw allocation
  size_t capacity_;              // usable bytes starting at bufstart_
  size_t cursize_;               // bytes of valid data starting at bufstart_
  char* bufstart_;               // first alignment_-aligned byte inside buf_

 public:
  AlignedBuffer() : alignment_(0), capacity_(0), cursize_(0), bufstart_(nullptr) {}

  // Move construction delegates to move assignment; buf_ is default-initialized
  // first, the remaining members are overwritten by the assignment.
  AlignedBuffer(AlignedBuffer&& o) ROCKSDB_NOEXCEPT { *this = std::move(o); }

  AlignedBuffer& operator=(AlignedBuffer&& o) ROCKSDB_NOEXCEPT {
    if (this != &o) {  // guard: self-move must not clobber a live buffer
      alignment_ = o.alignment_;
      buf_ = std::move(o.buf_);
      capacity_ = o.capacity_;
      cursize_ = o.cursize_;
      bufstart_ = o.bufstart_;
      // Reset the source to a safe empty state. Once o.buf_ has been moved
      // away, o.bufstart_ would otherwise dangle into freed memory and a
      // later Append()/Destination() on the source would write through it.
      o.capacity_ = 0;
      o.cursize_ = 0;
      o.bufstart_ = nullptr;
    }
    return *this;
  }

  AlignedBuffer(const AlignedBuffer&) = delete;

  AlignedBuffer& operator=(const AlignedBuffer&) = delete;

  // True iff ptr is aligned to `alignment` bytes.
  static bool isAligned(const void* ptr, size_t alignment) {
    return reinterpret_cast<uintptr_t>(ptr) % alignment == 0;
  }

  // True iff n is a multiple of `alignment`.
  static bool isAligned(size_t n, size_t alignment) { return n % alignment == 0; }

  size_t Alignment() const { return alignment_; }

  size_t Capacity() const { return capacity_; }

  size_t CurrentSize() const { return cursize_; }

  const char* BufferStart() const { return bufstart_; }

  char* BufferStart() { return bufstart_; }

  // Discards the buffered data; keeps the allocation and capacity.
  void Clear() { cursize_ = 0; }

  // Sets the alignment; must be a non-zero power of two.
  // Must be called before AllocateNewBuffer().
  void Alignment(size_t alignment) {
    assert(alignment > 0);
    assert((alignment & (alignment - 1)) == 0);
    alignment_ = alignment;
  }

  // Allocates a new buffer and sets bufstart_ to the aligned first byte.
  // Capacity is requested_capacity rounded up to the alignment. If copy_data
  // is true the current contents are preserved; a copy_data request that
  // would not fit the existing data is ignored.
  void AllocateNewBuffer(size_t requested_capacity, bool copy_data = false) {
    assert(alignment_ > 0);
    assert((alignment_ & (alignment_ - 1)) == 0);

    if (copy_data && requested_capacity < cursize_) {
      // If we are downsizing to a capacity that is smaller than the current
      // data in the buffer. Ignore the request.
      return;
    }

    size_t new_capacity = Roundup(requested_capacity, alignment_);
    // Over-allocate by one alignment unit so that an aligned start plus
    // new_capacity bytes always fits inside the raw allocation.
    char* new_buf = new char[new_capacity + alignment_];
    char* new_bufstart =
        reinterpret_cast<char*>((reinterpret_cast<uintptr_t>(new_buf) + (alignment_ - 1)) &
                                ~static_cast<uintptr_t>(alignment_ - 1));

    if (copy_data) {
      // cursize_ > 0 guard: on a never-allocated buffer bufstart_ is null,
      // and memcpy with a null source is UB even when the length is zero.
      if (cursize_ > 0) {
        memcpy(new_bufstart, bufstart_, cursize_);
      }
    } else {
      cursize_ = 0;
    }

    bufstart_ = new_bufstart;
    capacity_ = new_capacity;
    buf_.reset(new_buf);
  }

  // Used for write
  // Copies up to append_size bytes from src into the free tail of the buffer.
  // Returns the number of bytes appended (may be short if capacity runs out).
  size_t Append(const char* src, size_t append_size) {
    size_t buffer_remaining = capacity_ - cursize_;
    size_t to_copy = std::min(append_size, buffer_remaining);

    if (to_copy > 0) {
      memcpy(bufstart_ + cursize_, src, to_copy);
      cursize_ += to_copy;
    }
    return to_copy;
  }

  // Copies up to read_size bytes starting at offset into dest.
  // Returns the number of bytes actually read (clamped to the valid data).
  // offset must be within the current data (asserted in debug builds).
  size_t Read(char* dest, size_t offset, size_t read_size) const {
    assert(offset < cursize_);

    size_t to_read = 0;
    if (offset < cursize_) {
      to_read = std::min(cursize_ - offset, read_size);
    }
    if (to_read > 0) {
      memcpy(dest, bufstart_ + offset, to_read);
    }
    return to_read;
  }

  /// Pad to alignment: fills the buffer with `padding` bytes up to the next
  /// alignment boundary and extends cursize_ accordingly. The padded size
  /// must not exceed capacity (asserted in debug builds).
  void PadToAlignmentWith(int padding) {
    size_t total_size = Roundup(cursize_, alignment_);
    size_t pad_size = total_size - cursize_;

    if (pad_size > 0) {
      assert((pad_size + cursize_) <= capacity_);
      memset(bufstart_ + cursize_, padding, pad_size);
      cursize_ += pad_size;
    }
  }

  // After a partial flush move the tail to the beginning of the buffer.
  // memmove is used because the source and destination ranges may overlap.
  void RefitTail(size_t tail_offset, size_t tail_size) {
    if (tail_size > 0) {
      memmove(bufstart_, bufstart_ + tail_offset, tail_size);
    }
    cursize_ = tail_size;
  }

  // Returns place to start writing (first byte past the valid data).
  char* Destination() { return bufstart_ + cursize_; }

  // Directly sets the current data size, e.g. after an external read filled
  // the buffer through Destination()/BufferStart().
  void Size(size_t cursize) { cursize_ = cursize; }
};
}  // namespace rocksdb_utils