// -*- mode: c++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright 2019 The Mesh Authors. All rights reserved.
// Use of this source code is governed by the Apache License,
// Version 2.0, that can be found in the LICENSE file.
#pragma once
#ifndef MESH__MINI_HEAP_H
#define MESH__MINI_HEAP_H
#include <pthread.h>
#include <atomic>
#include <random>
#include "bitmap.h"
#include "fixed_array.h"
#include "internal.h"
#include "rng/mwc.h"
#include "heaplayers.h"
namespace mesh {
class MiniHeap;
class Flags {
private:
DISALLOW_COPY_AND_ASSIGN(Flags);
static inline constexpr uint32_t ATTRIBUTE_ALWAYS_INLINE getMask(uint32_t pos) {
return 1UL << pos;
}
static constexpr uint32_t MeshedOffset = 30;
static constexpr uint32_t MaxCountShift = 16;
static constexpr uint32_t SizeClassShift = 0;
static constexpr uint32_t ShuffleVectorOffsetShift = 8;
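// Layout of the packed _flags word implied by the shifts above (a reading
// aid, not part of the upstream comments):
//   bits  0-7   size class               (SizeClassShift = 0)
//   bits  8-15  shuffle-vector offset    (ShuffleVectorOffsetShift = 8)
//   bits 16-24  max object count, 9 bits (MaxCountShift = 16)
//   bit  30     meshed flag              (MeshedOffset = 30)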
public:
explicit Flags(uint32_t maxCount, uint32_t sizeClass, uint32_t svOffset) noexcept
: _flags{(maxCount << MaxCountShift) + (sizeClass << SizeClassShift) + (svOffset << ShuffleVectorOffsetShift)} {
d_assert(svOffset < 255);
d_assert_msg(sizeClass < 255, "sizeClass: %u", sizeClass);
d_assert(maxCount <= 256);
d_assert(this->maxCount() == maxCount);
}
inline uint32_t maxCount() const {
// XXX: does this assume little endian?
return (_flags.load(std::memory_order_relaxed) >> MaxCountShift) & 0x1ff;
}
inline uint32_t sizeClass() const {
return (_flags.load(std::memory_order_relaxed) >> SizeClassShift) & 0xff;
}
inline uint8_t svOffset() const {
return (_flags.load(std::memory_order_relaxed) >> ShuffleVectorOffsetShift) & 0xff;
}
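// setSvOffset below (and the private set/unset helpers) update the shared
// _flags word with a weak compare-exchange retry loop, so concurrent updates
// to different bit fields are never lost.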
inline void setSvOffset(uint8_t off) {
d_assert(off < 255);
uint32_t mask = ~(static_cast<uint32_t>(0xff) << ShuffleVectorOffsetShift);
uint32_t newVal = (static_cast<uint32_t>(off) << ShuffleVectorOffsetShift);
uint32_t oldFlags = _flags.load(std::memory_order_relaxed);
while (!atomic_compare_exchange_weak_explicit(&_flags,
&oldFlags, // old val
(oldFlags & mask) | newVal, // new val
std::memory_order_release, // success mem model
std::memory_order_relaxed)) {
}
}
inline void setMeshed() {
set(MeshedOffset);
}
inline void unsetMeshed() {
unset(MeshedOffset);
}
inline bool ATTRIBUTE_ALWAYS_INLINE isMeshed() const {
return is(MeshedOffset);
}
private:
inline bool ATTRIBUTE_ALWAYS_INLINE is(size_t offset) const {
const auto mask = getMask(offset);
return (_flags.load(std::memory_order_acquire) & mask) == mask;
}
inline void set(size_t offset) {
const uint32_t mask = getMask(offset);
uint32_t oldFlags = _flags.load(std::memory_order_relaxed);
while (!atomic_compare_exchange_weak_explicit(&_flags,
&oldFlags, // old val
oldFlags | mask, // new val
std::memory_order_release, // success mem model
std::memory_order_relaxed)) {
}
}
inline void unset(size_t offset) {
const uint32_t mask = getMask(offset);
uint32_t oldFlags = _flags.load(std::memory_order_relaxed);
while (!atomic_compare_exchange_weak_explicit(&_flags,
&oldFlags, // old val
oldFlags & ~mask, // new val
std::memory_order_release, // success mem model
std::memory_order_relaxed)) {
}
}
std::atomic<uint32_t> _flags;
};

class MiniHeap {
private:
DISALLOW_COPY_AND_ASSIGN(MiniHeap);
public:
MiniHeap(void *arenaBegin, Span span, size_t objectCount, size_t objectSize)
: _bitmap(objectCount),
_span(span),
_flags(objectCount, objectCount > 1 ? SizeMap::SizeClass(objectSize) : 1, 0),
_objectSize(objectSize),
_objectSizeReciprocal(1.0 / (float)objectSize) {
// debug("sizeof(MiniHeap): %zu", sizeof(MiniHeap));
d_assert(_bitmap.inUseCount() == 0);
const auto expectedSpanSize = _span.byteLength();
d_assert_msg(expectedSpanSize == spanSize(), "span size %zu == %zu (%u, %zu)", expectedSpanSize, spanSize(),
maxCount(), this->objectSize());
// d_assert_msg(spanSize == static_cast<size_t>(_spanSize), "%zu != %hu", spanSize, _spanSize);
// d_assert_msg(objectSize == static_cast<size_t>(objectSize()), "%zu != %hu", objectSize, _objectSize);
d_assert(!_nextMiniHeap.hasValue());
// debug("new:\n");
// dumpDebug();
}
~MiniHeap() {
// debug("destruct:\n");
// dumpDebug();
}
inline Span span() const {
return _span;
}
void printOccupancy() const {
mesh::debug("{\"name\": \"%p\", \"object-size\": %zu, \"length\": %u, \"mesh-count\": %zu, \"bitmap\": \"%s\"}\n",
this, objectSize(), maxCount(), meshCount(), _bitmap.to_string(maxCount()).c_str());
}
inline void ATTRIBUTE_ALWAYS_INLINE free(void *arenaBegin, void *ptr) {
// TODO: this should be removed when the logic in globalFree is
// updated to allow the 'race' between lock-free freeing and
// meshing
d_assert(!isMeshed());
const ssize_t off = getOff(arenaBegin, ptr);
if (unlikely(off < 0)) {
d_assert(false);
return;
}
freeOff(off);
}
inline void ATTRIBUTE_ALWAYS_INLINE freeOff(size_t off) {
d_assert_msg(_bitmap.isSet(off), "MiniHeap(%p) expected bit %zu to be set (svOff:%u)", this, off, svOffset());
_bitmap.unset(off);
}
/// Copies (for meshing) the contents of src into our span.
inline void consume(const void *arenaBegin, MiniHeap *src) {
// this would be bad
d_assert(src != this);
d_assert(objectSize() == src->objectSize());
src->setMeshed();
const auto srcSpan = src->getSpanStart(arenaBegin);
// for each object in src, copy it to our backing span + update
// our bitmap and in-use count
for (auto const &off : src->bitmap()) {
d_assert(off < maxCount());
d_assert(!_bitmap.isSet(off));
void *srcObject = reinterpret_cast<void *>(srcSpan + off * objectSize());
// need to ensure we update the bitmap and in-use count
void *dstObject = mallocAt(arenaBegin, off);
// debug("meshing: %zu (%p <- %p, %zu)\n", off, dstObject, srcObject, objectSize());
d_assert(dstObject != nullptr);
memcpy(dstObject, srcObject, objectSize());
// debug("\t'%s'\n", dstObject);
// debug("\t'%s'\n", srcObject);
src->freeOff(off);
}
trackMeshedSpan(GetMiniHeapID(src));
}
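// Rough sketch of how consume() above is driven during meshing (the actual
// driver lives in the global heap, not in this header):
//
//   // assuming dst and src share a size class and have non-overlapping bitmaps
//   dst->consume(arenaBegin, src);  // copy src's live objects into dst
//   // ...the arena then remaps src's virtual span onto dst's physical pages,
//   // so pointers into either span remain valid.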
inline size_t spanSize() const {
return _span.byteLength();
}
inline uint32_t ATTRIBUTE_ALWAYS_INLINE maxCount() const {
return _flags.maxCount();
}
inline size_t objectSize() const {
return _objectSize;
}
inline int sizeClass() const {
return _flags.sizeClass();
}
inline uintptr_t getSpanStart(const void *arenaBegin) const {
const auto beginval = reinterpret_cast<uintptr_t>(arenaBegin);
return beginval + _span.offset * kPageSize;
}
inline bool ATTRIBUTE_ALWAYS_INLINE isEmpty() const {
return _bitmap.inUseCount() == 0;
}
inline bool ATTRIBUTE_ALWAYS_INLINE isFull() const {
return _bitmap.inUseCount() == maxCount();
}
inline uint32_t ATTRIBUTE_ALWAYS_INLINE inUseCount() const {
return _bitmap.inUseCount();
}
inline size_t bytesFree() const {
// bytes still available (unallocated) in this miniheap
return (maxCount() - inUseCount()) * objectSize();
}
inline void setMeshed() {
_flags.setMeshed();
}
inline void setAttached(pid_t current) {
// mesh::debug("MiniHeap(%p:%5zu): current <- %u\n", this, objectSize(), current);
_current.store(current, std::memory_order::memory_order_release);
}
inline uint8_t svOffset() const {
return _flags.svOffset();
}
inline void setSvOffset(uint8_t off) {
// debug("MiniHeap(%p) SET svOff:%zu)", this, off);
_flags.setSvOffset(off);
}
inline pid_t current() const {
return _current.load(std::memory_order::memory_order_acquire);
}
inline void unsetAttached() {
// mesh::debug("MiniHeap(%p:%5zu): current <- UNSET\n", this, objectSize());
_current.store(0, std::memory_order::memory_order_release);
}
inline bool isAttached() const {
return current() != 0;
}
inline bool ATTRIBUTE_ALWAYS_INLINE isMeshed() const {
return _flags.isMeshed();
}
inline bool ATTRIBUTE_ALWAYS_INLINE hasMeshed() const {
return _nextMiniHeap.hasValue();
}
inline bool isMeshingCandidate() const {
return !isAttached() && objectSize() < kPageSize;
}
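// Presumably isMeshingCandidate() excludes attached heaps because a thread is
// still allocating from them, and excludes page-sized (or larger) objects
// because such spans have no partially-used pages to combine.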
/// Returns the fraction full (in the range [0, 1]) that this miniheap is.
inline double fullness() const {
return static_cast<double>(inUseCount()) / static_cast<double>(maxCount());
}
const internal::Bitmap &bitmap() const {
return _bitmap;
}
internal::Bitmap &writableBitmap() {
return _bitmap;
}
void trackMeshedSpan(MiniHeapID id) {
hard_assert(id.hasValue());
if (!_nextMiniHeap.hasValue()) {
_nextMiniHeap = id;
} else {
GetMiniHeap(_nextMiniHeap)->trackMeshedSpan(id);
}
}
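// Meshed miniheaps form a singly-linked list through _nextMiniHeap;
// trackMeshedSpan appends by walking to the tail of that list.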
public:
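// Invoke cb on this miniheap and then on each miniheap meshed into it;
// traversal stops early as soon as cb returns true.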
template <class Callback>
inline void forEachMeshed(Callback cb) const {
if (cb(this))
return;
if (_nextMiniHeap.hasValue()) {
const auto mh = GetMiniHeap(_nextMiniHeap);
mh->forEachMeshed(cb);
}
}
template <class Callback>
inline void forEachMeshed(Callback cb) {
if (cb(this))
return;
if (_nextMiniHeap.hasValue()) {
auto mh = GetMiniHeap(_nextMiniHeap);
mh->forEachMeshed(cb);
}
}
size_t meshCount() const {
size_t count = 0;
const MiniHeap *mh = this;
while (mh != nullptr) {
count++;
auto next = mh->_nextMiniHeap;
mh = next.hasValue() ? GetMiniHeap(next) : nullptr;
}
return count;
}
internal::BinToken getBinToken() const {
return _token;
}
void setBinToken(internal::BinToken token) {
_token = token;
}
/// public for meshTest only
inline void *mallocAt(const void *arenaBegin, size_t off) {
if (!_bitmap.tryToSet(off)) {
mesh::debug("%p: MA %u", this, off);
dumpDebug();
return nullptr;
}
return ptrFromOffset(arenaBegin, off);
}
inline void *ptrFromOffset(const void *arenaBegin, size_t off) {
return reinterpret_cast<void *>(getSpanStart(arenaBegin) + off * objectSize());
}
inline bool operator<(MiniHeap *&rhs) noexcept {
return this->inUseCount() < rhs->inUseCount();
}
void dumpDebug() const {
const auto heapPages = spanSize() / HL::CPUInfo::PageSize;
const size_t inUseCount = this->inUseCount();
const size_t meshCount = this->meshCount();
mesh::debug("MiniHeap(%p:%5zu): %3u objects on %2zu pages (inUse: %zu, spans: %zu)\t%#zx-%#zx\n", this, objectSize(),
maxCount(), heapPages, inUseCount, meshCount, _span.offset * kPageSize,
_span.offset * kPageSize + spanSize());
mesh::debug("\t%s\n", _bitmap.to_string(maxCount()).c_str());
}
// this only works for unmeshed miniheaps
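// Both getUnmeshedOff and getOff recover an object index by multiplying the
// byte offset by the precomputed reciprocal of the object size rather than
// dividing; debug builds cross-check the result against a real division.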
inline uint8_t ATTRIBUTE_ALWAYS_INLINE getUnmeshedOff(const void *arenaBegin, void *ptr) const {
const auto ptrval = reinterpret_cast<uintptr_t>(ptr);
uintptr_t span = reinterpret_cast<uintptr_t>(arenaBegin) + _span.offset * kPageSize;
d_assert(span != 0);
const size_t off = (ptrval - span) * _objectSizeReciprocal;
#ifndef NDEBUG
const size_t off2 = (ptrval - span) / _objectSize;
hard_assert_msg(off == off2, "%zu != %zu", off, off2);
#endif
d_assert(off < maxCount());
return off;
}
inline uint8_t ATTRIBUTE_ALWAYS_INLINE getOff(const void *arenaBegin, void *ptr) const {
const auto span = spanStart(reinterpret_cast<uintptr_t>(arenaBegin), ptr);
d_assert(span != 0);
const auto ptrval = reinterpret_cast<uintptr_t>(ptr);
const size_t off = (ptrval - span) * _objectSizeReciprocal;
#ifndef NDEBUG
const size_t off2 = (ptrval - span) / _objectSize;
hard_assert_msg(off == off2, "%zu != %zu", off, off2);
#endif
d_assert(off < maxCount());
return off;
}
protected:
inline uintptr_t ATTRIBUTE_ALWAYS_INLINE spanStart(uintptr_t arenaBegin, void *ptr) const {
const auto ptrval = reinterpret_cast<uintptr_t>(ptr);
const auto len = _span.byteLength();
// manually unroll loop once to capture the common case of
// un-meshed miniheaps
uintptr_t spanptr = arenaBegin + _span.offset * kPageSize;
if (likely(spanptr <= ptrval && ptrval < spanptr + len)) {
return spanptr;
}
return spanStartSlowpath(arenaBegin, ptrval);
}
uintptr_t ATTRIBUTE_NEVER_INLINE spanStartSlowpath(uintptr_t arenaBegin, uintptr_t ptrval) const {
const auto len = _span.byteLength();
uintptr_t spanptr = 0;
const MiniHeap *mh = this;
while (true) {
if (unlikely(!mh->_nextMiniHeap.hasValue())) {
abort();
}
mh = GetMiniHeap(mh->_nextMiniHeap);
const uintptr_t meshedSpanptr = arenaBegin + mh->span().offset * kPageSize;
if (meshedSpanptr <= ptrval && ptrval < meshedSpanptr + len) {
spanptr = meshedSpanptr;
break;
}
}
return spanptr;
}
// member comments give: size in bytes (running total of the struct size)
internal::Bitmap _bitmap; // 32 bytes (32)
internal::BinToken _token{
internal::bintoken::BinMax,
internal::bintoken::Max,
}; // 4 bytes (36)
atomic<pid_t> _current{0}; // 4 bytes (40)
const Span _span; // 8 bytes (48)
Flags _flags; // 4 bytes (52)
const uint32_t _objectSize; // 4 bytes (56)
const float _objectSizeReciprocal; // 4 bytes (60)
MiniHeapID _nextMiniHeap{}; // 4 bytes (64)
};

typedef FixedArray<MiniHeap, 63> MiniHeapArray;
static_assert(sizeof(pid_t) == 4, "pid_t not 32-bits!");
static_assert(sizeof(mesh::internal::Bitmap) == 32, "Bitmap too big!");
static_assert(sizeof(MiniHeap) == 64, "MiniHeap too big!");
static_assert(sizeof(MiniHeapArray) == 64 * sizeof(void *), "MiniHeapArray too big!");
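// The asserts above pin MiniHeap to exactly 64 bytes (presumably so each one
// fits in a single cache line) and bound MiniHeapArray to 64 machine words.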
} // namespace mesh
#endif // MESH__MINI_HEAP_H