// Copyright (C) 2021 The Qt Company Ltd.
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only

#include "PageAllocation.h"
#include "PageReservation.h"

#include <private/qnumeric_p.h>
#include <private/qv4alloca_p.h>
#include <private/qv4engine_p.h>
#include <private/qv4identifiertable_p.h>
#include <private/qv4mapobject_p.h>
#include <private/qv4mm_p.h>
#include <private/qv4object_p.h>
#include <private/qv4profiling_p.h>
#include <private/qv4qobjectwrapper_p.h>
#include <private/qv4setobject_p.h>
#include <private/qv4stackframe_p.h>

#include <QtQml/qqmlengine.h>

#include <QtCore/qalgorithms.h>
#include <QtCore/qelapsedtimer.h>
#include <QtCore/qloggingcategory.h>
#include <QtCore/qmap.h>
#include <QtCore/qscopedvaluerollback.h>

#include <algorithm>
#include <chrono>
#include <cstdlib>

//#define MM_STATS

#if !defined(MM_STATS) && !defined(QT_NO_DEBUG)
#define MM_STATS
#endif

#if MM_DEBUG
#define DEBUG qDebug() << "MM:"
#else
#define DEBUG if (1) ; else qDebug() << "MM:"
#endif

#ifdef V4_USE_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#ifdef V4_USE_HEAPTRACK
#include <heaptrack_api.h>
#endif

#if OS(QNX)
#include <sys/storage.h> // __tls()
#endif

#if USE(PTHREADS) && HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#endif

Q_STATIC_LOGGING_CATEGORY(lcGcStats, "qt.qml.gc.statistics")
Q_STATIC_LOGGING_CATEGORY(lcGcAllocatorStats, "qt.qml.gc.allocatorStats")
Q_STATIC_LOGGING_CATEGORY(lcGcStateTransitions, "qt.qml.gc.stateTransitions")
Q_STATIC_LOGGING_CATEGORY(lcGcForcedRuns, "qt.qml.gc.forcedRuns")
Q_STATIC_LOGGING_CATEGORY(lcGcStepExecution, "qt.qml.gc.stepExecution")

using namespace WTF;

QT_BEGIN_NAMESPACE

namespace QV4 {

enum {
    MinSlotsGCLimit = QV4::Chunk::AvailableSlots*16,
    GCOverallocation = 200 /* Max overallocation by the GC in % */
};
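// A MemorySegment reserves address space for up to NumChunks heap chunks. Pages are only
// committed on demand in MemorySegment::allocate() and decommitted again in free(); the
// 64-bit allocatedMap tracks which chunk slots of the reservation are currently in use.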
struct MemorySegment {
    enum {
#ifdef Q_OS_RTEMS
        NumChunks = sizeof(quint64),
#else
        NumChunks = 8*sizeof(quint64),
#endif
        SegmentSize = NumChunks*Chunk::ChunkSize,
    };

    MemorySegment(size_t size)
    {
        size += Chunk::ChunkSize; // make sure we can get enough 64k-aligned memory
        if (size < SegmentSize)
            size = SegmentSize;

        pageReservation = PageReservation::reserve(size, OSAllocator::JSGCHeapPages);
        base = reinterpret_cast<Chunk *>((reinterpret_cast<quintptr>(pageReservation.base()) + Chunk::ChunkSize - 1) & ~(Chunk::ChunkSize - 1));
        nChunks = NumChunks;
        availableBytes = size - (reinterpret_cast<quintptr>(base) - reinterpret_cast<quintptr>(pageReservation.base()));
        if (availableBytes < SegmentSize)
            --nChunks;
    }
    MemorySegment(MemorySegment &&other) {
        qSwap(pageReservation, other.pageReservation);
        qSwap(base, other.base);
        qSwap(allocatedMap, other.allocatedMap);
        qSwap(availableBytes, other.availableBytes);
        qSwap(nChunks, other.nChunks);
    }

    ~MemorySegment() {
        if (base)
            pageReservation.deallocate();
    }

    void setBit(size_t index) {
        Q_ASSERT(index < nChunks);
        quint64 bit = static_cast<quint64>(1) << index;
        allocatedMap |= bit;
    }
    void clearBit(size_t index) {
        Q_ASSERT(index < nChunks);
        quint64 bit = static_cast<quint64>(1) << index;
        allocatedMap &= ~bit;
    }
    bool testBit(size_t index) const {
        Q_ASSERT(index < nChunks);
        quint64 bit = static_cast<quint64>(1) << index;
        return (allocatedMap & bit);
    }

    Chunk *allocate(size_t size);
    void free(Chunk *chunk, size_t size) {
        DEBUG << "freeing chunk" << chunk;
        size_t index = static_cast<size_t>(chunk - base);
        size_t end = qMin(static_cast<size_t>(NumChunks), index + (size - 1)/Chunk::ChunkSize + 1);
        while (index < end) {
            Q_ASSERT(testBit(index));
            clearBit(index);
            ++index;
        }

        size_t pageSize = WTF::pageSize();
        size = (size + pageSize - 1) & ~(pageSize - 1);
#if !defined(Q_OS_LINUX) && !defined(Q_OS_WIN)
        // Linux and Windows zero out pages that have been decommitted and get committed again.
        // Unfortunately that's not true on other OSes (e.g. BSD based ones), so zero out the
        // memory before decommit, so that we can be sure that all chunks we allocate will be
        // zero initialized.
        memset(chunk, 0, size);
#endif
        pageReservation.decommit(chunk, size);
    }

    bool contains(Chunk *c) const {
        return c >= base && c < base + nChunks;
    }

    PageReservation pageReservation;
    Chunk *base = nullptr;
    quint64 allocatedMap = 0;
    size_t availableBytes = 0;
    uint nChunks = 0;
};

Chunk *MemorySegment::allocate(size_t size)
{
    if (!allocatedMap && size >= SegmentSize) {
        // chunk allocated for one huge allocation
        Q_ASSERT(availableBytes >= size);
        pageReservation.commit(base, size);
        allocatedMap = ~static_cast<quint64>(0);
        return base;
    }
    size_t requiredChunks = (size + sizeof(Chunk) - 1)/sizeof(Chunk);
    uint sequence = 0;
    Chunk *candidate = nullptr;
    for (uint i = 0; i < nChunks; ++i) {
        if (!testBit(i)) {
            if (!candidate)
                candidate = base + i;
            ++sequence;
        } else {
            candidate = nullptr;
            sequence = 0;
        }
        if (sequence == requiredChunks) {
            pageReservation.commit(candidate, size);
            for (uint i = 0; i < requiredChunks; ++i)
                setBit(candidate - base + i);
            DEBUG << "allocated chunk " << candidate << Qt::hex << size;

            return candidate;
        }
    }
    return nullptr;
}
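// The ChunkAllocator owns all MemorySegments and hands out page-aligned chunks of at least
// Chunk::ChunkSize bytes. A request is first served from an existing segment; a new segment
// is reserved only when no segment has a long enough run of free chunk slots.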
struct ChunkAllocator {
    ChunkAllocator() {}

    size_t requiredChunkSize(size_t size) {
        size += Chunk::HeaderSize; // space required for the Chunk header
        size_t pageSize = WTF::pageSize();
        size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes
        if (size < Chunk::ChunkSize)
            size = Chunk::ChunkSize;
        return size;
    }

    Chunk *allocate(size_t size = 0);
    void free(Chunk *chunk, size_t size = 0);

    std::vector<MemorySegment> memorySegments;
};

Chunk *ChunkAllocator::allocate(size_t size)
{
    size = requiredChunkSize(size);
    for (auto &m : memorySegments) {
        if (~m.allocatedMap) {
            Chunk *c = m.allocate(size);
            if (c)
                return c;
        }
    }

    // allocate a new segment
    memorySegments.push_back(MemorySegment(size));
    Chunk *c = memorySegments.back().allocate(size);
    Q_ASSERT(c);
    return c;
}

void ChunkAllocator::free(Chunk *chunk, size_t size)
{
    size = requiredChunkSize(size);
    for (auto &m : memorySegments) {
        if (m.contains(chunk)) {
            m.free(chunk, size);
            return;
        }
    }
    Q_ASSERT(false);
}

#ifdef DUMP_SWEEP
QString binary(quintptr n) {
    QString s = QString::number(n, 2);
    while (s.length() < 64)
        s.prepend(QChar::fromLatin1('0'));
    return s;
}
#define SDUMP qDebug
#else
QString binary(quintptr) { return QString(); }
#define SDUMP if (1) ; else qDebug
#endif

// Stores a classname -> freed count mapping.
typedef QHash<const char*, int> MMStatsHash;
Q_GLOBAL_STATIC(MMStatsHash, freedObjectStatsGlobal)

// This indirection avoids sticking QHash code in each of the call sites, which
// shaves off some instructions in the case that it's unused.
static void increaseFreedCountForClass(const char *className)
{
    (*freedObjectStatsGlobal())[className]++;
}

//bool Chunk::sweep(ClassDestroyStatsCallback classCountPtr)
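// Sweep one chunk: every slot that is flagged as an object but was not marked black during the
// mark phase gets destroyed (calling the vtable's destroy hook if present), and its object and
// extents bits are cleared. Returns whether the chunk still contains live objects.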
bool Chunk::sweep(ExecutionEngine *engine)
{
    bool hasUsedSlots = false;
    SDUMP() << "sweeping chunk" << this;
    HeapItem *o = realBase();
    bool lastSlotFree = false;
    for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
        quintptr toFree = objectBitmap[i] ^ blackBitmap[i];
        Q_ASSERT((toFree & objectBitmap[i]) == toFree); // check all black objects are marked as being used
        quintptr e = extendsBitmap[i];
        SDUMP() << "   index=" << i;
        SDUMP() << "      toFree =" << binary(toFree);
        SDUMP() << "      black =" << binary(blackBitmap[i]);
        SDUMP() << "      object =" << binary(objectBitmap[i]);
        SDUMP() << "      extends =" << binary(e);
        if (lastSlotFree)
            e &= (e + 1); // clear all lowest extent bits
        while (toFree) {
            uint index = qCountTrailingZeroBits(toFree);
            quintptr bit = (static_cast<quintptr>(1) << index);

            toFree ^= bit; // mask out freed slot

            // remove all extends slots that have been freed
            // this is a bit of bit trickery.
            quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit
            quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object
            quintptr result = objmask + 1;
            Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something
            result |= mask; // ensure we don't clear stuff to the right of the current object
            e &= result;

            HeapItem *itemToFree = o + index;
            Heap::Base *b = *itemToFree;
            const VTable *v = b->internalClass->vtable;
//            if (Q_UNLIKELY(classCountPtr))
//                classCountPtr(v->className);
            if (v->destroy) {
                v->destroy(b);
                b->_checkIsDestroyed();
            }
#ifdef V4_USE_HEAPTRACK
            heaptrack_report_free(itemToFree);
#endif
        }
        Q_V4_PROFILE_DEALLOC(engine, qPopulationCount((objectBitmap[i] | extendsBitmap[i])
                                                      - (blackBitmap[i] | e)) * Chunk::SlotSize,
                             Profiling::SmallItem);
        objectBitmap[i] = blackBitmap[i];
        hasUsedSlots |= (blackBitmap[i] != 0);
        extendsBitmap[i] = e;
        lastSlotFree = !((objectBitmap[i]|extendsBitmap[i]) >> (sizeof(quintptr)*8 - 1));
        SDUMP() << "   new extends =" << binary(e);
        SDUMP() << "   lastSlotFree" << lastSlotFree;
        Q_ASSERT((objectBitmap[i] & extendsBitmap[i]) == 0);
        o += Chunk::Bits;
    }
    return hasUsedSlots;
}

void Chunk::freeAll(ExecutionEngine *engine)
{
    HeapItem *o = realBase();
    for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
        quintptr toFree = objectBitmap[i];
        quintptr e = extendsBitmap[i];
        while (toFree) {
            uint index = qCountTrailingZeroBits(toFree);
            quintptr bit = (static_cast<quintptr>(1) << index);

            toFree ^= bit; // mask out freed slot

            // remove all extends slots that have been freed
            // this is a bit of bit trickery.
            quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit
            quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object
            quintptr result = objmask + 1;
            Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something
            result |= mask; // ensure we don't clear stuff to the right of the current object
            e &= result;

            HeapItem *itemToFree = o + index;
            Heap::Base *b = *itemToFree;
            if (b->internalClass->vtable->destroy) {
                b->internalClass->vtable->destroy(b);
                b->_checkIsDestroyed();
            }
#ifdef V4_USE_HEAPTRACK
            heaptrack_report_free(itemToFree);
#endif
        }
        Q_V4_PROFILE_DEALLOC(engine, (qPopulationCount(objectBitmap[i]|extendsBitmap[i])
                                      - qPopulationCount(e)) * Chunk::SlotSize, Profiling::SmallItem);
        objectBitmap[i] = 0;
        extendsBitmap[i] = e;
        o += Chunk::Bits;
    }
}

void Chunk::resetBlackBits()
{
    memset(blackBitmap, 0, sizeof(blackBitmap));
}
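// After sweeping, rebuild the allocator's free lists: scan the object/extents bitmaps for runs
// of unused slots and queue each run in the bin matching its length, with the last bin
// collecting all oversized runs.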
void Chunk::sortIntoBins(HeapItem **bins, uint nBins)
{
    HeapItem *base = realBase();
#if QT_POINTER_SIZE == 8
    const int start = 0;
#else
    const int start = 1;
#endif
    uint freeSlots = 0;
    uint allocatedSlots = 0;

    for (int i = start; i < EntriesInBitmap; ++i) {
        quintptr usedSlots = (objectBitmap[i]|extendsBitmap[i]);
#if QT_POINTER_SIZE == 8
        if (!i)
            usedSlots |= (static_cast<quintptr>(1) << (HeaderSize/SlotSize)) - 1;
#endif
        allocatedSlots += qPopulationCount(usedSlots);
        while (1) {
            uint index = qCountTrailingZeroBits(usedSlots + 1);
            if (index == Bits)
                break;
            uint freeStart = i*Bits + index;
            usedSlots &= ~((static_cast<quintptr>(1) << index) - 1);
            while (!usedSlots) {
                if (++i < EntriesInBitmap) {
                    usedSlots = (objectBitmap[i]|extendsBitmap[i]);
                } else {
                    Q_ASSERT(i == EntriesInBitmap);
                    // Overflows to 0 when counting trailing zeroes above in next iteration.
                    // Then, all the bits are zeroes and we break.
                    usedSlots = std::numeric_limits<quintptr>::max();
                    break;
                }
                allocatedSlots += qPopulationCount(usedSlots);
            }
            HeapItem *freeItem = base + freeStart;

            index = qCountTrailingZeroBits(usedSlots);
            usedSlots |= (quintptr(1) << index) - 1;
            uint freeEnd = i*Bits + index;
            uint nSlots = freeEnd - freeStart;
            freeSlots += nSlots;
            Q_ASSERT(freeEnd > freeStart && freeEnd <= NumSlots);
            freeItem->freeData.availableSlots = nSlots;
            uint bin = qMin(nBins - 1, nSlots);
            freeItem->freeData.next = bins[bin];
            bins[bin] = freeItem;
        }
    }
    Q_ASSERT(freeSlots + allocatedSlots == (EntriesInBitmap - start) * 8 * sizeof(quintptr));
}
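// Allocation strategy of the BlockAllocator: try an exact-size bin first, then bump-allocate
// from the tail of the current chunk, then split a larger free item (from the overflow bin or a
// bigger exact bin). Only when all of that fails is a new chunk requested, and only if
// forceAllocation is set; otherwise nullptr is returned so the caller can run a GC first.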
HeapItem *BlockAllocator::allocate(size_t size, bool forceAllocation) {
    Q_ASSERT((size % Chunk::SlotSize) == 0);
    size_t slotsRequired = size >> Chunk::SlotSizeShift;

    if (allocationStats)
        ++allocationStats[binForSlots(slotsRequired)];

    HeapItem **last;

    HeapItem *m;

    if (slotsRequired < NumBins - 1) {
        m = freeBins[slotsRequired];
        if (m) {
            freeBins[slotsRequired] = m->freeData.next;
            goto done;
        }
    }

    if (nFree >= slotsRequired) {
        // use bump allocation
        Q_ASSERT(nextFree);
        m = nextFree;
        nextFree += slotsRequired;
        nFree -= slotsRequired;
        goto done;
    }

    // search last bin for a large enough item
    last = &freeBins[NumBins - 1];
    while ((m = *last)) {
        if (m->freeData.availableSlots >= slotsRequired) {
            *last = m->freeData.next; // take it out of the list

            size_t remainingSlots = m->freeData.availableSlots - slotsRequired;
            if (remainingSlots == 0)
                goto done;

            HeapItem *remainder = m + slotsRequired;
            if (remainingSlots > nFree) {
                if (nFree) {
                    size_t bin = binForSlots(nFree);
                    nextFree->freeData.next = freeBins[bin];
                    nextFree->freeData.availableSlots = nFree;
                    freeBins[bin] = nextFree;
                }
                nextFree = remainder;
                nFree = remainingSlots;
            } else {
                remainder->freeData.availableSlots = remainingSlots;
                size_t binForRemainder = binForSlots(remainingSlots);
                remainder->freeData.next = freeBins[binForRemainder];
                freeBins[binForRemainder] = remainder;
            }
            goto done;
        }
        last = &m->freeData.next;
    }

    if (slotsRequired < NumBins - 1) {
        // check if we can split up another slot
        for (size_t i = slotsRequired + 1; i < NumBins - 1; ++i) {
            m = freeBins[i];
            if (m) {
                freeBins[i] = m->freeData.next; // take it out of the list
                size_t remainingSlots = i - slotsRequired;
                Q_ASSERT(remainingSlots < NumBins - 1);
                HeapItem *remainder = m + slotsRequired;
                remainder->freeData.availableSlots = remainingSlots;
                remainder->freeData.next = freeBins[remainingSlots];
                freeBins[remainingSlots] = remainder;
                goto done;
            }
        }
    }

    if (!m) {
        if (!forceAllocation)
            return nullptr;
        if (nFree) {
            // Save any remaining slots of the current chunk
            // for later, smaller allocations.
            size_t bin = binForSlots(nFree);
            nextFree->freeData.next = freeBins[bin];
            nextFree->freeData.availableSlots = nFree;
            freeBins[bin] = nextFree;
        }
        Chunk *newChunk = chunkAllocator->allocate();
        Q_V4_PROFILE_ALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
        chunks.push_back(newChunk);
        nextFree = newChunk->first();
        nFree = Chunk::AvailableSlots;
        m = nextFree;
        nextFree += slotsRequired;
        nFree -= slotsRequired;
    }

done:
    m->setAllocatedSlots(slotsRequired);
    Q_V4_PROFILE_ALLOC(engine, slotsRequired * Chunk::SlotSize, Profiling::SmallItem);
#ifdef V4_USE_HEAPTRACK
    heaptrack_report_alloc(m, slotsRequired * Chunk::SlotSize);
#endif
    return m;
}

void BlockAllocator::sweep()
{
    nextFree = nullptr;
    nFree = 0;
    memset(freeBins, 0, sizeof(freeBins));

    usedSlotsAfterLastSweep = 0;

    auto firstEmptyChunk = std::partition(chunks.begin(), chunks.end(), [this](Chunk *c) {
        return c->sweep(engine);
    });

    std::for_each(chunks.begin(), firstEmptyChunk, [this](Chunk *c) {
        c->sortIntoBins(freeBins, NumBins);
        usedSlotsAfterLastSweep += c->nUsedSlots();
    });

    // only free the chunks at the end, so that the sweep() calls above cannot
    // indirectly access freed memory
    std::for_each(firstEmptyChunk, chunks.end(), [this](Chunk *c) {
        Q_V4_PROFILE_DEALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
        chunkAllocator->free(c);
    });

    chunks.erase(firstEmptyChunk, chunks.end());
}

void BlockAllocator::freeAll()
{
    for (auto c : chunks)
        c->freeAll(engine);
    for (auto c : chunks) {
        Q_V4_PROFILE_DEALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
        chunkAllocator->free(c);
    }
}

void BlockAllocator::resetBlackBits()
{
    for (auto c : chunks)
        c->resetBlackBits();
}
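// The HugeItemAllocator hands out one chunk per item. Allocations of at least half a
// MemorySegment get a dedicated segment of their own instead of going through the shared
// ChunkAllocator.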
HeapItem *HugeItemAllocator::allocate(size_t size) {
    MemorySegment *m = nullptr;
    Chunk *c = nullptr;
    if (size >= MemorySegment::SegmentSize/2) {
        // too large to handle through the ChunkAllocator, let's get our own memory segment
        size += Chunk::HeaderSize; // space required for the Chunk header
        size_t pageSize = WTF::pageSize();
        size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes
        m = new MemorySegment(size);
        c = m->allocate(size);
    } else {
        c = chunkAllocator->allocate(size);
    }
    Q_ASSERT(c);
    chunks.push_back(HugeChunk{m, c, size});
    Chunk::setBit(c->objectBitmap, c->first() - c->realBase());
    Q_V4_PROFILE_ALLOC(engine, size, Profiling::LargeItem);
#ifdef V4_USE_HEAPTRACK
    heaptrack_report_alloc(c, size);
#endif
    return c->first();
}

static void freeHugeChunk(ChunkAllocator *chunkAllocator, const HugeItemAllocator::HugeChunk &c, ClassDestroyStatsCallback classCountPtr)
{
    HeapItem *itemToFree = c.chunk->first();
    Heap::Base *b = *itemToFree;
    const VTable *v = b->internalClass->vtable;
    if (Q_UNLIKELY(classCountPtr))
        classCountPtr(v->className);

    if (v->destroy) {
        v->destroy(b);
        b->_checkIsDestroyed();
    }
    if (c.segment) {
        // own memory segment
        c.segment->free(c.chunk, c.size);
        delete c.segment;
    } else {
        chunkAllocator->free(c.chunk, c.size);
    }
#ifdef V4_USE_HEAPTRACK
    heaptrack_report_free(c.chunk);
#endif
}

void HugeItemAllocator::sweep(ClassDestroyStatsCallback classCountPtr)
{
    auto isBlack = [this, classCountPtr] (const HugeChunk &c) {
        bool b = c.chunk->first()->isBlack();
        Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
        if (!b) {
            Q_V4_PROFILE_DEALLOC(engine, c.size, Profiling::LargeItem);
            freeHugeChunk(chunkAllocator, c, classCountPtr);
        }
        return !b;
    };

    auto newEnd = std::remove_if(chunks.begin(), chunks.end(), isBlack);
    chunks.erase(newEnd, chunks.end());
}

void HugeItemAllocator::resetBlackBits()
{
    for (auto c : chunks)
        Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
}

void HugeItemAllocator::freeAll()
{
    for (auto &c : chunks) {
        Q_V4_PROFILE_DEALLOC(engine, c.size, Profiling::LargeItem);
        freeHugeChunk(chunkAllocator, c, nullptr);
    }
}
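// The incremental garbage collector is driven by a small state machine. Each of the functions
// below implements one GCState: it does a bounded amount of work (respecting the deadline held
// by the GCStateMachine) and returns the state to execute next, returning its own state again
// when it ran out of time and needs to be resumed.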
namespace {
using ExtraData = GCStateInfo::ExtraData;
GCState markStart(GCStateMachine *that, ExtraData &)
{
    // initialize the mark stack
    that->mm->m_markStack = std::make_unique<MarkStack>(that->mm->engine);
    that->mm->engine->isGCOngoing = true;
    return GCState::MarkGlobalObject;
}

GCState markGlobalObject(GCStateMachine *that, ExtraData &)
{
    that->mm->engine->markObjects(that->mm->m_markStack.get());
    return GCState::MarkJSStack;
}

GCState markJSStack(GCStateMachine *that, ExtraData &)
{
    that->mm->collectFromJSStack(that->mm->markStack());
    return GCState::InitMarkPersistentValues;
}

GCState initMarkPersistentValues(GCStateMachine *that, ExtraData &stateData)
{
    if (!that->mm->m_persistentValues)
        return GCState::InitMarkWeakValues; // no persistent values to mark
    stateData = GCIteratorStorage { that->mm->m_persistentValues->begin() };
    return GCState::MarkPersistentValues;
}

static constexpr int markLoopIterationCount = 1024;

bool wasDrainNecessary(MarkStack *ms, QDeadlineTimer deadline)
{
    if (ms->remainingBeforeSoftLimit() > markLoopIterationCount)
        return false;
    // drain
    ms->drain(deadline);
    return true;
}

GCState markPersistentValues(GCStateMachine *that, ExtraData &stateData) {
    auto markStack = that->mm->markStack();
    if (wasDrainNecessary(markStack, that->deadline) && that->deadline.hasExpired())
        return GCState::MarkPersistentValues;
    PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
    // avoid hitting the timer too often by batching iterations
    for (int i = 0; i < markLoopIterationCount; ++i) {
        if (!it.p)
            return GCState::InitMarkWeakValues;
        if (Managed *m = (*it).as<Managed>())
            m->mark(markStack);
        ++it;
    }
    return GCState::MarkPersistentValues;
}

GCState initMarkWeakValues(GCStateMachine *that, ExtraData &stateData)
{
    stateData = GCIteratorStorage { that->mm->m_weakValues->begin() };
    return GCState::MarkWeakValues;
}

GCState markWeakValues(GCStateMachine *that, ExtraData &stateData)
{
    auto markStack = that->mm->markStack();
    if (wasDrainNecessary(markStack, that->deadline) && that->deadline.hasExpired())
        return GCState::MarkWeakValues;
    PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
    // avoid hitting the timer too often by batching iterations
    for (int i = 0; i < markLoopIterationCount; ++i) {
        if (!it.p)
            return GCState::MarkDrain;
        QObjectWrapper *qobjectWrapper = (*it).as<QObjectWrapper>();
        ++it;
        if (!qobjectWrapper)
            continue;
        QObject *qobject = qobjectWrapper->object();
        if (!qobject)
            continue;
        bool keepAlive = QQmlData::keepAliveDuringGarbageCollection(qobject);

        if (!keepAlive) {
            if (QObject *parent = qobject->parent()) {
                while (parent->parent())
                    parent = parent->parent();
                keepAlive = QQmlData::keepAliveDuringGarbageCollection(parent);
            }
        }

        if (keepAlive)
            qobjectWrapper->mark(that->mm->markStack());
    }
    return GCState::MarkWeakValues;
}

GCState markDrain(GCStateMachine *that, ExtraData &)
{
    if (that->deadline.isForever()) {
        that->mm->markStack()->drain();
        return GCState::MarkReady;
    }
    auto drainState = that->mm->m_markStack->drain(that->deadline);
    return drainState == MarkStack::DrainState::Complete
            ? GCState::MarkReady
            : GCState::MarkDrain;
}

GCState markReady(GCStateMachine *that, ExtraData &)
{
    auto isIncrementalRun = [](GCStateMachine* that){
        return !that->mm->aggressiveGC && that->timeLimit.count() > 0;
    };

    if (that->mm->crossValidateIncrementalGC && isIncrementalRun(that))
        return GCState::CrossValidateIncrementalMarkPhase;
    return GCState::InitCallDestroyObjects;
}
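// Debugging aid, enabled via QV4_MM_CROSS_VALIDATE_INCREMENTAL_GC: after an incremental mark
// phase, snapshot and clear the black bitmaps of all allocators, rerun the complete mark phase
// in one stop-the-world pass, and report every object that the non-incremental run marked as
// alive but the incremental run did not (i.e. objects the incremental GC would wrongly collect).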
GCState crossValidateIncrementalMarkPhase(GCStateMachine *that, ExtraData &)
{
    struct {
        Chunk* operator()(Chunk* chunk) { return chunk; }
        Chunk* operator()(const HugeItemAllocator::HugeChunk& chunk) { return chunk.chunk; }
    } getChunk{};

    auto takeBlackBitmap = [&getChunk](auto& allocator, std::vector<quintptr>& storage){
        for (auto chunk : allocator.chunks) {
            for (auto& bitmap : getChunk(chunk)->blackBitmap) {
                storage.push_back(bitmap);
            }
            getChunk(chunk)->resetBlackBits();
        }
    };

    auto runMarkPhase = [](GCStateMachine* that) {
        that->reset();
        that->mm->m_markStack.reset();

        while (that->state != GCStateMachine::MarkReady) {
            GCStateInfo& stateInfo = that->stateInfoMap[int(that->state)];
            that->state = stateInfo.execute(that, that->stateData);
        }
    };

    auto checkBlackBitmap = [&that, &getChunk](auto& allocator, const std::vector<quintptr>& storedBitmap) {
        auto reportError = [&allocator, &getChunk, &that](std::size_t chunk_index, std::size_t bitmap_index, uint bit_index){
            Q_UNUSED(that);
            auto object = reinterpret_cast<Heap::Base*>(getChunk(allocator.chunks[chunk_index])->realBase() + (bit_index + (bitmap_index*Chunk::Bits)));
            qDebug() << "Cross Validation Error on chunk" << chunk_index
                     << "on bitmap piece" << bitmap_index << "and bit" << bit_index
                     << ((object->internalClass) ? "With type" : "")
                     << ((object->internalClass) ?
                             Managed::typeToString(Managed::Type(object->internalClass->vtable->type)) : QString());

#ifdef QT_BUILD_INTERNAL
            that->bitmapErrors.emplace_back(chunk_index, bitmap_index, bit_index);
#endif
        };

        auto original = storedBitmap.begin();
        for (std::size_t chunk_index = 0; original != storedBitmap.end() && chunk_index < allocator.chunks.size(); ++chunk_index) {
            for (std::size_t bitmap_index = 0; bitmap_index < Chunk::EntriesInBitmap; ++bitmap_index) {
                if (auto differences = (~(*original)) & getChunk(allocator.chunks[chunk_index])->blackBitmap[bitmap_index]) {
                    while (differences != 0) {
                        uint bit_index = qCountTrailingZeroBits(differences);
                        reportError(chunk_index, bitmap_index, bit_index);
                        differences ^= quintptr{1} << bit_index;
                    }
                }
                ++original;
            }
        }
    };

#ifdef QT_BUILD_INTERNAL
    that->bitmapErrors.clear();
#endif

    std::vector<quintptr> blockBitmap{};
    blockBitmap.reserve(Chunk::EntriesInBitmap * that->mm->blockAllocator.chunks.size());
    takeBlackBitmap(that->mm->blockAllocator, blockBitmap);

    std::vector<quintptr> hugeItemBitmap{};
    hugeItemBitmap.reserve(Chunk::EntriesInBitmap * that->mm->hugeItemAllocator.chunks.size());
    takeBlackBitmap(that->mm->hugeItemAllocator, hugeItemBitmap);

    std::vector<quintptr> internalClassBitmap{};
    internalClassBitmap.reserve(Chunk::EntriesInBitmap * that->mm->icAllocator.chunks.size());
    takeBlackBitmap(that->mm->icAllocator, internalClassBitmap);

    runMarkPhase(that);

    checkBlackBitmap(that->mm->blockAllocator, blockBitmap);
    checkBlackBitmap(that->mm->hugeItemAllocator, hugeItemBitmap);
    checkBlackBitmap(that->mm->icAllocator, internalClassBitmap);

    return GCState::InitCallDestroyObjects;
}

/*! \internal
    collects new references from the stack, then drains the mark stack again
*/
void redrain(GCStateMachine *that)
{
    that->mm->collectFromJSStack(that->mm->markStack());
    that->mm->m_markStack->drain();
}

GCState initCallDestroyObjects(GCStateMachine *that, ExtraData &stateData)
{
    // as we don't have a deletion barrier, we need to rescan the stack
    redrain(that);
    if (!that->mm->m_weakValues)
        return GCState::FreeWeakMaps; // no need to call destroy objects
    stateData = GCIteratorStorage { that->mm->m_weakValues->begin() };
    return GCState::CallDestroyObjects;
}

GCState callDestroyObject(GCStateMachine *that, ExtraData &stateData)
{
    PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
    // destroyObject might call user code, which really shouldn't call back into the gc
    auto oldState = std::exchange(that->mm->gcBlocked, QV4::MemoryManager::Blockness::InCriticalSection);
    auto cleanup = qScopeGuard([&]() {
        that->mm->gcBlocked = oldState;
    });
    // avoid hitting the timer too often by batching iterations
    for (int i = 0; i < markLoopIterationCount; ++i) {
        if (!it.p)
            return GCState::FreeWeakMaps;
        Managed *m = (*it).managed();
        ++it;
        if (!m || m->markBit())
            continue;
        // we need to call destroyObject on qobjectwrappers now, so that they can emit the destroyed
        // signal before we start sweeping the heap
        if (QObjectWrapper *qobjectWrapper = m->as<QObjectWrapper>())
            qobjectWrapper->destroyObject(/*lastSweep =*/false);
    }
    return GCState::CallDestroyObjects;
}

void freeWeakMaps(MemoryManager *mm)
{
    for (auto [map, lastMap] = std::tuple {mm->weakMaps, &mm->weakMaps }; map; map = map->nextWeakMap) {
        if (!map->isMarked())
            continue;
        map->removeUnmarkedKeys();
        *lastMap = map;
        lastMap = &map->nextWeakMap;
    }
}

GCState freeWeakMaps(GCStateMachine *that, ExtraData &)
{
    freeWeakMaps(that->mm);
    return GCState::FreeWeakSets;
}

void freeWeakSets(MemoryManager *mm)
{
    for (auto [set, lastSet] = std::tuple {mm->weakSets, &mm->weakSets}; set; set = set->nextWeakSet) {
        if (!set->isMarked())
            continue;
        set->removeUnmarkedKeys();
        *lastSet = set;
        lastSet = &set->nextWeakSet;
    }
}

GCState freeWeakSets(GCStateMachine *that, ExtraData &)
{
    freeWeakSets(that->mm);
    return GCState::HandleQObjectWrappers;
}

GCState handleQObjectWrappers(GCStateMachine *that, ExtraData &)
{
    that->mm->cleanupDeletedQObjectWrappersInSweep();
    return GCState::DoSweep;
}

GCState doSweep(GCStateMachine *that, ExtraData &)
{
    auto mm = that->mm;

    mm->engine->identifierTable->sweep();
    mm->blockAllocator.sweep();
    mm->hugeItemAllocator.sweep(that->mm->gcCollectorStats ? increaseFreedCountForClass : nullptr);
    mm->icAllocator.sweep();

    // reset all black bits
    mm->blockAllocator.resetBlackBits();
    mm->hugeItemAllocator.resetBlackBits();
    mm->icAllocator.resetBlackBits();

    mm->usedSlotsAfterLastFullSweep = mm->blockAllocator.usedSlotsAfterLastSweep + mm->icAllocator.usedSlotsAfterLastSweep;
    mm->gcBlocked = MemoryManager::Unblocked;
    mm->m_markStack.reset();
    mm->engine->isGCOngoing = false;

    mm->updateUnmanagedHeapSizeGCLimit();

    return GCState::Invalid;
}

} // namespace
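// The MemoryManager ties the allocators and the GC state machine together. Its behavior can be
// influenced through the environment: QV4_MM_AGGRESSIVE_GC switches to aggressive,
// non-incremental collection, QV4_MM_CROSS_VALIDATE_INCREMENTAL_GC enables the mark-phase cross
// validation above, and the qt.qml.gc.* logging categories toggle the statistics gathered below.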
MemoryManager::MemoryManager(ExecutionEngine *engine)
    : engine(engine)
    , chunkAllocator(new ChunkAllocator)
    , blockAllocator(chunkAllocator, engine)
    , icAllocator(chunkAllocator, engine)
    , hugeItemAllocator(chunkAllocator, engine)
    , m_persistentValues(new PersistentValueStorage(engine))
    , m_weakValues(new PersistentValueStorage(engine))
    , unmanagedHeapSizeGCLimit(MinUnmanagedHeapSizeGCLimit)
    , aggressiveGC(!qEnvironmentVariableIsEmpty("QV4_MM_AGGRESSIVE_GC"))
    , crossValidateIncrementalGC(qEnvironmentVariableIsSet("QV4_MM_CROSS_VALIDATE_INCREMENTAL_GC"))
    , gcStats(lcGcStats().isDebugEnabled())
    , gcCollectorStats(lcGcAllocatorStats().isDebugEnabled())
{
#ifdef V4_USE_VALGRIND
    VALGRIND_CREATE_MEMPOOL(this, 0, true);
#endif
    memset(statistics.allocations, 0, sizeof(statistics.allocations));
    if (gcStats)
        blockAllocator.allocationStats = statistics.allocations;

    gcStateMachine = std::make_unique<GCStateMachine>();
    gcStateMachine->mm = this;

    gcStateMachine->stateInfoMap[GCState::MarkStart] = {
        markStart,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkGlobalObject] = {
        markGlobalObject,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkJSStack] = {
        markJSStack,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::InitMarkPersistentValues] = {
        initMarkPersistentValues,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkPersistentValues] = {
        markPersistentValues,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::InitMarkWeakValues] = {
        initMarkWeakValues,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkWeakValues] = {
        markWeakValues,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkDrain] = {
        markDrain,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkReady] = {
        markReady,
        false,
    };
|
Add a validation mode for the garbage collector
A new environment variable, "QV4_MM_CROSS_VALIDATE_INCREMENTAL_GC", is
introduced that turns on, when set, additional debug behavior in the GC.
In particular, when the mode is enabled and the GC is running in
incremental mode, after every mark phase, some additional validation will
be performed to catch some amount of issues that could be introduced by
the usage of the incremental garbage collector.
In more detail, a snapshot of which objects were marked by the
incremental run is stored, and the mark phase of the GC is then run a
second time in a stop-the-world manner.
The result of the second run is then compared to the original, making
sure that any presumably bug-induced discrepancies are logged.
Generally, any discrepancy should indicate something that has gone
wrong in one run or the other, possibly both independently.
Nonetheless, for this mode, we assume that non-incremental runs of the
GC can be considered to have a baseline correctness that can be used as
a verifier for the output of the mark phase of a GC run, and in
particular a run of the incremental mode of the GC.
A new state was added to the GC that is intended to run between the end
of the mark phase and the start of the sweep phase.
The implementation for the execution of the `markReady` state was
modified to traverse to this new state during an incremental run when
the relevant environment variable was set during the creation of a
`MemoryManager` instance.
The new state generally implements the algorithm described above.
2025-05-13 14:04:51 +00:00
|
|
|
gcStateMachine->stateInfoMap[GCState::CrossValidateIncrementalMarkPhase] = {
|
|
|
|
crossValidateIncrementalMarkPhase,
|
|
|
|
false,
|
|
|
|
};
|
2024-01-10 15:06:17 +00:00
|
|
|
gcStateMachine->stateInfoMap[GCState::InitCallDestroyObjects] = {
|
|
|
|
initCallDestroyObjects,
|
|
|
|
false,
|
|
|
|
};
|
|
|
|
gcStateMachine->stateInfoMap[GCState::CallDestroyObjects] = {
|
|
|
|
callDestroyObject,
|
|
|
|
false,
|
|
|
|
};
|
|
|
|
gcStateMachine->stateInfoMap[GCState::FreeWeakMaps] = {
|
|
|
|
freeWeakMaps,
|
|
|
|
false,
|
|
|
|
};
|
|
|
|
gcStateMachine->stateInfoMap[GCState::FreeWeakSets] = {
|
|
|
|
freeWeakSets,
|
|
|
|
true, // ensure that handleQObjectWrappers runs in isolation
|
|
|
|
};
|
|
|
|
gcStateMachine->stateInfoMap[GCState::HandleQObjectWrappers] = {
|
|
|
|
handleQObjectWrappers,
|
|
|
|
false,
|
|
|
|
};
|
|
|
|
gcStateMachine->stateInfoMap[GCState::DoSweep] = {
|
|
|
|
doSweep,
|
2023-11-21 18:36:26 +00:00
|
|
|
false,
|
|
|
|
};
|
2012-12-04 12:40:18 +00:00
|
|
|
}
|
|
|
|
|
2017-01-03 10:49:15 +00:00
|
|
|
Heap::Base *MemoryManager::allocString(std::size_t unmanagedSize)
|
2012-12-04 12:40:18 +00:00
|
|
|
{
|
2017-02-10 10:51:43 +00:00
|
|
|
const size_t stringSize = align(sizeof(Heap::String));
|
2017-02-13 12:38:48 +00:00
|
|
|
#ifdef MM_STATS
|
2017-02-10 10:51:43 +00:00
|
|
|
lastAllocRequestedSlots = stringSize >> Chunk::SlotSizeShift;
|
2017-02-13 12:38:48 +00:00
|
|
|
++allocationCount;
|
2017-02-10 10:51:43 +00:00
|
|
|
#endif
|
2017-01-03 11:06:25 +00:00
|
|
|
unmanagedHeapSize += unmanagedSize;
|
2012-12-04 12:40:18 +00:00
|
|
|
|
2019-03-20 16:15:29 +00:00
|
|
|
HeapItem *m = allocate(&blockAllocator, stringSize);
|
2017-01-04 09:49:23 +00:00
|
|
|
memset(m, 0, stringSize);
|
2017-01-03 10:49:15 +00:00
|
|
|
return *m;
|
|
|
|
}
|
2012-12-13 22:46:51 +00:00
|
|
|
|
2017-01-03 10:49:15 +00:00
|
|
|
Heap::Base *MemoryManager::allocData(std::size_t size)
|
|
|
|
{
|
2017-02-13 12:38:48 +00:00
|
|
|
#ifdef MM_STATS
|
2017-02-10 10:51:43 +00:00
|
|
|
lastAllocRequestedSlots = size >> Chunk::SlotSizeShift;
|
2017-02-13 12:38:48 +00:00
|
|
|
++allocationCount;
|
2017-02-10 10:51:43 +00:00
|
|
|
#endif
|
|
|
|
|
2017-01-03 10:49:15 +00:00
|
|
|
Q_ASSERT(size >= Chunk::SlotSize);
|
|
|
|
Q_ASSERT(size % Chunk::SlotSize == 0);
|
2015-01-09 17:52:56 +00:00
|
|
|
|
2019-03-20 16:15:29 +00:00
|
|
|
HeapItem *m = allocate(&blockAllocator, size);
|
2017-01-04 09:49:23 +00:00
|
|
|
memset(m, 0, size);
|
2017-01-03 10:49:15 +00:00
|
|
|
return *m;
|
2012-12-04 12:40:18 +00:00
|
|
|
}
|
|
|
|
|
2017-05-03 06:45:28 +00:00
|
|
|
Heap::Object *MemoryManager::allocObjectWithMemberData(const QV4::VTable *vtable, uint nMembers)
|
2017-01-04 11:45:45 +00:00
|
|
|
{
|
2017-05-03 06:45:28 +00:00
|
|
|
uint size = (vtable->nInlineProperties + vtable->inlinePropertyOffset)*sizeof(Value);
|
|
|
|
Q_ASSERT(!(size % sizeof(HeapItem)));
|
2017-01-04 11:45:45 +00:00
|
|
|
|
2017-02-15 11:23:20 +00:00
|
|
|
Heap::Object *o;
|
2017-05-29 08:30:39 +00:00
|
|
|
if (nMembers <= vtable->nInlineProperties) {
|
2017-02-15 11:23:20 +00:00
|
|
|
o = static_cast<Heap::Object *>(allocData(size));
|
|
|
|
} else {
|
|
|
|
// Allocate both in one go through the block allocator
|
2017-05-03 06:45:28 +00:00
|
|
|
nMembers -= vtable->nInlineProperties;
|
2017-01-04 11:45:45 +00:00
|
|
|
std::size_t memberSize = align(sizeof(Heap::MemberData) + (nMembers - 1)*sizeof(Value));
|
2017-02-15 11:23:20 +00:00
|
|
|
size_t totalSize = size + memberSize;
|
|
|
|
Heap::MemberData *m;
|
|
|
|
if (totalSize > Chunk::DataSize) {
|
|
|
|
o = static_cast<Heap::Object *>(allocData(size));
|
|
|
|
m = hugeItemAllocator.allocate(memberSize)->as<Heap::MemberData>();
|
|
|
|
} else {
|
|
|
|
HeapItem *mh = reinterpret_cast<HeapItem *>(allocData(totalSize));
|
|
|
|
Heap::Base *b = *mh;
|
|
|
|
o = static_cast<Heap::Object *>(b);
|
|
|
|
mh += (size >> Chunk::SlotSizeShift);
|
|
|
|
m = mh->as<Heap::MemberData>();
|
|
|
|
Chunk *c = mh->chunk();
|
|
|
|
size_t index = mh - c->realBase();
|
|
|
|
Chunk::setBit(c->objectBitmap, index);
|
|
|
|
Chunk::clearBit(c->extendsBitmap, index);
|
|
|
|
}
|
2018-01-05 14:30:23 +00:00
|
|
|
m->internalClass.set(engine, engine->internalClasses(EngineBase::Class_MemberData));
|
2024-10-10 15:05:45 +00:00
|
|
|
o->memberData.set(engine, m);
|
2017-05-12 13:12:45 +00:00
|
|
|
Q_ASSERT(o->memberData->internalClass);
|
2017-02-15 11:23:20 +00:00
|
|
|
m->values.alloc = static_cast<uint>((memberSize - sizeof(Heap::MemberData) + sizeof(Value))/sizeof(Value));
|
|
|
|
m->values.size = o->memberData->values.alloc;
|
|
|
|
m->init();
|
2017-01-04 11:45:45 +00:00
|
|
|
}
|
2025-05-29 08:51:12 +00:00
|
|
|
|
2017-01-04 11:45:45 +00:00
|
|
|
return o;
|
|
|
|
}
|
|
|
|
|
2017-02-13 12:38:48 +00:00
|
|
|
static uint markStackSize = 0;
|
|
|
|
|
2017-03-09 09:36:16 +00:00
|
|
|
MarkStack::MarkStack(ExecutionEngine *engine)
|
2020-02-17 14:30:54 +00:00
|
|
|
: m_engine(engine)
|
2017-03-09 09:36:16 +00:00
|
|
|
{
|
2020-02-17 14:30:54 +00:00
|
|
|
m_base = (Heap::Base **)engine->gcStack->base();
|
|
|
|
m_top = m_base;
|
|
|
|
const size_t size = engine->maxGCStackSize() / sizeof(Heap::Base);
|
|
|
|
m_hardLimit = m_base + size;
|
|
|
|
m_softLimit = m_base + size * 3 / 4;
|
2017-03-09 09:36:16 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void MarkStack::drain()
|
2014-10-21 09:05:00 +00:00
|
|
|
{
|
2023-11-21 18:36:26 +00:00
|
|
|
// we're not calling drain(QDeadlineTimer::Forever) as that has higher overhead
|
2020-02-17 14:30:54 +00:00
|
|
|
while (m_top > m_base) {
|
2017-03-09 09:36:16 +00:00
|
|
|
Heap::Base *h = pop();
|
2017-02-13 12:38:48 +00:00
|
|
|
++markStackSize;
|
2016-05-26 15:46:24 +00:00
|
|
|
Q_ASSERT(h); // at this point we should only have Heap::Base objects in this area on the stack. If not, weird things might happen.
|
2024-10-10 15:05:45 +00:00
|
|
|
Q_ASSERT(h->internalClass);
|
2018-01-08 11:00:00 +00:00
|
|
|
h->internalClass->vtable->markObjects(h, this);
|
2014-10-21 09:05:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-11-21 18:36:26 +00:00
|
|
|
MarkStack::DrainState MarkStack::drain(QDeadlineTimer deadline)
|
2012-12-04 12:40:18 +00:00
|
|
|
{
|
2023-11-21 18:36:26 +00:00
|
|
|
do {
|
|
|
|
for (int i = 0; i <= markLoopIterationCount * 10; ++i) {
|
|
|
|
if (m_top == m_base)
|
|
|
|
return DrainState::Complete;
|
|
|
|
Heap::Base *h = pop();
|
|
|
|
++markStackSize;
|
|
|
|
Q_ASSERT(h); // at this point we should only have Heap::Base objects in this area on the stack. If not, weird things might happen.
|
2024-10-10 15:05:45 +00:00
|
|
|
Q_ASSERT(h->internalClass);
|
2023-11-21 18:36:26 +00:00
|
|
|
h->internalClass->vtable->markObjects(h, this);
|
2013-05-24 11:19:15 +00:00
|
|
|
}
|
2023-11-21 18:36:26 +00:00
|
|
|
} while (!deadline.hasExpired());
|
|
|
|
return DrainState::Ongoing;
|
|
|
|
}
|
2013-05-24 11:19:15 +00:00
|
|
|
|
2024-10-10 15:05:45 +00:00
|
|
|
void MarkStack::setSoftLimit(size_t size)
|
|
|
|
{
|
|
|
|
m_softLimit = m_base + size;
|
|
|
|
Q_ASSERT(m_softLimit < m_hardLimit);
|
|
|
|
}
|
|
|
|
|
2023-11-21 18:36:26 +00:00
|
|
|
void MemoryManager::onEventLoop()
|
|
|
|
{
|
|
|
|
if (engine->inShutdown)
|
|
|
|
return;
|
2024-02-09 20:21:05 +00:00
|
|
|
if (gcBlocked == InCriticalSection) {
|
|
|
|
QMetaObject::invokeMethod(engine->publicEngine, [this]{
|
|
|
|
onEventLoop();
|
|
|
|
}, Qt::QueuedConnection);
|
|
|
|
return;
|
|
|
|
}
|
2023-11-21 18:36:26 +00:00
|
|
|
if (gcStateMachine->inProgress()) {
|
2024-01-15 15:36:50 +00:00
|
|
|
gcStateMachine->step();
|
2013-05-24 11:19:15 +00:00
|
|
|
}
|
2017-03-09 09:36:16 +00:00
|
|
|
}
|
|
|
|
|
2023-11-21 18:36:26 +00:00
|
|
|
|
|
|
|
void MemoryManager::setGCTimeLimit(int timeMs)
|
2017-03-09 09:36:16 +00:00
|
|
|
{
|
2023-11-21 18:36:26 +00:00
|
|
|
gcStateMachine->timeLimit = std::chrono::milliseconds(timeMs);
|
2012-12-04 12:40:18 +00:00
|
|
|
}
|
|
|
|
|
2017-02-14 13:03:56 +00:00
|
|
|
void MemoryManager::sweep(bool lastSweep, ClassDestroyStatsCallback classCountPtr)
|
2012-12-04 12:40:18 +00:00
|
|
|
{
|
2023-11-21 18:36:26 +00:00
|
|
|
|
2015-08-07 12:26:43 +00:00
|
|
|
for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
|
2016-05-26 15:46:24 +00:00
|
|
|
Managed *m = (*it).managed();
|
2016-11-24 14:39:07 +00:00
|
|
|
if (!m || m->markBit())
|
2015-08-07 12:26:43 +00:00
|
|
|
continue;
|
2016-07-25 14:07:16 +00:00
|
|
|
// we need to call destroyObject on qobjectwrappers now, so that they can emit the destroyed
|
2015-08-07 12:26:43 +00:00
|
|
|
// signal before we start sweeping the heap
|
2023-11-21 18:36:26 +00:00
|
|
|
if (QObjectWrapper *qobjectWrapper = (*it).as<QObjectWrapper>()) {
|
2015-08-07 12:26:43 +00:00
|
|
|
qobjectWrapper->destroyObject(lastSweep);
|
2023-11-21 18:36:26 +00:00
|
|
|
}
|
2013-05-23 20:13:42 +00:00
|
|
|
}
|
|
|
|
|
2024-01-10 15:06:17 +00:00
|
|
|
freeWeakMaps(this);
|
|
|
|
freeWeakSets(this);
|
2018-08-26 13:07:50 +00:00
|
|
|
|
2024-01-10 15:06:17 +00:00
|
|
|
cleanupDeletedQObjectWrappersInSweep();
|
2023-12-12 22:14:29 +00:00
|
|
|
|
2024-01-10 15:06:17 +00:00
|
|
|
if (!lastSweep) {
|
|
|
|
engine->identifierTable->sweep();
|
|
|
|
blockAllocator.sweep(/*classCountPtr*/);
|
|
|
|
hugeItemAllocator.sweep(classCountPtr);
|
|
|
|
icAllocator.sweep(/*classCountPtr*/);
|
2018-08-26 15:50:44 +00:00
|
|
|
}
|
|
|
|
|
2024-01-10 15:06:17 +00:00
|
|
|
// reset all black bits
|
|
|
|
blockAllocator.resetBlackBits();
|
|
|
|
hugeItemAllocator.resetBlackBits();
|
|
|
|
icAllocator.resetBlackBits();
|
|
|
|
|
|
|
|
usedSlotsAfterLastFullSweep = blockAllocator.usedSlotsAfterLastSweep + icAllocator.usedSlotsAfterLastSweep;
|
2024-01-17 15:19:54 +00:00
|
|
|
updateUnmanagedHeapSizeGCLimit();
|
|
|
|
gcBlocked = MemoryManager::Unblocked;
|
2024-01-10 15:06:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
\internal
|
|
|
|
Helper function used in sweep to clean up the (to-be-freed) QObjectWrapper
|
|
|
|
Used both in MemoryManager::sweep and in the corresponding GC state machine phase
|
|
|
|
*/
|
|
|
|
void MemoryManager::cleanupDeletedQObjectWrappersInSweep()
|
|
|
|
{
|
2016-07-25 14:07:16 +00:00
|
|
|
// onDestruction handlers may have accessed other QObject wrappers and reset their value, so ensure
|
|
|
|
// that they are all set to undefined.
|
|
|
|
for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
|
2016-11-24 14:39:07 +00:00
|
|
|
Managed *m = (*it).managed();
|
|
|
|
if (!m || m->markBit())
|
2016-07-25 14:07:16 +00:00
|
|
|
continue;
|
2018-09-11 09:07:32 +00:00
|
|
|
(*it) = Value::undefinedValue();
|
2013-05-23 20:13:42 +00:00
|
|
|
}
|
|
|
|
|
2015-12-25 13:36:46 +00:00
|
|
|
// Now it is time to free the QV4::QObjectWrapper Values; we must check each Value's tag to make sure its object has been destroyed
|
2022-10-05 05:29:16 +00:00
|
|
|
const int pendingCount = m_pendingFreedObjectWrapperValue.size();
|
2015-12-25 13:36:46 +00:00
|
|
|
if (pendingCount) {
|
|
|
|
QVector<Value *> remainingWeakQObjectWrappers;
|
|
|
|
remainingWeakQObjectWrappers.reserve(pendingCount);
|
|
|
|
for (int i = 0; i < pendingCount; ++i) {
|
|
|
|
Value *v = m_pendingFreedObjectWrapperValue.at(i);
|
2016-10-12 09:15:09 +00:00
|
|
|
if (v->isUndefined() || v->isEmpty())
|
2015-12-25 13:36:46 +00:00
|
|
|
PersistentValueStorage::free(v);
|
|
|
|
else
|
|
|
|
remainingWeakQObjectWrappers.append(v);
|
|
|
|
}
|
|
|
|
m_pendingFreedObjectWrapperValue = remainingWeakQObjectWrappers;
|
|
|
|
}
|
|
|
|
|
2015-08-28 11:48:52 +00:00
|
|
|
if (MultiplyWrappedQObjectMap *multiplyWrappedQObjects = engine->m_multiplyWrappedQObjects) {
|
2013-06-04 12:28:13 +00:00
|
|
|
for (MultiplyWrappedQObjectMap::Iterator it = multiplyWrappedQObjects->begin(); it != multiplyWrappedQObjects->end();) {
|
2021-10-10 18:04:21 +00:00
|
|
|
if (it.value().isNullOrUndefined())
|
2013-06-04 12:28:13 +00:00
|
|
|
it = multiplyWrappedQObjects->erase(it);
|
|
|
|
else
|
|
|
|
++it;
|
|
|
|
}
|
|
|
|
}
|
2017-01-03 10:49:15 +00:00
|
|
|
}
|
2013-11-14 11:05:42 +00:00
|
|
|
|
2017-01-03 10:49:15 +00:00
|
|
|
bool MemoryManager::shouldRunGC() const
|
|
|
|
{
|
2018-09-11 19:54:56 +00:00
|
|
|
size_t total = blockAllocator.totalSlots() + icAllocator.totalSlots();
|
2017-02-16 09:52:48 +00:00
|
|
|
if (total > MinSlotsGCLimit && usedSlotsAfterLastFullSweep * GCOverallocation < total * 100)
|
2017-01-03 10:49:15 +00:00
|
|
|
return true;
|
|
|
|
return false;
|
2012-12-04 12:40:18 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 13:34:14 +00:00
|
|
|
static size_t dumpBins(BlockAllocator *b, const char *title)
|
2017-02-10 10:51:43 +00:00
|
|
|
{
|
2017-11-27 09:53:33 +00:00
|
|
|
const QLoggingCategory &stats = lcGcAllocatorStats();
|
2017-02-13 12:38:48 +00:00
|
|
|
size_t totalSlotMem = 0;
|
2019-03-26 13:34:14 +00:00
|
|
|
if (title)
|
|
|
|
qDebug(stats) << "Slot map for" << title << "allocator:";
|
2017-02-10 10:51:43 +00:00
|
|
|
for (uint i = 0; i < BlockAllocator::NumBins; ++i) {
|
|
|
|
uint nEntries = 0;
|
|
|
|
HeapItem *h = b->freeBins[i];
|
|
|
|
while (h) {
|
|
|
|
++nEntries;
|
2017-02-13 12:38:48 +00:00
|
|
|
totalSlotMem += h->freeData.availableSlots;
|
2017-02-10 10:51:43 +00:00
|
|
|
h = h->freeData.next;
|
|
|
|
}
|
2019-03-26 13:34:14 +00:00
|
|
|
if (title)
|
2017-11-27 09:53:33 +00:00
|
|
|
qDebug(stats) << " number of entries in slot" << i << ":" << nEntries;
|
2017-02-10 10:51:43 +00:00
|
|
|
}
|
2017-04-27 17:37:35 +00:00
|
|
|
SDUMP() << " large slot map";
|
|
|
|
HeapItem *h = b->freeBins[BlockAllocator::NumBins - 1];
|
|
|
|
while (h) {
|
2019-06-26 14:46:23 +00:00
|
|
|
SDUMP() << " " << Qt::hex << (quintptr(h)/32) << h->freeData.availableSlots;
|
2017-04-27 17:37:35 +00:00
|
|
|
h = h->freeData.next;
|
|
|
|
}
|
|
|
|
|
2019-03-26 13:34:14 +00:00
|
|
|
if (title)
|
2018-01-19 09:49:56 +00:00
|
|
|
qDebug(stats) << " total mem in bins" << totalSlotMem*Chunk::SlotSize;
|
2017-02-13 12:38:48 +00:00
|
|
|
return totalSlotMem*Chunk::SlotSize;
|
2017-02-10 10:51:43 +00:00
|
|
|
}
|
2017-01-03 10:49:15 +00:00
|
|
|
|
2024-01-15 15:36:50 +00:00
|
|
|
/*!
|
|
|
|
\internal
|
|
|
|
Precondition: Incremental garbage collection must be currently active
|
|
|
|
Finishes incremental garbage collection, unless in a critical section
|
|
|
|
Code entering a critical section is expected to check if we need to
|
|
|
|
force a gc completion, and to trigger the gc again if necessary
|
|
|
|
when exiting the critical section.
|
|
|
|
Returns \c true if the gc cycle completed, false otherwise.
|
|
|
|
*/
|
|
|
|
bool MemoryManager::tryForceGCCompletion()
|
|
|
|
{
|
2024-09-02 13:18:32 +00:00
|
|
|
if (gcBlocked == InCriticalSection) {
|
|
|
|
qCDebug(lcGcForcedRuns)
|
|
|
|
<< "Tried to force the GC to complete a run but failed due to being in a critical section.";
|
2024-01-15 15:36:50 +00:00
|
|
|
return false;
|
2024-09-02 13:18:32 +00:00
|
|
|
}
|
|
|
|
|
2024-01-15 15:36:50 +00:00
|
|
|
const bool incrementalGCIsAlreadyRunning = m_markStack != nullptr;
|
|
|
|
Q_ASSERT(incrementalGCIsAlreadyRunning);
|
2024-09-02 13:18:32 +00:00
|
|
|
|
|
|
|
qCDebug(lcGcForcedRuns) << "Forcing the GC to complete a run.";
|
|
|
|
|
2024-01-15 15:36:50 +00:00
|
|
|
auto oldTimeLimit = std::exchange(gcStateMachine->timeLimit, std::chrono::microseconds::max());
|
2024-04-23 15:21:58 +00:00
|
|
|
while (gcStateMachine->inProgress()) {
|
2024-01-15 15:36:50 +00:00
|
|
|
gcStateMachine->step();
|
2024-04-23 15:21:58 +00:00
|
|
|
}
|
2024-01-15 15:36:50 +00:00
|
|
|
gcStateMachine->timeLimit = oldTimeLimit;
|
|
|
|
return true;
|
|
|
|
}
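As a hedged illustration of the contract documented above, the following self-contained sketch shows how code entering a critical section might block the GC and retry completion of a pending incremental cycle on exit. The types and members (`MemoryManagerLike`, `CriticalSectionGuard`, `incrementalCycleActive`) are hypothetical and do not correspond to the actual QV4 API.

```cpp
// Hypothetical sketch only: illustrates "check on entry, retry on exit" for a
// GC critical section. Not the real MemoryManager interface.
#include <cassert>

struct MemoryManagerLike {
    enum Blockness { Unblocked, NormalBlocked, InCriticalSection };
    Blockness gcBlocked = Unblocked;
    bool incrementalCycleActive = false; // precondition for forcing completion

    bool tryForceGCCompletion()
    {
        if (gcBlocked == InCriticalSection)
            return false;               // refuse to finish inside a critical section
        incrementalCycleActive = false; // stand-in for driving the state machine to the end
        return true;
    }
};

class CriticalSectionGuard {
public:
    explicit CriticalSectionGuard(MemoryManagerLike &mm)
        : m_mm(mm), m_saved(mm.gcBlocked)
    {
        m_mm.gcBlocked = MemoryManagerLike::InCriticalSection;
    }
    ~CriticalSectionGuard()
    {
        m_mm.gcBlocked = m_saved;
        // On exit, finish any incremental cycle that had to be postponed.
        if (m_mm.incrementalCycleActive)
            m_mm.tryForceGCCompletion();
    }
private:
    MemoryManagerLike &m_mm;
    MemoryManagerLike::Blockness m_saved;
};

int main()
{
    MemoryManagerLike mm;
    mm.incrementalCycleActive = true;
    {
        CriticalSectionGuard guard(mm);
        assert(!mm.tryForceGCCompletion()); // forcing completion fails inside the section
    }
    assert(!mm.incrementalCycleActive);     // the cycle was completed on exit
    return 0;
}
```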
|
|
|
|
|
2024-03-18 13:28:33 +00:00
|
|
|
void MemoryManager::runFullGC()
|
|
|
|
{
|
|
|
|
runGC();
|
|
|
|
const bool incrementalGCStillRunning = m_markStack != nullptr;
|
|
|
|
if (incrementalGCStillRunning)
|
|
|
|
tryForceGCCompletion();
|
|
|
|
}
|
|
|
|
|
2017-04-04 08:35:45 +00:00
|
|
|
void MemoryManager::runGC()
|
2012-12-04 12:40:18 +00:00
|
|
|
{
|
2024-02-09 20:21:05 +00:00
|
|
|
if (gcBlocked != Unblocked) {
|
2013-01-02 15:43:47 +00:00
|
|
|
return;
|
2012-12-04 12:40:18 +00:00
|
|
|
}
|
|
|
|
|
2024-02-09 20:21:05 +00:00
|
|
|
gcBlocked = MemoryManager::NormalBlocked;
|
2016-07-25 07:25:11 +00:00
|
|
|
|
2017-11-27 09:53:33 +00:00
|
|
|
if (gcStats) {
|
|
|
|
statistics.maxReservedMem = qMax(statistics.maxReservedMem, getAllocatedMem());
|
|
|
|
statistics.maxAllocatedMem = qMax(statistics.maxAllocatedMem, getUsedMem() + getLargeItemsMem());
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!gcCollectorStats) {
|
2023-11-21 18:36:26 +00:00
|
|
|
gcStateMachine->step();
|
2014-03-25 08:46:43 +00:00
|
|
|
} else {
|
2017-02-10 10:51:43 +00:00
|
|
|
bool triggeredByUnmanagedHeap = (unmanagedHeapSize > unmanagedHeapSizeGCLimit);
|
|
|
|
size_t oldUnmanagedSize = unmanagedHeapSize;
|
2017-11-27 09:53:33 +00:00
|
|
|
|
QML: Fix MSVC 2013/64bit warnings.
compiler\qv4ssa.cpp(687) : warning C4267: 'argument' : conversion from 'size_t' to 'int', possible loss of data
compiler\qv4ssa.cpp(950) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
compiler\qv4ssa.cpp(1117) : warning C4267: 'return' : conversion from 'size_t' to 'unsigned int', possible loss of data
compiler\qv4ssa.cpp(1120) : warning C4267: 'return' : conversion from 'size_t' to 'unsigned int', possible loss of data
compiler\qv4ssa.cpp(1148) : warning C4267: 'initializing' : conversion from 'size_t' to 'unsigned int', possible loss of data
compiler\qv4ssa.cpp(1266) : warning C4267: 'initializing' : conversion from 'size_t' to 'unsigned int', possible loss of data
compiler\qv4ssa.cpp(1622) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
compiler\qv4ssa.cpp(2246) : warning C4267: 'initializing' : conversion from 'size_t' to 'unsigned int', possible loss of data
compiler\qv4ssa.cpp(4289) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
compiler\qv4ssa.cpp(4351) : warning C4267: 'initializing' : conversion from 'size_t' to 'unsigned int', possible loss of data
jit\qv4regalloc.cpp(1383) : warning C4267: 'argument' : conversion from 'size_t' to 'int', possible loss of data
jit\qv4regalloc.cpp(1769) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
jit\qv4regalloc.cpp(1814) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
jsruntime\qv4mm.cpp(496) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
jsruntime\qv4mm.cpp(503) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
jsruntime\qv4mm.cpp(506) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
jsruntime\qv4regexp.cpp(60) : warning C4267: 'return' : conversion from 'size_t' to 'uint', possible loss of data
jsruntime\qv4typedarray.cpp(85) : warning C4309: '=' : truncation of constant value
Change-Id: I0b04e1a9d379c068fb3efe90a9db8b592061e448
Reviewed-by: Erik Verbruggen <erik.verbruggen@theqtcompany.com>
Reviewed-by: Simon Hausmann <simon.hausmann@digia.com>
2015-01-22 08:38:22 +00:00
|
|
|
const size_t totalMem = getAllocatedMem();
|
2017-02-10 10:51:43 +00:00
|
|
|
const size_t usedBefore = getUsedMem();
|
|
|
|
const size_t largeItemsBefore = getLargeItemsMem();
|
|
|
|
|
2017-11-27 09:53:33 +00:00
|
|
|
const QLoggingCategory &stats = lcGcAllocatorStats();
|
|
|
|
qDebug(stats) << "========== GC ==========";
|
2017-02-13 12:38:48 +00:00
|
|
|
#ifdef MM_STATS
|
2017-11-27 09:53:33 +00:00
|
|
|
qDebug(stats) << " Triggered by alloc request of" << lastAllocRequestedSlots << "slots.";
|
2018-01-19 09:49:56 +00:00
|
|
|
qDebug(stats) << " Allocations since last GC" << allocationCount;
|
2017-02-15 11:23:20 +00:00
|
|
|
allocationCount = 0;
|
2017-02-10 10:51:43 +00:00
|
|
|
#endif
|
2017-05-16 10:42:42 +00:00
|
|
|
size_t oldChunks = blockAllocator.chunks.size();
|
2017-11-27 09:53:33 +00:00
|
|
|
qDebug(stats) << "Allocated" << totalMem << "bytes in" << oldChunks << "chunks";
|
|
|
|
qDebug(stats) << "Fragmented memory before GC" << (totalMem - usedBefore);
|
2019-03-26 13:34:14 +00:00
|
|
|
dumpBins(&blockAllocator, "Block");
|
|
|
|
dumpBins(&icAllocator, "InternalClass");
|
2014-03-25 08:46:43 +00:00
|
|
|
|
2016-08-02 11:25:35 +00:00
|
|
|
QElapsedTimer t;
|
2014-03-25 08:46:43 +00:00
|
|
|
t.start();
|
2023-11-21 18:36:26 +00:00
|
|
|
gcStateMachine->step();
|
2017-02-16 09:25:52 +00:00
|
|
|
qint64 markTime = t.nsecsElapsed()/1000;
|
2025-03-14 11:56:19 +00:00
|
|
|
t.start();
|
2015-01-22 08:38:22 +00:00
|
|
|
const size_t usedAfter = getUsedMem();
|
2015-09-17 13:29:52 +00:00
|
|
|
const size_t largeItemsAfter = getLargeItemsMem();
|
2014-03-25 08:46:43 +00:00
|
|
|
|
2017-02-10 10:51:43 +00:00
|
|
|
if (triggeredByUnmanagedHeap) {
|
2017-11-27 09:53:33 +00:00
|
|
|
qDebug(stats) << "triggered by unmanaged heap:";
|
|
|
|
qDebug(stats) << " old unmanaged heap size:" << oldUnmanagedSize;
|
|
|
|
qDebug(stats) << " new unmanaged heap:" << unmanagedHeapSize;
|
|
|
|
qDebug(stats) << " unmanaged heap limit:" << unmanagedHeapSizeGCLimit;
|
2017-02-10 10:51:43 +00:00
|
|
|
}
|
2019-03-26 13:34:14 +00:00
|
|
|
size_t memInBins = dumpBins(&blockAllocator, "Block")
|
|
|
|
+ dumpBins(&icAllocator, "InternalClass");
|
2018-01-19 09:49:56 +00:00
|
|
|
qDebug(stats) << "Marked object in" << markTime << "us.";
|
|
|
|
qDebug(stats) << " " << markStackSize << "objects marked";
|
2017-02-14 13:03:56 +00:00
|
|
|
|
|
|
|
// sort our object types by number of freed instances
|
|
|
|
MMStatsHash freedObjectStats;
|
|
|
|
std::swap(freedObjectStats, *freedObjectStatsGlobal());
|
|
|
|
typedef std::pair<const char*, int> ObjectStatInfo;
|
|
|
|
std::vector<ObjectStatInfo> freedObjectsSorted;
|
Port from container::count() and length() to size() - V5
This is a semantic patch using ClangTidyTransformator as in
qtbase/df9d882d41b741fef7c5beeddb0abe9d904443d8, but extended to
handle typedefs and accesses through pointers, too:
const std::string o = "object";
auto hasTypeIgnoringPointer = [](auto type) { return anyOf(hasType(type), hasType(pointsTo(type))); };
auto derivedFromAnyOfClasses = [&](ArrayRef<StringRef> classes) {
auto exprOfDeclaredType = [&](auto decl) {
return expr(hasTypeIgnoringPointer(hasUnqualifiedDesugaredType(recordType(hasDeclaration(decl))))).bind(o);
};
return exprOfDeclaredType(cxxRecordDecl(isSameOrDerivedFrom(hasAnyName(classes))));
};
auto renameMethod = [&] (ArrayRef<StringRef> classes,
StringRef from, StringRef to) {
return makeRule(cxxMemberCallExpr(on(derivedFromAnyOfClasses(classes)),
callee(cxxMethodDecl(hasName(from), parameterCountIs(0)))),
changeTo(cat(access(o, cat(to)), "()")),
cat("use '", to, "' instead of '", from, "'"));
};
renameMethod(<classes>, "count", "size");
renameMethod(<classes>, "length", "size");
except that on() was replaced with a matcher that doesn't ignoreParens().
a.k.a qt-port-to-std-compatible-api V5 with config Scope: 'Container'.
Change-Id: I58e1b41b91c34d2e860dbb5847b3752edbfc6fc9
Reviewed-by: Qt CI Bot <qt_ci_bot@qt-project.org>
Reviewed-by: Ulf Hermann <ulf.hermann@qt.io>
2022-10-08 17:56:03 +00:00
|
|
|
freedObjectsSorted.reserve(freedObjectStats.size());
|
2017-02-14 13:03:56 +00:00
|
|
|
for (auto it = freedObjectStats.constBegin(); it != freedObjectStats.constEnd(); ++it) {
|
|
|
|
freedObjectsSorted.push_back(std::make_pair(it.key(), it.value()));
|
|
|
|
}
|
|
|
|
std::sort(freedObjectsSorted.begin(), freedObjectsSorted.end(), [](const ObjectStatInfo &a, const ObjectStatInfo &b) {
|
|
|
|
return a.second != b.second ? a.second > b.second : strcmp(a.first, b.first) < 0;
|
|
|
|
});
|
|
|
|
|
2017-11-27 09:53:33 +00:00
|
|
|
qDebug(stats) << "Used memory before GC:" << usedBefore;
|
|
|
|
qDebug(stats) << "Used memory after GC:" << usedAfter;
|
2018-01-19 09:49:56 +00:00
|
|
|
qDebug(stats) << "Freed up bytes :" << (usedBefore - usedAfter);
|
|
|
|
qDebug(stats) << "Freed up chunks :" << (oldChunks - blockAllocator.chunks.size());
|
2019-03-26 13:34:14 +00:00
|
|
|
size_t lost = blockAllocator.allocatedMem() + icAllocator.allocatedMem()
|
|
|
|
- memInBins - usedAfter;
|
2017-02-10 10:51:43 +00:00
|
|
|
if (lost)
|
2017-11-27 09:53:33 +00:00
|
|
|
qDebug(stats) << "!!!!!!!!!!!!!!!!!!!!! LOST MEM:" << lost << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!";
|
2017-02-10 10:51:43 +00:00
|
|
|
if (largeItemsBefore || largeItemsAfter) {
|
2017-11-27 09:53:33 +00:00
|
|
|
qDebug(stats) << "Large item memory before GC:" << largeItemsBefore;
|
|
|
|
qDebug(stats) << "Large item memory after GC:" << largeItemsAfter;
|
|
|
|
qDebug(stats) << "Large item memory freed up:" << (largeItemsBefore - largeItemsAfter);
|
2017-02-10 10:51:43 +00:00
|
|
|
}
|
2017-02-14 13:03:56 +00:00
|
|
|
|
|
|
|
for (auto it = freedObjectsSorted.cbegin(); it != freedObjectsSorted.cend(); ++it) {
|
2018-01-19 09:49:56 +00:00
|
|
|
qDebug(stats).noquote() << QString::fromLatin1("Freed JS type: %1 (%2 instances)").arg(QString::fromLatin1(it->first), QString::number(it->second));
|
2017-02-14 13:03:56 +00:00
|
|
|
}
|
|
|
|
|
2017-11-27 09:53:33 +00:00
|
|
|
qDebug(stats) << "======== End GC ========";
|
2014-03-25 08:46:43 +00:00
|
|
|
}
|
2017-02-10 10:51:43 +00:00
|
|
|
|
2017-11-27 09:53:33 +00:00
|
|
|
if (gcStats)
|
|
|
|
statistics.maxUsedMem = qMax(statistics.maxUsedMem, getUsedMem() + getLargeItemsMem());
|
2012-12-04 12:40:18 +00:00
|
|
|
}
|
|
|
|
|
2014-06-12 12:33:05 +00:00
|
|
|
size_t MemoryManager::getUsedMem() const
|
2014-03-25 08:46:43 +00:00
|
|
|
{
|
2018-05-30 13:47:37 +00:00
|
|
|
return blockAllocator.usedMem() + icAllocator.usedMem();
|
2014-03-25 08:46:43 +00:00
|
|
|
}
|
|
|
|
|
2014-06-12 12:33:05 +00:00
|
|
|
size_t MemoryManager::getAllocatedMem() const
|
|
|
|
{
|
2018-05-30 13:47:37 +00:00
|
|
|
return blockAllocator.allocatedMem() + icAllocator.allocatedMem() + hugeItemAllocator.usedMem();
|
2014-06-12 12:33:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
size_t MemoryManager::getLargeItemsMem() const
|
|
|
|
{
|
2017-01-20 10:36:16 +00:00
|
|
|
return hugeItemAllocator.usedMem();
|
2014-06-12 12:33:05 +00:00
|
|
|
}
|
|
|
|
|
2024-01-17 15:19:54 +00:00
|
|
|
void MemoryManager::updateUnmanagedHeapSizeGCLimit()
|
|
|
|
{
|
|
|
|
if (3*unmanagedHeapSizeGCLimit <= 4 * unmanagedHeapSize) {
|
|
|
|
// more than 75% full, raise limit
|
|
|
|
unmanagedHeapSizeGCLimit = std::max(unmanagedHeapSizeGCLimit,
|
|
|
|
unmanagedHeapSize) * 2;
|
|
|
|
} else if (unmanagedHeapSize * 4 <= unmanagedHeapSizeGCLimit) {
|
|
|
|
// less than 25% full, lower limit
|
|
|
|
unmanagedHeapSizeGCLimit = qMax(std::size_t(MinUnmanagedHeapSizeGCLimit),
|
|
|
|
unmanagedHeapSizeGCLimit/2);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (aggressiveGC && !engine->inShutdown) {
|
|
|
|
// ensure we don't 'lose' any memory
|
|
|
|
// but not during shutdown, because then we skip parts of sweep
|
|
|
|
// and use freeAll instead
|
|
|
|
Q_ASSERT(blockAllocator.allocatedMem()
|
|
|
|
== blockAllocator.usedMem() + dumpBins(&blockAllocator, nullptr));
|
|
|
|
Q_ASSERT(icAllocator.allocatedMem()
|
|
|
|
== icAllocator.usedMem() + dumpBins(&icAllocator, nullptr));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-26 13:07:50 +00:00
|
|
|
void MemoryManager::registerWeakMap(Heap::MapObject *map)
|
|
|
|
{
|
|
|
|
map->nextWeakMap = weakMaps;
|
|
|
|
weakMaps = map;
|
|
|
|
}
|
|
|
|
|
2018-08-26 15:50:44 +00:00
|
|
|
void MemoryManager::registerWeakSet(Heap::SetObject *set)
|
|
|
|
{
|
|
|
|
set->nextWeakSet = weakSets;
|
|
|
|
weakSets = set;
|
|
|
|
}
|
|
|
|
|
2012-12-04 12:40:18 +00:00
|
|
|
MemoryManager::~MemoryManager()
|
|
|
|
{
|
2015-01-12 20:55:51 +00:00
|
|
|
delete m_persistentValues;
|
2017-11-27 09:53:33 +00:00
|
|
|
dumpStats();
|
|
|
|
|
2023-11-21 18:36:26 +00:00
|
|
|
// do one last non-incremental sweep to clean up C++ objects
|
|
|
|
// first, abort any on-going incremental gc operation
|
|
|
|
setGCTimeLimit(-1);
|
|
|
|
if (engine->isGCOngoing) {
|
|
|
|
engine->isGCOngoing = false;
|
|
|
|
m_markStack.reset();
|
|
|
|
gcStateMachine->state = GCState::Invalid;
|
|
|
|
blockAllocator.resetBlackBits();
|
|
|
|
hugeItemAllocator.resetBlackBits();
|
|
|
|
icAllocator.resetBlackBits();
|
|
|
|
}
|
|
|
|
// then sweep
|
2013-06-13 13:27:00 +00:00
|
|
|
sweep(/*lastSweep*/true);
|
2023-11-21 18:36:26 +00:00
|
|
|
|
2017-01-20 10:36:16 +00:00
|
|
|
blockAllocator.freeAll();
|
|
|
|
hugeItemAllocator.freeAll();
|
2018-01-05 14:30:23 +00:00
|
|
|
icAllocator.freeAll();
|
2015-08-07 12:26:43 +00:00
|
|
|
|
|
|
|
delete m_weakValues;
|
2013-06-01 12:27:45 +00:00
|
|
|
#ifdef V4_USE_VALGRIND
|
2014-12-12 15:38:09 +00:00
|
|
|
VALGRIND_DESTROY_MEMPOOL(this);
|
2013-06-01 12:27:45 +00:00
|
|
|
#endif
|
2016-12-22 14:20:05 +00:00
|
|
|
delete chunkAllocator;
|
2012-12-04 12:40:18 +00:00
|
|
|
}
|
|
|
|
|
2015-08-28 11:48:52 +00:00
|
|
|
|
2012-12-04 12:40:18 +00:00
|
|
|
void MemoryManager::dumpStats() const
|
|
|
|
{
|
2017-11-27 09:53:33 +00:00
|
|
|
if (!gcStats)
|
|
|
|
return;
|
2012-12-04 12:40:18 +00:00
|
|
|
|
2017-11-27 09:53:33 +00:00
|
|
|
const QLoggingCategory &stats = lcGcStats();
|
|
|
|
qDebug(stats) << "Qml GC memory allocation statistics:";
|
|
|
|
qDebug(stats) << "Total memory allocated:" << statistics.maxReservedMem;
|
|
|
|
qDebug(stats) << "Max memory used before a GC run:" << statistics.maxAllocatedMem;
|
|
|
|
qDebug(stats) << "Max memory used after a GC run:" << statistics.maxUsedMem;
|
|
|
|
qDebug(stats) << "Requests for different item sizes:";
|
|
|
|
for (int i = 1; i < BlockAllocator::NumBins - 1; ++i)
|
|
|
|
qDebug(stats) << " <" << (i << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[i];
|
|
|
|
qDebug(stats) << " >=" << ((BlockAllocator::NumBins - 1) << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[BlockAllocator::NumBins - 1];
|
2012-12-04 12:40:18 +00:00
|
|
|
}
|
2012-12-13 22:46:51 +00:00
|
|
|
|
2017-03-09 09:36:16 +00:00
|
|
|
void MemoryManager::collectFromJSStack(MarkStack *markStack) const
|
2013-09-03 10:40:07 +00:00
|
|
|
{
|
2015-08-28 11:48:52 +00:00
|
|
|
Value *v = engine->jsStackBase;
|
|
|
|
Value *top = engine->jsStackTop;
|
2013-09-03 10:40:07 +00:00
|
|
|
while (v < top) {
|
2016-05-26 15:46:24 +00:00
|
|
|
Managed *m = v->managed();
|
2017-10-17 13:14:59 +00:00
|
|
|
if (m) {
|
|
|
|
Q_ASSERT(m->inUse());
|
2013-09-03 10:40:07 +00:00
|
|
|
// Skip pointers to already freed objects, they are bogus as well
|
2017-03-09 09:36:16 +00:00
|
|
|
m->mark(markStack);
|
2017-10-17 13:14:59 +00:00
|
|
|
}
|
2013-09-03 10:40:07 +00:00
|
|
|
++v;
|
|
|
|
}
|
2025-08-11 09:43:55 +00:00
|
|
|
|
|
|
|
for (auto *frame = engine->currentStackFrame; frame; frame = frame->parentFrame()) {
|
|
|
|
if (!frame->isMetaTypesFrame())
|
|
|
|
continue;
|
|
|
|
|
|
|
|
const QQmlPrivate::AOTTrackedLocalsStorage *locals
|
|
|
|
= static_cast<const MetaTypesStackFrame *>(frame)->locals();
|
|
|
|
|
|
|
|
// locals have to be initialized first thing when calling the function
|
|
|
|
Q_ASSERT(locals);
|
|
|
|
|
|
|
|
locals->markObjects(markStack);
|
|
|
|
}
|
2013-09-03 10:40:07 +00:00
|
|
|
}
|
2016-12-22 14:20:05 +00:00
|
|
|
|
2023-11-21 18:36:26 +00:00
|
|
|
GCStateMachine::GCStateMachine()
|
2024-09-25 13:01:37 +00:00
|
|
|
: collectTimings(lcGcStepExecution().isDebugEnabled())
|
2023-11-21 18:36:26 +00:00
|
|
|
{
|
|
|
|
// base assumption: target 60fps, use at most 1/3 of time for gc
|
2024-09-30 07:17:14 +00:00
|
|
|
// unless overridden by env variable
|
|
|
|
bool ok = false;
|
|
|
|
auto envTimeLimit = qEnvironmentVariableIntValue("QV4_GC_TIMELIMIT", &ok);
|
|
|
|
if (!ok)
|
|
|
|
envTimeLimit = (1000 / 60) / 3;
|
|
|
|
if (envTimeLimit > 0)
|
|
|
|
timeLimit = std::chrono::milliseconds { envTimeLimit };
|
|
|
|
else
|
|
|
|
timeLimit = std::chrono::milliseconds { 0 };
|
2023-11-21 18:36:26 +00:00
|
|
|
}
|
|
|
|
|
2024-09-25 13:01:37 +00:00
|
|
|
static void logStepTiming(GCStateMachine* that, quint64 timing) {
|
|
|
|
auto registerTimingWithResetOnOverflow = [](
|
|
|
|
GCStateMachine::StepTiming& storage, quint64 timing, GCState state
|
|
|
|
) {
|
|
|
|
auto wouldOverflow = [](quint64 lhs, quint64 rhs) {
|
|
|
|
return rhs > 0 && lhs > std::numeric_limits<quint64>::max() - rhs;
|
|
|
|
};
|
|
|
|
|
|
|
|
if (wouldOverflow(storage.rolling_sum, timing) || wouldOverflow(storage.count, 1)) {
|
|
|
|
qDebug(lcGcStepExecution) << "Resetting timings storage for"
|
|
|
|
<< QMetaEnum::fromType<GCState>().key(state) << "due to overflow.";
|
|
|
|
storage.rolling_sum = timing;
|
|
|
|
storage.count = 1;
|
|
|
|
} else {
|
|
|
|
storage.rolling_sum += timing;
|
|
|
|
storage.count += 1;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
GCStateMachine::StepTiming& storage = that->executionTiming[that->state];
|
|
|
|
registerTimingWithResetOnOverflow(storage, timing, that->state);
|
|
|
|
|
|
|
|
qDebug(lcGcStepExecution) << "Performed" << QMetaEnum::fromType<GCState>().key(that->state)
|
|
|
|
<< "in" << timing << "microseconds";
|
|
|
|
qDebug(lcGcStepExecution) << "This step was performed" << storage.count << " time(s), executing in"
|
|
|
|
<< (storage.rolling_sum / storage.count) << "microseconds on average.";
|
|
|
|
}
|
|
|
|
|
|
|
|
static GCState executeWithLoggingIfEnabled(GCStateMachine* that, GCStateInfo& stateInfo) {
|
|
|
|
if (!that->collectTimings)
|
|
|
|
return stateInfo.execute(that, that->stateData);
|
|
|
|
|
|
|
|
QElapsedTimer timer;
|
|
|
|
timer.start();
|
|
|
|
GCState next = stateInfo.execute(that, that->stateData);
|
|
|
|
logStepTiming(that, timer.nsecsElapsed()/1000);
|
|
|
|
return next;
|
|
|
|
}
|
|
|
|
|
2023-11-21 18:36:26 +00:00
|
|
|
void GCStateMachine::transition() {
|
|
|
|
if (timeLimit.count() > 0) {
|
|
|
|
deadline = QDeadlineTimer(timeLimit);
|
|
|
|
bool deadlineExpired = false;
|
|
|
|
while (!(deadlineExpired = deadline.hasExpired()) && state != GCState::Invalid) {
|
2024-01-10 15:06:17 +00:00
|
|
|
if (state > GCState::InitCallDestroyObjects) {
|
|
|
|
/* initCallDestroyObjects is the last action which drains the mark
|
|
|
|
stack by default. But our write-barrier might end up putting
|
|
|
|
objects on the markStack which still reference other objects.
|
|
|
|
Especially when we call user code triggered by Component.onDestruction,
|
|
|
|
but also when we run into a timeout.
|
|
|
|
We don't redrain before InitCallDestroyObjects, as that would
|
|
|
|
potentially lead to useless busy-work (e.g., if the last references
|
|
|
|
to objects are removed while the mark phase is running)
|
|
|
|
*/
|
|
|
|
redrain(this);
|
|
|
|
}
|
2024-08-30 15:44:16 +00:00
|
|
|
qCDebug(lcGcStateTransitions) << "Preparing to execute the"
|
|
|
|
<< QMetaEnum::fromType<GCState>().key(state) << "state";
|
2023-11-21 18:36:26 +00:00
|
|
|
GCStateInfo& stateInfo = stateInfoMap[int(state)];
|
2024-09-25 13:01:37 +00:00
|
|
|
state = executeWithLoggingIfEnabled(this, stateInfo);
|
2024-08-30 15:44:16 +00:00
|
|
|
qCDebug(lcGcStateTransitions) << "Transitioning to the"
|
|
|
|
<< QMetaEnum::fromType<GCState>().key(state) << "state";
|
2023-11-21 18:36:26 +00:00
|
|
|
if (stateInfo.breakAfter)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (deadlineExpired)
|
|
|
|
handleTimeout(state);
|
|
|
|
if (state != GCState::Invalid)
|
|
|
|
QMetaObject::invokeMethod(mm->engine->publicEngine, [this]{
|
|
|
|
mm->onEventLoop();
|
|
|
|
}, Qt::QueuedConnection);
|
|
|
|
} else {
|
|
|
|
deadline = QDeadlineTimer::Forever;
|
|
|
|
while (state != GCState::Invalid) {
|
2024-08-30 15:44:16 +00:00
|
|
|
qCDebug(lcGcStateTransitions) << "Preparing to execute the"
|
|
|
|
<< QMetaEnum::fromType<GCState>().key(state) << "state";
|
2023-11-21 18:36:26 +00:00
|
|
|
GCStateInfo& stateInfo = stateInfoMap[int(state)];
|
2024-09-25 13:01:37 +00:00
|
|
|
state = executeWithLoggingIfEnabled(this, stateInfo);
|
2024-08-30 15:44:16 +00:00
|
|
|
qCDebug(lcGcStateTransitions) << "Transitioning to the"
|
|
|
|
<< QMetaEnum::fromType<GCState>().key(state) << "state";
|
2023-11-21 18:36:26 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-12-22 14:20:05 +00:00
|
|
|
} // namespace QV4
|
|
|
|
|
2013-06-24 13:28:00 +00:00
|
|
|
QT_END_NAMESPACE
|
2024-08-30 15:44:16 +00:00
|
|
|
|
|
|
|
#include "moc_qv4mm_p.cpp"
|