2022-05-13 13:12:05 +00:00
|
|
|
// Copyright (C) 2021 The Qt Company Ltd.
|
|
|
|
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only
|
2012-12-04 12:40:18 +00:00
|
|
|
|
2013-04-15 09:50:16 +00:00
|
|
|
#include "qv4engine_p.h"
|
|
|
|
#include "qv4object_p.h"
|
|
|
|
#include "qv4mm_p.h"
|
2013-06-07 09:21:18 +00:00
|
|
|
#include "qv4qobjectwrapper_p.h"
|
2018-04-06 14:24:59 +00:00
|
|
|
#include "qv4identifiertable_p.h"
|
2016-12-22 14:20:05 +00:00
|
|
|
#include <QtCore/qalgorithms.h>
|
|
|
|
#include <QtCore/private/qnumeric_p.h>
|
2017-11-27 09:53:33 +00:00
|
|
|
#include <QtCore/qloggingcategory.h>
|
2019-07-10 08:46:05 +00:00
|
|
|
#include <private/qv4alloca_p.h>
|
2013-05-24 11:19:15 +00:00
|
|
|
#include <qqmlengine.h>
|
2016-12-22 14:20:05 +00:00
|
|
|
#include "PageReservation.h"
|
2012-12-18 14:03:26 +00:00
|
|
|
#include "PageAllocation.h"
|
2012-12-04 12:40:18 +00:00
|
|
|
|
2016-08-02 11:25:35 +00:00
|
|
|
#include <QElapsedTimer>
|
2012-12-13 22:46:51 +00:00
|
|
|
#include <QMap>
|
2016-07-25 07:25:11 +00:00
|
|
|
#include <QScopedValueRollback>
|
2012-12-04 12:40:18 +00:00
|
|
|
|
|
|
|
#include <iostream>
|
2012-12-10 08:56:30 +00:00
|
|
|
#include <cstdlib>
|
2013-09-12 09:06:59 +00:00
|
|
|
#include <algorithm>
|
2014-06-02 16:33:19 +00:00
|
|
|
#include "qv4profiling_p.h"
|
2018-08-26 13:07:50 +00:00
|
|
|
#include "qv4mapobject_p.h"
|
2018-08-26 15:50:44 +00:00
|
|
|
#include "qv4setobject_p.h"
|
2019-05-09 13:50:40 +00:00
|
|
|
#include "qv4writebarrier_p.h"
|
2012-12-04 12:40:18 +00:00
|
|
|
|
2017-02-13 12:38:48 +00:00
|
|
|
//#define MM_STATS
|
|
|
|
|
|
|
|
#if !defined(MM_STATS) && !defined(QT_NO_DEBUG)
|
|
|
|
#define MM_STATS
|
|
|
|
#endif
|
2016-12-22 14:20:05 +00:00
|
|
|
|
|
|
|
#if MM_DEBUG
|
|
|
|
#define DEBUG qDebug() << "MM:"
|
|
|
|
#else
|
|
|
|
#define DEBUG if (1) ; else qDebug() << "MM:"
|
|
|
|
#endif
|
|
|
|
|
2013-03-12 18:49:13 +00:00
|
|
|
#ifdef V4_USE_VALGRIND
|
|
|
|
#include <valgrind/valgrind.h>
|
|
|
|
#include <valgrind/memcheck.h>
|
|
|
|
#endif
|
|
|
|
|
2016-06-20 17:31:29 +00:00
|
|
|
#ifdef V4_USE_HEAPTRACK
|
|
|
|
#include <heaptrack_api.h>
|
|
|
|
#endif
|
|
|
|
|
2013-06-27 19:51:22 +00:00
|
|
|
#if OS(QNX)
|
|
|
|
#include <sys/storage.h> // __tls()
|
|
|
|
#endif
|
|
|
|
|
2014-01-21 09:55:18 +00:00
|
|
|
#if USE(PTHREADS) && HAVE(PTHREAD_NP_H)
|
|
|
|
#include <pthread_np.h>
|
|
|
|
#endif
|
|
|
|
|
2017-11-27 09:53:33 +00:00
|
|
|
Q_LOGGING_CATEGORY(lcGcStats, "qt.qml.gc.statistics")
|
|
|
|
Q_DECLARE_LOGGING_CATEGORY(lcGcStats)
|
|
|
|
Q_LOGGING_CATEGORY(lcGcAllocatorStats, "qt.qml.gc.allocatorStats")
|
|
|
|
Q_DECLARE_LOGGING_CATEGORY(lcGcAllocatorStats)
|
|
|
|
|
2015-01-09 17:52:56 +00:00
|
|
|
using namespace WTF;
|
|
|
|
|
2013-06-24 13:28:00 +00:00
|
|
|
QT_BEGIN_NAMESPACE
|
|
|
|
|
2016-12-22 14:20:05 +00:00
|
|
|
namespace QV4 {
|
|
|
|
|
|
|
|
enum {
|
|
|
|
MinSlotsGCLimit = QV4::Chunk::AvailableSlots*16,
|
|
|
|
GCOverallocation = 200 /* Max overallocation by the GC in % */
|
|
|
|
};
|
|
|
|
|
|
|
|
// A MemorySegment is one contiguous page reservation from which up to
// NumChunks GC chunks are committed on demand. A 64-bit bitmap
// (allocatedMap) tracks which chunks are currently handed out.
struct MemorySegment {
    enum {
#ifdef Q_OS_RTEMS
        NumChunks = sizeof(quint64),
#else
        // One bit per chunk in the quint64 allocatedMap.
        NumChunks = 8*sizeof(quint64),
#endif
        SegmentSize = NumChunks*Chunk::ChunkSize,
    };

    // Reserve (but do not commit) address space for the segment. The
    // reservation is over-sized by one ChunkSize so that `base` can be
    // aligned up to a ChunkSize boundary.
    MemorySegment(size_t size)
    {
        size += Chunk::ChunkSize; // make sure we can get enough 64k alignment memory
        if (size < SegmentSize)
            size = SegmentSize;

        pageReservation = PageReservation::reserve(size, OSAllocator::JSGCHeapPages);
        // Align the usable base address up to the next ChunkSize boundary.
        base = reinterpret_cast<Chunk *>((reinterpret_cast<quintptr>(pageReservation.base()) + Chunk::ChunkSize - 1) & ~(Chunk::ChunkSize - 1));
        nChunks = NumChunks;
        availableBytes = size - (reinterpret_cast<quintptr>(base) - reinterpret_cast<quintptr>(pageReservation.base()));
        // If alignment ate into the reservation, one chunk is lost.
        if (availableBytes < SegmentSize)
            --nChunks;
    }
    // Move-construct by swapping with the (default-initialized) members of
    // the moved-from object, which leaves `other` safely destructible.
    MemorySegment(MemorySegment &&other) {
        qSwap(pageReservation, other.pageReservation);
        qSwap(base, other.base);
        qSwap(allocatedMap, other.allocatedMap);
        qSwap(availableBytes, other.availableBytes);
        qSwap(nChunks, other.nChunks);
    }

    ~MemorySegment() {
        // base is null for moved-from segments; only the owner deallocates.
        if (base)
            pageReservation.deallocate();
    }

    // Mark chunk `index` as allocated in the bitmap.
    void setBit(size_t index) {
        Q_ASSERT(index < nChunks);
        quint64 bit = static_cast<quint64>(1) << index;
        // qDebug() << " setBit" << hex << index << (index & (Bits - 1)) << bit;
        allocatedMap |= bit;
    }
    // Mark chunk `index` as free in the bitmap.
    void clearBit(size_t index) {
        Q_ASSERT(index < nChunks);
        quint64 bit = static_cast<quint64>(1) << index;
        // qDebug() << " clearBit" << hex << index << (index & (Bits - 1)) << bit;
        allocatedMap &= ~bit;
    }
    // Return whether chunk `index` is currently allocated.
    bool testBit(size_t index) const {
        Q_ASSERT(index < nChunks);
        quint64 bit = static_cast<quint64>(1) << index;
        return (allocatedMap & bit);
    }

    Chunk *allocate(size_t size);
    // Return `size` bytes starting at `chunk` to the segment: clear the
    // corresponding bitmap bits and decommit the pages.
    void free(Chunk *chunk, size_t size) {
        DEBUG << "freeing chunk" << chunk;
        size_t index = static_cast<size_t>(chunk - base);
        size_t end = qMin(static_cast<size_t>(NumChunks), index + (size - 1)/Chunk::ChunkSize + 1);
        while (index < end) {
            Q_ASSERT(testBit(index));
            clearBit(index);
            ++index;
        }

        size_t pageSize = WTF::pageSize();
        size = (size + pageSize - 1) & ~(pageSize - 1);
#if !defined(Q_OS_LINUX) && !defined(Q_OS_WIN)
        // Linux and Windows zero out pages that have been decommitted and get committed again.
        // unfortunately that's not true on other OSes (e.g. BSD based ones), so zero out the
        // memory before decommit, so that we can be sure that all chunks we allocate will be
        // zero initialized.
        memset(chunk, 0, size);
#endif
        pageReservation.decommit(chunk, size);
    }

    // True if `c` lies inside this segment's chunk range.
    bool contains(Chunk *c) const {
        return c >= base && c < base + nChunks;
    }

    PageReservation pageReservation; // the reserved (not necessarily committed) address range
    Chunk *base = nullptr;           // first ChunkSize-aligned chunk in the reservation
    quint64 allocatedMap = 0;        // one bit per chunk: 1 = allocated
    size_t availableBytes = 0;       // usable bytes from base to end of reservation
    uint nChunks = 0;                // number of usable chunks (NumChunks or NumChunks - 1)
};
|
|
|
|
|
|
|
|
// Commit `size` bytes from this segment and return the first chunk, or
// nullptr when no run of free chunks is large enough. Huge allocations
// (>= SegmentSize) take over the whole, otherwise empty, segment.
Chunk *MemorySegment::allocate(size_t size)
{
    if (!allocatedMap && size >= SegmentSize) {
        // chunk allocated for one huge allocation
        Q_ASSERT(availableBytes >= size);
        pageReservation.commit(base, size);
        allocatedMap = ~static_cast<quint64>(0);
        return base;
    }
    size_t requiredChunks = (size + sizeof(Chunk) - 1)/sizeof(Chunk);
    // First-fit scan for `requiredChunks` consecutive free bits.
    uint sequence = 0;
    Chunk *candidate = nullptr;
    for (uint i = 0; i < nChunks; ++i) {
        if (!testBit(i)) {
            if (!candidate)
                candidate = base + i;
            ++sequence;
        } else {
            // Run interrupted by an allocated chunk; restart the search.
            candidate = nullptr;
            sequence = 0;
        }
        if (sequence == requiredChunks) {
            pageReservation.commit(candidate, size);
            for (uint i = 0; i < requiredChunks; ++i)
                setBit(candidate - base + i);
            DEBUG << "allocated chunk " << candidate << Qt::hex << size;

            return candidate;
        }
    }
    return nullptr;
}
|
|
|
|
|
|
|
|
struct ChunkAllocator {
|
|
|
|
ChunkAllocator() {}
|
|
|
|
|
|
|
|
size_t requiredChunkSize(size_t size) {
|
|
|
|
size += Chunk::HeaderSize; // space required for the Chunk header
|
|
|
|
size_t pageSize = WTF::pageSize();
|
|
|
|
size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes
|
|
|
|
if (size < Chunk::ChunkSize)
|
|
|
|
size = Chunk::ChunkSize;
|
|
|
|
return size;
|
|
|
|
}
|
|
|
|
|
|
|
|
Chunk *allocate(size_t size = 0);
|
|
|
|
void free(Chunk *chunk, size_t size = 0);
|
|
|
|
|
|
|
|
std::vector<MemorySegment> memorySegments;
|
|
|
|
};
|
|
|
|
|
|
|
|
// Allocate a chunk of at least `size` bytes, first from an existing
// segment with free space, otherwise from a freshly reserved segment.
Chunk *ChunkAllocator::allocate(size_t size)
{
    size = requiredChunkSize(size);

    for (MemorySegment &segment : memorySegments) {
        if (!~segment.allocatedMap)
            continue; // every chunk bit set: segment is full
        if (Chunk *chunk = segment.allocate(size))
            return chunk;
    }

    // No existing segment could satisfy the request: grow the heap.
    memorySegments.emplace_back(size);
    Chunk *chunk = memorySegments.back().allocate(size);
    Q_ASSERT(chunk); // a brand-new segment always has room
    return chunk;
}
|
|
|
|
|
|
|
|
// Return `chunk` to the memory segment that owns it.
void ChunkAllocator::free(Chunk *chunk, size_t size)
{
    const size_t bytes = requiredChunkSize(size);
    auto owner = std::find_if(memorySegments.begin(), memorySegments.end(),
                              [chunk](const MemorySegment &segment) {
                                  return segment.contains(chunk);
                              });
    if (owner != memorySegments.end()) {
        owner->free(chunk, bytes);
        return;
    }
    Q_ASSERT(false); // every chunk must come from one of our segments
}
|
|
|
|
|
2017-04-27 17:37:35 +00:00
|
|
|
// Debug helpers for dumping sweep bitmaps. With DUMP_SWEEP defined,
// binary() renders a bitmap word as a zero-padded 64-character bit string
// and SDUMP logs via qDebug; otherwise both compile to no-ops.
#ifdef DUMP_SWEEP
QString binary(quintptr n) {
    QString s = QString::number(n, 2);
    while (s.length() < 64)
        s.prepend(QChar::fromLatin1('0'));
    return s;
}
#define SDUMP qDebug
#else
QString binary(quintptr) { return QString(); }
#define SDUMP if (1) ; else qDebug
#endif
|
2012-12-04 12:40:18 +00:00
|
|
|
|
2017-02-14 13:03:56 +00:00
|
|
|
// Stores a classname -> freed count mapping.
// Lazily-initialized global used when GC statistics logging is enabled.
typedef QHash<const char*, int> MMStatsHash;
Q_GLOBAL_STATIC(MMStatsHash, freedObjectStatsGlobal)
|
2017-04-04 08:05:58 +00:00
|
|
|
|
2017-02-14 13:03:56 +00:00
|
|
|
// This indirection avoids sticking QHash code in each of the call sites, which
|
|
|
|
// shaves off some instructions in the case that it's unused.
|
|
|
|
static void increaseFreedCountForClass(const char *className)
|
|
|
|
{
|
|
|
|
(*freedObjectStatsGlobal())[className]++;
|
|
|
|
}
|
|
|
|
|
2017-12-12 09:35:21 +00:00
|
|
|
//bool Chunk::sweep(ClassDestroyStatsCallback classCountPtr)
// Sweep one chunk: destroy every object that is allocated (objectBitmap)
// but not marked (blackBitmap), clear its extent bits, and rebuild the
// bitmaps so only surviving objects remain. Returns true when the chunk
// still contains at least one live object after the sweep.
bool Chunk::sweep(ExecutionEngine *engine)
{
    bool hasUsedSlots = false;
    SDUMP() << "sweeping chunk" << this;
    HeapItem *o = realBase();
    bool lastSlotFree = false;
    for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
#if WRITEBARRIER(none)
        Q_ASSERT((grayBitmap[i] | blackBitmap[i]) == blackBitmap[i]); // check that we don't have gray only objects
#endif
        // Slots to free: allocated but not marked black.
        quintptr toFree = objectBitmap[i] ^ blackBitmap[i];
        Q_ASSERT((toFree & objectBitmap[i]) == toFree); // check all black objects are marked as being used
        quintptr e = extendsBitmap[i];
        SDUMP() << " index=" << i;
        SDUMP() << " toFree =" << binary(toFree);
        SDUMP() << " black =" << binary(blackBitmap[i]);
        SDUMP() << " object =" << binary(objectBitmap[i]);
        SDUMP() << " extends =" << binary(e);
        // If the previous word's object spilled into this one and was freed,
        // drop the extent bits that continue it.
        if (lastSlotFree)
            e &= (e + 1); // clear all lowest extent bits
        while (toFree) {
            uint index = qCountTrailingZeroBits(toFree);
            quintptr bit = (static_cast<quintptr>(1) << index);

            toFree ^= bit; // mask out freed slot
            // DEBUG << " index" << hex << index << toFree;

            // remove all extends slots that have been freed
            // this is a bit of bit trickery.
            quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit
            quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object
            quintptr result = objmask + 1;
            Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something
            result |= mask; // ensure we don't clear stuff to the right of the current object
            e &= result;

            HeapItem *itemToFree = o + index;
            Heap::Base *b = *itemToFree;
            const VTable *v = b->internalClass->vtable;
// if (Q_UNLIKELY(classCountPtr))
// classCountPtr(v->className);
            // Run the object's destructor, if it has one.
            if (v->destroy) {
                v->destroy(b);
                b->_checkIsDestroyed();
            }
#ifdef V4_USE_HEAPTRACK
            heaptrack_report_free(itemToFree);
#endif
        }
        // Report the number of freed slots (object + extent bits that went
        // away) to the profiler.
        Q_V4_PROFILE_DEALLOC(engine, qPopulationCount((objectBitmap[i] | extendsBitmap[i])
                                                      - (blackBitmap[i] | e)) * Chunk::SlotSize,
                             Profiling::SmallItem);
        // Survivors (black) become the new allocated set; gray is reset.
        objectBitmap[i] = blackBitmap[i];
        grayBitmap[i] = 0;
        hasUsedSlots |= (blackBitmap[i] != 0);
        extendsBitmap[i] = e;
        // Remember whether the topmost slot of this word is free, so the
        // next iteration can clear trailing extent bits.
        lastSlotFree = !((objectBitmap[i]|extendsBitmap[i]) >> (sizeof(quintptr)*8 - 1));
        SDUMP() << " new extends =" << binary(e);
        SDUMP() << " lastSlotFree" << lastSlotFree;
        Q_ASSERT((objectBitmap[i] & extendsBitmap[i]) == 0);
        o += Chunk::Bits;
    }
    // DEBUG << "swept chunk" << this << "freed" << slotsFreed << "slots.";
    return hasUsedSlots;
}
|
|
|
|
|
2017-11-23 09:05:30 +00:00
|
|
|
// Destroy every allocated object in this chunk unconditionally (used when
// tearing down the heap), clearing the object and gray bitmaps.
void Chunk::freeAll(ExecutionEngine *engine)
{
    // DEBUG << "sweeping chunk" << this << (*freeList);
    HeapItem *o = realBase();
    for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
        // Unlike sweep(), every allocated object is freed here.
        quintptr toFree = objectBitmap[i];
        quintptr e = extendsBitmap[i];
        // DEBUG << hex << " index=" << i << toFree;
        while (toFree) {
            uint index = qCountTrailingZeroBits(toFree);
            quintptr bit = (static_cast<quintptr>(1) << index);

            toFree ^= bit; // mask out freed slot
            // DEBUG << " index" << hex << index << toFree;

            // remove all extends slots that have been freed
            // this is a bit of bit trickery.
            quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit
            quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object
            quintptr result = objmask + 1;
            Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something
            result |= mask; // ensure we don't clear stuff to the right of the current object
            e &= result;

            HeapItem *itemToFree = o + index;
            Heap::Base *b = *itemToFree;
            // Run the destructor, if any, before releasing the slot.
            if (b->internalClass->vtable->destroy) {
                b->internalClass->vtable->destroy(b);
                b->_checkIsDestroyed();
            }
#ifdef V4_USE_HEAPTRACK
            heaptrack_report_free(itemToFree);
#endif
        }
        // Report all freed slots to the profiler.
        Q_V4_PROFILE_DEALLOC(engine, (qPopulationCount(objectBitmap[i]|extendsBitmap[i])
                                      - qPopulationCount(e)) * Chunk::SlotSize, Profiling::SmallItem);
        objectBitmap[i] = 0;
        grayBitmap[i] = 0;
        extendsBitmap[i] = e;
        o += Chunk::Bits;
    }
    // DEBUG << "swept chunk" << this << "freed" << slotsFreed << "slots.";
}
|
|
|
|
|
2017-02-15 14:40:27 +00:00
|
|
|
// Clear all mark (black) bits so the next mark phase starts from scratch.
void Chunk::resetBlackBits()
{
    memset(blackBitmap, 0, sizeof(blackBitmap));
}
|
|
|
|
|
2017-03-09 09:36:16 +00:00
|
|
|
// Push every object that is both black and gray (i.e. was re-dirtied by a
// Steele-style write barrier after being marked) back onto the mark stack
// for re-scanning, then clear the gray bits.
void Chunk::collectGrayItems(MarkStack *markStack)
{
    // DEBUG << "sweeping chunk" << this << (*freeList);
    HeapItem *o = realBase();
    for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
#if WRITEBARRIER(none)
        Q_ASSERT((grayBitmap[i] | blackBitmap[i]) == blackBitmap[i]); // check that we don't have gray only objects
#endif
        quintptr toMark = blackBitmap[i] & grayBitmap[i]; // correct for a Steele type barrier
        Q_ASSERT((toMark & objectBitmap[i]) == toMark); // check all black objects are marked as being used
        // DEBUG << hex << " index=" << i << toFree;
        while (toMark) {
            uint index = qCountTrailingZeroBits(toMark);
            quintptr bit = (static_cast<quintptr>(1) << index);

            toMark ^= bit; // mask out marked slot
            // DEBUG << " index" << hex << index << toFree;

            HeapItem *itemToFree = o + index;
            Heap::Base *b = *itemToFree;
            Q_ASSERT(b->inUse());
            markStack->push(b);
        }
        grayBitmap[i] = 0;
        o += Chunk::Bits;
    }
    // DEBUG << "swept chunk" << this << "freed" << slotsFreed << "slots.";

}
|
|
|
|
|
2017-01-03 10:49:15 +00:00
|
|
|
// Scan the chunk's bitmaps for runs of free slots and thread each run onto
// the size-indexed free lists in `bins` (runs >= nBins - 1 slots all go
// into the last bin). Called after sweeping to rebuild the allocator's
// free lists.
void Chunk::sortIntoBins(HeapItem **bins, uint nBins)
{
    // qDebug() << "sortIntoBins:";
    HeapItem *base = realBase();
#if QT_POINTER_SIZE == 8
    const int start = 0;
#else
    // On 32-bit the whole first bitmap word covers the chunk header.
    const int start = 1;
#endif
#ifndef QT_NO_DEBUG
    uint freeSlots = 0;
    uint allocatedSlots = 0;
#endif
    for (int i = start; i < EntriesInBitmap; ++i) {
        quintptr usedSlots = (objectBitmap[i]|extendsBitmap[i]);
#if QT_POINTER_SIZE == 8
        // Mark the header slots of the first word as used.
        if (!i)
            usedSlots |= (static_cast<quintptr>(1) << (HeaderSize/SlotSize)) - 1;
#endif
#ifndef QT_NO_DEBUG
        allocatedSlots += qPopulationCount(usedSlots);
        // qDebug() << hex << " i=" << i << "used=" << usedSlots;
#endif
        while (1) {
            // Find the first free slot (first zero bit) in this word.
            uint index = qCountTrailingZeroBits(usedSlots + 1);
            if (index == Bits)
                break;
            uint freeStart = i*Bits + index;
            usedSlots &= ~((static_cast<quintptr>(1) << index) - 1);
            // A free run may span several bitmap words; advance until we
            // find a word with a used slot (or run off the end).
            while (!usedSlots) {
                if (++i < EntriesInBitmap) {
                    usedSlots = (objectBitmap[i]|extendsBitmap[i]);
                } else {
                    Q_ASSERT(i == EntriesInBitmap);
                    // Overflows to 0 when counting trailing zeroes above in next iteration.
                    // Then, all the bits are zeroes and we break.
                    usedSlots = std::numeric_limits<quintptr>::max();
                    break;
                }
#ifndef QT_NO_DEBUG
                allocatedSlots += qPopulationCount(usedSlots);
                // qDebug() << hex << " i=" << i << "used=" << usedSlots;
#endif
            }
            HeapItem *freeItem = base + freeStart;

            // End of the free run: first used slot in the current word.
            index = qCountTrailingZeroBits(usedSlots);
            usedSlots |= (quintptr(1) << index) - 1;
            uint freeEnd = i*Bits + index;
            uint nSlots = freeEnd - freeStart;
#ifndef QT_NO_DEBUG
            // qDebug() << hex << " got free slots from" << freeStart << "to" << freeEnd << "n=" << nSlots << "usedSlots=" << usedSlots;
            freeSlots += nSlots;
#endif
            Q_ASSERT(freeEnd > freeStart && freeEnd <= NumSlots);
            // Prepend the run to the appropriate size bin.
            freeItem->freeData.availableSlots = nSlots;
            uint bin = qMin(nBins - 1, nSlots);
            freeItem->freeData.next = bins[bin];
            bins[bin] = freeItem;
        }
    }
#ifndef QT_NO_DEBUG
    // Every slot must be accounted for as either free or allocated.
    Q_ASSERT(freeSlots + allocatedSlots == (EntriesInBitmap - start) * 8 * sizeof(quintptr));
#endif
}
|
|
|
|
|
|
|
|
// Allocate `size` bytes (a multiple of Chunk::SlotSize) from the block
// allocator. Strategy, in order: exact-size free bin, bump allocation from
// the current free area, a large-enough entry in the last (oversize) bin,
// splitting a larger bin entry, and finally — only if `forceAllocation` —
// a brand-new chunk. Returns nullptr when nothing fits and forceAllocation
// is false.
HeapItem *BlockAllocator::allocate(size_t size, bool forceAllocation) {
    Q_ASSERT((size % Chunk::SlotSize) == 0);
    size_t slotsRequired = size >> Chunk::SlotSizeShift;

    if (allocationStats)
        ++allocationStats[binForSlots(slotsRequired)];

    HeapItem **last;

    HeapItem *m;

    // 1) Exact-fit: the bin for this slot count.
    if (slotsRequired < NumBins - 1) {
        m = freeBins[slotsRequired];
        if (m) {
            freeBins[slotsRequired] = m->freeData.next;
            goto done;
        }
    }

    // 2) Bump-allocate from the current contiguous free area.
    if (nFree >= slotsRequired) {
        // use bump allocation
        Q_ASSERT(nextFree);
        m = nextFree;
        nextFree += slotsRequired;
        nFree -= slotsRequired;
        goto done;
    }

    // DEBUG << "No matching bin found for item" << size << bin;
    // 3) search last bin for a large enough item
    last = &freeBins[NumBins - 1];
    while ((m = *last)) {
        if (m->freeData.availableSlots >= slotsRequired) {
            *last = m->freeData.next; // take it out of the list

            size_t remainingSlots = m->freeData.availableSlots - slotsRequired;
            // DEBUG << "found large free slots of size" << m->freeData.availableSlots << m << "remaining" << remainingSlots;
            if (remainingSlots == 0)
                goto done;

            HeapItem *remainder = m + slotsRequired;
            if (remainingSlots > nFree) {
                // The leftover is bigger than the current bump area: make it
                // the new bump area and re-bin the old one.
                if (nFree) {
                    size_t bin = binForSlots(nFree);
                    nextFree->freeData.next = freeBins[bin];
                    nextFree->freeData.availableSlots = nFree;
                    freeBins[bin] = nextFree;
                }
                nextFree = remainder;
                nFree = remainingSlots;
            } else {
                // Re-bin the leftover.
                remainder->freeData.availableSlots = remainingSlots;
                size_t binForRemainder = binForSlots(remainingSlots);
                remainder->freeData.next = freeBins[binForRemainder];
                freeBins[binForRemainder] = remainder;
            }
            goto done;
        }
        last = &m->freeData.next;
    }

    // 4) check if we can split up another (larger) slot
    if (slotsRequired < NumBins - 1) {
        for (size_t i = slotsRequired + 1; i < NumBins - 1; ++i) {
            m = freeBins[i];
            if (m) {
                freeBins[i] = m->freeData.next; // take it out of the list
                // qDebug() << "got item" << slotsRequired << "from slot" << i;
                size_t remainingSlots = i - slotsRequired;
                Q_ASSERT(remainingSlots < NumBins - 1);
                HeapItem *remainder = m + slotsRequired;
                remainder->freeData.availableSlots = remainingSlots;
                remainder->freeData.next = freeBins[remainingSlots];
                freeBins[remainingSlots] = remainder;
                goto done;
            }
        }
    }

    // 5) Nothing fit: allocate a fresh chunk (only when forced).
    if (!m) {
        if (!forceAllocation)
            return nullptr;
        Chunk *newChunk = chunkAllocator->allocate();
        Q_V4_PROFILE_ALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
        chunks.push_back(newChunk);
        nextFree = newChunk->first();
        nFree = Chunk::AvailableSlots;
        m = nextFree;
        nextFree += slotsRequired;
        nFree -= slotsRequired;
    }

done:
    m->setAllocatedSlots(slotsRequired);
    Q_V4_PROFILE_ALLOC(engine, slotsRequired * Chunk::SlotSize, Profiling::SmallItem);
#ifdef V4_USE_HEAPTRACK
    heaptrack_report_alloc(m, slotsRequired * Chunk::SlotSize);
#endif
    // DEBUG << " " << hex << m->chunk() << m->chunk()->objectBitmap[0] << m->chunk()->extendsBitmap[0] << (m - m->chunk()->realBase());
    return m;
}
|
|
|
|
|
|
|
|
void BlockAllocator::sweep()
|
|
|
|
{
|
2018-02-21 09:41:54 +00:00
|
|
|
nextFree = nullptr;
|
2017-01-03 10:49:15 +00:00
|
|
|
nFree = 0;
|
|
|
|
memset(freeBins, 0, sizeof(freeBins));
|
|
|
|
|
|
|
|
// qDebug() << "BlockAlloc: sweep";
|
|
|
|
usedSlotsAfterLastSweep = 0;
|
2017-05-16 10:42:42 +00:00
|
|
|
|
2018-08-03 13:09:20 +00:00
|
|
|
auto firstEmptyChunk = std::partition(chunks.begin(), chunks.end(), [this](Chunk *c) {
|
|
|
|
return c->sweep(engine);
|
|
|
|
});
|
2018-08-03 12:33:40 +00:00
|
|
|
|
2018-08-03 13:09:20 +00:00
|
|
|
std::for_each(chunks.begin(), firstEmptyChunk, [this](Chunk *c) {
|
|
|
|
c->sortIntoBins(freeBins, NumBins);
|
|
|
|
usedSlotsAfterLastSweep += c->nUsedSlots();
|
|
|
|
});
|
2018-08-03 12:33:40 +00:00
|
|
|
|
|
|
|
// only free the chunks at the end to avoid that the sweep() calls indirectly
|
|
|
|
// access freed memory
|
2018-08-03 13:09:20 +00:00
|
|
|
std::for_each(firstEmptyChunk, chunks.end(), [this](Chunk *c) {
|
2018-08-03 12:33:40 +00:00
|
|
|
Q_V4_PROFILE_DEALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
|
|
|
|
chunkAllocator->free(c);
|
2018-08-03 13:09:20 +00:00
|
|
|
});
|
|
|
|
|
|
|
|
chunks.erase(firstEmptyChunk, chunks.end());
|
2017-01-03 10:49:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void BlockAllocator::freeAll()
|
|
|
|
{
|
2018-01-05 14:30:23 +00:00
|
|
|
for (auto c : chunks)
|
2017-11-23 09:05:30 +00:00
|
|
|
c->freeAll(engine);
|
2018-01-05 14:30:23 +00:00
|
|
|
for (auto c : chunks) {
|
2017-11-23 09:05:30 +00:00
|
|
|
Q_V4_PROFILE_DEALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
|
2017-01-03 10:49:15 +00:00
|
|
|
chunkAllocator->free(c);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-15 14:40:27 +00:00
|
|
|
void BlockAllocator::resetBlackBits()
|
|
|
|
{
|
|
|
|
for (auto c : chunks)
|
|
|
|
c->resetBlackBits();
|
|
|
|
}
|
|
|
|
|
2017-03-09 09:36:16 +00:00
|
|
|
// Re-queue all barrier-dirtied (gray) items of every chunk for marking.
void BlockAllocator::collectGrayItems(MarkStack *markStack)
{
    std::for_each(chunks.begin(), chunks.end(),
                  [markStack](Chunk *chunk) { chunk->collectGrayItems(markStack); });
}
|
|
|
|
|
2017-01-20 10:36:16 +00:00
|
|
|
// Allocate a single huge heap item. Requests of at least half a segment
// get a dedicated MemorySegment; smaller ones go through the shared
// ChunkAllocator. The item's slot is marked in the chunk's object bitmap.
HeapItem *HugeItemAllocator::allocate(size_t size) {
    MemorySegment *m = nullptr;
    Chunk *c = nullptr;
    if (size >= MemorySegment::SegmentSize/2) {
        // too large to handle through the ChunkAllocator, let's get our own memory segment
        size += Chunk::HeaderSize; // space required for the Chunk header
        size_t pageSize = WTF::pageSize();
        size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes
        m = new MemorySegment(size);
        c = m->allocate(size);
    } else {
        c = chunkAllocator->allocate(size);
    }
    Q_ASSERT(c);
    // Remember the owning segment (if any) so freeing can return the memory
    // to the right place.
    chunks.push_back(HugeChunk{m, c, size});
    Chunk::setBit(c->objectBitmap, c->first() - c->realBase());
    Q_V4_PROFILE_ALLOC(engine, size, Profiling::LargeItem);
#ifdef V4_USE_HEAPTRACK
    heaptrack_report_alloc(c, size);
#endif
    return c->first();
}
|
|
|
|
|
2017-02-14 13:03:56 +00:00
|
|
|
// Destroy the single object living in a huge chunk and release the chunk's
// memory, either back to its private MemorySegment or to the shared chunk
// allocator. `classCountPtr`, when non-null, is invoked with the class
// name for freed-object statistics.
static void freeHugeChunk(ChunkAllocator *chunkAllocator, const HugeItemAllocator::HugeChunk &c, ClassDestroyStatsCallback classCountPtr)
{
    HeapItem *itemToFree = c.chunk->first();
    Heap::Base *b = *itemToFree;
    const VTable *v = b->internalClass->vtable;
    if (Q_UNLIKELY(classCountPtr))
        classCountPtr(v->className);

    // Run the object's destructor, if it has one.
    if (v->destroy) {
        v->destroy(b);
        b->_checkIsDestroyed();
    }
    if (c.segment) {
        // own memory segment
        c.segment->free(c.chunk, c.size);
        delete c.segment;
    } else {
        chunkAllocator->free(c.chunk, c.size);
    }
#ifdef V4_USE_HEAPTRACK
    heaptrack_report_free(c.chunk);
#endif
}
|
|
|
|
|
2017-02-14 13:03:56 +00:00
|
|
|
// Sweep huge items: free every chunk whose object was not marked black,
// clear all black bits, and compact the chunk list.
void HugeItemAllocator::sweep(ClassDestroyStatsCallback classCountPtr)
{
    // NOTE: despite its name, this predicate returns true when the item was
    // NOT black (i.e. dead), which is what std::remove_if needs. It also
    // frees the chunk as a side effect.
    auto isBlack = [this, classCountPtr] (const HugeChunk &c) {
        bool b = c.chunk->first()->isBlack();
        Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
        if (!b) {
            Q_V4_PROFILE_DEALLOC(engine, c.size, Profiling::LargeItem);
            freeHugeChunk(chunkAllocator, c, classCountPtr);
        }
        return !b;
    };

    auto newEnd = std::remove_if(chunks.begin(), chunks.end(), isBlack);
    chunks.erase(newEnd, chunks.end());
}
|
|
|
|
|
2017-02-15 14:40:27 +00:00
|
|
|
void HugeItemAllocator::resetBlackBits()
|
|
|
|
{
|
|
|
|
for (auto c : chunks)
|
|
|
|
Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
|
|
|
|
}
|
|
|
|
|
2017-03-09 09:36:16 +00:00
|
|
|
// Re-mark huge items that were dirtied by the write barrier after being
// marked (correct for a Steele type barrier): only items that are both
// black and gray need to be visited again.
void HugeItemAllocator::collectGrayItems(MarkStack *markStack)
{
    for (const auto &hc : chunks) {
        Chunk *chunk = hc.chunk;
        const auto slot = chunk->first() - chunk->realBase();
        if (!Chunk::testBit(chunk->blackBitmap, slot))
            continue;
        if (!Chunk::testBit(chunk->grayBitmap, slot))
            continue;
        Heap::Base *item = *chunk->first();
        item->mark(markStack);
    }
}
|
|
|
|
|
2017-01-20 10:36:16 +00:00
|
|
|
void HugeItemAllocator::freeAll()
|
|
|
|
{
|
|
|
|
for (auto &c : chunks) {
|
2017-11-23 09:05:30 +00:00
|
|
|
Q_V4_PROFILE_DEALLOC(engine, c.size, Profiling::LargeItem);
|
2017-02-14 13:03:56 +00:00
|
|
|
freeHugeChunk(chunkAllocator, c, nullptr);
|
2017-01-20 10:36:16 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-01-12 20:55:51 +00:00
|
|
|
// Construct the engine's memory manager: set up the chunk allocator, the
// three item allocators that share it, persistent/weak value storage, and
// GC tuning/statistics flags (driven by env var and logging categories).
MemoryManager::MemoryManager(ExecutionEngine *engine)
    : engine(engine)
    , chunkAllocator(new ChunkAllocator)
    , blockAllocator(chunkAllocator, engine)
    , icAllocator(chunkAllocator, engine)
    , hugeItemAllocator(chunkAllocator, engine)
    , m_persistentValues(new PersistentValueStorage(engine))
    , m_weakValues(new PersistentValueStorage(engine))
    , unmanagedHeapSizeGCLimit(MinUnmanagedHeapSizeGCLimit)
    // Setting QV4_MM_AGGRESSIVE_GC forces very frequent collections (debugging aid).
    , aggressiveGC(!qEnvironmentVariableIsEmpty("QV4_MM_AGGRESSIVE_GC"))
    // Statistics collection follows the qt.qml.gc.* logging categories.
    , gcStats(lcGcStats().isDebugEnabled())
    , gcCollectorStats(lcGcAllocatorStats().isDebugEnabled())
{
#ifdef V4_USE_VALGRIND
    VALGRIND_CREATE_MEMPOOL(this, 0, true);
#endif
    memset(statistics.allocations, 0, sizeof(statistics.allocations));
    if (gcStats)
        blockAllocator.allocationStats = statistics.allocations;
}
|
|
|
|
|
2017-01-03 10:49:15 +00:00
|
|
|
// Allocates the managed heap cell for a Heap::String. The cell itself has a
// fixed, aligned size; \a unmanagedSize accounts for the string's text data
// that lives outside the GC heap and is compared against
// unmanagedHeapSizeGCLimit when deciding to trigger a collection (see runGC).
Heap::Base *MemoryManager::allocString(std::size_t unmanagedSize)
{
    const size_t stringSize = align(sizeof(Heap::String));
#ifdef MM_STATS
    lastAllocRequestedSlots = stringSize >> Chunk::SlotSizeShift;
    ++allocationCount;
#endif
    // Track out-of-heap string data so the GC can also be triggered by it.
    unmanagedHeapSize += unmanagedSize;

    HeapItem *m = allocate(&blockAllocator, stringSize);
    memset(m, 0, stringSize);
    if (gcBlocked) {
        // If the gc is running right now, it will not have a chance to mark the newly created item
        // and may therefore sweep it right away.
        // Protect the new object from the current GC run to avoid this.
        m->as<Heap::Base>()->setMarkBit();
    }

    return *m;
}
|
2012-12-13 22:46:51 +00:00
|
|
|
|
2017-01-03 10:49:15 +00:00
|
|
|
// Allocates a zero-initialized managed heap cell of \a size bytes from the
// block allocator. \a size must be a positive multiple of Chunk::SlotSize.
Heap::Base *MemoryManager::allocData(std::size_t size)
{
#ifdef MM_STATS
    lastAllocRequestedSlots = size >> Chunk::SlotSizeShift;
    ++allocationCount;
#endif

    Q_ASSERT(size >= Chunk::SlotSize);
    Q_ASSERT(size % Chunk::SlotSize == 0);

    HeapItem *item = allocate(&blockAllocator, size);
    memset(item, 0, size);

    // If the gc is running right now, it will not have a chance to mark the newly created item
    // and may therefore sweep it right away.
    // Protect the new object from the current GC run to avoid this.
    if (gcBlocked)
        item->as<Heap::Base>()->setMarkBit();

    return *item;
}
|
|
|
|
|
2017-05-03 06:45:28 +00:00
|
|
|
// Allocates a Heap::Object for \a vtable with room for \a nMembers properties.
// If all members fit into the object's inline property area no extra
// MemberData is needed. Otherwise the overflow MemberData is either placed
// directly behind the object in a single block-allocator allocation (fast
// path), or — when the combined size would not fit into one chunk — taken
// from the huge-item allocator.
Heap::Object *MemoryManager::allocObjectWithMemberData(const QV4::VTable *vtable, uint nMembers)
{
    // Byte size of the object including its inline property slots.
    uint size = (vtable->nInlineProperties + vtable->inlinePropertyOffset)*sizeof(Value);
    Q_ASSERT(!(size % sizeof(HeapItem)));

    Heap::Object *o;
    if (nMembers <= vtable->nInlineProperties) {
        o = static_cast<Heap::Object *>(allocData(size));
    } else {
        // Allocate both in one go through the block allocator
        nMembers -= vtable->nInlineProperties;
        std::size_t memberSize = align(sizeof(Heap::MemberData) + (nMembers - 1)*sizeof(Value));
        size_t totalSize = size + memberSize;
        Heap::MemberData *m;
        if (totalSize > Chunk::DataSize) {
            // Combined allocation would not fit a chunk: allocate separately.
            o = static_cast<Heap::Object *>(allocData(size));
            m = hugeItemAllocator.allocate(memberSize)->as<Heap::MemberData>();
        } else {
            HeapItem *mh = reinterpret_cast<HeapItem *>(allocData(totalSize));
            Heap::Base *b = *mh;
            o = static_cast<Heap::Object *>(b);
            // The member data starts right after the object's slots.
            mh += (size >> Chunk::SlotSizeShift);
            m = mh->as<Heap::MemberData>();
            Chunk *c = mh->chunk();
            size_t index = mh - c->realBase();
            // Register the member data as an object of its own in the chunk's
            // bitmaps, so the GC treats it as a separate heap item rather than
            // as an extension of the object in front of it.
            Chunk::setBit(c->objectBitmap, index);
            Chunk::clearBit(c->extendsBitmap, index);
        }
        o->memberData.set(engine, m);
        m->internalClass.set(engine, engine->internalClasses(EngineBase::Class_MemberData));
        Q_ASSERT(o->memberData->internalClass);
        // Number of Value slots available in the member data allocation; the
        // first Value is already part of sizeof(Heap::MemberData).
        m->values.alloc = static_cast<uint>((memberSize - sizeof(Heap::MemberData) + sizeof(Value))/sizeof(Value));
        m->values.size = o->memberData->values.alloc;
        m->init();
//        qDebug() << "    got" << o->memberData << o->memberData->size;
    }
//    qDebug() << "allocating object with memberData" << o << o->memberData.operator->();
    return o;
}
|
|
|
|
|
2017-02-13 12:38:48 +00:00
|
|
|
// Number of objects processed by MarkStack::drain() during the current GC run;
// reset in MemoryManager::mark() and reported by the collector statistics.
static uint markStackSize = 0;
|
|
|
|
|
2017-03-09 09:36:16 +00:00
|
|
|
// Sets up the mark stack on the engine's pre-reserved GC stack area. The hard
// limit is the end of the usable range; the soft limit at 3/4 of it leaves
// headroom — presumably so marking can bail out to an alternative strategy
// before the hard limit is hit; confirm against the push/drain logic.
MarkStack::MarkStack(ExecutionEngine *engine)
    : m_engine(engine)
{
    m_base = (Heap::Base **)engine->gcStack->base();
    m_top = m_base;
    // NOTE(review): the capacity is computed with sizeof(Heap::Base) rather
    // than sizeof(Heap::Base *); this under-counts the pointer slots that fit,
    // so the limits stay safely inside the reservation — confirm intent.
    const size_t size = engine->maxGCStackSize() / sizeof(Heap::Base);
    m_hardLimit = m_base + size;
    m_softLimit = m_base + size * 3 / 4;
}
|
|
|
|
|
|
|
|
void MarkStack::drain()
|
2014-10-21 09:05:00 +00:00
|
|
|
{
|
2020-02-17 14:30:54 +00:00
|
|
|
while (m_top > m_base) {
|
2017-03-09 09:36:16 +00:00
|
|
|
Heap::Base *h = pop();
|
2017-02-13 12:38:48 +00:00
|
|
|
++markStackSize;
|
2016-05-26 15:46:24 +00:00
|
|
|
Q_ASSERT(h); // at this point we should only have Heap::Base objects in this area on the stack. If not, weird things might happen.
|
2018-01-08 11:00:00 +00:00
|
|
|
h->internalClass->vtable->markObjects(h, this);
|
2014-10-21 09:05:00 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-03-09 09:36:16 +00:00
|
|
|
// Pushes all GC roots onto the mark stack: engine-internal objects, the JS
// stack, persistent values, and those weakly-held QObject wrappers that must
// stay alive because a C++-owned QObject ancestor keeps them alive.
void MemoryManager::collectRoots(MarkStack *markStack)
{
    engine->markObjects(markStack);

//    qDebug() << "    mark stack after engine->mark" << (engine->jsStackTop - markBase);

    collectFromJSStack(markStack);

//    qDebug() << "    mark stack after js stack collect" << (engine->jsStackTop - markBase);
    m_persistentValues->mark(markStack);

//    qDebug() << "    mark stack after persistants" << (engine->jsStackTop - markBase);

    // Preserve QObject ownership rules within JavaScript: A parent with c++ ownership
    // keeps all of its children alive in JavaScript.

    // Do this _after_ collectFromStack to ensure that processing the weak
    // managed objects in the loop down there doesn't make then end up as leftovers
    // on the stack and thus always get collected.
    for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
        QObjectWrapper *qobjectWrapper = (*it).as<QObjectWrapper>();
        if (!qobjectWrapper)
            continue;
        QObject *qobject = qobjectWrapper->object();
        if (!qobject)
            continue;
        bool keepAlive = QQmlData::keepAliveDuringGarbageCollection(qobject);

        if (!keepAlive) {
            // Walk to the root of the QObject parent chain: C++ ownership of
            // the topmost ancestor keeps this wrapper alive as well.
            if (QObject *parent = qobject->parent()) {
                while (parent->parent())
                    parent = parent->parent();

                keepAlive = QQmlData::keepAliveDuringGarbageCollection(parent);
            }
        }

        if (keepAlive)
            qobjectWrapper->mark(markStack);
    }
}
|
|
|
|
|
|
|
|
// Marking phase of the GC: resets the statistics counter, gathers all roots
// onto a fresh mark stack, and transitively marks everything reachable.
void MemoryManager::mark()
{
    markStackSize = 0;
    MarkStack markStack(engine);
    collectRoots(&markStack);
    // dtor of MarkStack drains
}
|
|
|
|
|
2017-02-14 13:03:56 +00:00
|
|
|
// Sweep phase of the GC. The phases run in a fixed order:
//  1. call destroyObject() on dead QObject wrappers so destroyed() handlers
//     run before any heap memory is reclaimed,
//  2. prune unmarked entries from weak maps and weak sets,
//  3. reset all remaining dead weak values to undefined (destruction handlers
//     may have touched them in the meantime),
//  4. free pending weak QObjectWrapper value slots whose object is gone,
//  5. drop multiply-wrapped QObject entries that became null/undefined,
//  6. sweep the allocators — skipped when \a lastSweep is true (the destructor
//     calls freeAll() on them right afterwards).
// \a classCountPtr, when non-null, is forwarded to the huge-item allocator to
// collect per-class free statistics (see runGC's increaseFreedCountForClass).
void MemoryManager::sweep(bool lastSweep, ClassDestroyStatsCallback classCountPtr)
{
    for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
        Managed *m = (*it).managed();
        if (!m || m->markBit())
            continue;
        // we need to call destroyObject on qobjectwrappers now, so that they can emit the destroyed
        // signal before we start sweeping the heap
        if (QObjectWrapper *qobjectWrapper = (*it).as<QObjectWrapper>())
            qobjectWrapper->destroyObject(lastSweep);
    }

    // remove objects from weak maps and sets
    // Marked maps stay on the intrusive list (with their unmarked keys
    // removed); unmarked maps are simply unlinked.
    Heap::MapObject *map = weakMaps;
    Heap::MapObject **lastMap = &weakMaps;
    while (map) {
        if (map->isMarked()) {
            map->removeUnmarkedKeys();
            *lastMap = map;
            lastMap = &map->nextWeakMap;
        }
        map = map->nextWeakMap;
    }

    // Same unlink-or-prune pass for weak sets.
    Heap::SetObject *set = weakSets;
    Heap::SetObject **lastSet = &weakSets;
    while (set) {
        if (set->isMarked()) {
            set->removeUnmarkedKeys();
            *lastSet = set;
            lastSet = &set->nextWeakSet;
        }
        set = set->nextWeakSet;
    }

    // onDestruction handlers may have accessed other QObject wrappers and reset their value, so ensure
    // that they are all set to undefined.
    for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
        Managed *m = (*it).managed();
        if (!m || m->markBit())
            continue;
        (*it) = Value::undefinedValue();
    }

    // Now it is time to free QV4::QObjectWrapper Value, we must check the Value's tag to make sure its object has been destroyed
    const int pendingCount = m_pendingFreedObjectWrapperValue.count();
    if (pendingCount) {
        QVector<Value *> remainingWeakQObjectWrappers;
        remainingWeakQObjectWrappers.reserve(pendingCount);
        for (int i = 0; i < pendingCount; ++i) {
            Value *v = m_pendingFreedObjectWrapperValue.at(i);
            if (v->isUndefined() || v->isEmpty())
                PersistentValueStorage::free(v);
            else
                remainingWeakQObjectWrappers.append(v);
        }
        m_pendingFreedObjectWrapperValue = remainingWeakQObjectWrappers;
    }

    if (MultiplyWrappedQObjectMap *multiplyWrappedQObjects = engine->m_multiplyWrappedQObjects) {
        for (MultiplyWrappedQObjectMap::Iterator it = multiplyWrappedQObjects->begin(); it != multiplyWrappedQObjects->end();) {
            if (it.value().isNullOrUndefined())
                it = multiplyWrappedQObjects->erase(it);
            else
                ++it;
        }
    }

    if (!lastSweep) {
        engine->identifierTable->sweep();
        blockAllocator.sweep(/*classCountPtr*/);
        hugeItemAllocator.sweep(classCountPtr);
        icAllocator.sweep(/*classCountPtr*/);
    }
}
|
2013-11-14 11:05:42 +00:00
|
|
|
|
2017-01-03 10:49:15 +00:00
|
|
|
bool MemoryManager::shouldRunGC() const
|
|
|
|
{
|
2018-09-11 19:54:56 +00:00
|
|
|
size_t total = blockAllocator.totalSlots() + icAllocator.totalSlots();
|
2017-02-16 09:52:48 +00:00
|
|
|
if (total > MinSlotsGCLimit && usedSlotsAfterLastFullSweep * GCOverallocation < total * 100)
|
2017-01-03 10:49:15 +00:00
|
|
|
return true;
|
|
|
|
return false;
|
2012-12-04 12:40:18 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 13:34:14 +00:00
|
|
|
// Dumps the free-list bins of \a b to the GC allocator-stats log and returns
// the total amount of free memory (in bytes) sitting in those bins. Passing a
// null \a title suppresses all qDebug output (used by runGC's aggressive-GC
// assertions, which only need the return value).
static size_t dumpBins(BlockAllocator *b, const char *title)
{
    const QLoggingCategory &stats = lcGcAllocatorStats();
    size_t totalSlotMem = 0;
    if (title)
        qDebug(stats) << "Slot map for" << title << "allocator:";
    for (uint i = 0; i < BlockAllocator::NumBins; ++i) {
        uint nEntries = 0;
        HeapItem *h = b->freeBins[i];
        while (h) {
            ++nEntries;
            totalSlotMem += h->freeData.availableSlots;
            h = h->freeData.next;
        }
        if (title)
            qDebug(stats) << " number of entries in slot" << i << ":" << nEntries;
    }
    // The last bin holds all oversized free items; dump its entries in detail.
    SDUMP() << " large slot map";
    HeapItem *h = b->freeBins[BlockAllocator::NumBins - 1];
    while (h) {
        SDUMP() << " " << Qt::hex << (quintptr(h)/32) << h->freeData.availableSlots;
        h = h->freeData.next;
    }

    if (title)
        qDebug(stats) << " total mem in bins" << totalSlotMem*Chunk::SlotSize;
    return totalSlotMem*Chunk::SlotSize;
}
|
2017-01-03 10:49:15 +00:00
|
|
|
|
2017-04-04 08:35:45 +00:00
|
|
|
void MemoryManager::runGC()
|
2012-12-04 12:40:18 +00:00
|
|
|
{
|
2017-01-03 11:06:25 +00:00
|
|
|
if (gcBlocked) {
|
2012-12-04 12:40:18 +00:00
|
|
|
// qDebug() << "Not running GC.";
|
2013-01-02 15:43:47 +00:00
|
|
|
return;
|
2012-12-04 12:40:18 +00:00
|
|
|
}
|
|
|
|
|
2017-01-03 11:06:25 +00:00
|
|
|
QScopedValueRollback<bool> gcBlocker(gcBlocked, true);
|
2017-02-15 14:40:27 +00:00
|
|
|
// qDebug() << "runGC";
|
2016-07-25 07:25:11 +00:00
|
|
|
|
2017-11-27 09:53:33 +00:00
|
|
|
if (gcStats) {
|
|
|
|
statistics.maxReservedMem = qMax(statistics.maxReservedMem, getAllocatedMem());
|
|
|
|
statistics.maxAllocatedMem = qMax(statistics.maxAllocatedMem, getUsedMem() + getLargeItemsMem());
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!gcCollectorStats) {
|
2014-03-25 08:46:43 +00:00
|
|
|
mark();
|
|
|
|
sweep();
|
|
|
|
} else {
|
2017-02-10 10:51:43 +00:00
|
|
|
bool triggeredByUnmanagedHeap = (unmanagedHeapSize > unmanagedHeapSizeGCLimit);
|
|
|
|
size_t oldUnmanagedSize = unmanagedHeapSize;
|
2017-11-27 09:53:33 +00:00
|
|
|
|
QML: Fix MSVC 2013/64bit warnings.
compiler\qv4ssa.cpp(687) : warning C4267: 'argument' : conversion from 'size_t' to 'int', possible loss of data
compiler\qv4ssa.cpp(950) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
compiler\qv4ssa.cpp(1117) : warning C4267: 'return' : conversion from 'size_t' to 'unsigned int', possible loss of data
compiler\qv4ssa.cpp(1120) : warning C4267: 'return' : conversion from 'size_t' to 'unsigned int', possible loss of data
compiler\qv4ssa.cpp(1148) : warning C4267: 'initializing' : conversion from 'size_t' to 'unsigned int', possible loss of data
compiler\qv4ssa.cpp(1266) : warning C4267: 'initializing' : conversion from 'size_t' to 'unsigned int', possible loss of data
compiler\qv4ssa.cpp(1622) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
compiler\qv4ssa.cpp(2246) : warning C4267: 'initializing' : conversion from 'size_t' to 'unsigned int', possible loss of data
compiler\qv4ssa.cpp(4289) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
compiler\qv4ssa.cpp(4351) : warning C4267: 'initializing' : conversion from 'size_t' to 'unsigned int', possible loss of data
jit\qv4regalloc.cpp(1383) : warning C4267: 'argument' : conversion from 'size_t' to 'int', possible loss of data
jit\qv4regalloc.cpp(1769) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
jit\qv4regalloc.cpp(1814) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
jsruntime\qv4mm.cpp(496) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
jsruntime\qv4mm.cpp(503) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
jsruntime\qv4mm.cpp(506) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
jsruntime\qv4regexp.cpp(60) : warning C4267: 'return' : conversion from 'size_t' to 'uint', possible loss of data
jsruntime\qv4typedarray.cpp(85) : warning C4309: '=' : truncation of constant value
Change-Id: I0b04e1a9d379c068fb3efe90a9db8b592061e448
Reviewed-by: Erik Verbruggen <erik.verbruggen@theqtcompany.com>
Reviewed-by: Simon Hausmann <simon.hausmann@digia.com>
2015-01-22 08:38:22 +00:00
|
|
|
const size_t totalMem = getAllocatedMem();
|
2017-02-10 10:51:43 +00:00
|
|
|
const size_t usedBefore = getUsedMem();
|
|
|
|
const size_t largeItemsBefore = getLargeItemsMem();
|
|
|
|
|
2017-11-27 09:53:33 +00:00
|
|
|
const QLoggingCategory &stats = lcGcAllocatorStats();
|
|
|
|
qDebug(stats) << "========== GC ==========";
|
2017-02-13 12:38:48 +00:00
|
|
|
#ifdef MM_STATS
|
2017-11-27 09:53:33 +00:00
|
|
|
qDebug(stats) << " Triggered by alloc request of" << lastAllocRequestedSlots << "slots.";
|
2018-01-19 09:49:56 +00:00
|
|
|
qDebug(stats) << " Allocations since last GC" << allocationCount;
|
2017-02-15 11:23:20 +00:00
|
|
|
allocationCount = 0;
|
2017-02-10 10:51:43 +00:00
|
|
|
#endif
|
2017-05-16 10:42:42 +00:00
|
|
|
size_t oldChunks = blockAllocator.chunks.size();
|
2017-11-27 09:53:33 +00:00
|
|
|
qDebug(stats) << "Allocated" << totalMem << "bytes in" << oldChunks << "chunks";
|
|
|
|
qDebug(stats) << "Fragmented memory before GC" << (totalMem - usedBefore);
|
2019-03-26 13:34:14 +00:00
|
|
|
dumpBins(&blockAllocator, "Block");
|
|
|
|
dumpBins(&icAllocator, "InternalClass");
|
2014-03-25 08:46:43 +00:00
|
|
|
|
2016-08-02 11:25:35 +00:00
|
|
|
QElapsedTimer t;
|
2014-03-25 08:46:43 +00:00
|
|
|
t.start();
|
|
|
|
mark();
|
2017-02-16 09:25:52 +00:00
|
|
|
qint64 markTime = t.nsecsElapsed()/1000;
|
|
|
|
t.restart();
|
2017-02-14 13:03:56 +00:00
|
|
|
sweep(false, increaseFreedCountForClass);
|
QML: Fix MSVC 2013/64bit warnings.
compiler\qv4ssa.cpp(687) : warning C4267: 'argument' : conversion from 'size_t' to 'int', possible loss of data
compiler\qv4ssa.cpp(950) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
compiler\qv4ssa.cpp(1117) : warning C4267: 'return' : conversion from 'size_t' to 'unsigned int', possible loss of data
compiler\qv4ssa.cpp(1120) : warning C4267: 'return' : conversion from 'size_t' to 'unsigned int', possible loss of data
compiler\qv4ssa.cpp(1148) : warning C4267: 'initializing' : conversion from 'size_t' to 'unsigned int', possible loss of data
compiler\qv4ssa.cpp(1266) : warning C4267: 'initializing' : conversion from 'size_t' to 'unsigned int', possible loss of data
compiler\qv4ssa.cpp(1622) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
compiler\qv4ssa.cpp(2246) : warning C4267: 'initializing' : conversion from 'size_t' to 'unsigned int', possible loss of data
compiler\qv4ssa.cpp(4289) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
compiler\qv4ssa.cpp(4351) : warning C4267: 'initializing' : conversion from 'size_t' to 'unsigned int', possible loss of data
jit\qv4regalloc.cpp(1383) : warning C4267: 'argument' : conversion from 'size_t' to 'int', possible loss of data
jit\qv4regalloc.cpp(1769) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
jit\qv4regalloc.cpp(1814) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
jsruntime\qv4mm.cpp(496) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
jsruntime\qv4mm.cpp(503) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
jsruntime\qv4mm.cpp(506) : warning C4267: 'initializing' : conversion from 'size_t' to 'int', possible loss of data
jsruntime\qv4regexp.cpp(60) : warning C4267: 'return' : conversion from 'size_t' to 'uint', possible loss of data
jsruntime\qv4typedarray.cpp(85) : warning C4309: '=' : truncation of constant value
Change-Id: I0b04e1a9d379c068fb3efe90a9db8b592061e448
Reviewed-by: Erik Verbruggen <erik.verbruggen@theqtcompany.com>
Reviewed-by: Simon Hausmann <simon.hausmann@digia.com>
2015-01-22 08:38:22 +00:00
|
|
|
const size_t usedAfter = getUsedMem();
|
2015-09-17 13:29:52 +00:00
|
|
|
const size_t largeItemsAfter = getLargeItemsMem();
|
2017-02-16 09:25:52 +00:00
|
|
|
qint64 sweepTime = t.nsecsElapsed()/1000;
|
2014-03-25 08:46:43 +00:00
|
|
|
|
2017-02-10 10:51:43 +00:00
|
|
|
if (triggeredByUnmanagedHeap) {
|
2017-11-27 09:53:33 +00:00
|
|
|
qDebug(stats) << "triggered by unmanaged heap:";
|
|
|
|
qDebug(stats) << " old unmanaged heap size:" << oldUnmanagedSize;
|
|
|
|
qDebug(stats) << " new unmanaged heap:" << unmanagedHeapSize;
|
|
|
|
qDebug(stats) << " unmanaged heap limit:" << unmanagedHeapSizeGCLimit;
|
2017-02-10 10:51:43 +00:00
|
|
|
}
|
2019-03-26 13:34:14 +00:00
|
|
|
size_t memInBins = dumpBins(&blockAllocator, "Block")
|
|
|
|
+ dumpBins(&icAllocator, "InternalClasss");
|
2018-01-19 09:49:56 +00:00
|
|
|
qDebug(stats) << "Marked object in" << markTime << "us.";
|
|
|
|
qDebug(stats) << " " << markStackSize << "objects marked";
|
|
|
|
qDebug(stats) << "Sweeped object in" << sweepTime << "us.";
|
2017-02-14 13:03:56 +00:00
|
|
|
|
|
|
|
// sort our object types by number of freed instances
|
|
|
|
MMStatsHash freedObjectStats;
|
|
|
|
std::swap(freedObjectStats, *freedObjectStatsGlobal());
|
|
|
|
typedef std::pair<const char*, int> ObjectStatInfo;
|
|
|
|
std::vector<ObjectStatInfo> freedObjectsSorted;
|
|
|
|
freedObjectsSorted.reserve(freedObjectStats.count());
|
|
|
|
for (auto it = freedObjectStats.constBegin(); it != freedObjectStats.constEnd(); ++it) {
|
|
|
|
freedObjectsSorted.push_back(std::make_pair(it.key(), it.value()));
|
|
|
|
}
|
|
|
|
std::sort(freedObjectsSorted.begin(), freedObjectsSorted.end(), [](const ObjectStatInfo &a, const ObjectStatInfo &b) {
|
|
|
|
return a.second > b.second && strcmp(a.first, b.first) < 0;
|
|
|
|
});
|
|
|
|
|
2017-11-27 09:53:33 +00:00
|
|
|
qDebug(stats) << "Used memory before GC:" << usedBefore;
|
|
|
|
qDebug(stats) << "Used memory after GC:" << usedAfter;
|
2018-01-19 09:49:56 +00:00
|
|
|
qDebug(stats) << "Freed up bytes :" << (usedBefore - usedAfter);
|
|
|
|
qDebug(stats) << "Freed up chunks :" << (oldChunks - blockAllocator.chunks.size());
|
2019-03-26 13:34:14 +00:00
|
|
|
size_t lost = blockAllocator.allocatedMem() + icAllocator.allocatedMem()
|
|
|
|
- memInBins - usedAfter;
|
2017-02-10 10:51:43 +00:00
|
|
|
if (lost)
|
2017-11-27 09:53:33 +00:00
|
|
|
qDebug(stats) << "!!!!!!!!!!!!!!!!!!!!! LOST MEM:" << lost << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!";
|
2017-02-10 10:51:43 +00:00
|
|
|
if (largeItemsBefore || largeItemsAfter) {
|
2017-11-27 09:53:33 +00:00
|
|
|
qDebug(stats) << "Large item memory before GC:" << largeItemsBefore;
|
|
|
|
qDebug(stats) << "Large item memory after GC:" << largeItemsAfter;
|
|
|
|
qDebug(stats) << "Large item memory freed up:" << (largeItemsBefore - largeItemsAfter);
|
2017-02-10 10:51:43 +00:00
|
|
|
}
|
2017-02-14 13:03:56 +00:00
|
|
|
|
|
|
|
for (auto it = freedObjectsSorted.cbegin(); it != freedObjectsSorted.cend(); ++it) {
|
2018-01-19 09:49:56 +00:00
|
|
|
qDebug(stats).noquote() << QString::fromLatin1("Freed JS type: %1 (%2 instances)").arg(QString::fromLatin1(it->first), QString::number(it->second));
|
2017-02-14 13:03:56 +00:00
|
|
|
}
|
|
|
|
|
2017-11-27 09:53:33 +00:00
|
|
|
qDebug(stats) << "======== End GC ========";
|
2014-03-25 08:46:43 +00:00
|
|
|
}
|
2017-02-10 10:51:43 +00:00
|
|
|
|
2017-11-27 09:53:33 +00:00
|
|
|
if (gcStats)
|
|
|
|
statistics.maxUsedMem = qMax(statistics.maxUsedMem, getUsedMem() + getLargeItemsMem());
|
|
|
|
|
2017-02-10 10:51:43 +00:00
|
|
|
if (aggressiveGC) {
|
|
|
|
// ensure we don't 'loose' any memory
|
2018-05-30 13:47:37 +00:00
|
|
|
Q_ASSERT(blockAllocator.allocatedMem()
|
2019-03-26 13:34:14 +00:00
|
|
|
== blockAllocator.usedMem() + dumpBins(&blockAllocator, nullptr));
|
|
|
|
Q_ASSERT(icAllocator.allocatedMem()
|
|
|
|
== icAllocator.usedMem() + dumpBins(&icAllocator, nullptr));
|
2017-02-10 10:51:43 +00:00
|
|
|
}
|
2017-02-15 14:40:27 +00:00
|
|
|
|
2018-09-11 19:54:56 +00:00
|
|
|
usedSlotsAfterLastFullSweep = blockAllocator.usedSlotsAfterLastSweep + icAllocator.usedSlotsAfterLastSweep;
|
2017-02-16 09:52:48 +00:00
|
|
|
|
2017-04-04 08:35:45 +00:00
|
|
|
// reset all black bits
|
|
|
|
blockAllocator.resetBlackBits();
|
|
|
|
hugeItemAllocator.resetBlackBits();
|
2018-01-05 14:30:23 +00:00
|
|
|
icAllocator.resetBlackBits();
|
2012-12-04 12:40:18 +00:00
|
|
|
}
|
|
|
|
|
2014-06-12 12:33:05 +00:00
|
|
|
size_t MemoryManager::getUsedMem() const
|
2014-03-25 08:46:43 +00:00
|
|
|
{
|
2018-05-30 13:47:37 +00:00
|
|
|
return blockAllocator.usedMem() + icAllocator.usedMem();
|
2014-03-25 08:46:43 +00:00
|
|
|
}
|
|
|
|
|
2014-06-12 12:33:05 +00:00
|
|
|
size_t MemoryManager::getAllocatedMem() const
|
|
|
|
{
|
2018-05-30 13:47:37 +00:00
|
|
|
return blockAllocator.allocatedMem() + icAllocator.allocatedMem() + hugeItemAllocator.usedMem();
|
2014-06-12 12:33:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
size_t MemoryManager::getLargeItemsMem() const
|
|
|
|
{
|
2017-01-20 10:36:16 +00:00
|
|
|
return hugeItemAllocator.usedMem();
|
2014-06-12 12:33:05 +00:00
|
|
|
}
|
|
|
|
|
2018-08-26 13:07:50 +00:00
|
|
|
// Prepends \a map to the intrusive singly-linked list of weak maps that the
// sweep phase prunes.
void MemoryManager::registerWeakMap(Heap::MapObject *map)
{
    Heap::MapObject *previousHead = weakMaps;
    map->nextWeakMap = previousHead;
    weakMaps = map;
}
|
|
|
|
|
2018-08-26 15:50:44 +00:00
|
|
|
// Prepends \a set to the intrusive singly-linked list of weak sets that the
// sweep phase prunes.
void MemoryManager::registerWeakSet(Heap::SetObject *set)
{
    Heap::SetObject *previousHead = weakSets;
    set->nextWeakSet = previousHead;
    weakSets = set;
}
|
|
|
|
|
2012-12-04 12:40:18 +00:00
|
|
|
// Tears down the managed heap. The order is deliberate: persistent values are
// deleted first, statistics are dumped, then a final sweep runs (so QObject
// wrappers emit destroyed(); the allocator sweep itself is skipped for
// lastSweep — see sweep()), then all chunk memory is returned via freeAll(),
// and only afterwards the weak value storage and the chunk allocator go away.
MemoryManager::~MemoryManager()
{
    delete m_persistentValues;

    dumpStats();

    sweep(/*lastSweep*/true);
    blockAllocator.freeAll();
    hugeItemAllocator.freeAll();
    icAllocator.freeAll();

    delete m_weakValues;
#ifdef V4_USE_VALGRIND
    VALGRIND_DESTROY_MEMPOOL(this);
#endif
    delete chunkAllocator;
}
|
|
|
|
|
2015-08-28 11:48:52 +00:00
|
|
|
|
2012-12-04 12:40:18 +00:00
|
|
|
// Logs the lifetime allocation statistics (peaks and the per-bin allocation
// histogram) to the GC stats category. No-op unless gcStats was enabled when
// the memory manager was constructed.
void MemoryManager::dumpStats() const
{
    if (!gcStats)
        return;

    const QLoggingCategory &stats = lcGcStats();
    qDebug(stats) << "Qml GC memory allocation statistics:";
    qDebug(stats) << "Total memory allocated:" << statistics.maxReservedMem;
    qDebug(stats) << "Max memory used before a GC run:" << statistics.maxAllocatedMem;
    qDebug(stats) << "Max memory used after a GC run:" << statistics.maxUsedMem;
    qDebug(stats) << "Requests for different item sizes:";
    // Bins 1..NumBins-2 hold fixed sizes; the last bin aggregates all larger requests.
    for (int i = 1; i < BlockAllocator::NumBins - 1; ++i)
        qDebug(stats) << " <" << (i << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[i];
    qDebug(stats) << " >=" << ((BlockAllocator::NumBins - 1) << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[BlockAllocator::NumBins - 1];
}
|
2012-12-13 22:46:51 +00:00
|
|
|
|
2017-03-09 09:36:16 +00:00
|
|
|
// Treats every live slot on the engine's JS value stack as a GC root and
// marks the managed object it refers to (non-managed values are skipped).
void MemoryManager::collectFromJSStack(MarkStack *markStack) const
{
    for (Value *slot = engine->jsStackBase; slot < engine->jsStackTop; ++slot) {
        Managed *managed = slot->managed();
        if (!managed)
            continue;
        Q_ASSERT(managed->inUse());
        // Skip pointers to already freed objects, they are bogus as well
        managed->mark(markStack);
    }
}
|
2016-12-22 14:20:05 +00:00
|
|
|
|
|
|
|
} // namespace QV4
|
|
|
|
|
2013-06-24 13:28:00 +00:00
|
|
|
QT_END_NAMESPACE
|