2012-12-04 12:40:18 +00:00
|
|
|
/****************************************************************************
|
|
|
|
**
|
2013-06-24 11:50:51 +00:00
|
|
|
** Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
|
2012-12-04 12:40:18 +00:00
|
|
|
** Contact: http://www.qt-project.org/legal
|
|
|
|
**
|
2013-06-24 11:50:51 +00:00
|
|
|
** This file is part of the QtQml module of the Qt Toolkit.
|
2012-12-04 12:40:18 +00:00
|
|
|
**
|
2013-03-29 03:00:03 +00:00
|
|
|
** $QT_BEGIN_LICENSE:LGPL$
|
2012-12-04 12:40:18 +00:00
|
|
|
** Commercial License Usage
|
|
|
|
** Licensees holding valid commercial Qt licenses may use this file in
|
|
|
|
** accordance with the commercial license agreement provided with the
|
|
|
|
** Software or, alternatively, in accordance with the terms contained in
|
|
|
|
** a written agreement between you and Digia. For licensing terms and
|
|
|
|
** conditions see http://qt.digia.com/licensing. For further information
|
|
|
|
** use the contact form at http://qt.digia.com/contact-us.
|
|
|
|
**
|
|
|
|
** GNU Lesser General Public License Usage
|
|
|
|
** Alternatively, this file may be used under the terms of the GNU Lesser
|
|
|
|
** General Public License version 2.1 as published by the Free Software
|
|
|
|
** Foundation and appearing in the file LICENSE.LGPL included in the
|
|
|
|
** packaging of this file. Please review the following information to
|
|
|
|
** ensure the GNU Lesser General Public License version 2.1 requirements
|
|
|
|
** will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
|
|
|
|
**
|
|
|
|
** In addition, as a special exception, Digia gives you certain additional
|
|
|
|
** rights. These rights are described in the Digia Qt LGPL Exception
|
|
|
|
** version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
|
|
|
|
**
|
2013-03-29 03:00:03 +00:00
|
|
|
** GNU General Public License Usage
|
|
|
|
** Alternatively, this file may be used under the terms of the GNU
|
|
|
|
** General Public License version 3.0 as published by the Free Software
|
|
|
|
** Foundation and appearing in the file LICENSE.GPL included in the
|
|
|
|
** packaging of this file. Please review the following information to
|
|
|
|
** ensure the GNU General Public License version 3.0 requirements will be
|
|
|
|
** met: http://www.gnu.org/copyleft/gpl.html.
|
|
|
|
**
|
|
|
|
**
|
|
|
|
** $QT_END_LICENSE$
|
|
|
|
**
|
2012-12-04 12:40:18 +00:00
|
|
|
****************************************************************************/
|
|
|
|
|
2013-04-15 09:50:16 +00:00
|
|
|
#include "qv4engine_p.h"
|
|
|
|
#include "qv4object_p.h"
|
|
|
|
#include "qv4objectproto_p.h"
|
|
|
|
#include "qv4mm_p.h"
|
2013-06-07 09:21:18 +00:00
|
|
|
#include "qv4qobjectwrapper_p.h"
|
2013-05-24 11:19:15 +00:00
|
|
|
#include <qqmlengine.h>
|
2012-12-18 14:03:26 +00:00
|
|
|
#include "PageAllocation.h"
|
|
|
|
#include "StdLibExtras.h"
|
2012-12-04 12:40:18 +00:00
|
|
|
|
|
|
|
#include <QTime>
|
|
|
|
#include <QVector>
|
2013-01-02 15:43:47 +00:00
|
|
|
#include <QVector>
|
2012-12-13 22:46:51 +00:00
|
|
|
#include <QMap>
|
2012-12-04 12:40:18 +00:00
|
|
|
|
|
|
|
#include <iostream>
|
2012-12-10 08:56:30 +00:00
|
|
|
#include <cstdlib>
|
2013-09-12 09:06:59 +00:00
|
|
|
#include <algorithm>
|
2013-02-08 08:30:40 +00:00
|
|
|
#include "qv4alloca_p.h"
|
2012-12-04 12:40:18 +00:00
|
|
|
|
2013-03-12 18:49:13 +00:00
|
|
|
#ifdef V4_USE_VALGRIND
|
|
|
|
#include <valgrind/valgrind.h>
|
|
|
|
#include <valgrind/memcheck.h>
|
|
|
|
#endif
|
|
|
|
|
2013-06-27 19:51:22 +00:00
|
|
|
#if OS(QNX)
|
|
|
|
#include <sys/storage.h> // __tls()
|
|
|
|
#endif
|
|
|
|
|
2013-06-24 13:28:00 +00:00
|
|
|
QT_BEGIN_NAMESPACE
|
|
|
|
|
2013-04-19 11:03:42 +00:00
|
|
|
using namespace QV4;
|
2012-12-18 14:03:26 +00:00
|
|
|
using namespace WTF;
|
2012-12-04 12:40:18 +00:00
|
|
|
|
2013-03-14 20:56:42 +00:00
|
|
|
static const std::size_t CHUNK_SIZE = 1024*32;
|
2012-12-04 12:40:18 +00:00
|
|
|
|
2013-08-29 14:21:40 +00:00
|
|
|
#if OS(WINCE)
// WinCE has no API to query the stack base; it is discovered at runtime by
// getStackBase() below. This global exists as an (currently unused) override
// hook — see the disabled code path in MemoryManager's constructor.
void* g_stackBase = 0;

// Returns true if 'page' is mapped and writable. Used by getStackBase() to
// find the boundary of the current thread's stack by probing page protection.
inline bool isPageWritable(void* page)
{
    MEMORY_BASIC_INFORMATION memoryInformation;
    DWORD result = VirtualQuery(page, &memoryInformation, sizeof(memoryInformation));

    // return false on error, including ptr outside memory
    if (result != sizeof(memoryInformation))
        return false;

    // ignore modifier flags; any of the writable protection classes counts
    DWORD protect = memoryInformation.Protect & ~(PAGE_GUARD | PAGE_NOCACHE);
    return protect == PAGE_READWRITE
        || protect == PAGE_WRITECOPY
        || protect == PAGE_EXECUTE_READWRITE
        || protect == PAGE_EXECUTE_WRITECOPY;
}
|
|
|
|
|
|
|
|
// WinCE fallback: determines the top of the current thread's stack by walking
// page-by-page from this stack frame in the direction the stack grows until
// the first non-writable page. 'previousFrame' must be the address of a local
// in the caller's frame; comparing it with a local here tells us the growth
// direction.
static void* getStackBase(void* previousFrame)
{
    // find the address of this stack frame by taking the address of a local variable
    bool isGrowingDownward;
    void* thisFrame = (void*)(&isGrowingDownward);

    isGrowingDownward = previousFrame < &thisFrame;
    // page size is process-wide constant; cache it across calls
    static DWORD pageSize = 0;
    if (!pageSize) {
        SYSTEM_INFO systemInfo;
        GetSystemInfo(&systemInfo);
        pageSize = systemInfo.dwPageSize;
    }

    // scan all of memory starting from this frame, and return the last writeable page found
    register char* currentPage = (char*)((DWORD)thisFrame & ~(pageSize - 1));
    if (isGrowingDownward) {
        while (currentPage > 0) {
            // check for underflow
            if (currentPage >= (char*)pageSize)
                currentPage -= pageSize;
            else
                currentPage = 0;
            if (!isPageWritable(currentPage))
                return currentPage + pageSize;
        }
        return 0;
    } else {
        while (true) {
            // guaranteed to complete because isPageWritable returns false at end of memory
            currentPage += pageSize;
            if (!isPageWritable(currentPage))
                return currentPage;
        }
    }
}
#endif
|
|
|
|
|
2012-12-04 12:40:18 +00:00
|
|
|
// Private state of the memory manager: configuration flags, per-size-class
// free lists for small objects, the list of page-allocated heap chunks, and
// the linked list of malloc'ed large items.
struct MemoryManager::Data
{
    bool enableGC;      // GC may run at all (see setEnableGC)
    bool gcBlocked;     // GC temporarily suppressed (see setGCBlocked)
    bool scribble;      // QV4_MM_SCRIBBLE: overwrite freed memory for debugging
    bool aggressiveGC;  // QV4_MM_AGGRESSIVE_GC: run a full GC on every alloc()
    bool exactGC;       // when true, skip the conservative C++ stack scan
    ExecutionEngine *engine;
    quintptr *stackTop; // top of the native stack, set in the constructor

    // Objects up to MaxItemSize bytes are bucketed by size in 16-byte steps;
    // larger objects go through malloc (see LargeItem below).
    enum { MaxItemSize = 512 };
    Managed *smallItems[MaxItemSize/16];     // head of the free list per bucket
    uint nChunks[MaxItemSize/16];            // chunks allocated so far per bucket
    uint availableItems[MaxItemSize/16];     // total slots per bucket
    uint allocCount[MaxItemSize/16];         // allocations per bucket since last GC
    int totalItems;                          // slots across all buckets
    int totalAlloc;                          // allocations since last GC
    // One page-allocated region holding fixed-size slots of chunkSize bytes.
    struct Chunk {
        PageAllocation memory;
        int chunkSize;
    };

    // kept sorted by base address (see operator< and std::sort in alloc())
    QVector<Chunk> heapChunks;


    // Header for a malloc'ed object >= MaxItemSize; the Managed payload
    // starts at &data, directly after the 'next' link.
    struct LargeItem {
        LargeItem *next;
        void *data;

        Managed *managed() {
            return reinterpret_cast<Managed *>(&data);
        }
    };

    LargeItem *largeItems;

    // statistics:
#ifdef DETAILED_MM_STATS
    QVector<unsigned> allocSizeCounters;
#endif // DETAILED_MM_STATS

    // Debug behavior is driven by environment variables read once here.
    Data(bool enableGC)
        : enableGC(enableGC)
        , gcBlocked(false)
        , engine(0)
        , stackTop(0)
        , totalItems(0)
        , totalAlloc(0)
        , largeItems(0)
    {
        memset(smallItems, 0, sizeof(smallItems));
        memset(nChunks, 0, sizeof(nChunks));
        memset(availableItems, 0, sizeof(availableItems));
        memset(allocCount, 0, sizeof(allocCount));
        scribble = !qgetenv("QV4_MM_SCRIBBLE").isEmpty();
        aggressiveGC = !qgetenv("QV4_MM_AGGRESSIVE_GC").isEmpty();
        // exact GC is the default; QV4_MM_CONSERVATIVE_GC switches it off
        exactGC = qgetenv("QV4_MM_CONSERVATIVE_GC").isEmpty();
    }

    ~Data()
    {
        // return all page-allocated chunks to the OS; large items are
        // released by the final sweep in ~MemoryManager
        for (QVector<Chunk>::iterator i = heapChunks.begin(), ei = heapChunks.end(); i != ei; ++i)
            i->memory.deallocate();
    }
};
|
|
|
|
|
2013-02-11 15:41:42 +00:00
|
|
|
// Debug aid: when QV4_MM_SCRIBBLE is set, overwrite the payload of a freed
// Managed object (everything after the header) with the byte 'c' so that
// use-after-free reads become obvious.
// Wrapped in do { } while (0) so the macro expands to a single statement:
// the previous bare 'if' form would capture a following 'else' when the
// macro was used inside an un-braced if/else at the call site.
#define SCRIBBLE(obj, c, size) \
    do { \
        if (m_d->scribble) \
            ::memset((void *)(obj + 1), c, size - sizeof(Managed)); \
    } while (0)
|
|
|
|
|
|
|
|
|
2013-04-19 11:03:42 +00:00
|
|
|
namespace QV4 {
|
2013-01-03 13:00:59 +00:00
|
|
|
|
|
|
|
bool operator<(const MemoryManager::Data::Chunk &a, const MemoryManager::Data::Chunk &b)
|
2013-01-02 15:43:47 +00:00
|
|
|
{
|
|
|
|
return a.memory.base() < b.memory.base();
|
|
|
|
}
|
|
|
|
|
2013-04-19 11:03:42 +00:00
|
|
|
} // namespace QV4
|
2013-01-02 15:43:47 +00:00
|
|
|
|
2012-12-04 12:40:18 +00:00
|
|
|
// Sets up the manager with GC enabled and records the top of the current
// thread's native stack, which the conservative collector scans up to.
// The stack-top discovery is platform specific, hence the #if ladder.
MemoryManager::MemoryManager()
    : m_d(new Data(true))
    , m_persistentValues(0)
    , m_weakValues(0)
{
    setEnableGC(true);
#ifdef V4_USE_VALGRIND
    // register this manager as a valgrind memory pool so alloc/free of GC
    // slots is tracked precisely
    VALGRIND_CREATE_MEMPOOL(this, 0, true);
#endif

#if OS(QNX)
    // TLS is at the top of each thread's stack,
    // so the stack base for thread is the result of __tls()
    m_d->stackTop = reinterpret_cast<quintptr *>(
          (((quintptr)__tls() + __PAGESIZE - 1) & ~(__PAGESIZE - 1)));
#elif USE(PTHREADS)
# if OS(DARWIN)
    void *st = pthread_get_stackaddr_np(pthread_self());
    m_d->stackTop = static_cast<quintptr *>(st);
# else
    // generic pthreads: stack top = bottom + size from the thread attributes
    void* stackBottom = 0;
    pthread_attr_t attr;
    pthread_getattr_np(pthread_self(), &attr);
    size_t stackSize = 0;
    pthread_attr_getstack(&attr, &stackBottom, &stackSize);
    pthread_attr_destroy(&attr);

    m_d->stackTop = static_cast<quintptr *>(stackBottom) + stackSize/sizeof(quintptr);
# endif
#elif OS(WINCE)
    if (false && g_stackBase) {
        // This code path is disabled as we have no way of initializing it yet
        m_d->stackTop = static_cast<quintptr *>(g_stackBase);
    } else {
        // probe page protection upwards from a local to find the stack base
        int dummy;
        m_d->stackTop = static_cast<quintptr *>(getStackBase(&dummy));
    }
#elif OS(WINDOWS)
    // the Thread Information Block records the stack base directly
    PNT_TIB tib = (PNT_TIB)NtCurrentTeb();
    m_d->stackTop = static_cast<quintptr*>(tib->StackBase);
#else
#  error "Unsupported platform: no way to get the top-of-stack."
#endif

}
|
|
|
|
|
2013-01-02 21:09:31 +00:00
|
|
|
// Allocates 'size' bytes for a garbage-collected Managed object. 'size' must
// be a non-zero multiple of 16. Sizes below Data::MaxItemSize are served from
// per-size free lists backed by page-allocated chunks (growing a bucket's
// chunk size exponentially, capped); larger sizes are malloc'ed and tracked
// on the largeItems list. May trigger a GC when a bucket runs hot.
Managed *MemoryManager::alloc(std::size_t size)
{
    if (m_d->aggressiveGC)
        runGC();
#ifdef DETAILED_MM_STATS
    willAllocate(size);
#endif // DETAILED_MM_STATS

    Q_ASSERT(size >= 16);
    Q_ASSERT(size % 16 == 0);

    // bucket index: one free list per 16 bytes of object size
    size_t pos = size >> 4;

    // doesn't fit into a small bucket
    if (size >= MemoryManager::Data::MaxItemSize) {
        // we use malloc for this
        // NOTE(review): the malloc result is not checked for 0 here — a
        // failed allocation would crash on the next line; verify callers
        // cannot request pathological sizes.
        MemoryManager::Data::LargeItem *item = static_cast<MemoryManager::Data::LargeItem *>(malloc(size + sizeof(MemoryManager::Data::LargeItem)));
        item->next = m_d->largeItems;
        m_d->largeItems = item;
        return item->managed();
    }

    Managed *m = m_d->smallItems[pos];
    if (m)
        goto found;

    // try to free up space, otherwise allocate
    if (m_d->allocCount[pos] > (m_d->availableItems[pos] >> 1) && m_d->totalAlloc > (m_d->totalItems >> 1) && !m_d->aggressiveGC) {
        runGC();
        m = m_d->smallItems[pos];
        if (m)
            goto found;
    }

    // no free item available, allocate a new chunk
    {
        // allocate larger chunks at a time to avoid excessive GC, but cap at 64M chunks
        uint shift = ++m_d->nChunks[pos];
        if (shift > 10)
            shift = 10;
        std::size_t allocSize = CHUNK_SIZE*(size_t(1) << shift);
        allocSize = roundUpToMultipleOf(WTF::pageSize(), allocSize);
        Data::Chunk allocation;
        allocation.memory = PageAllocation::allocate(allocSize, OSAllocator::JSGCHeapPages);
        allocation.chunkSize = size;
        m_d->heapChunks.append(allocation);
        // keep heapChunks sorted by base address — collectFromStack()'s
        // binary search over chunk boundaries depends on this
        std::sort(m_d->heapChunks.begin(), m_d->heapChunks.end());
        char *chunk = (char *)allocation.memory.base();
        char *end = chunk + allocation.memory.size() - size;
#ifndef QT_NO_DEBUG
        memset(chunk, 0, allocation.memory.size());
#endif
        // thread every slot of the fresh chunk onto the bucket's free list
        Managed **last = &m_d->smallItems[pos];
        while (chunk <= end) {
            Managed *o = reinterpret_cast<Managed *>(chunk);
            o->_data = 0;
            *last = o;
            last = o->nextFreeRef();
            chunk += size;
        }
        *last = 0;
        m = m_d->smallItems[pos];
        m_d->availableItems[pos] += allocation.memory.size()/size - 1;
        m_d->totalItems += allocation.memory.size()/size - 1;
#ifdef V4_USE_VALGRIND
        VALGRIND_MAKE_MEM_NOACCESS(allocation.memory, allocation.chunkSize);
#endif
    }

  found:
#ifdef V4_USE_VALGRIND
    VALGRIND_MEMPOOL_ALLOC(this, m, size);
#endif

    ++m_d->allocCount[pos];
    ++m_d->totalAlloc;
    // pop the returned item off the bucket's free list
    m_d->smallItems[pos] = m->nextFree();
    return m;
}
|
|
|
|
|
2013-01-28 15:46:09 +00:00
|
|
|
// Mark phase of the collector. Roots are: the engine's internal objects,
// all referenced persistent values, everything on the JS value stack, and —
// unless exactGC is set — any plausible heap pointer found on the native
// stack or in callee-saved registers (spilled below). Marked objects are
// pushed onto the engine's JS stack, which then serves as the mark stack
// drained by the loop at the end.
void MemoryManager::mark()
{
    // everything pushed above this point during marking is mark-stack content
    SafeValue *markBase = m_d->engine->jsStackTop;

    m_d->engine->markObjects();

    // mark live persistent values; entries with refcount 0 are unlinked and
    // deleted on the way
    PersistentValuePrivate *persistent = m_persistentValues;
    while (persistent) {
        if (!persistent->refcount) {
            PersistentValuePrivate *n = persistent->next;
            persistent->removeFromList();
            delete persistent;
            persistent = n;
            continue;
        }
        persistent->value.mark(m_d->engine);
        persistent = persistent->next;
    }

    collectFromJSStack();

    if (!m_d->exactGC) {
        // push all caller saved registers to the stack, so we can find the objects living in these registers
#if COMPILER(MSVC) && !OS(WINRT) // WinRT must use exact GC
# if CPU(X86_64)
        HANDLE thread = GetCurrentThread();
        WOW64_CONTEXT ctxt;
        /*bool success =*/ Wow64GetThreadContext(thread, &ctxt);
# elif CPU(X86)
        HANDLE thread = GetCurrentThread();
        CONTEXT ctxt;
        /*bool success =*/ GetThreadContext(thread, &ctxt);
# endif // CPU
#elif COMPILER(CLANG) || COMPILER(GCC)
# if CPU(X86_64)
        // spill the callee-saved registers into a local array so the stack
        // scan below can see any heap pointers held in them
        quintptr regs[5];
        asm(
            "mov %%rbp, %0\n"
            "mov %%r12, %1\n"
            "mov %%r13, %2\n"
            "mov %%r14, %3\n"
            "mov %%r15, %4\n"
            : "=m" (regs[0]), "=m" (regs[1]), "=m" (regs[2]), "=m" (regs[3]), "=m" (regs[4])
            :
            :
        );
# endif // CPU
#endif // COMPILER

        collectFromStack();
    }

    // Preserve QObject ownership rules within JavaScript: A parent with c++ ownership
    // keeps all of its children alive in JavaScript.

    // Do this _after_ collectFromStack to ensure that processing the weak
    // managed objects in the loop down there doesn't make then end up as leftovers
    // on the stack and thus always get collected.
    for (PersistentValuePrivate *weak = m_weakValues; weak; weak = weak->next) {
        if (!weak->refcount)
            continue;
        Returned<QObjectWrapper> *qobjectWrapper = weak->value.as<QObjectWrapper>();
        if (!qobjectWrapper)
            continue;
        QObject *qobject = qobjectWrapper->getPointer()->object();
        if (!qobject)
            continue;
        bool keepAlive = QQmlData::keepAliveDuringGarbageCollection(qobject);

        if (!keepAlive) {
            // walk up to the root of the QObject parent chain and let its
            // keep-alive status decide
            if (QObject *parent = qobject->parent()) {
                while (parent->parent())
                    parent = parent->parent();

                keepAlive = QQmlData::keepAliveDuringGarbageCollection(parent);
            }
        }

        if (keepAlive)
            qobjectWrapper->getPointer()->mark(m_d->engine);
    }

    // now that we marked all roots, start marking recursively and popping from the mark stack
    while (m_d->engine->jsStackTop > markBase) {
        Managed *m = m_d->engine->popForGC();
        Q_ASSERT (m->vtbl->markObjects);
        m->vtbl->markObjects(m, m_d->engine);
    }
}
|
|
|
|
|
2013-10-15 20:27:10 +00:00
|
|
|
// Sweep phase: releases everything the mark phase did not reach. Dead weak
// values are reset to undefined and unlinked, unmarked entries are removed
// from the engine's multiply-wrapped-QObject map, each small-object chunk is
// swept (see the per-chunk overload), and unmarked large items are free()d.
// GCDeletable entries collected along the way are deleted last, with
// 'lastSweep' telling them whether this is the final engine-teardown sweep.
void MemoryManager::sweep(bool lastSweep)
{
    PersistentValuePrivate *weak = m_weakValues;
    while (weak) {
        if (!weak->refcount) {
            PersistentValuePrivate *n = weak->next;
            weak->removeFromList();
            delete weak;
            weak = n;
            continue;
        }
        if (Managed *m = weak->value.asManaged()) {
            if (!m->markBit) {
                // the referenced object did not survive — neutralize the
                // weak value but keep the private alive for its owner
                weak->value = Primitive::undefinedValue();
                PersistentValuePrivate *n = weak->next;
                weak->removeFromList();
                weak = n;
                continue;
            }
        }
        weak = weak->next;
    }

    if (MultiplyWrappedQObjectMap *multiplyWrappedQObjects = m_d->engine->m_multiplyWrappedQObjects) {
        for (MultiplyWrappedQObjectMap::Iterator it = multiplyWrappedQObjects->begin(); it != multiplyWrappedQObjects->end();) {
            if (!it.value()->markBit)
                it = multiplyWrappedQObjects->erase(it);
            else
                ++it;
        }
    }

    // chain of objects that requested deferred deletion during destroy()
    GCDeletable *deletable = 0;
    GCDeletable **firstDeletable = &deletable;

    for (QVector<Data::Chunk>::iterator i = m_d->heapChunks.begin(), ei = m_d->heapChunks.end(); i != ei; ++i)
        sweep(reinterpret_cast<char*>(i->memory.base()), i->memory.size(), i->chunkSize, &deletable);

    // unlink and free unmarked large (malloc'ed) items; survivors get their
    // mark bit cleared for the next cycle
    Data::LargeItem *i = m_d->largeItems;
    Data::LargeItem **last = &m_d->largeItems;
    while (i) {
        Managed *m = i->managed();
        Q_ASSERT(m->inUse);
        if (m->markBit) {
            m->markBit = 0;
            last = &i->next;
            i = i->next;
            continue;
        }

        *last = i->next;
        free(i);
        i = *last;
    }

    deletable = *firstDeletable;
    while (deletable) {
        GCDeletable *next = deletable->next;
        deletable->lastCall = lastSweep;
        delete deletable;
        deletable = next;
    }
}
|
|
|
|
|
2013-10-15 20:27:10 +00:00
|
|
|
// Sweeps one heap chunk of fixed-size slots ('size' bytes each): clears the
// mark bit on surviving objects; for unmarked in-use objects, collects any
// deletables, runs the vtable destroy hook, and threads the slot back onto
// the front of the bucket's free list.
void MemoryManager::sweep(char *chunkStart, std::size_t chunkSize, size_t size, GCDeletable **deletable)
{
    // qDebug("chunkStart @ %p, size=%x, pos=%x (%x)", chunkStart, size, size>>4, m_d->smallItems[size >> 4]);
    Managed **f = &m_d->smallItems[size >> 4];

#ifdef V4_USE_VALGRIND
    // the free-list walk intentionally touches "freed" pool memory; silence
    // valgrind while doing so and re-enable around destroy() calls
    VALGRIND_DISABLE_ERROR_REPORTING;
#endif
    for (char *chunk = chunkStart, *chunkEnd = chunk + chunkSize - size; chunk <= chunkEnd; chunk += size) {
        Managed *m = reinterpret_cast<Managed *>(chunk);
        // qDebug("chunk @ %p, size = %lu, in use: %s, mark bit: %s",
        //        chunk, m->size, (m->inUse ? "yes" : "no"), (m->markBit ? "true" : "false"));

        Q_ASSERT((qintptr) chunk % 16 == 0);

        if (m->inUse) {
            if (m->markBit) {
                // survived this cycle; reset for the next one
                m->markBit = 0;
            } else {
                // qDebug() << "-- collecting it." << m << *f << m->nextFree();
#ifdef V4_USE_VALGRIND
                VALGRIND_ENABLE_ERROR_REPORTING;
#endif
                if (m->vtbl->collectDeletables)
                    m->vtbl->collectDeletables(m, deletable);
                m->vtbl->destroy(m);

                m->setNextFree(*f);
#ifdef V4_USE_VALGRIND
                VALGRIND_DISABLE_ERROR_REPORTING;
                VALGRIND_MEMPOOL_FREE(this, m);
#endif
                *f = m;
                SCRIBBLE(m, 0x99, size);
            }
        }
    }
#ifdef V4_USE_VALGRIND
    VALGRIND_ENABLE_ERROR_REPORTING;
#endif
}
|
|
|
|
|
|
|
|
bool MemoryManager::isGCBlocked() const
|
|
|
|
{
|
|
|
|
return m_d->gcBlocked;
|
|
|
|
}
|
|
|
|
|
|
|
|
void MemoryManager::setGCBlocked(bool blockGC)
|
|
|
|
{
|
|
|
|
m_d->gcBlocked = blockGC;
|
|
|
|
}
|
|
|
|
|
2013-01-02 15:43:47 +00:00
|
|
|
void MemoryManager::runGC()
|
2012-12-04 12:40:18 +00:00
|
|
|
{
|
|
|
|
if (!m_d->enableGC || m_d->gcBlocked) {
|
|
|
|
// qDebug() << "Not running GC.";
|
2013-01-02 15:43:47 +00:00
|
|
|
return;
|
2012-12-04 12:40:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// QTime t; t.start();
|
|
|
|
|
2013-01-02 15:43:47 +00:00
|
|
|
// qDebug() << ">>>>>>>>runGC";
|
2012-12-04 12:40:18 +00:00
|
|
|
|
2013-01-28 15:46:09 +00:00
|
|
|
mark();
|
2012-12-04 12:40:18 +00:00
|
|
|
// std::cerr << "GC: marked " << marks
|
|
|
|
// << " objects in " << t.elapsed()
|
|
|
|
// << "ms" << std::endl;
|
|
|
|
|
|
|
|
// t.restart();
|
2013-01-02 15:43:47 +00:00
|
|
|
/*std::size_t freedCount =*/ sweep();
|
2012-12-04 12:40:18 +00:00
|
|
|
// std::cerr << "GC: sweep freed " << freedCount
|
|
|
|
// << " objects in " << t.elapsed()
|
|
|
|
// << "ms" << std::endl;
|
2013-03-01 12:34:52 +00:00
|
|
|
memset(m_d->allocCount, 0, sizeof(m_d->allocCount));
|
2013-11-02 17:39:55 +00:00
|
|
|
m_d->totalAlloc = 0;
|
2012-12-04 12:40:18 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void MemoryManager::setEnableGC(bool enableGC)
|
|
|
|
{
|
|
|
|
m_d->enableGC = enableGC;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Tears the manager down: detaches every remaining persistent value (their
// owners may still hold PersistentValuePrivate pointers, so the privates are
// neutralized rather than deleted), then runs a final sweep so all remaining
// objects are destroyed and deletables see lastCall == true.
MemoryManager::~MemoryManager()
{
    PersistentValuePrivate *persistent = m_persistentValues;
    while (persistent) {
        PersistentValuePrivate *n = persistent->next;
        persistent->value = Primitive::undefinedValue();
        persistent->engine = 0;
        persistent->prev = 0;
        persistent->next = 0;
        persistent = n;
    }

    sweep(/*lastSweep*/true);
#ifdef V4_USE_VALGRIND
    VALGRIND_DESTROY_MEMPOOL(this);
#endif
}
|
|
|
|
|
|
|
|
// Associates this manager with its ExecutionEngine; the engine supplies the
// roots and the mark stack used during collection.
void MemoryManager::setExecutionEngine(ExecutionEngine *engine)
{
    ExecutionEngine *e = engine;
    m_d->engine = e;
}
|
|
|
|
|
|
|
|
void MemoryManager::dumpStats() const
|
|
|
|
{
|
2013-01-25 18:32:30 +00:00
|
|
|
#ifdef DETAILED_MM_STATS
|
2012-12-04 12:40:18 +00:00
|
|
|
std::cerr << "=================" << std::endl;
|
|
|
|
std::cerr << "Allocation stats:" << std::endl;
|
|
|
|
std::cerr << "Requests for each chunk size:" << std::endl;
|
|
|
|
for (int i = 0; i < m_d->allocSizeCounters.size(); ++i) {
|
|
|
|
if (unsigned count = m_d->allocSizeCounters[i]) {
|
|
|
|
std::cerr << "\t" << (i << 4) << " bytes chunks: " << count << std::endl;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif // DETAILED_MM_STATS
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returns the ExecutionEngine this manager was bound to via
// setExecutionEngine() (0 before that call).
ExecutionEngine *MemoryManager::engine() const
{
    ExecutionEngine *e = m_d->engine;
    return e;
}
|
|
|
|
|
|
|
|
#ifdef DETAILED_MM_STATS
// Records one allocation request in the per-bucket histogram used by
// dumpStats(); the bucket is the size rounded up to a 16-byte multiple,
// divided by 16.
void MemoryManager::willAllocate(std::size_t size)
{
    const unsigned bucket = (size + 15) >> 4;
    QVector<unsigned> &counters = m_d->allocSizeCounters;
    if ((unsigned) counters.size() < bucket + 1)
        counters.resize(bucket + 1);
    ++counters[bucket];
}

#endif // DETAILED_MM_STATS
|
|
|
|
|
2013-01-28 15:46:09 +00:00
|
|
|
// Conservative root scan of the native (C++) stack: every word between the
// current frame and the recorded stack top that (a) points into one of our
// heap chunks, (b) is aligned to that chunk's slot size, and (c) refers to
// an in-use object, is treated as a live reference and marked.
void MemoryManager::collectFromStack() const
{
    quintptr valueOnStack = 0;

    if (!m_d->heapChunks.count())
        return;

    // start scanning just above this function's own locals
    quintptr *current = (&valueOnStack) + 1;
    // qDebug() << "collectFromStack";// << top << current << &valueOnStack;

#if V4_USE_VALGRIND
    // the scan deliberately reads uninitialized stack slots
    VALGRIND_MAKE_MEM_DEFINED(current, (m_d->stackTop - current)*sizeof(quintptr));
#endif

    // Build a flat, sorted array of chunk boundaries: for each chunk, one
    // entry just before its base and one at its last valid slot. A pointer
    // whose lower_bound index is odd therefore lies inside a chunk.
    char** heapChunkBoundaries = (char**)alloca(m_d->heapChunks.count() * 2 * sizeof(char*));
    char** heapChunkBoundariesEnd = heapChunkBoundaries + 2 * m_d->heapChunks.count();
    int i = 0;
    for (QVector<Data::Chunk>::Iterator it = m_d->heapChunks.begin(), end =
         m_d->heapChunks.end(); it != end; ++it) {
        heapChunkBoundaries[i++] = reinterpret_cast<char*>(it->memory.base()) - 1;
        heapChunkBoundaries[i++] = reinterpret_cast<char*>(it->memory.base()) + it->memory.size() - it->chunkSize;
    }
    Q_ASSERT(i == m_d->heapChunks.count() * 2);

    for (; current < m_d->stackTop; ++current) {
        char* genericPtr = reinterpret_cast<char *>(*current);

        // fast reject: outside the whole heap address range
        if (genericPtr < *heapChunkBoundaries || genericPtr > *(heapChunkBoundariesEnd - 1))
            continue;
        int index = std::lower_bound(heapChunkBoundaries, heapChunkBoundariesEnd, genericPtr) - heapChunkBoundaries;
        // An odd index means the pointer is _before_ the end of a heap chunk and therefore valid.
        Q_ASSERT(index >= 0 && index < m_d->heapChunks.count() * 2);
        if (index & 1) {
            int size = m_d->heapChunks.at(index >> 1).chunkSize;
            Managed *m = reinterpret_cast<Managed *>(genericPtr);
            // qDebug() << "   inside" << size;

            if (((quintptr)m - (quintptr)heapChunkBoundaries[index-1] - 1 ) % size)
                // wrongly aligned value, skip it
                continue;

            if (!m->inUse)
                // Skip pointers to already freed objects, they are bogus as well
                continue;

            // qDebug() << "       marking";
            m->mark(m_d->engine);
        }
    }
}
|
2013-06-24 13:28:00 +00:00
|
|
|
|
2013-09-03 10:40:07 +00:00
|
|
|
// Exact root scan of the engine's JavaScript value stack: every live Managed
// referenced from the JS stack is marked as reachable.
void MemoryManager::collectFromJSStack() const
{
    SafeValue *v = engine()->jsStackBase;
    SafeValue *top = engine()->jsStackTop;
    while (v < top) {
        Managed *m = v->asManaged();
        if (m && m->inUse)
            // Skip pointers to already freed objects, they are bogus as well
            m->mark(m_d->engine);
        ++v;
    }
}
|
2013-06-24 13:28:00 +00:00
|
|
|
QT_END_NAMESPACE
|