tst_librarymetrics_performance: Use QBENCHMARK macro instead of rolling our own
This means we now respect -callgrind to show instruction counts (for
instance). If benchmarks don't already throw out outliers and perform
averaging, we should roll those features into testlib, not replace it.

Change-Id: I21a3c4b41ec80a49b5b61bfe957f1165ac865010
Reviewed-by: Simon Hausmann <simon.hausmann@qt.io>
parent fde6e1fe83
commit 7377e8f950
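Concretely, each benchmark below drops its hand-rolled QElapsedTimer loop, qSort()-based outlier trimming and manual averaging in favour of the QBENCHMARK macro. A minimal sketch of the resulting shape, mirroring the new compilation() body in the diff that follows (the comments are editorial, not part of the change):

    void tst_librarymetrics_performance::compilation()
    {
        QFETCH(QUrl, qmlfile);

        // testlib chooses the iteration count, aggregates the measurements,
        // and picks the metric based on how the test binary is invoked.
        QBENCHMARK {
            cleanState(&e);           // reset engine state for every iteration
            QQmlComponent c(e, this);
            c.loadUrl(qmlfile);       // just compile
        }
    }

Measurement selection then stays with testlib's standard benchmark options, for example (assuming the usual tst_librarymetrics_performance binary name):

    ./tst_librarymetrics_performance               # wall time (default)
    ./tst_librarymetrics_performance -callgrind    # instruction counts via Callgrind
    ./tst_librarymetrics_performance -tickcounter  # CPU tick counter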
tst_librarymetrics_performance.cpp

@@ -41,9 +41,6 @@
 // for the standard set of elements, properties and expressions which
 // are provided in the QtDeclarative library (QtQml and QtQuick).
 
-#define AVERAGE_OVER_N 10
-#define IGNORE_N_OUTLIERS 2
-
 class ModuleApi : public QObject
 {
     Q_OBJECT
@@ -219,125 +216,39 @@ void tst_librarymetrics_performance::compilation()
         }
     }
 
-    QList<qint64> nResults;
-
-    // generate AVERAGE_OVER_N results
-    for (int i = 0; i < AVERAGE_OVER_N; ++i) {
+    QBENCHMARK {
         cleanState(&e);
-        {
-        QElapsedTimer et;
-        et.start();
         QQmlComponent c(e, this);
         c.loadUrl(qmlfile); // just compile.
-        qint64 etime = et.nsecsElapsed();
-        nResults.append(etime);
     }
 }
-
-    // sort the list
-    qSort(nResults);
-
-    // remove IGNORE_N_OUTLIERS*2 from ONLY the worst end (remove gc interference)
-    for (int i = 0; i < IGNORE_N_OUTLIERS; ++i) {
-        if (!nResults.isEmpty()) nResults.removeLast();
-        if (!nResults.isEmpty()) nResults.removeLast();
-    }
-
-    // now generate an average
-    qint64 totaltime = 0;
-    if (nResults.size() == 0) nResults.append(9999);
-    for (int i = 0; i < nResults.size(); ++i)
-        totaltime += nResults.at(i);
-    double average = ((double)totaltime) / nResults.count();
-
-    // and return it as the result
-    QTest::setBenchmarkResult(average, QTest::WalltimeNanoseconds);
-    QTest::setBenchmarkResult(average, QTest::WalltimeNanoseconds); // twice to workaround bug in QTestLib
-}
 
 void tst_librarymetrics_performance::instantiation_cached()
 {
     QFETCH(QUrl, qmlfile);
 
     cleanState(&e);
-    QList<qint64> nResults;
-
-    // generate AVERAGE_OVER_N results
-    for (int i = 0; i < AVERAGE_OVER_N; ++i) {
-        QElapsedTimer et;
-        et.start();
+    QBENCHMARK {
         QQmlComponent c(e, this);
         c.loadUrl(qmlfile); // just compile.
         QObject *o = c.create();
-        qint64 etime = et.nsecsElapsed();
-        nResults.append(etime);
         delete o;
     }
-
-    // sort the list
-    qSort(nResults);
-
-    // remove IGNORE_N_OUTLIERS*2 from ONLY the worst end (remove gc interference)
-    for (int i = 0; i < IGNORE_N_OUTLIERS; ++i) {
-        if (!nResults.isEmpty()) nResults.removeLast();
-        if (!nResults.isEmpty()) nResults.removeLast();
-    }
-
-    // now generate an average
-    qint64 totaltime = 0;
-    if (nResults.size() == 0) nResults.append(9999);
-    for (int i = 0; i < nResults.size(); ++i)
-        totaltime += nResults.at(i);
-    double average = ((double)totaltime) / nResults.count();
-
-    // and return it as the result
-    QTest::setBenchmarkResult(average, QTest::WalltimeNanoseconds);
-    QTest::setBenchmarkResult(average, QTest::WalltimeNanoseconds); // twice to workaround bug in QTestLib
 }
 
 void tst_librarymetrics_performance::instantiation()
 {
     QFETCH(QUrl, qmlfile);
 
+    QBENCHMARK {
         cleanState(&e);
-    QList<qint64> nResults;
-
-    // generate AVERAGE_OVER_N results
-    for (int i = 0; i < AVERAGE_OVER_N; ++i) {
-        cleanState(&e);
-        {
-        QElapsedTimer et;
-        et.start();
         QQmlComponent c(e, this);
         c.loadUrl(qmlfile); // just compile.
         QObject *o = c.create();
-        qint64 etime = et.nsecsElapsed();
-        nResults.append(etime);
         delete o;
     }
 }
-
-    // sort the list
-    qSort(nResults);
-
-    // remove IGNORE_N_OUTLIERS*2 from ONLY the worst end (remove gc interference)
-    for (int i = 0; i < IGNORE_N_OUTLIERS; ++i) {
-        if (!nResults.isEmpty()) nResults.removeLast();
-        if (!nResults.isEmpty()) nResults.removeLast();
-    }
-
-    // now generate an average
-    qint64 totaltime = 0;
-    if (nResults.size() == 0) nResults.append(9999);
-    for (int i = 0; i < nResults.size(); ++i)
-        totaltime += nResults.at(i);
-    double average = ((double)totaltime) / nResults.count();
-
-    // and return it as the result
-    QTest::setBenchmarkResult(average, QTest::WalltimeNanoseconds);
-    QTest::setBenchmarkResult(average, QTest::WalltimeNanoseconds); // twice to workaround bug in QTestLib
-}
 
 void tst_librarymetrics_performance::positioners_data()
 {
     QTest::addColumn<QUrl>("qmlfile");
@@ -356,45 +267,15 @@ void tst_librarymetrics_performance::positioners()
 {
     QFETCH(QUrl, qmlfile);
 
+    QBENCHMARK {
         cleanState(&e);
-    QList<qint64> nResults;
-
-    // generate AVERAGE_OVER_N results
-    for (int i = 0; i < AVERAGE_OVER_N; ++i) {
-        cleanState(&e);
-        {
-        QElapsedTimer et;
-        et.start();
         QQmlComponent c(e, this);
         c.loadUrl(qmlfile); // just compile.
         QObject *o = c.create();
-        qint64 etime = et.nsecsElapsed();
-        nResults.append(etime);
         delete o;
     }
 }
-
-    // sort the list
-    qSort(nResults);
-
-    // remove IGNORE_N_OUTLIERS*2 from ONLY the worst end (remove gc interference)
-    for (int i = 0; i < IGNORE_N_OUTLIERS; ++i) {
-        if (!nResults.isEmpty()) nResults.removeLast();
-        if (!nResults.isEmpty()) nResults.removeLast();
-    }
-
-    // now generate an average
-    qint64 totaltime = 0;
-    if (nResults.size() == 0) nResults.append(9999);
-    for (int i = 0; i < nResults.size(); ++i)
-        totaltime += nResults.at(i);
-    double average = ((double)totaltime) / nResults.count();
-
-    // and return it as the result
-    QTest::setBenchmarkResult(average, QTest::WalltimeNanoseconds);
-    QTest::setBenchmarkResult(average, QTest::WalltimeNanoseconds); // twice to workaround bug in QTestLib
-}
 
 QTEST_MAIN(tst_librarymetrics_performance)
 
 #include "tst_librarymetrics_performance.moc"