QHash: reduce how many times we calculate hash by passing it around
Results in a small speedup, because we now compute the hash once per insertion operation and pass it around, instead of recomputing it in every function that needs it. Though the gain is minimal, on my machine it executes around 1% fewer instructions per iteration.

Change-Id: Ib84696cac56c379c37853dd4f0bc6ab4f5e440ad
Reviewed-by: Thiago Macieira <thiago.macieira@intel.com>
parent c8e59d682c
commit 9afd4f6814
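As a rough illustration of the pattern (my own toy sketch, not Qt code — TinySet and its members are hypothetical names), the lookup helper accepts a precomputed hash so the insertion path hashes the key only once and can reuse that value later. The diff below applies the same split to findBucket()/findBucketWithHash() and to the insertion paths that call them.

#include <cstddef>
#include <functional>
#include <optional>
#include <string>
#include <vector>

// Toy open-addressing set; it only loosely mirrors the patch and never grows,
// so keep it under capacity.
struct TinySet {
    std::vector<std::optional<std::string>> buckets;
    TinySet() : buckets(8) {}                        // power-of-two size, so masking works

    std::size_t bucketForHash(std::size_t hash) const { return hash & (buckets.size() - 1); }

    // Lookup helper that takes the hash the caller already computed.
    std::size_t findBucketWithHash(const std::string &key, std::size_t hash) const {
        std::size_t i = bucketForHash(hash);
        while (buckets[i] && *buckets[i] != key)     // linear probing
            i = (i + 1) & (buckets.size() - 1);
        return i;
    }

    // Convenience wrapper for callers that have not hashed the key yet.
    std::size_t findBucket(const std::string &key) const {
        return findBucketWithHash(key, std::hash<std::string>{}(key));
    }

    void insert(const std::string &key) {
        std::size_t hash = std::hash<std::string>{}(key); // hashed exactly once per insertion
        std::size_t i = findBucketWithHash(key, hash);
        if (!buckets[i])
            buckets[i] = key;
        // a real table would rehash here when it gets too full, and could then
        // reuse `hash` for the post-rehash lookup instead of hashing again
    }
};

int main() {
    TinySet s;
    s.insert("qt");
    s.insert("qt");                                  // second insert finds the existing entry
    return s.findBucket("qt") == s.findBucketWithHash("qt", std::hash<std::string>{}("qt")) ? 0 : 1;
}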
@@ -684,11 +684,16 @@ struct Data
     }
 
     template <typename K> Bucket findBucket(const K &key) const noexcept
+    {
+        size_t hash = QHashPrivate::calculateHash(key, seed);
+        return findBucketWithHash(key, hash);
+    }
+
+    template <typename K> Bucket findBucketWithHash(const K &key, size_t hash) const noexcept
     {
         static_assert(std::is_same_v<std::remove_cv_t<Key>, K> ||
                       QHashHeterogeneousSearch<std::remove_cv_t<Key>, K>::value);
         Q_ASSERT(numBuckets > 0);
-        size_t hash = QHashPrivate::calculateHash(key, seed);
         Bucket bucket(this, GrowthPolicy::bucketForHash(numBuckets, hash));
         // loop over the buckets until we find the entry we search for
         // or an empty slot, in which case we know the entry doesn't exist
@@ -722,14 +727,15 @@ struct Data
     template <typename K> InsertionResult findOrInsert(const K &key) noexcept
     {
         Bucket it(static_cast<Span *>(nullptr), 0);
+        size_t hash = QHashPrivate::calculateHash(key, seed);
         if (numBuckets > 0) {
-            it = findBucket(key);
+            it = findBucketWithHash(key, hash);
             if (!it.isUnused())
                 return { it.toIterator(this), true };
         }
         if (shouldGrow()) {
             rehash(size + 1);
-            it = findBucket(key); // need to get a new iterator after rehashing
+            it = findBucketWithHash(key, hash); // need to get a new iterator after rehashing
         }
         Q_ASSERT(it.span != nullptr);
         Q_ASSERT(it.isUnused());
@@ -1436,7 +1442,8 @@ private:
         detach();
         QHash detachGuard;
 
-        typename Data::Bucket bucket = d->findBucket(key);
+        size_t hash = QHashPrivate::calculateHash(key, d->seed);
+        typename Data::Bucket bucket = d->findBucketWithHash(key, hash);
         const bool shouldInsert = bucket.isUnused();
 
         // Even if we don't insert we may have to detach because we are
@@ -1448,7 +1455,7 @@ private:
 
             // Must detach from detachGuard
             d = resized ? Data::detached(d, d->size + 1) : Data::detached(d);
-            bucket = resized ? d->findBucket(key) : typename Data::Bucket(d, bucketIndex);
+            bucket = resized ? d->findBucketWithHash(key, hash) : typename Data::Bucket(d, bucketIndex);
         }
         if (shouldInsert) {
             Node *n = bucket.insert();
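A hypothetical usage sketch (my own example, not part of the change), presumably exercising the findOrInsert() path changed above: per the commit message, each insertion below now computes the hash for its key once, even when the table has to grow and rehash internally.

// Hypothetical micro-benchmark, not part of the change: with this patch each
// insert() hashes its key once per insertion, including across internal rehashes.
#include <QHash>
#include <QString>

int main()
{
    QHash<QString, int> counts;
    for (int i = 0; i < 100000; ++i)
        counts.insert(QString::number(i % 1000), i);
    return counts.size() == 1000 ? 0 : 1;
}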