MDEV-36184 - mhnsw: support powerpc64 SIMD instructions

This patch optimises the dot_product function by leveraging
vectorisation through SIMD intrinsics. This transformation enables
parallel execution of multiple operations, significantly improving the
performance of dot product computation on supported architectures.

The original dot_product function does undergo auto-vectorisation when
compiled with -O3. However, performance analysis has shown that the
newly optimised implementation performs better on Power10 and achieves
comparable performance on Power9 machines.

Benchmark tests were conducted on both Power9 and Power10 machines,
comparing the time taken by the original (auto-vectorised) code and the
new vectorised code. GCC 11.5.0 with -O3 on the RHEL 9.5 operating
system was used. The benchmarks were performed using a sample test code with
a vector size of 4096 and 10⁷ loop iterations. Here are the average
execution times (in seconds) over multiple runs:

Power9:
Before change: ~16.364 s
After change: ~16.180 s
Performance gain is modest but measurable.

Power10:
Before change: ~8.989 s
After change: ~6.446 s
Significant improvement, roughly 28–30% faster.

Signed-off-by: Manjul Mohan <manjul.mohan@ibm.com>
This commit is contained in:
Manjul Mohan 2025-02-21 12:41:50 -05:00 committed by Sergei Golubchik
parent db5bb6f333
commit 6bb92f98ce
2 changed files with 56 additions and 0 deletions

View File

@ -53,6 +53,10 @@ SOFTWARE.
#define NEON_IMPLEMENTATION
#endif
#endif
/*
  Enable the PowerPC SIMD path only on 64-bit POWER with the VSX unit
  available; <altivec.h> provides the vector types and vec_* intrinsics.
*/
#if defined __powerpc64__ && defined __VSX__
#include <altivec.h>
#define POWER_IMPLEMENTATION
#endif
template <typename T>
struct PatternedSimdBloomFilter

View File

@ -229,6 +229,58 @@ struct FVector
}
#endif
#ifdef POWER_IMPLEMENTATION
/************* POWERPC *****************************************************/
static constexpr size_t POWER_bytes= 128 / 8; // Assume 128-bit vector width
// int16 lanes per 128-bit vector (8); dot_product consumes this many per step
static constexpr size_t POWER_dims= POWER_bytes / sizeof(int16_t);
/*
  Dot product of two int16 vectors using 128-bit VSX SIMD.

  NOTE(review): vec_ld silently truncates its address to a 16-byte
  boundary, so v1 and v2 must be 16-byte aligned — align_ptr() appears
  to guarantee this for FVector payloads; confirm for all callers.
  The loop rounds len up to whole vectors, reading past len; fix_tail()
  zeroes that padding so the over-read contributes nothing to the sum.
*/
static float dot_product(const int16_t *v1, const int16_t *v2, size_t len)
{
// Using vector long long for int64_t accumulation
vector long long ll_sum= {0, 0};
// Round up to process full vector, including padding
size_t base= ((len + POWER_dims - 1) / POWER_dims) * POWER_dims;
for (size_t i= 0; i < base; i+= POWER_dims)
{
vector short x= vec_ld(0, &v1[i]);
vector short y= vec_ld(0, &v2[i]);
// Vectorized multiplication using vec_mule() and vec_mulo()
// vec_mule: products of even-indexed lanes, widened to int32;
// vec_mulo: products of odd-indexed lanes — together all 8 lanes.
vector int product_hi= vec_mule(x, y);
vector int product_lo= vec_mulo(x, y);
// Extend vector int to vector long long for accumulation
// (sign-extend high/low halves so sums cannot overflow int32)
vector long long llhi1= vec_unpackh(product_hi);
vector long long llhi2= vec_unpackl(product_hi);
vector long long lllo1= vec_unpackh(product_lo);
vector long long lllo2= vec_unpackl(product_lo);
ll_sum+= llhi1 + llhi2 + lllo1 + lllo2;
}
// Horizontal reduction: add the two int64 halves of the accumulator
return static_cast<float>(static_cast<int64_t>(ll_sum[0]) +
static_cast<int64_t>(ll_sum[1]));
}
/*
  Bytes to allocate for an n-dimension vector: header, payload rounded
  up to whole SIMD vectors, plus slack so align_ptr() can always find
  a 16-byte-aligned position for the payload.
*/
static size_t alloc_size(size_t n)
{
size_t payload= MY_ALIGN(n * 2, POWER_bytes);
return alloc_header + payload + (POWER_bytes - 1);
}
/*
  Place the FVector inside the slack-padded allocation so that its
  payload (the bytes after alloc_header) starts on a POWER_bytes
  (16-byte) boundary, as required by vec_ld in dot_product().
*/
static FVector *align_ptr(void *ptr)
{
intptr aligned_payload= MY_ALIGN(((intptr) ptr) + alloc_header, POWER_bytes);
return (FVector *) (aligned_payload - alloc_header);
}
/*
  Zero the padding elements between vec_len and the next whole-vector
  boundary, so dot_product()'s over-read adds nothing to the sum.
*/
void fix_tail(size_t vec_len)
{
size_t padded_len= MY_ALIGN(vec_len, POWER_dims);
bzero(dims + vec_len, (padded_len - vec_len) * 2);
}
#undef DEFAULT_IMPLEMENTATION
#endif
/************* no-SIMD default ******************************************/
#ifdef DEFAULT_IMPLEMENTATION
DEFAULT_IMPLEMENTATION