mirror of
https://github.com/RsyncProject/rsync.git
synced 2026-04-22 07:07:18 -04:00
x86-64 SIMD build fixes (#20)
* x86-64 SIMD build fixes

configure.ac was modified to detect g++ >=5 and clang++ >=7. Additionally, some script malfunctions on FreeBSD were corrected. The get_checksum1() code has been modified to fix clang and g++ 10 compilation.

This version of the code and configure.ac has been tested on:

- Ubuntu 16 - gcc 7.3.0, clang 6.0.0
- Debian 10 - gcc 5.4.0, 6.4.0, 7.2.0, 8.4.0, 9.2.1, 10.0.1, clang 5.0.2, 6.0.1, 7.0.1, 8.0.0, 9.0.0, 10.0.0
- ArchLinux 20200605 - gcc 10.1.0, clang 10.0.0
- FreeBSD 12.1 - gcc 9.3.0, clang 8.0.1

It is unknown whether it will work on gcc 5.0-5.3, but the script currently allows it.
Changed files: configure.ac (49 lines changed)
@@ -197,31 +197,48 @@ SIMD=

 AC_MSG_CHECKING([whether to enable SIMD optimizations])
 AC_ARG_ENABLE(simd,
-    AS_HELP_STRING([--enable-simd],[enable SIMD optimizations (requires g++)]))
+    AS_HELP_STRING([--enable-simd],[enable SIMD optimizations (requires c++)]))

 if test x"$enable_simd" = x"yes"; then
-    # For x86-64 SIMD, g++ is also required
+    # For x86-64 SIMD, g++ >=5 or clang++ >=7 is required
     if test x"$build_cpu" = x"x86_64"; then
-        case "$CXX" in
-            *g++)
-                # AC_MSG_RESULT() is called below.
-                SIMD="$SIMD x86_64"
-                ;;
-            *)
-                AC_MSG_RESULT(no)
-                AC_MSG_ERROR(Failed to find g++ for SIMD speedups.
-Omit --enable-simd to continue without it.)
-                ;;
-        esac
+        if test x"$CXX" != x""; then
+            CXX_OK=
+            CXX_VERSION=`$CXX --version | head -n 1`
+            case "$CXX_VERSION" in
+                g++*)
+                    CXX_VERSION=`$CXX -dumpversion | sed 's/\..*//g'`
+                    if test "$CXX_VERSION" -ge "5"; then
+                        CXX_OK=yes
+                    fi
+                    ;;
+                clang*)
+                    # $CXX -dumpversion would have been ideal, but is broken on older clang
+                    CXX_VERSION=`echo "$CXX_VERSION" | sed 's/.*version //g' | sed 's/\..*//g'`
+                    if test "$CXX_VERSION" -ge "7"; then
+                        CXX_OK=yes
+                    fi
+                    ;;
+            esac
+
+            if test x"$CXX_OK" = x"yes"; then
+                # AC_MSG_RESULT() is called below.
+                SIMD="$SIMD x86_64"
+            else
+                AC_MSG_RESULT(error)
+                AC_MSG_ERROR([Failed to find g++ >=5 or clang++ >=7 for SIMD optimizations.
+Omit --enable-simd to continue without it. ($CXX, $CXX_VERSION)])
+            fi
+        fi
     fi
 fi

 if test x"$SIMD" != x""; then
-    SIMD=`echo "$SIMD" | sed -e 's/^ *//'`
+    SIMD=`echo "$SIMD" | sed 's/^ *//'`
     AC_MSG_RESULT([yes ($SIMD)])
     AC_DEFINE(HAVE_SIMD, 1, [Define to 1 to enable SIMD optimizations])
-    SIMD=`echo "$SIMD" | sed -e 's/[[^ ]]\+/$(SIMD_&)/g'`
-    # We only use g++ for its target attribute dispatching, disable unneeded bulky features
+    SIMD=`echo "\\\$(SIMD_$SIMD)" | sed 's/ /) $(SIMD_/g'`
+    # We only use c++ for its target attribute dispatching, disable unneeded bulky features
     CXXFLAGS="$CXXFLAGS -fno-exceptions -fno-rtti"
 else
     AC_MSG_RESULT(no)
@@ -45,9 +45,10 @@
  * the available xmm registers, this optimized version may not be faster than
  * the pure C version anyway. Note that all x86-64 CPUs support at least SSE2.
  *
- * This file is compiled using GCC 4.8+'s C++ front end to allow the use of
- * the target attribute, selecting the fastest code path based on runtime
- * detection of CPU capabilities.
+ * This file is compiled using GCC 4.8+/clang 6+'s C++ front end to allow the
+ * use of the target attribute, selecting the fastest code path based on
+ * dispatch priority (GCC 5) or runtime detection of CPU capabilities (GCC 6+).
+ * GCC 4.x are not supported to ease configure.ac logic.
  */

 #ifdef __x86_64__
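The target attribute mechanism this comment refers to is GCC/clang function multi-versioning. A minimal standalone sketch of the idea (illustrative names and values, not part of the patch), compiled as C++:

    #include <cstdio>

    // One name, several bodies; the toolchain emits a resolver that picks
    // the best supported version (at load time on GCC 6+, by dispatch
    // priority on GCC 5).
    __attribute__ ((target("default"))) int vector_width() { return 16; } // generic fallback
    __attribute__ ((target("ssse3")))   int vector_width() { return 16; }
    __attribute__ ((target("avx2")))    int vector_width() { return 32; } // chosen on AVX2 CPUs

    int main()
    {
        std::printf("chosen vector width: %d bytes\n", vector_width());
        return 0;
    }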
@@ -59,73 +60,34 @@

 #include <immintrin.h>

-/* Compatibility functions to let our SSSE3 algorithm run on SSE2 */
+/* Some clang versions don't like it when you use static with multi-versioned functions: linker errors */
+#ifdef __clang__
+#define MVSTATIC
+#else
+#define MVSTATIC static
+#endif

-__attribute__ ((target("sse2"))) static inline __m128i sse_interleave_odd_epi16(__m128i a, __m128i b)
-{
-    return _mm_packs_epi32(
-        _mm_srai_epi32(a, 16),
-        _mm_srai_epi32(b, 16)
-    );
-}
+// Missing from the headers on gcc 6 and older, clang 8 and older
+typedef long long __m128i_u __attribute__((__vector_size__(16), __may_alias__, __aligned__(1)));
+typedef long long __m256i_u __attribute__((__vector_size__(32), __may_alias__, __aligned__(1)));

-__attribute__ ((target("sse2"))) static inline __m128i sse_interleave_even_epi16(__m128i a, __m128i b)
-{
-    return sse_interleave_odd_epi16(
-        _mm_slli_si128(a, 2),
-        _mm_slli_si128(b, 2)
-    );
-}
+/* Compatibility macros to let our SSSE3 algorithm run with only SSE2.
+   These used to be neat individual functions with target attributes switching between SSE2 and SSSE3 implementations
+   as needed, but though this works perfectly with GCC, clang fails to inline those properly leading to a near 50%
+   performance drop - combined with static and inline modifiers gets you linker errors and even compiler crashes...
+*/

-__attribute__ ((target("sse2"))) static inline __m128i sse_mulu_odd_epi8(__m128i a, __m128i b)
-{
-    return _mm_mullo_epi16(
-        _mm_srli_epi16(a, 8),
-        _mm_srai_epi16(b, 8)
-    );
-}
+#define SSE2_INTERLEAVE_ODD_EPI16(a, b) _mm_packs_epi32(_mm_srai_epi32(a, 16), _mm_srai_epi32(b, 16))
+#define SSE2_INTERLEAVE_EVEN_EPI16(a, b) SSE2_INTERLEAVE_ODD_EPI16(_mm_slli_si128(a, 2), _mm_slli_si128(b, 2))
+#define SSE2_MULU_ODD_EPI8(a, b) _mm_mullo_epi16(_mm_srli_epi16(a, 8), _mm_srai_epi16(b, 8))
+#define SSE2_MULU_EVEN_EPI8(a, b) _mm_mullo_epi16(_mm_and_si128(a, _mm_set1_epi16(0xFF)), _mm_srai_epi16(_mm_slli_si128(b, 1), 8))

-__attribute__ ((target("sse2"))) static inline __m128i sse_mulu_even_epi8(__m128i a, __m128i b)
-{
-    return _mm_mullo_epi16(
-        _mm_and_si128(a, _mm_set1_epi16(0xFF)),
-        _mm_srai_epi16(_mm_slli_si128(b, 1), 8)
-    );
-}
+#define SSE2_HADDS_EPI16(a, b) _mm_adds_epi16(SSE2_INTERLEAVE_EVEN_EPI16(a, b), SSE2_INTERLEAVE_ODD_EPI16(a, b))
+#define SSE2_MADDUBS_EPI16(a, b) _mm_adds_epi16(SSE2_MULU_EVEN_EPI8(a, b), SSE2_MULU_ODD_EPI8(a, b))

-__attribute__ ((target("sse2"))) static inline __m128i sse_hadds_epi16(__m128i a, __m128i b)
-{
-    return _mm_adds_epi16(
-        sse_interleave_even_epi16(a, b),
-        sse_interleave_odd_epi16(a, b)
-    );
-}
-
-__attribute__ ((target("ssse3"))) static inline __m128i sse_hadds_epi16(__m128i a, __m128i b)
-{
-    return _mm_hadds_epi16(a, b);
-}
-
-__attribute__ ((target("sse2"))) static inline __m128i sse_maddubs_epi16(__m128i a, __m128i b)
-{
-    return _mm_adds_epi16(
-        sse_mulu_even_epi8(a, b),
-        sse_mulu_odd_epi8(a, b)
-    );
-}
-
-__attribute__ ((target("ssse3"))) static inline __m128i sse_maddubs_epi16(__m128i a, __m128i b)
-{
-    return _mm_maddubs_epi16(a, b);
-}
-
-/* These don't actually get called, but we need to define them. */
-__attribute__ ((target("default"))) static inline __m128i sse_interleave_odd_epi16(__m128i a, __m128i b) { return a; }
-__attribute__ ((target("default"))) static inline __m128i sse_interleave_even_epi16(__m128i a, __m128i b) { return a; }
-__attribute__ ((target("default"))) static inline __m128i sse_mulu_odd_epi8(__m128i a, __m128i b) { return a; }
-__attribute__ ((target("default"))) static inline __m128i sse_mulu_even_epi8(__m128i a, __m128i b) { return a; }
-__attribute__ ((target("default"))) static inline __m128i sse_hadds_epi16(__m128i a, __m128i b) { return a; }
-__attribute__ ((target("default"))) static inline __m128i sse_maddubs_epi16(__m128i a, __m128i b) { return a; }
+__attribute__ ((target("default"))) MVSTATIC int32 get_checksum1_avx2_64(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2) { return i; }
+__attribute__ ((target("default"))) MVSTATIC int32 get_checksum1_ssse3_32(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2) { return i; }
+__attribute__ ((target("default"))) MVSTATIC int32 get_checksum1_sse2_32(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2) { return i; }

 /*
   Original loop per 4 bytes:
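For reference, SSSE3's _mm_maddubs_epi16 (which SSE2_MADDUBS_EPI16 emulates) multiplies the unsigned bytes of its first operand with the corresponding signed bytes of its second, then adds adjacent products into signed-saturated int16 lanes. A scalar sketch of one output lane (illustrative helper, not part of the patch):

    #include <algorithm>
    #include <cstdint>

    // One int16 output lane of maddubs: a-bytes unsigned, b-bytes signed,
    // adjacent products summed with signed saturation.
    static int16_t maddubs_lane(uint8_t a0, int8_t b0, uint8_t a1, int8_t b1)
    {
        int32_t sum = (int32_t)a0 * b0 + (int32_t)a1 * b1; // widened, cannot overflow int32
        return (int16_t)std::clamp(sum, (int32_t)INT16_MIN, (int32_t)INT16_MAX);
    }

In this file the first operand is always a small constant vector (all 1s, or the 4/3/2/1 weights), so the saturation case is never actually reached.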
@@ -146,12 +108,7 @@ __attribute__ ((target("default"))) static inline __m128i sse_maddubs_epi16(__m1
     s1 += (uint32)(t1[0] + t1[1] + t1[2] + t1[3] + t1[4] + t1[5] + t1[6] + t1[7]) +
         32*CHAR_OFFSET;
  */
-/*
-  Both sse2 and ssse3 targets must be specified here or we lose (a lot) of
-  performance, possibly due to not unrolling+inlining the called targeted
-  functions.
- */
-__attribute__ ((target("sse2", "ssse3"))) static int32 get_checksum1_sse2_32(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
+__attribute__ ((target("ssse3"))) MVSTATIC int32 get_checksum1_ssse3_32(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
 {
     if (len > 32) {
         int aligned = ((uintptr_t)buf & 15) == 0;
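The recurrence every vector path here implements is the scalar one from the "Original loop" comment above: per byte, s1 += buf[i] + CHAR_OFFSET; s2 += s1. Unrolling that over one 32-byte block gives the closed forms the SIMD loops compute per iteration, which is where the 32 and 528 constants come from (528 = 1+2+...+32). A scalar model of one block update (a sketch using plain C++ types in place of rsync's):

    #include <cstdint>

    // Scalar model of one 32-byte block update, matching the identities
    // the vector loops implement (C stands in for CHAR_OFFSET).
    static void block_update(const int8_t b[32], uint32_t& s1, uint32_t& s2, uint32_t C)
    {
        uint32_t sum = 0, weighted = 0;
        for (int j = 0; j < 32; j++) {
            sum      += (uint32_t)b[j];             // plain byte sum -> feeds s1
            weighted += (32u - j) * (uint32_t)b[j]; // position-weighted sum -> feeds s2
        }
        s2 += 32 * s1 + weighted + 528 * C; // uses the old s1: update s2 first
        s1 += sum + 32 * C;
    }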
@@ -167,16 +124,11 @@ __attribute__ ((target("sse2", "ssse3"))) static int32 get_checksum1_sse2_32(sch

         for (; i < (len-32); i+=32) {
             // Load ... 2*[int8*16]
-            // SSSE3 has _mm_lqqdu_si128, but this requires another
-            // target function for each SSE2 and SSSE3 loads. For reasons
-            // unknown (to me) we lose about 10% performance on some CPUs if
-            // we do that right here. We just use _mm_loadu_si128 as for all
-            // but a handful of specific old CPUs they are synonymous, and
-            // take the 1-5% hit on those specific CPUs where it isn't.
             __m128i in8_1, in8_2;
             if (!aligned) {
-                in8_1 = _mm_loadu_si128((__m128i_u*)&buf[i]);
-                in8_2 = _mm_loadu_si128((__m128i_u*)&buf[i + 16]);
+                // Synonymous with _mm_loadu_si128 on all but a handful of old CPUs
+                in8_1 = _mm_lddqu_si128((__m128i_u*)&buf[i]);
+                in8_2 = _mm_lddqu_si128((__m128i_u*)&buf[i + 16]);
             } else {
                 in8_1 = _mm_load_si128((__m128i_u*)&buf[i]);
                 in8_2 = _mm_load_si128((__m128i_u*)&buf[i + 16]);
@@ -185,13 +137,13 @@ __attribute__ ((target("sse2", "ssse3"))) static int32 get_checksum1_sse2_32(sch
             // (1*buf[i] + 1*buf[i+1]), (1*buf[i+2], 1*buf[i+3]), ... 2*[int16*8]
             // Fastest, even though multiply by 1
             __m128i mul_one = _mm_set1_epi8(1);
-            __m128i add16_1 = sse_maddubs_epi16(mul_one, in8_1);
-            __m128i add16_2 = sse_maddubs_epi16(mul_one, in8_2);
+            __m128i add16_1 = _mm_maddubs_epi16(mul_one, in8_1);
+            __m128i add16_2 = _mm_maddubs_epi16(mul_one, in8_2);

             // (4*buf[i] + 3*buf[i+1]), (2*buf[i+2], buf[i+3]), ... 2*[int16*8]
             __m128i mul_const = _mm_set1_epi32(4 + (3 << 8) + (2 << 16) + (1 << 24));
-            __m128i mul_add16_1 = sse_maddubs_epi16(mul_const, in8_1);
-            __m128i mul_add16_2 = sse_maddubs_epi16(mul_const, in8_2);
+            __m128i mul_add16_1 = _mm_maddubs_epi16(mul_const, in8_1);
+            __m128i mul_add16_2 = _mm_maddubs_epi16(mul_const, in8_2);

             // s2 += 32*s1
             ss2 = _mm_add_epi32(ss2, _mm_slli_epi32(ss1, 5));
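Concretely, per four input bytes b0..b3 the two maddubs results hold (worked example with made-up byte values, not from the patch):

    // add16     lanes: (1*b0 + 1*b1), (1*b2 + 1*b3)   -- plain pair sums, feeds s1
    // mul_add16 lanes: (4*b0 + 3*b1), (2*b2 + 1*b3)   -- descending position weights, feeds s2
    // e.g. b0..b3 = 10, 20, 30, 40:
    //   add16     = 30, 70
    //   mul_add16 = 4*10 + 3*20 = 100, 2*30 + 1*40 = 100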
@@ -224,7 +176,111 @@ __attribute__ ((target("sse2", "ssse3"))) static int32 get_checksum1_sse2_32(sch
             // [t1[0] + t1[1], t1[2] + t1[3] ...] [int16*8]
             // We could've combined this with generating sum_add32 above and
             // save an instruction but benchmarking shows that as being slower
-            __m128i add16 = sse_hadds_epi16(add16_1, add16_2);
+            __m128i add16 = _mm_hadds_epi16(add16_1, add16_2);

             // [t1[0], t1[1], ...] -> [t1[0]*28 + t1[1]*24, ...] [int32*4]
             __m128i mul32 = _mm_madd_epi16(add16, mul_t1);
+
+            // [sum(mul32), X, X, X] [int32*4]; faster than multiple _mm_hadd_epi32
+            mul32 = _mm_add_epi32(mul32, _mm_srli_si128(mul32, 4));
+            mul32 = _mm_add_epi32(mul32, _mm_srli_si128(mul32, 8));
+
+            // s2 += 28*t1[0] + 24*t1[1] + 20*t1[2] + 16*t1[3] + 12*t1[4] + 8*t1[5] + 4*t1[6]
+            ss2 = _mm_add_epi32(ss2, mul32);
+
+#if CHAR_OFFSET != 0
+            // s1 += 32*CHAR_OFFSET
+            __m128i char_offset_multiplier = _mm_set1_epi32(32 * CHAR_OFFSET);
+            ss1 = _mm_add_epi32(ss1, char_offset_multiplier);
+
+            // s2 += 528*CHAR_OFFSET
+            char_offset_multiplier = _mm_set1_epi32(528 * CHAR_OFFSET);
+            ss2 = _mm_add_epi32(ss2, char_offset_multiplier);
+#endif
+        }
+
+        _mm_store_si128((__m128i_u*)x, ss1);
+        *ps1 = x[0];
+        _mm_store_si128((__m128i_u*)x, ss2);
+        *ps2 = x[0];
+    }
+    return i;
+}
+
+/*
+  Same as SSSE3 version, but using macros defined above to emulate SSSE3 calls that are not available with SSE2.
+  For GCC-only the SSE2 and SSSE3 versions could be a single function calling other functions with the right
+  target attributes to emulate SSSE3 calls on SSE2 if needed, but clang doesn't inline those properly leading
+  to a near 50% performance drop.
+ */
+__attribute__ ((target("sse2"))) MVSTATIC int32 get_checksum1_sse2_32(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
+{
+    if (len > 32) {
+        int aligned = ((uintptr_t)buf & 15) == 0;
+
+        uint32 x[4] = {0};
+        x[0] = *ps1;
+        __m128i ss1 = _mm_loadu_si128((__m128i_u*)x);
+        x[0] = *ps2;
+        __m128i ss2 = _mm_loadu_si128((__m128i_u*)x);
+
+        const int16 mul_t1_buf[8] = {28, 24, 20, 16, 12, 8, 4, 0};
+        __m128i mul_t1 = _mm_loadu_si128((__m128i_u*)mul_t1_buf);
+
+        for (; i < (len-32); i+=32) {
+            // Load ... 2*[int8*16]
+            __m128i in8_1, in8_2;
+            if (!aligned) {
+                in8_1 = _mm_loadu_si128((__m128i_u*)&buf[i]);
+                in8_2 = _mm_loadu_si128((__m128i_u*)&buf[i + 16]);
+            } else {
+                in8_1 = _mm_load_si128((__m128i_u*)&buf[i]);
+                in8_2 = _mm_load_si128((__m128i_u*)&buf[i + 16]);
+            }
+
+            // (1*buf[i] + 1*buf[i+1]), (1*buf[i+2], 1*buf[i+3]), ... 2*[int16*8]
+            // Fastest, even though multiply by 1
+            __m128i mul_one = _mm_set1_epi8(1);
+            __m128i add16_1 = SSE2_MADDUBS_EPI16(mul_one, in8_1);
+            __m128i add16_2 = SSE2_MADDUBS_EPI16(mul_one, in8_2);
+
+            // (4*buf[i] + 3*buf[i+1]), (2*buf[i+2], buf[i+3]), ... 2*[int16*8]
+            __m128i mul_const = _mm_set1_epi32(4 + (3 << 8) + (2 << 16) + (1 << 24));
+            __m128i mul_add16_1 = SSE2_MADDUBS_EPI16(mul_const, in8_1);
+            __m128i mul_add16_2 = SSE2_MADDUBS_EPI16(mul_const, in8_2);
+
+            // s2 += 32*s1
+            ss2 = _mm_add_epi32(ss2, _mm_slli_epi32(ss1, 5));
+
+            // [sum(t1[0]..t1[7]), X, X, X] [int32*4]; faster than multiple _mm_hadds_epi16
+            // Shifting left, then shifting right again and shuffling (rather than just
+            // shifting right as with mul32 below) to cheaply end up with the correct sign
+            // extension as we go from int16 to int32.
+            __m128i sum_add32 = _mm_add_epi16(add16_1, add16_2);
+            sum_add32 = _mm_add_epi16(sum_add32, _mm_slli_si128(sum_add32, 2));
+            sum_add32 = _mm_add_epi16(sum_add32, _mm_slli_si128(sum_add32, 4));
+            sum_add32 = _mm_add_epi16(sum_add32, _mm_slli_si128(sum_add32, 8));
+            sum_add32 = _mm_srai_epi32(sum_add32, 16);
+            sum_add32 = _mm_shuffle_epi32(sum_add32, 3);
+
+            // [sum(t2[0]..t2[7]), X, X, X] [int32*4]; faster than multiple _mm_hadds_epi16
+            __m128i sum_mul_add32 = _mm_add_epi16(mul_add16_1, mul_add16_2);
+            sum_mul_add32 = _mm_add_epi16(sum_mul_add32, _mm_slli_si128(sum_mul_add32, 2));
+            sum_mul_add32 = _mm_add_epi16(sum_mul_add32, _mm_slli_si128(sum_mul_add32, 4));
+            sum_mul_add32 = _mm_add_epi16(sum_mul_add32, _mm_slli_si128(sum_mul_add32, 8));
+            sum_mul_add32 = _mm_srai_epi32(sum_mul_add32, 16);
+            sum_mul_add32 = _mm_shuffle_epi32(sum_mul_add32, 3);
+
+            // s1 += t1[0] + t1[1] + t1[2] + t1[3] + t1[4] + t1[5] + t1[6] + t1[7]
+            ss1 = _mm_add_epi32(ss1, sum_add32);
+
+            // s2 += t2[0] + t2[1] + t2[2] + t2[3] + t2[4] + t2[5] + t2[6] + t2[7]
+            ss2 = _mm_add_epi32(ss2, sum_mul_add32);
+
+            // [t1[0] + t1[1], t1[2] + t1[3] ...] [int16*8]
+            // We could've combined this with generating sum_add32 above and
+            // save an instruction but benchmarking shows that as being slower
+            __m128i add16 = SSE2_HADDS_EPI16(add16_1, add16_2);
+
+            // [t1[0], t1[1], ...] -> [t1[0]*28 + t1[1]*24, ...] [int32*4]
+            __m128i mul32 = _mm_madd_epi16(add16, mul_t1);
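The slli/srai/shuffle sequence in sum_add32 and sum_mul_add32 above works because after the _mm_slli_si128 prefix-sum steps the running int16 total ends up in the high half of the top int32 lane. A scalar sketch of the final two steps (illustrative, not part of the patch):

    #include <cstdint>

    // Lane 3 of the vector now holds (total:int16 << 16) | junk. An
    // arithmetic shift right by 16 yields the total sign-extended to
    // int32, which is what _mm_srai_epi32(x, 16) does per lane;
    // _mm_shuffle_epi32(x, 3) then moves lane 3 into lane 0 for extraction.
    static int32_t extract_total(int32_t lane3)
    {
        return lane3 >> 16; // arithmetic shift: sign-extended high half
    }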
@@ -270,7 +326,7 @@ __attribute__ ((target("sse2", "ssse3"))) static int32 get_checksum1_sse2_32(sch
     s1 += (uint32)(t1[0] + t1[1] + t1[2] + t1[3] + t1[4] + t1[5] + t1[6] + t1[7] + t1[8] + t1[9] + t1[10] + t1[11] + t1[12] + t1[13] + t1[14] + t1[15]) +
         64*CHAR_OFFSET;
  */
-__attribute__ ((target("avx2"))) static int32 get_checksum1_avx2_64(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
+__attribute__ ((target("avx2"))) MVSTATIC int32 get_checksum1_avx2_64(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
 {
     if (len > 64) {
         // Instructions reshuffled compared to SSE2 for slightly better performance
@@ -377,17 +433,7 @@ __attribute__ ((target("avx2"))) static int32 get_checksum1_avx2_64(schar* buf,
     return i;
 }

-__attribute__ ((target("default"))) static int32 get_checksum1_avx2_64(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
-{
-    return i;
-}
-
-__attribute__ ((target("default"))) static int32 get_checksum1_sse2_32(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
-{
-    return i;
-}
-
-static inline int32 get_checksum1_default_1(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
+static int32 get_checksum1_default_1(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2)
 {
     uint32 s1 = *ps1;
     uint32 s2 = *ps2;
@@ -403,9 +449,10 @@ static inline int32 get_checksum1_default_1(schar* buf, int32 len, int32 i, uint
     return i;
 }

-extern "C" {
-
-uint32 get_checksum1(char *buf1, int32 len)
+/* With GCC 10 putting this implementation inside 'extern "C"' causes an
+   assembler error. That worked fine on GCC 5-9 and clang 6-10...
+ */
+static inline uint32 get_checksum1_cpp(char *buf1, int32 len)
 {
     int32 i = 0;
     uint32 s1 = 0;
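The workaround pattern in isolation (hypothetical names impl/entry; the real ones, get_checksum1_cpp and get_checksum1, are in the diff): keep the implementation as an ordinary C++ function and export only a thin C-linkage wrapper, so rsync's C callers see the same symbol as before:

    // Sketch only: impl() stands in for get_checksum1_cpp(),
    // entry() for get_checksum1().
    static inline unsigned impl(const char* buf, int len)
    {
        return (unsigned)len; // placeholder body
    }

    extern "C" unsigned entry(const char* buf, int len)
    {
        return impl(buf, len); // C ABI shim only
    }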
@@ -414,7 +461,10 @@ uint32 get_checksum1(char *buf1, int32 len)
     // multiples of 64 bytes using AVX2 (if available)
     i = get_checksum1_avx2_64((schar*)buf1, len, i, &s1, &s2);

-    // multiples of 32 bytes using SSE2/SSSE3 (if available)
+    // multiples of 32 bytes using SSSE3 (if available)
+    i = get_checksum1_ssse3_32((schar*)buf1, len, i, &s1, &s2);
+
+    // multiples of 32 bytes using SSE2 (if available)
     i = get_checksum1_sse2_32((schar*)buf1, len, i, &s1, &s2);

     // whatever is left
@@ -423,7 +473,70 @@ uint32 get_checksum1(char *buf1, int32 len)
     return (s1 & 0xffff) + (s2 << 16);
 }

-} // "C"
+extern "C" {
+
+uint32 get_checksum1(char *buf1, int32 len)
+{
+    return get_checksum1_cpp(buf1, len);
+}
+
+} // extern "C"
+
+#ifdef BENCHMARK_SIMD_CHECKSUM1
+#pragma clang optimize off
+#pragma GCC push_options
+#pragma GCC optimize ("O0")
+
+#define ROUNDS 1024
+#define BLOCK_LEN 1024*1024
+
+#ifndef CLOCK_MONOTONIC_RAW
+#define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC
+#endif
+
+static void benchmark(const char* desc, int32 (*func)(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2), schar* buf, int32 len) {
+    struct timespec start, end;
+    uint64_t us;
+    uint32_t cs, s1, s2;
+    int i, next;
+
+    clock_gettime(CLOCK_MONOTONIC_RAW, &start);
+    for (i = 0; i < ROUNDS; i++) {
+        s1 = s2 = 0;
+        next = func((schar*)buf, len, 0, &s1, &s2);
+        get_checksum1_default_1((schar*)buf, len, next, &s1, &s2);
+    }
+    clock_gettime(CLOCK_MONOTONIC_RAW, &end);
+    us = next == 0 ? 0 : (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
+    cs = next == 0 ? 0 : (s1 & 0xffff) + (s2 << 16);
+    printf("%-5s :: %5.0f MB/s :: %08x\n", desc, us ? (float)(len / (1024 * 1024) * ROUNDS) / ((float)us / 1000000.0f) : 0, cs);
+}
+
+static int32 get_checksum1_auto(schar* buf, int32 len, int32 i, uint32* ps1, uint32* ps2) {
+    uint32 cs = get_checksum1((char*)buf, len);
+    *ps1 = cs & 0xffff;
+    *ps2 = cs >> 16;
+    return len;
+}
+
+int main() {
+    int i;
+    unsigned char* buf = (unsigned char*)malloc(BLOCK_LEN);
+    for (i = 0; i < BLOCK_LEN; i++) buf[i] = (i + (i % 3) + (i % 11)) % 256;
+
+    benchmark("Auto", get_checksum1_auto, (schar*)buf, BLOCK_LEN);
+    benchmark("Raw-C", get_checksum1_default_1, (schar*)buf, BLOCK_LEN);
+    benchmark("SSE2", get_checksum1_sse2_32, (schar*)buf, BLOCK_LEN);
+    benchmark("SSSE3", get_checksum1_ssse3_32, (schar*)buf, BLOCK_LEN);
+    benchmark("AVX2", get_checksum1_avx2_64, (schar*)buf, BLOCK_LEN);
+
+    free(buf);
+    return 0;
+}
+
+#pragma GCC pop_options
+#pragma clang optimize on
+#endif /* BENCHMARK_SIMD_CHECKSUM1 */

 #endif /* HAVE_SIMD */
 #endif /* __cplusplus */