AVX vs. SSE: expected to see a larger speedup
I expected AVX to be about 1.5x faster than SSE. All 3 arrays (3 arrays * 16384 elements * 4 bytes/element = 196608 bytes) should fit in the L2 cache (256KB) on an Intel Core CPU (Broadwell).
Are there any special compiler directives or flags that I should be using?
Compiler Version
$ clang --version
Apple LLVM version 9.0.0 (clang-900.0.38)
Target: x86_64-apple-darwin16.7.0
Thread model: posix
InstalledDir: /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin
Compile line
$ make avx
clang -O3 -fno-tree-vectorize -msse -msse2 -msse3 -msse4.1 -mavx -mavx2 avx.c ; ./a.out 123
n: 123
AVX Time taken: 0 seconds 177 milliseconds
vector+vector:begin int: 1 5 127 0
SSE Time taken: 0 seconds 195 milliseconds
vector+vector:begin int: 1 5 127 0
avx.c
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>   // uint32_t
#include <x86intrin.h>
#include <time.h>
#ifndef __cplusplus
#include <stdalign.h> // C11 defines _Alignas(). This header defines alignas()
#endif

#define REPS 50000
#define AR 16384

// add int vectors via AVX
__attribute__((noinline))
void add_iv_avx(__m256i *restrict a, __m256i *restrict b, __m256i *restrict out, int N) {
    __m256i *x = __builtin_assume_aligned(a, 32);
    __m256i *y = __builtin_assume_aligned(b, 32);
    __m256i *z = __builtin_assume_aligned(out, 32);
    const int loops = N / 8; // 8 is the number of int32 in a __m256i
    for (int i = 0; i < loops; i++) {
        _mm256_store_si256(&z[i], _mm256_add_epi32(x[i], y[i]));
    }
}

// add int vectors via SSE; https://en.wikipedia.org/wiki/Restrict
__attribute__((noinline))
void add_iv_sse(__m128i *restrict a, __m128i *restrict b, __m128i *restrict out, int N) {
    __m128i *x = __builtin_assume_aligned(a, 16);
    __m128i *y = __builtin_assume_aligned(b, 16);
    __m128i *z = __builtin_assume_aligned(out, 16);
    const int loops = N / 4; // 4 is the number of int32 in a __m128i
    for (int i = 0; i < loops; i++) {
        //out[i] = _mm_add_epi32(a[i], b[i]); // this also works
        _mm_storeu_si128(&z[i], _mm_add_epi32(x[i], y[i]));
    }
}

// printing
void p128_as_int(__m128i in) {
    alignas(16) uint32_t v[4];
    _mm_store_si128((__m128i*)v, in);
    printf("int: %u %u %u %u\n", v[0], v[1], v[2], v[3]);
}

__attribute__((noinline))
void debug_print(int *h) {
    printf("vector+vector:begin ");
    p128_as_int(*(__m128i*)&h[0]);
}

int main(int argc, char *argv[]) {
    if (argc < 2) { fprintf(stderr, "usage: %s <n>\n", argv[0]); return EXIT_FAILURE; }
    int n = atoi(argv[1]);
    printf("n: %d\n", n);
    int *x, *y, *z;
    if (posix_memalign((void**)&x, 32, AR*sizeof(int))) return EXIT_FAILURE;
    if (posix_memalign((void**)&y, 32, AR*sizeof(int))) return EXIT_FAILURE;
    if (posix_memalign((void**)&z, 32, AR*sizeof(int))) return EXIT_FAILURE;
    x[0] = 0; x[1] = 2; x[2] = 4;
    y[0] = 1; y[1] = 3; y[2] = n;
    // touch each 4K page in x, y, z to avoid copy-on-write / lazy-allocation effects
    for (int i = 512; i < AR; i += 512) { x[i] = 1; y[i] = 1; z[i] = 1; }
    // warmup
    for (int i = 0; i < REPS; ++i) { add_iv_avx((__m256i*)x, (__m256i*)y, (__m256i*)z, AR); }
    // AVX
    clock_t start = clock();
    for (int i = 0; i < REPS; ++i) { add_iv_avx((__m256i*)x, (__m256i*)y, (__m256i*)z, AR); }
    int msec = (clock() - start) * 1000 / CLOCKS_PER_SEC;
    printf(" AVX Time taken: %d seconds %d milliseconds\n", msec/1000, msec%1000);
    debug_print(z);
    // warmup
    for (int i = 0; i < REPS; ++i) { add_iv_sse((__m128i*)x, (__m128i*)y, (__m128i*)z, AR); }
    // SSE
    start = clock();
    for (int i = 0; i < REPS; ++i) { add_iv_sse((__m128i*)x, (__m128i*)y, (__m128i*)z, AR); }
    msec = (clock() - start) * 1000 / CLOCKS_PER_SEC;
    printf("\n SSE Time taken: %d seconds %d milliseconds\n", msec/1000, msec%1000);
    debug_print(z);
    return EXIT_SUCCESS;
}
1 Answer
The problem is that your data doesn't fit in the L1 cache.
The L1 bandwidth of Broadwell is much larger than the L2 bandwidth.
The L1 bandwidth is large enough to load two 32-byte vectors every CPU cycle, so a better AVX vs. SSE speedup might be expected if your data set were much smaller.
Note, however, that the combined L1 read/write bandwidth is less than 2*32(r) + 32(w) = 96 bytes per cycle; in practice, about 75 bytes per cycle is achievable (see here).
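A quick back-of-the-envelope with these numbers (an estimate from the figures above, not a measurement):

/* One iteration of the AVX loop moves 2 * 32 B (loads) + 32 B (store) = 96 B.
   At the ~75 B/cycle that L1 sustains in practice, that is roughly
   96 / 75 ≈ 1.3 cycles per iteration, so even an L1-resident AVX loop is
   limited by load/store bandwidth rather than by the vector ALUs. */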
The second graph on the page linked above shows that the L2 bandwidth is indeed much smaller:
At Test_block_size=128KB (=32KB per core) the bandwidth is 900GB/s.
At Test_block_size=1MB (=256KB per core) the bandwidth is only 300GB/s.
(Note that Haswell 4770k has more or less the same L1 and L2 cache architecture as Broadwell.)
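To sanity-check the cache sizes on your own machine, macOS (the platform in the question) exposes them through sysctl; assuming the usual key names on an Intel Mac:

$ sysctl hw.l1dcachesize hw.l2cachesize

On a Broadwell part this should report 32768 bytes (32KB L1d) and 262144 bytes (256KB L2), matching the figures discussed above.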
Try reducing AR to 2000 and increasing REPS to 1000000 and see what happens to the SSE vs. AVX speedup.
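For reference, that change is just the two macros at the top of avx.c (a sketch; with AR = 2000, the working set is 3 arrays * 2000 * 4 bytes = 24000 bytes, which fits comfortably in a 32KB L1d):

#define REPS 1000000 // was 50000: more repetitions keep the total runtime measurable
#define AR   2000    // was 16384: 3 * 2000 * 4 B = 24 KB < 32 KB L1d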