Slow SIMD performance - no inlining
Consider the following examples for calculating the sum of an i32 array:
Example 1: Simple for loop
pub fn vec_sum_for_loop_i32(src: &[i32]) -> i32 {
    let mut sum = 0;
    for c in src {
        sum += *c;
    }
    sum
}
Example 2: Explicit SIMD sum:
use std::arch::x86_64::*;

// #[inline]
pub fn vec_sum_simd_direct_loop(src: &[i32]) -> i32 {
    #[cfg(debug_assertions)]
    assert!(src.as_ptr() as u64 % 64 == 0);
    #[cfg(debug_assertions)]
    assert!(src.len() % (std::mem::size_of::<__m256i>() / std::mem::size_of::<i32>()) == 0);

    let p_src = src.as_ptr();
    let batch_size = std::mem::size_of::<__m256i>() / std::mem::size_of::<i32>();

    #[cfg(debug_assertions)]
    assert!(src.len() % batch_size == 0);

    let result: i32;
    unsafe {
        let mut offset: isize = 0;
        let total: isize = src.len() as isize;
        let mut curr_sum = _mm256_setzero_si256();

        while offset < total {
            let curr = _mm256_load_epi32(p_src.offset(offset));
            curr_sum = _mm256_add_epi32(curr_sum, curr);
            offset += 8;
        }

        // this can be reduced with hadd.
        let a0 = _mm256_extract_epi32::<0>(curr_sum);
        let a1 = _mm256_extract_epi32::<1>(curr_sum);
        let a2 = _mm256_extract_epi32::<2>(curr_sum);
        let a3 = _mm256_extract_epi32::<3>(curr_sum);
        let a4 = _mm256_extract_epi32::<4>(curr_sum);
        let a5 = _mm256_extract_epi32::<5>(curr_sum);
        let a6 = _mm256_extract_epi32::<6>(curr_sum);
        let a7 = _mm256_extract_epi32::<7>(curr_sum);
        result = a0 + a1 + a2 + a3 + a4 + a5 + a6 + a7;
    }
    result
}
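As an aside on the "// this can be reduced with hadd." comment: the eight extracts at the end can indeed be replaced by a few shuffle-and-add steps, which is generally at least as cheap as an hadd sequence. Below is a minimal sketch, assuming AVX2 is available; hsum_epi32_avx2 is a name made up for this illustration, not part of the original code:

use std::arch::x86_64::*;

// Horizontal sum of the eight i32 lanes of a __m256i.
// SAFETY: the caller must ensure the CPU supports AVX2.
#[target_feature(enable = "avx2")]
unsafe fn hsum_epi32_avx2(v: __m256i) -> i32 {
    // Fold the high 128-bit lane onto the low one.
    let lo = _mm256_castsi256_si128(v);
    let hi = _mm256_extracti128_si256::<1>(v);
    let sum128 = _mm_add_epi32(lo, hi);
    // Fold the upper 64 bits onto the lower 64 bits.
    let hi64 = _mm_unpackhi_epi64(sum128, sum128);
    let sum64 = _mm_add_epi32(sum128, hi64);
    // Fold the remaining two lanes and extract the scalar result.
    let hi32 = _mm_shuffle_epi32::<0b01>(sum64);
    let sum32 = _mm_add_epi32(sum64, hi32);
    _mm_cvtsi128_si32(sum32)
}

The loop above could call such a helper in place of the extract sequence, although the reduction outside the loop is not what makes this version slow.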
When I tried to benchmark the code, the first example got ~23 GB/s (which is close to the theoretical maximum for my RAM speed). The second example got 8 GB/s.
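The question doesn't say which benchmark harness produced those figures, but a throughput number like this can be measured with nothing more than std::time::Instant. The helper below (measure_gb_per_s, a hypothetical name) times repeated passes over a buffer and divides bytes processed by elapsed seconds; it is only a rough sketch:

use std::hint::black_box;
use std::time::Instant;

// Hypothetical helper: bytes summed per second over `iters` passes of `src`, in GB/s.
fn measure_gb_per_s(f: impl Fn(&[i32]) -> i32, src: &[i32], iters: u32) -> f64 {
    let bytes = (src.len() * std::mem::size_of::<i32>()) as f64 * iters as f64;
    let start = Instant::now();
    let mut sink = 0i32;
    for _ in 0..iters {
        // wrapping_add avoids a debug-build overflow panic while keeping a
        // data dependency so the call isn't optimized away.
        sink = sink.wrapping_add(f(src));
    }
    let secs = start.elapsed().as_secs_f64();
    black_box(sink);
    bytes / secs / 1e9
}

fn main() {
    // Buffer much larger than the last-level cache, so this measures RAM bandwidth.
    let data = vec![1i32; 64 * 1024 * 1024];
    // vec_sum_for_loop_i32 is Example 1 from above.
    println!("{:.1} GB/s", measure_gb_per_s(vec_sum_for_loop_i32, &data, 10));
}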
When looking at the assembly with cargo asm, the first example translates into an unrolled, SIMD-optimized loop:
.LBB11_7:
sum += *c;
        movdqu  xmm2, xmmword ptr [rcx + 4*rax]
        paddd   xmm2, xmm0
        movdqu  xmm0, xmmword ptr [rcx + 4*rax + 16]
        paddd   xmm0, xmm1
        movdqu  xmm1, xmmword ptr [rcx + 4*rax + 32]
        movdqu  xmm3, xmmword ptr [rcx + 4*rax + 48]
        movdqu  xmm4, xmmword ptr [rcx + 4*rax + 64]
        paddd   xmm4, xmm1
        paddd   xmm4, xmm2
        movdqu  xmm2, xmmword ptr [rcx + 4*rax + 80]
        paddd   xmm2, xmm3
        paddd   xmm2, xmm0
        movdqu  xmm0, xmmword ptr [rcx + 4*rax + 96]
        paddd   xmm0, xmm4
        movdqu  xmm1, xmmword ptr [rcx + 4*rax + 112]
        paddd   xmm1, xmm2
        add     rax, 32
        add     r11, -4
        jne     .LBB11_7
.LBB11_8:
        test    r10, r10
        je      .LBB11_11
        lea     r11, [rcx + 4*rax]
        add     r11, 16
        shl     r10, 5
        xor     eax, eax
The second example doesn't have any loop unrolling and doesn't even inline the call to _mm256_add_epi32:
...
        movaps  xmmword ptr [rbp + 320], xmm7
        movaps  xmmword ptr [rbp + 304], xmm6
        and     rsp, -32
        mov     r12, rdx
        mov     rdi, rcx
        lea     rcx, [rsp + 32]
let mut curr_sum = _mm256_setzero_si256();
        call    core::core_arch::x86::avx::_mm256_setzero_si256
        movaps  xmm6, xmmword ptr [rsp + 32]
        movaps  xmm7, xmmword ptr [rsp + 48]
while offset < total {
        test    r12, r12
        jle     .LBB13_3
        xor     esi, esi
        lea     rbx, [rsp + 384]
        lea     r14, [rsp + 64]
        lea     r15, [rsp + 96]
.LBB13_2:
let curr = _mm256_load_epi32(p_src.offset(offset));
        mov     rcx, rbx
        mov     rdx, rdi
        call    core::core_arch::x86::avx512f::_mm256_load_epi32
curr_sum = _mm256_add_epi32(curr_sum, curr);
        movaps  xmmword ptr [rsp + 112], xmm7
        movaps  xmmword ptr [rsp + 96], xmm6
        mov     rcx, r14
        mov     rdx, r15
        mov     r8, rbx
        call    core::core_arch::x86::avx2::_mm256_add_epi32
        movaps  xmm6, xmmword ptr [rsp + 64]
        movaps  xmm7, xmmword ptr [rsp + 80]
offset += 8;
        add     rsi, 8
while offset < total {
        add     rdi, 32
        cmp     rsi, r12
...
This is of course a pretty trivial example, and I don't plan to use hand-crafted SIMD for a simple sum. But it still puzzles me why the explicit SIMD is so slow and why using SIMD intrinsics leads to such unoptimized code.
Comments (1)
的无屏蔽版本,这有点愚蠢。或者愚蠢的是 gcc/clang/rustc 认为它是 AVX512 内在函数。It appears you forgot to tell rustc it was allowed to use AVX2 instructions everywhere, so it couldn't inline those functions. Instead, you get a total disaster where only the wrapper functions are compiled as AVX2-using functions, or something like that.
Works fine for me with
-O -C target-cpu=skylake-avx512
(https://godbolt.org/z/csY5or43T) so it can inline even the AVX512VL load you used,_mm256_load_epi32
1, and then optimize it into a memory source operand forvpaddd ymm0, ymm0, ymmword ptr [rdi + 4*rax]
(AVX2) inside a tight loop.In GCC / clang, you get an error like "inlining failed in call to always_inline
foobar
" in this case, instead of working but slow asm. (See this for details). This is something Rust should probably sort out before this is ready for prime time, either be like MSVC and actually inline the instruction into a function using the intrinsic, or refuse to compile like GCC/clang.Footnote 1:
See How to emulate _mm256_loadu_epi32 with gcc or clang? if you didn't mean to use AVX512.
With
-O -C target-cpu=skylake
(just AVX2), it inlines everything else, includingvpaddd ymm
, but still calls out to a function that copies 32 bytes from memory to memory with AVXvmovaps
. It requires AVX512VL to inline the intrinsic, but later in the optimization process it realizes that with no masking, it's just a 256-bit load it should do without a bloated AVX-512 instruction. It's kinda dumb that Intel even provided a no-masking version of_mm256_mask[z]_loadu_epi32
that requires AVX-512. Or dumb that gcc/clang/rustc consider it an AVX512 intrinsic.
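For reference, a minimal sketch of the two usual ways to apply this fix: enable the target features for the whole build, or annotate just the hot function with #[target_feature(enable = "avx2")] so the intrinsics can be inlined into it, guarding the call with runtime feature detection. vec_sum_simd_avx2 is an invented name for this sketch, and it uses _mm256_loadu_si256 instead of the AVX512VL-only _mm256_load_epi32, in line with the footnote:

// Build-wide option: enable the features for the whole crate, e.g.
//   RUSTFLAGS="-C target-cpu=skylake-avx512" cargo build --release
// or, for AVX2 only:
//   RUSTFLAGS="-C target-feature=+avx2" cargo build --release
//
// Per-function option: compile just this function with AVX2 enabled. The
// caller must verify CPU support at runtime before calling it.
use std::arch::x86_64::*;

#[target_feature(enable = "avx2")]
unsafe fn vec_sum_simd_avx2(src: &[i32]) -> i32 {
    let mut acc = _mm256_setzero_si256();
    let chunks = src.chunks_exact(8);
    let tail = chunks.remainder(); // leftover elements, summed as scalars below
    for chunk in chunks {
        // Unaligned 256-bit load: plain AVX, no AVX512VL required, unlike
        // _mm256_load_epi32 from the question.
        let v = _mm256_loadu_si256(chunk.as_ptr() as *const __m256i);
        acc = _mm256_add_epi32(acc, v);
    }
    // Simple horizontal reduction: spill the 8 lanes and sum them.
    let mut lanes = [0i32; 8];
    _mm256_storeu_si256(lanes.as_mut_ptr() as *mut __m256i, acc);
    lanes.iter().sum::<i32>() + tail.iter().sum::<i32>()
}

fn main() {
    let data: Vec<i32> = (1..=100).collect();
    if is_x86_feature_detected!("avx2") {
        // SAFETY: AVX2 support was just verified.
        assert_eq!(unsafe { vec_sum_simd_avx2(&data) }, 5050);
    }
}

With either approach the intrinsic wrappers can be inlined, so the loop body compiles down to plain vpaddd instructions instead of calls into core::core_arch.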