CUDA n-body simulation - shared memory problem
Based on the example from the NVIDIA GPU Computing SDK, I created two kernels for an n-body simulation. The first kernel, which doesn't take advantage of shared memory, is ~15% faster than the second kernel, which does use shared memory. Why is the kernel with shared memory slower?
Kernel parameters:
- 8192 bodies
- threads per block = 128
- blocks per grid = 64
- device: GeForce GTX 560 Ti
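For context, a minimal host-side launch matching these parameters could look like the sketch below. The question does not show its host code, so the allocation, initialization, and timestep value here are my assumptions:

// Hypothetical driver, not from the original post: 64 blocks x 128 threads = 8192 bodies.
int main()
{
    float4 *d_pos;
    cudaMalloc(&d_pos, N * sizeof(float4));
    // ... fill d_pos (and the __device__ vel array) with initial conditions ...
    float dt = 0.01f;                      // assumed timestep
    points<<<64, 128>>>(d_pos, dt);        // blocks per grid = 64, threads per block = 128
    cudaDeviceSynchronize();
    cudaFree(d_pos);
    return 0;
}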
First kernel:
#define N 8192
#define EPS2 0.001f

__device__ float4 vel[N];   // body velocities kept in device memory

// Accumulate the softened gravitational pull of body bj on body bi.
__device__ float3 force(float4 bi, float4 bj, float3 ai)
{
    float3 r;
    r.x = bj.x - bi.x;
    r.y = bj.y - bi.y;
    r.z = bj.z - bi.z;
    float distSqr = r.x * r.x + r.y * r.y + r.z * r.z + EPS2;
    float distSixth = distSqr * distSqr * distSqr;
    float invDistCube = 1.0f / sqrtf(distSixth);   // (|r|^2 + eps^2)^(-3/2)
    float s = bj.w * invDistCube;                  // bj.w carries the mass of body j
    ai.x += r.x * s;
    ai.y += r.y * s;
    ai.z += r.z * s;
    return ai;
}

__global__ void points(float4 *pos, float dt)
{
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    if (k >= N) return;

    float4 bi, bj, v;
    float3 ai;
    v  = vel[k];
    bi = pos[k];
    ai = make_float3(0, 0, 0);

    // Every thread walks all N positions straight from global memory.
    for (int i = 0; i < N; i++)
    {
        bj = pos[i];
        ai = force(bi, bj, ai);
    }

    // Euler integration step.
    v.x += ai.x * dt;
    v.y += ai.y * dt;
    v.z += ai.z * dt;
    bi.x += v.x * dt;
    bi.y += v.y * dt;
    bi.z += v.z * dt;
    pos[k] = bi;
    vel[k] = v;
}
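For reference, force() accumulates the standard softened gravitational interaction; written out (my annotation in LaTeX, not part of the original post):

\mathbf{a}_i \leftarrow \mathbf{a}_i + \frac{m_j \, \mathbf{r}_{ij}}{\left( \lVert \mathbf{r}_{ij} \rVert^2 + \epsilon^2 \right)^{3/2}}, \qquad \mathbf{r}_{ij} = \mathbf{x}_j - \mathbf{x}_i

where m_j = bj.w and \epsilon^2 = EPS2; the softening term keeps the denominator from vanishing when two bodies coincide.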
Second kernel:
#define N 8192
#define EPS2 0.001f
#define THREADS_PER_BLOCK 128

__device__ float4 vel[N];                          // body velocities kept in device memory
__shared__ float4 shPosition[THREADS_PER_BLOCK];   // one tile of positions, staged per block

// Accumulate the softened gravitational pull of body bj on body bi.
__device__ float3 force(float4 bi, float4 bj, float3 ai)
{
    float3 r;
    r.x = bj.x - bi.x;
    r.y = bj.y - bi.y;
    r.z = bj.z - bi.z;
    float distSqr = r.x * r.x + r.y * r.y + r.z * r.z + EPS2;
    float distSixth = distSqr * distSqr * distSqr;
    float invDistCube = 1.0f / sqrtf(distSixth);   // (|r|^2 + eps^2)^(-3/2)
    float s = bj.w * invDistCube;                  // bj.w carries the mass of body j
    ai.x += r.x * s;
    ai.y += r.y * s;
    ai.z += r.z * s;
    return ai;
}

// Sum the contributions of all bodies in the tile currently held in shared memory.
__device__ float3 accumulate_tile(float4 myPosition, float3 accel)
{
    int i;
    for (i = 0; i < THREADS_PER_BLOCK; i++)
    {
        accel = force(myPosition, shPosition[i], accel);
    }
    return accel;
}

__global__ void points(float4 *pos, float dt)
{
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    if (k >= N) return;

    float4 bi, v;
    float3 ai;
    v  = vel[k];
    bi = pos[k];
    ai = make_float3(0.0f, 0.0f, 0.0f);

    int i, tile;
    for (tile = 0; tile < N / THREADS_PER_BLOCK; tile++)
    {
        // Each thread stages one position of the current tile into shared memory.
        i = tile * blockDim.x + threadIdx.x;
        shPosition[threadIdx.x] = pos[i];
        __syncthreads();                 // wait until the whole tile is loaded
        ai = accumulate_tile(bi, ai);
        __syncthreads();                 // keep the tile intact until everyone is done
    }

    // Euler integration step.
    v.x += ai.x * dt;
    v.y += ai.y * dt;
    v.z += ai.z * dt;
    bi.x += v.x * dt;
    bi.y += v.y * dt;
    bi.z += v.z * dt;
    pos[k] = bi;
    vel[k] = v;
}
Answers (2):
The only really useful answer will be obtained by careful profiling, and that is something only you are in a position to do. NVIDIA ships useful profiling tools for both Linux and Windows; now might be the time to use them.

Having said that, the register consumption of the shared memory version is considerably larger than that of the non-shared memory version (37 versus 29 registers when compiled for the sm_20 target with the CUDA 4.0 release compiler). It might be a simple difference in occupancy that is causing the change in performance you are seeing.
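To see those register counts yourself, you can ask ptxas to report per-kernel resource usage, and then experiment with capping registers to trade spills for occupancy (a sketch; the cap of 32 is only illustrative):

// Report registers and shared memory per kernel at compile time:
//   nvcc -arch=sm_20 -Xptxas -v nbody.cu
//
// Optionally hint a register cap via launch bounds (THREADS_PER_BLOCK = 128
// matches the block size from the question); the compiler may then spill
// rather than allocate more registers:
__global__ void __launch_bounds__(THREADS_PER_BLOCK) points(float4 *pos, float dt)
{
    // ... kernel body unchanged ...
}
//
// Or cap registers for the whole compilation unit:
//   nvcc -arch=sm_20 -maxrregcount=32 nbody.cu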
Actually, the non-shared version of the kernel does use shared memory, in the form of the L1 cache (on Fermi the two occupy the same on-chip storage). From the code we can see that the threads hit the same areas of global memory, so the data gets cached and reused. When you add the better occupancy and the absence of extra instructions (synchronization and so on), you get the faster kernel.
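As a follow-up experiment, on a Fermi part like the GTX 560 Ti you can bias the 64 KB of on-chip memory toward L1 for the kernel that relies on caching (a host-side runtime API sketch, not from the original answer):

// Prefer a 48 KB L1 / 16 KB shared split for the non-shared kernel,
// giving the implicit caching more room to work with:
cudaFuncSetCacheConfig(points, cudaFuncCachePreferL1);

// The shared-memory kernel would instead want the shared-heavy split:
// cudaFuncSetCacheConfig(points, cudaFuncCachePreferShared);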