vector<double> faster than double*: why?
Here's a loop that I've tried with std::vector<double> and with plain old double*.
For 10 million elements, the vector version consistently runs in about 80% of the time that the double* version takes; for pretty much any value of N, vector is notably faster.
Peeking at the GCC STL source code, I don't see that std::vector is doing anything essentially fancier than what the double* idiom does (i.e., it allocates with plain old new[], and operator[] dereferences an offset). This question speaks to that, too.
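For what it's worth, here is a stripped-down sketch of that idiom (my own illustration, with a made-up name dvec; it is not the actual libstdc++ source): new[]-backed storage plus an operator[] that just dereferences base + offset, the same thing the double* version does by hand.
#include <cstddef>  // std::size_t
// Sketch only -- not the real libstdc++ vector, just the shape of the idiom:
// storage obtained with new[], operator[] returning *(base + offset).
// (Copying is omitted for brevity; a real container would follow the rule of three.)
struct dvec
{
    explicit dvec( std::size_t n ) : p_( new double[ n ] ) {}
    ~dvec() { delete [] p_; }
    double& operator[]( std::size_t i ) { return p_[ i ]; }
    const double& operator[]( std::size_t i ) const { return p_[ i ]; }
private:
    double* p_;
};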
Any ideas why the vector version is faster?
Compiler: GCC 4.6.1
Example compile line: g++ -Ofast -march=native -DNDEBUG \
-ftree-vectorizer-verbose=2 -o vector.bin \
vector.cpp -lrt
OS: CentOS 5
CPU: Opteron 8431
RAM: 128 GB
Results are qualitatively the same if I use icpc 11.1 or run on a Xeon. Also, the vectorizer dump says that only the fill operation in std::vector's constructor was vectorized.
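That fill is the zero-initialization that vector<double>( N ) performs when it constructs its elements; roughly, it amounts to something like the following (a simplified sketch of my own with a made-up helper name, not the actual library code):
#include <algorithm>  // std::fill_n
#include <cstddef>    // std::size_t
// Roughly what constructing vector<double> y( N ) amounts to for a trivial
// element type: obtain storage, then zero-fill every element.  That zero-fill
// loop is what the vectorizer report refers to.  (make_zero_filled is a
// hypothetical helper, not a library function.)
double* make_zero_filled( std::size_t N )
{
    double* storage = new double[ N ];
    std::fill_n( storage, N, 0.0 );
    return storage;
}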
The vector version:
#include <vector>
#include <iostream>
#include <boost/lexical_cast.hpp>
#include "util.h"
#include "rkck_params.h"
using namespace std;
int main( int argc, char* argv[] )
{
const size_t N = boost::lexical_cast<size_t>( argv[ 1 ] );
vector<double> y_old( N );
vector<double> y_new( N );
vector<double> y_err( N );
vector<double> k0( N );
vector<double> k1( N );
vector<double> k2( N );
vector<double> k3( N );
vector<double> k4( N );
vector<double> k5( N );
const double h = 0.5;
const timespec start = clock_tick();
for ( size_t i = 0 ; i < N ; ++i )
{
y_new[ i ] = y_old[ i ]
+ h
*(
rkck::c[ 0 ]*k0[ i ]
+ rkck::c[ 2 ]*k2[ i ]
+ rkck::c[ 3 ]*k3[ i ]
+ rkck::c[ 5 ]*k5[ i ]
);
y_err[ i ] = h
*(
rkck::cdiff[ 0 ]*k0[ i ]
+ rkck::cdiff[ 2 ]*k2[ i ]
+ rkck::cdiff[ 3 ]*k3[ i ]
+ rkck::cdiff[ 4 ]*k4[ i ]
+ rkck::cdiff[ 5 ]*k5[ i ]
);
}
const timespec stop = clock_tick();
const double total_time = seconds( start, stop );
// Output
cout << "vector\t" << N << "\t" << total_time << endl;
return 0;
}
The double* version:
#include <iostream>
#include <boost/lexical_cast.hpp>
#include "util.h"
#include "rkck_params.h"
using namespace std;
int main( int argc, char* argv[] )
{
const size_t N = boost::lexical_cast<size_t>( argv[ 1 ] );
double* y_old = new double[ N ];
double* y_new = new double[ N ];
double* y_err = new double[ N ];
double* k0 = new double[ N ];
double* k1 = new double[ N ];
double* k2 = new double[ N ];
double* k3 = new double[ N ];
double* k4 = new double[ N ];
double* k5 = new double[ N ];
const double h = 0.5;
const timespec start = clock_tick();
for ( size_t i = 0 ; i < N ; ++i )
{
y_new[ i ]
= y_old[ i ]
+ h
*(
rkck::c[ 0 ]*k0[ i ]
+ rkck::c[ 2 ]*k2[ i ]
+ rkck::c[ 3 ]*k3[ i ]
+ rkck::c[ 5 ]*k5[ i ]
);
y_err[ i ]
= h
*(
rkck::cdiff[ 0 ]*k0[ i ]
+ rkck::cdiff[ 2 ]*k2[ i ]
+ rkck::cdiff[ 3 ]*k3[ i ]
+ rkck::cdiff[ 4 ]*k4[ i ]
+ rkck::cdiff[ 5 ]*k5[ i ]
);
}
const timespec stop = clock_tick();
const double total_time = seconds( start, stop );
delete [] y_old;
delete [] y_new;
delete [] y_err;
delete [] k0;
delete [] k1;
delete [] k2;
delete [] k3;
delete [] k4;
delete [] k5;
// Output
cout << "plain\t" << N << "\t" << total_time << endl;
return 0;
}
rkck_params.h:
#ifndef RKCK_PARAMS_H
#define RKCK_PARAMS_H
namespace rkck
{
// C.f. $c_i$ in Ch. 16.2 of NR in C++, 2nd ed.
const double c[ 6 ]
= { 37.0/378.0,
0.0,
250.0/621.0,
125.0/594,
0.0,
512.0/1771.0 };
// C.f. $( c_i - c_i^* )$ in Ch. 16.2 of NR in C++, 2nd ed.
const double cdiff[ 6 ]
= { c[ 0 ] - 2825.0/27648.0,
c[ 1 ] - 0.0,
c[ 2 ] - 18575.0/48384.0,
c[ 3 ] - 13525.0/55296.0,
c[ 4 ] - 277.0/14336.0,
c[ 5 ] - 1.0/4.0 };
}
#endif
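For reference, the two assignments inside the timed loops are the element-wise Cash-Karp update and embedded error estimate built from these coefficients, in the notation of NR Ch. 16.2: $y_i^{\mathrm{new}} = y_i^{\mathrm{old}} + h \sum_{j=0}^{5} c_j k_{j,i}$ and $y_i^{\mathrm{err}} = h \sum_{j=0}^{5} ( c_j - c_j^{*} ) k_{j,i}$.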
util.h:
#ifndef UTIL_H
#define UTIL_H
#include <time.h>
#include <utility>
inline timespec clock_tick()
{
timespec tick;
clock_gettime( CLOCK_REALTIME, &tick );
return tick;
}
// \cite{www.guyrutenberg.com/2007/09/22/profiling-code-using-clock_gettime}
inline double seconds( const timespec& earlier, const timespec& later )
{
double seconds_diff = -1.0;
double nano_diff = -1.0;
if ( later.tv_nsec < earlier.tv_nsec )
{
seconds_diff = later.tv_sec - earlier.tv_sec - 1;
nano_diff = ( 1.0e9 + later.tv_nsec - earlier.tv_nsec )*1.0e-9;
}
else
{
seconds_diff = later.tv_sec - earlier.tv_sec;
nano_diff = ( later.tv_nsec - earlier.tv_nsec )*1.0e-9;
}
return seconds_diff + nano_diff;
}
#endif
Comments (1)
In the vector version your data is initialized to zero. In the new version it's uninitialized, so different work might be done. Did you run multiple times, in different orders?
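One way to test that (an illustrative tweak on my part, not something from the original post) is to value-initialize the new[] arrays so the double* version zeroes and touches its memory up front, the same way the vector constructors do:
// new double[ N ]() value-initializes, i.e. zeroes, each array -- the same
// work vector<double>( N ) does in its constructor -- so every page is
// faulted in and written before the timed loop begins.
double* y_old = new double[ N ]();
double* y_new = new double[ N ]();
double* y_err = new double[ N ]();
double* k0 = new double[ N ]();
double* k1 = new double[ N ]();
double* k2 = new double[ N ]();
double* k3 = new double[ N ]();
double* k4 = new double[ N ]();
double* k5 = new double[ N ]();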