I've done this exact task before. But it was mainly to measure power consumption and CPU temperatures. The following code (which is fairly long) achieves close to optimal on my Core i7 2600K.
The key thing to note here is the massive amount of manual loop unrolling, as well as the interleaving of multiplies and adds...
The full project can be found on my GitHub: https://github.com/Mysticial/Flops
Warning:
If you decide to compile and run this, pay attention to your CPU temperatures!!!
Make sure you don't overheat it. And make sure CPU throttling doesn't affect your results!
Furthermore, I take no responsibility for whatever damage may result from running this code.
Notes:
- This code is optimized for x64. x86 doesn't have enough registers for this to compile well.
- This code has been tested to work well on Visual Studio 2010/2012 and GCC 4.6. Surprisingly, ICC 11 (Intel Compiler 11) has trouble compiling it well.
- These are for pre-FMA processors. To reach peak FLOPS on Intel Haswell and AMD Bulldozer processors (and later), FMA (fused multiply-add) instructions are needed. Those are beyond the scope of this benchmark, but a rough sketch of what an FMA block could look like is shown right after these notes.
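For reference only, here is a rough, hypothetical sketch of what one unrolled block could look like with FMA3 intrinsics; it is not part of the benchmark below, and the helper name is made up. Each fused multiply-add is a single instruction but counts as two flops per lane:

#include <immintrin.h>

// Hypothetical FMA3 fragment (needs -mfma on GCC/Clang or /arch:AVX2 on MSVC).
// Each _mm256_fmadd_pd computes a*b + c in one instruction: 4 lanes x 2 flops = 8 flops.
static inline void fma_block(__m256d &r0, __m256d &r1, __m256d &r2, __m256d &r3,
                             __m256d rC, __m256d rD){
    r0 = _mm256_fmadd_pd(r0, rC, rD);
    r1 = _mm256_fmadd_pd(r1, rC, rD);
    r2 = _mm256_fmadd_pd(r2, rC, rD);
    r3 = _mm256_fmadd_pd(r3, rC, rD);
}

With that out of the way, here is the SSE version of the benchmark: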
#include <emmintrin.h>
#include <omp.h>
#include <iostream>
#include <cstdlib>      //  malloc/free and system() used by the driver below
using namespace std;

typedef unsigned long long uint64;
double test_dp_mac_SSE(double x,double y,uint64 iterations){
    register __m128d r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,rA,rB,rC,rD,rE,rF;

    // Generate starting data.
    r0 = _mm_set1_pd(x);
    r1 = _mm_set1_pd(y);

    r8 = _mm_set1_pd(-0.0);

    r2 = _mm_xor_pd(r0,r8);
    r3 = _mm_or_pd(r0,r8);
    r4 = _mm_andnot_pd(r8,r0);
    r5 = _mm_mul_pd(r1,_mm_set1_pd(0.37796447300922722721));
    r6 = _mm_mul_pd(r1,_mm_set1_pd(0.24253562503633297352));
    r7 = _mm_mul_pd(r1,_mm_set1_pd(4.1231056256176605498));
    r8 = _mm_add_pd(r0,_mm_set1_pd(0.37796447300922722721));
    r9 = _mm_add_pd(r1,_mm_set1_pd(0.24253562503633297352));
    rA = _mm_sub_pd(r0,_mm_set1_pd(4.1231056256176605498));
    rB = _mm_sub_pd(r1,_mm_set1_pd(4.1231056256176605498));

    rC = _mm_set1_pd(1.4142135623730950488);
    rD = _mm_set1_pd(1.7320508075688772935);
    rE = _mm_set1_pd(0.57735026918962576451);
    rF = _mm_set1_pd(0.70710678118654752440);

    uint64 iMASK = 0x800fffffffffffffull;
    __m128d MASK = _mm_set1_pd(*(double*)&iMASK);
    __m128d vONE = _mm_set1_pd(1.0);

    uint64 c = 0;
    while (c < iterations){
        size_t i = 0;
        while (i < 1000){
            // Here's the meat - the part that really matters.

            r0 = _mm_mul_pd(r0,rC);
            r1 = _mm_add_pd(r1,rD);
            r2 = _mm_mul_pd(r2,rE);
            r3 = _mm_sub_pd(r3,rF);
            r4 = _mm_mul_pd(r4,rC);
            r5 = _mm_add_pd(r5,rD);
            r6 = _mm_mul_pd(r6,rE);
            r7 = _mm_sub_pd(r7,rF);
            r8 = _mm_mul_pd(r8,rC);
            r9 = _mm_add_pd(r9,rD);
            rA = _mm_mul_pd(rA,rE);
            rB = _mm_sub_pd(rB,rF);

            r0 = _mm_add_pd(r0,rF);
            r1 = _mm_mul_pd(r1,rE);
            r2 = _mm_sub_pd(r2,rD);
            r3 = _mm_mul_pd(r3,rC);
            r4 = _mm_add_pd(r4,rF);
            r5 = _mm_mul_pd(r5,rE);
            r6 = _mm_sub_pd(r6,rD);
            r7 = _mm_mul_pd(r7,rC);
            r8 = _mm_add_pd(r8,rF);
            r9 = _mm_mul_pd(r9,rE);
            rA = _mm_sub_pd(rA,rD);
            rB = _mm_mul_pd(rB,rC);

            r0 = _mm_mul_pd(r0,rC);
            r1 = _mm_add_pd(r1,rD);
            r2 = _mm_mul_pd(r2,rE);
            r3 = _mm_sub_pd(r3,rF);
            r4 = _mm_mul_pd(r4,rC);
            r5 = _mm_add_pd(r5,rD);
            r6 = _mm_mul_pd(r6,rE);
            r7 = _mm_sub_pd(r7,rF);
            r8 = _mm_mul_pd(r8,rC);
            r9 = _mm_add_pd(r9,rD);
            rA = _mm_mul_pd(rA,rE);
            rB = _mm_sub_pd(rB,rF);

            r0 = _mm_add_pd(r0,rF);
            r1 = _mm_mul_pd(r1,rE);
            r2 = _mm_sub_pd(r2,rD);
            r3 = _mm_mul_pd(r3,rC);
            r4 = _mm_add_pd(r4,rF);
            r5 = _mm_mul_pd(r5,rE);
            r6 = _mm_sub_pd(r6,rD);
            r7 = _mm_mul_pd(r7,rC);
            r8 = _mm_add_pd(r8,rF);
            r9 = _mm_mul_pd(r9,rE);
            rA = _mm_sub_pd(rA,rD);
            rB = _mm_mul_pd(rB,rC);

            i++;
        }

        // Need to renormalize to prevent denormal/overflow.
        r0 = _mm_and_pd(r0,MASK);
        r1 = _mm_and_pd(r1,MASK);
        r2 = _mm_and_pd(r2,MASK);
        r3 = _mm_and_pd(r3,MASK);
        r4 = _mm_and_pd(r4,MASK);
        r5 = _mm_and_pd(r5,MASK);
        r6 = _mm_and_pd(r6,MASK);
        r7 = _mm_and_pd(r7,MASK);
        r8 = _mm_and_pd(r8,MASK);
        r9 = _mm_and_pd(r9,MASK);
        rA = _mm_and_pd(rA,MASK);
        rB = _mm_and_pd(rB,MASK);
        r0 = _mm_or_pd(r0,vONE);
        r1 = _mm_or_pd(r1,vONE);
        r2 = _mm_or_pd(r2,vONE);
        r3 = _mm_or_pd(r3,vONE);
        r4 = _mm_or_pd(r4,vONE);
        r5 = _mm_or_pd(r5,vONE);
        r6 = _mm_or_pd(r6,vONE);
        r7 = _mm_or_pd(r7,vONE);
        r8 = _mm_or_pd(r8,vONE);
        r9 = _mm_or_pd(r9,vONE);
        rA = _mm_or_pd(rA,vONE);
        rB = _mm_or_pd(rB,vONE);

        c++;
    }

    r0 = _mm_add_pd(r0,r1);
    r2 = _mm_add_pd(r2,r3);
    r4 = _mm_add_pd(r4,r5);
    r6 = _mm_add_pd(r6,r7);
    r8 = _mm_add_pd(r8,r9);
    rA = _mm_add_pd(rA,rB);

    r0 = _mm_add_pd(r0,r2);
    r4 = _mm_add_pd(r4,r6);
    r8 = _mm_add_pd(r8,rA);

    r0 = _mm_add_pd(r0,r4);
    r0 = _mm_add_pd(r0,r8);

    // Prevent Dead Code Elimination
    double out = 0;
    __m128d temp = r0;
    out += ((double*)&temp)[0];
    out += ((double*)&temp)[1];

    return out;
}
void test_dp_mac_SSE(int tds,uint64 iterations){
    double *sum = (double*)malloc(tds * sizeof(double));
    double start = omp_get_wtime();

#pragma omp parallel num_threads(tds)
    {
        double ret = test_dp_mac_SSE(1.1,2.1,iterations);
        sum[omp_get_thread_num()] = ret;
    }

    double secs = omp_get_wtime() - start;
    //  48 instructions per inner pass * 1000 passes * "iterations" outer loops
    //  * "tds" threads * 2 doubles per __m128d vector.
    uint64 ops = 48 * 1000 * iterations * tds * 2;
    cout << "Seconds = " << secs << endl;
    cout << "FP Ops = " << ops << endl;
    cout << "FLOPs = " << ops / secs << endl;

    double out = 0;
    int c = 0;
    while (c < tds){
        out += sum[c++];
    }

    cout << "sum = " << out << endl;
    cout << endl;

    free(sum);
}
int main(){
    // (threads, iterations)
    test_dp_mac_SSE(8,10000000);
    system("pause");
}
Output (1 thread, 10000000 iterations) - compiled with Visual Studio 2010 SP1 - x64 Release:
Seconds = 55.5104
FP Ops = 960000000000
FLOPs = 1.7294e+010
sum = 2.22652
The machine is a Core i7 2600K @ 4.4 GHz. The theoretical SSE peak is 4 flops * 4.4 GHz = 17.6 GFlops. This code achieves 17.3 GFlops - not bad.
Output (8 threads, 10000000 iterations) - compiled with Visual Studio 2010 SP1 - x64 Release:
Seconds = 117.202
FP Ops = 7680000000000
FLOPs = 6.55279e+010
sum = 17.8122
The theoretical SSE peak is 4 flops * 4 cores * 4.4 GHz = 70.4 GFlops. Actual: 65.5 GFlops.
Let's take this one step further. AVX...
#include <immintrin.h>
#include <omp.h>
#include <iostream>
#include <cstdlib>      //  malloc/free and system() used by the driver below
using namespace std;

typedef unsigned long long uint64;
double test_dp_mac_AVX(double x,double y,uint64 iterations){
    register __m256d r0,r1,r2,r3,r4,r5,r6,r7,r8,r9,rA,rB,rC,rD,rE,rF;

    // Generate starting data.
    r0 = _mm256_set1_pd(x);
    r1 = _mm256_set1_pd(y);

    r8 = _mm256_set1_pd(-0.0);

    r2 = _mm256_xor_pd(r0,r8);
    r3 = _mm256_or_pd(r0,r8);
    r4 = _mm256_andnot_pd(r8,r0);
    r5 = _mm256_mul_pd(r1,_mm256_set1_pd(0.37796447300922722721));
    r6 = _mm256_mul_pd(r1,_mm256_set1_pd(0.24253562503633297352));
    r7 = _mm256_mul_pd(r1,_mm256_set1_pd(4.1231056256176605498));
    r8 = _mm256_add_pd(r0,_mm256_set1_pd(0.37796447300922722721));
    r9 = _mm256_add_pd(r1,_mm256_set1_pd(0.24253562503633297352));
    rA = _mm256_sub_pd(r0,_mm256_set1_pd(4.1231056256176605498));
    rB = _mm256_sub_pd(r1,_mm256_set1_pd(4.1231056256176605498));

    rC = _mm256_set1_pd(1.4142135623730950488);
    rD = _mm256_set1_pd(1.7320508075688772935);
    rE = _mm256_set1_pd(0.57735026918962576451);
    rF = _mm256_set1_pd(0.70710678118654752440);

    uint64 iMASK = 0x800fffffffffffffull;
    __m256d MASK = _mm256_set1_pd(*(double*)&iMASK);
    __m256d vONE = _mm256_set1_pd(1.0);

    uint64 c = 0;
    while (c < iterations){
        size_t i = 0;
        while (i < 1000){
            // Here's the meat - the part that really matters.

            r0 = _mm256_mul_pd(r0,rC);
            r1 = _mm256_add_pd(r1,rD);
            r2 = _mm256_mul_pd(r2,rE);
            r3 = _mm256_sub_pd(r3,rF);
            r4 = _mm256_mul_pd(r4,rC);
            r5 = _mm256_add_pd(r5,rD);
            r6 = _mm256_mul_pd(r6,rE);
            r7 = _mm256_sub_pd(r7,rF);
            r8 = _mm256_mul_pd(r8,rC);
            r9 = _mm256_add_pd(r9,rD);
            rA = _mm256_mul_pd(rA,rE);
            rB = _mm256_sub_pd(rB,rF);

            r0 = _mm256_add_pd(r0,rF);
            r1 = _mm256_mul_pd(r1,rE);
            r2 = _mm256_sub_pd(r2,rD);
            r3 = _mm256_mul_pd(r3,rC);
            r4 = _mm256_add_pd(r4,rF);
            r5 = _mm256_mul_pd(r5,rE);
            r6 = _mm256_sub_pd(r6,rD);
            r7 = _mm256_mul_pd(r7,rC);
            r8 = _mm256_add_pd(r8,rF);
            r9 = _mm256_mul_pd(r9,rE);
            rA = _mm256_sub_pd(rA,rD);
            rB = _mm256_mul_pd(rB,rC);

            r0 = _mm256_mul_pd(r0,rC);
            r1 = _mm256_add_pd(r1,rD);
            r2 = _mm256_mul_pd(r2,rE);
            r3 = _mm256_sub_pd(r3,rF);
            r4 = _mm256_mul_pd(r4,rC);
            r5 = _mm256_add_pd(r5,rD);
            r6 = _mm256_mul_pd(r6,rE);
            r7 = _mm256_sub_pd(r7,rF);
            r8 = _mm256_mul_pd(r8,rC);
            r9 = _mm256_add_pd(r9,rD);
            rA = _mm256_mul_pd(rA,rE);
            rB = _mm256_sub_pd(rB,rF);

            r0 = _mm256_add_pd(r0,rF);
            r1 = _mm256_mul_pd(r1,rE);
            r2 = _mm256_sub_pd(r2,rD);
            r3 = _mm256_mul_pd(r3,rC);
            r4 = _mm256_add_pd(r4,rF);
            r5 = _mm256_mul_pd(r5,rE);
            r6 = _mm256_sub_pd(r6,rD);
            r7 = _mm256_mul_pd(r7,rC);
            r8 = _mm256_add_pd(r8,rF);
            r9 = _mm256_mul_pd(r9,rE);
            rA = _mm256_sub_pd(rA,rD);
            rB = _mm256_mul_pd(rB,rC);

            i++;
        }

        // Need to renormalize to prevent denormal/overflow.
        r0 = _mm256_and_pd(r0,MASK);
        r1 = _mm256_and_pd(r1,MASK);
        r2 = _mm256_and_pd(r2,MASK);
        r3 = _mm256_and_pd(r3,MASK);
        r4 = _mm256_and_pd(r4,MASK);
        r5 = _mm256_and_pd(r5,MASK);
        r6 = _mm256_and_pd(r6,MASK);
        r7 = _mm256_and_pd(r7,MASK);
        r8 = _mm256_and_pd(r8,MASK);
        r9 = _mm256_and_pd(r9,MASK);
        rA = _mm256_and_pd(rA,MASK);
        rB = _mm256_and_pd(rB,MASK);
        r0 = _mm256_or_pd(r0,vONE);
        r1 = _mm256_or_pd(r1,vONE);
        r2 = _mm256_or_pd(r2,vONE);
        r3 = _mm256_or_pd(r3,vONE);
        r4 = _mm256_or_pd(r4,vONE);
        r5 = _mm256_or_pd(r5,vONE);
        r6 = _mm256_or_pd(r6,vONE);
        r7 = _mm256_or_pd(r7,vONE);
        r8 = _mm256_or_pd(r8,vONE);
        r9 = _mm256_or_pd(r9,vONE);
        rA = _mm256_or_pd(rA,vONE);
        rB = _mm256_or_pd(rB,vONE);

        c++;
    }

    r0 = _mm256_add_pd(r0,r1);
    r2 = _mm256_add_pd(r2,r3);
    r4 = _mm256_add_pd(r4,r5);
    r6 = _mm256_add_pd(r6,r7);
    r8 = _mm256_add_pd(r8,r9);
    rA = _mm256_add_pd(rA,rB);

    r0 = _mm256_add_pd(r0,r2);
    r4 = _mm256_add_pd(r4,r6);
    r8 = _mm256_add_pd(r8,rA);

    r0 = _mm256_add_pd(r0,r4);
    r0 = _mm256_add_pd(r0,r8);

    // Prevent Dead Code Elimination
    double out = 0;
    __m256d temp = r0;
    out += ((double*)&temp)[0];
    out += ((double*)&temp)[1];
    out += ((double*)&temp)[2];
    out += ((double*)&temp)[3];

    return out;
}
void test_dp_mac_AVX(int tds,uint64 iterations){
    double *sum = (double*)malloc(tds * sizeof(double));
    double start = omp_get_wtime();

#pragma omp parallel num_threads(tds)
    {
        double ret = test_dp_mac_AVX(1.1,2.1,iterations);
        sum[omp_get_thread_num()] = ret;
    }

    double secs = omp_get_wtime() - start;
    //  48 instructions per inner pass * 1000 passes * "iterations" outer loops
    //  * "tds" threads * 4 doubles per __m256d vector.
    uint64 ops = 48 * 1000 * iterations * tds * 4;
    cout << "Seconds = " << secs << endl;
    cout << "FP Ops = " << ops << endl;
    cout << "FLOPs = " << ops / secs << endl;

    double out = 0;
    int c = 0;
    while (c < tds){
        out += sum[c++];
    }

    cout << "sum = " << out << endl;
    cout << endl;

    free(sum);
}
int main(){
    // (threads, iterations)
    test_dp_mac_AVX(8,10000000);
    system("pause");
}
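One build note: the _mm256_* intrinsics need AVX code generation to be enabled (-mavx on GCC/Clang, /arch:AVX on MSVC); otherwise the code will either fail to compile or fault at runtime on a CPU without AVX. A small compile-time guard such as the following sketch can catch a missing flag, relying on the __AVX__ macro that GCC/Clang set with -mavx and recent MSVC sets with /arch:AVX:

#ifndef __AVX__
#error "AVX is not enabled - build with -mavx (GCC/Clang) or /arch:AVX (MSVC)."
#endif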
Output (1 thread, 10000000 iterations) - compiled with Visual Studio 2010 SP1 - x64 Release:
Seconds = 57.4679
FP Ops = 1920000000000
FLOPs = 3.34099e+010
sum = 4.45305
The theoretical AVX peak is 8 flops * 4.4 GHz = 35.2 GFlops. Actual: 33.4 GFlops.
Output (8 threads, 10000000 iterations) - compiled with Visual Studio 2010 SP1 - x64 Release:
Seconds = 111.119
FP Ops = 15360000000000
FLOPs = 1.3823e+011
sum = 35.6244
The theoretical AVX peak is 8 flops * 4 cores * 4.4 GHz = 140.8 GFlops. Actual: 138.2 GFlops.
Now for some explanations:
The performance-critical part is obviously the 48 instructions inside the inner loop. You'll notice that they are broken into 4 blocks of 12 instructions each. Each of these 12-instruction blocks is completely independent of the others, and since these processors can issue one SIMD multiply and one SIMD add per cycle, each block takes on average 6 cycles to execute.
So there are 12 instructions and 6 cycles between issue and use. The latency of a multiply is 5 cycles, so that is just enough to avoid latency stalls.
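Putting numbers on that for the 2600K: 48 instructions per inner pass at 2 instructions per cycle is 24 cycles, and 48 SSE instructions * 2 doubles each is 96 flops, so 96 / 24 = 4 flops per cycle, which at 4.4 GHz is exactly the 17.6 GFlops theoretical peak quoted above.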
The renormalization step is needed to keep the data from overflowing or underflowing; it's required because this do-nothing code slowly grows or shrinks the magnitude of the data.
So it would actually be possible to do better by just using all zeros and dropping the renormalization step. However, since I wrote this benchmark to measure power consumption and temperature, I had to make sure the flops were done on "real" data rather than on zeros, because the execution units may well have special-case handling for zeros that uses less power and produces less heat.
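Here is a scalar sketch of what that renormalization actually does (the same bit trick the _mm_and_pd/_mm_or_pd pair performs, written with memcpy purely to keep the illustration free of aliasing concerns): the mask keeps the sign and mantissa bits and zeroes the exponent, and OR-ing with the bit pattern of 1.0 then forces the exponent back to the bias, so the magnitude lands in [1, 2) while the data stays "real".

#include <cstdint>
#include <cstdio>
#include <cstring>

// Keep sign + mantissa, zero the exponent, then OR in the bit pattern of 1.0.
double renormalize(double x){
    const uint64_t MASK = 0x800fffffffffffffull;
    uint64_t bits, one_bits;
    double one = 1.0;
    std::memcpy(&bits, &x, sizeof(bits));
    std::memcpy(&one_bits, &one, sizeof(one_bits));
    bits = (bits & MASK) | one_bits;
    std::memcpy(&x, &bits, sizeof(x));
    return x;
}

int main(){
    std::printf("%g -> %g\n", 1.0e300, renormalize(1.0e300));   // huge value pulled back into [1, 2)
    std::printf("%g -> %g\n", -3.5e-20, renormalize(-3.5e-20)); // sign and mantissa bits survive
}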
More results:
- Intel Core i7 920 @ 3.5 GHz
- Windows 7 Ultimate x64
- Visual Studio 2010 SP1 - x64 Release
Threads: 1
Seconds = 72.1116
FP Ops = 960000000000
FLOPs = 1.33127e+010
sum = 2.22652
Theoretical SSE peak: 4 flops * 3.5 GHz = 14.0 GFlops. Actual: 13.3 GFlops.
Threads: 8
Seconds = 149.576
FP Ops = 7680000000000
FLOPs = 5.13452e+010
sum = 17.8122
Theoretical SSE peak: 4 flops * 4 cores * 3.5 GHz = 56.0 GFlops. Actual: 51.3 GFlops.
My processor temperatures reached 76 C on the multi-threaded run! If you run these, make sure your results aren't affected by CPU throttling.
- 2 x Intel Xeon X5482 Harpertown @ 3.2 GHz
- Ubuntu Linux 10 x64
- GCC 4.5.2 x64 - (-O2 -msse3 -fopenmp)
Threads: 1
Seconds = 78.3357
FP Ops = 960000000000
FLOPs = 1.22549e+10
sum = 2.22652
Theoretical SSE peak: 4 flops * 3.2 GHz = 12.8 GFlops. Actual: 12.3 GFlops.
Threads: 8
Seconds = 78.4733
FP Ops = 7680000000000
FLOPs = 9.78676e+10
sum = 17.8122
Theoretical SSE peak: 4 flops * 8 cores * 3.2 GHz = 102.4 GFlops. Actual: 97.9 GFlops.
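Finally, for reference, a tiny helper that reproduces the theoretical-peak numbers quoted throughout this answer. It only encodes the rule of thumb used above (one SIMD add plus one SIMD multiply per cycle, i.e. 4 flops/cycle/core for 128-bit SSE and 8 for 256-bit AVX); the function name is just for illustration:

#include <cstdio>

// flops_per_cycle: 4 for SSE (2 doubles x add+mul), 8 for AVX (4 doubles x add+mul)
double theoretical_peak_gflops(double flops_per_cycle, int cores, double ghz){
    return flops_per_cycle * cores * ghz;
}

int main(){
    std::printf("2600K SSE, 1 core  : %5.1f GFlops\n", theoretical_peak_gflops(4, 1, 4.4)); // 17.6
    std::printf("2600K AVX, 4 cores : %5.1f GFlops\n", theoretical_peak_gflops(8, 4, 4.4)); // 140.8
    std::printf("X5482 SSE, 8 cores : %5.1f GFlops\n", theoretical_peak_gflops(4, 8, 3.2)); // 102.4
}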