@@ -11,11 +11,186 @@ using mnd::CpuGenerator;
 
 namespace mnd
 {
+    template class CpuGenerator<float, mnd::X86_AVX_FMA, false>;
+    template class CpuGenerator<float, mnd::X86_AVX_FMA, true>;
+
+    template class CpuGenerator<double, mnd::X86_AVX_FMA, false>;
+    template class CpuGenerator<double, mnd::X86_AVX_FMA, true>;
+
     template class CpuGenerator<DoubleDouble, mnd::X86_AVX_FMA, false>;
     template class CpuGenerator<DoubleDouble, mnd::X86_AVX_FMA, true>;
 }
 
+template<bool parallel>
+void CpuGenerator<float, mnd::X86_AVX_FMA, parallel>::generate(const mnd::MandelInfo& info, float* data)
+{
+    using T = float;
+    const MandelViewport& view = info.view;
+    const float dppf = float(view.width / info.bWidth);
+    const float viewxf = float(view.x);
+    __m256 viewx = { viewxf, viewxf, viewxf, viewxf, viewxf, viewxf, viewxf, viewxf };
+    __m256 dpp = { dppf, dppf, dppf, dppf, dppf, dppf, dppf, dppf };
+
+    if constexpr (parallel)
+        omp_set_num_threads(omp_get_num_procs());
+#pragma omp parallel for schedule(static, 1) if (parallel)
+    for (long j = 0; j < info.bHeight; j++) {
+        T y = T(view.y) + T(j) * T(view.height / info.bHeight);
+        __m256 ys = { y, y, y, y, y, y, y, y };
+        long i = 0;
+        for (; i < info.bWidth; i += 8) {
+            __m256 pixc = { float(i), float(i + 1), float(i + 2), float(i + 3), float(i + 4), float(i + 5), float(i + 6), float(i + 7) };
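+            // Each of the 8 lanes gets its complex-plane x coordinate in a
+            // single fused multiply-add: xs = dpp * pixc + viewx.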
+            __m256 xs = _mm256_fmadd_ps(dpp, pixc, viewx);
+
+            __m256 counter = { 0, 0, 0, 0, 0, 0, 0, 0 };
+            __m256 adder = { 1, 1, 1, 1, 1, 1, 1, 1 };
+            __m256 two = { 2, 2, 2, 2, 2, 2, 2, 2 };
+            __m256 resultsa = { 0, 0, 0, 0, 0, 0, 0, 0 };
+            __m256 resultsb = { 0, 0, 0, 0, 0, 0, 0, 0 };
+
+            __m256 threshold = { 16.0f, 16.0f, 16.0f, 16.0f, 16.0f, 16.0f, 16.0f, 16.0f };
+
+            __m256 a = xs;
+            __m256 b = ys;
+
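+            // One Mandelbrot iteration per lane, contracted into FMAs:
+            //   a' = a*a - (b*b - x) = a*a - b*b + x   (nested fmsub)
+            //   b' = 2*a*b + y                         (fmadd)
+            // 'cmp' masks lanes with |z|^2 <= 16; 'adder' is zeroed in escaped
+            // lanes, so 'counter' accumulates each lane's iteration count.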
+            for (int k = 0; k < info.maxIter; k++) {
+                if ((k & 0xF) == 0) { // every 16th iteration: also test whether all lanes escaped
+                    __m256 abab = _mm256_mul_ps(a, b); // doubled below via fmadd with 'two'
+                    a = _mm256_fmsub_ps(a, a, _mm256_fmsub_ps(b, b, xs));
+                    b = _mm256_fmadd_ps(two, abab, ys);
+                    __m256 cmp = _mm256_cmp_ps(_mm256_fmadd_ps(a, a, _mm256_mul_ps(b, b)), threshold, _CMP_LE_OQ);
+                    if (info.smooth) {
+                        resultsa = _mm256_or_ps(_mm256_andnot_ps(cmp, resultsa), _mm256_and_ps(cmp, a));
+                        resultsb = _mm256_or_ps(_mm256_andnot_ps(cmp, resultsb), _mm256_and_ps(cmp, b));
+                    }
+                    adder = _mm256_and_ps(adder, cmp);
+                    counter = _mm256_add_ps(counter, adder);
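+                    // VTESTPS sets ZF when no sign bit of cmp & cmp is set,
+                    // i.e. every lane has escaped, so the loop can stop early.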
+                    if (_mm256_testz_ps(cmp, cmp) != 0) {
+                        break;
+                    }
+                }
+                else {
+                    // same update, skipping the horizontal escape test
+                    __m256 abab = _mm256_mul_ps(a, b); // doubled below via fmadd with 'two'
+                    a = _mm256_fmsub_ps(a, a, _mm256_fmsub_ps(b, b, xs));
+                    b = _mm256_fmadd_ps(two, abab, ys);
+                    __m256 cmp = _mm256_cmp_ps(_mm256_fmadd_ps(a, a, _mm256_mul_ps(b, b)), threshold, _CMP_LE_OQ);
+                    if (info.smooth) {
+                        resultsa = _mm256_or_ps(_mm256_andnot_ps(cmp, resultsa), _mm256_and_ps(cmp, a));
+                        resultsb = _mm256_or_ps(_mm256_andnot_ps(cmp, resultsb), _mm256_and_ps(cmp, b));
+                    }
+                    adder = _mm256_and_ps(adder, cmp);
+                    counter = _mm256_add_ps(counter, adder);
+                }
+            }
+
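+            // _mm256_store_ps requires 32-byte alignment; the buffer is
+            // over-allocated by 8 floats so std::align can advance the pointer
+            // to the next 32-byte boundary.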
+            auto alignVec = [](float* data) -> float* {
+                void* aligned = data;
+                ::size_t length = 64;
+                std::align(32, 8 * sizeof(float), aligned, length);
+                return static_cast<float*>(aligned);
+            };
+
+            float resData[16];
+            float* ftRes = alignVec(resData);
+            float* resa = (float*) &resultsa;
+            float* resb = (float*) &resultsb;
+
+            _mm256_store_ps(ftRes, counter);
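+            // Write back per-lane results. With smoothing, the final value is
+            // n + 1 - log2(log|z|), computed from the last iterate (resa, resb)
+            // recorded while the lane was still within the escape radius.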
+            for (int k = 0; k < 8 && i + k < info.bWidth; k++) {
+                if (info.smooth) {
+                    data[i + k + j * info.bWidth] = ftRes[k] <= 0 ? info.maxIter :
+                        ftRes[k] >= info.maxIter ? info.maxIter :
+                        ((float) ftRes[k]) + 1 - ::log(::log(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::log(2.0f);
+                }
+                else {
+                    data[i + k + j * info.bWidth] = ftRes[k] <= 0 ? info.maxIter : ftRes[k];
+                }
+            }
+        }
+    }
+}
+
+
+template<bool parallel>
+void CpuGenerator<double, mnd::X86_AVX_FMA, parallel>::generate(const mnd::MandelInfo& info, float* data)
+{
+    using T = double;
+    const MandelViewport& view = info.view;
+
+    const double dppf = double(view.width / info.bWidth);
+    const double viewxf = double(view.x);
+    __m256d viewx = { viewxf, viewxf, viewxf, viewxf };
+    __m256d dpp = { dppf, dppf, dppf, dppf };
+
+    if constexpr (parallel)
+        omp_set_num_threads(omp_get_num_procs());
+#pragma omp parallel for schedule(static, 1) if (parallel)
+    for (long j = 0; j < info.bHeight; j++) {
+        T y = T(view.y + T(j) * view.height / info.bHeight);
+        __m256d ys = { y, y, y, y };
+        long i = 0;
+        for (; i < info.bWidth; i += 4) {
+            __m256d pixc = { double(i), double(i + 1), double(i + 2), double(i + 3) };
+            __m256d xs = _mm256_fmadd_pd(dpp, pixc, viewx);
+
+            __m256d threshold = { 16.0, 16.0, 16.0, 16.0 };
+            __m256d counter = { 0, 0, 0, 0 };
+            __m256d adder = { 1, 1, 1, 1 };
+            __m256d two = { 2, 2, 2, 2 };
+
+            __m256d resultsa = { 0, 0, 0, 0 };
+            __m256d resultsb = { 0, 0, 0, 0 };
+
+            __m256d a = xs;
+            __m256d b = ys;
+
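+            // Same FMA iteration as the float kernel, on 4 double lanes;
+            // the all-lanes-escaped test below runs only every 8th iteration.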
+            for (int k = 0; k < info.maxIter; k++) {
+                __m256d ab = _mm256_mul_pd(a, b);
+                a = _mm256_fmsub_pd(a, a, _mm256_fmsub_pd(b, b, xs));
+                b = _mm256_fmadd_pd(two, ab, ys);
+                __m256d cmp = _mm256_cmp_pd(_mm256_fmadd_pd(a, a, _mm256_mul_pd(b, b)), threshold, _CMP_LE_OQ);
+                if (info.smooth) {
+                    resultsa = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsa), _mm256_and_pd(cmp, a));
+                    resultsb = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsb), _mm256_and_pd(cmp, b));
+                }
+                adder = _mm256_and_pd(adder, cmp);
+                counter = _mm256_add_pd(counter, adder);
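+                // cmp is bit-cast to an integer vector so VPTEST can detect the
+                // all-escaped case; the (k & 0x7) short-circuit presumably keeps
+                // this horizontal test off the hot path.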
+                if ((k & 0x7) == 0 && _mm256_testz_si256(_mm256_castpd_si256(cmp), _mm256_castpd_si256(cmp)) != 0) {
+                    break;
+                }
+            }
+
+            auto alignVec = [](double* data) -> double* {
+                void* aligned = data;
+                ::size_t length = 64;
+                std::align(32, 4 * sizeof(double), aligned, length);
+                return static_cast<double*>(aligned);
+            };
+
+            double resData[8];
+            double* ftRes = alignVec(resData);
+            double* resa = (double*) &resultsa;
+            double* resb = (double*) &resultsb;
+            _mm256_store_pd(ftRes, counter);
+            for (int k = 0; k < 4 && i + k < info.bWidth; k++) {
+                if (info.smooth)
+                    data[i + k + j * info.bWidth] = ftRes[k] <= 0 ? info.maxIter :
+                        ftRes[k] >= info.maxIter ? info.maxIter :
+                        ((float) ftRes[k]) + 1 - ::log(::log(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::log(2.0f);
+                else
+                    data[i + k + j * info.bWidth] = ftRes[k] > 0 ? float(ftRes[k]) : info.maxIter;
+            }
+        }
+    }
+}
+
+
 struct VecPair
 {
     __m256d a;