- #include "CpuGenerators.h"
- #include <immintrin.h>
- #include <omp.h>
- #include <cmath>
- #include <utility>
- #include <memory>
- using mnd::CpuGenerator;
- namespace mnd
- {
- template class CpuGenerator<DoubleDouble, mnd::X86_AVX_FMA, false>;
- template class CpuGenerator<DoubleDouble, mnd::X86_AVX_FMA, true>;
- }
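- // The helpers below are AVX implementations of the classic error-free
- // transformations (Dekker/Knuth) used for double-double arithmetic: each
- // __m256d holds the high or low words of four double-double numbers, so
- // every operation processes four pixels at once.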
- struct VecPair
- {
- __m256d a;
- __m256d b;
- };
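- // Fast two-sum/two-diff (Dekker): s is the rounded result and e the exact
- // rounding error, valid when |a| >= |b| holds in every lane.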
- static inline VecPair quickTwoSum(__m256d a, __m256d b)
- {
- __m256d s = _mm256_add_pd(a, b);
- __m256d e = _mm256_sub_pd(b, _mm256_sub_pd(s, a));
- return { s, e };
- }
- static inline VecPair quickTwoDiff(__m256d a, __m256d b)
- {
- __m256d s = _mm256_sub_pd(a, b);
- __m256d e = _mm256_sub_pd(_mm256_sub_pd(a, s), b);
- return { s, e };
- }
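- // Knuth's branch-free two-sum/two-diff: like the quick variants above, but
- // correct without any assumption on the relative magnitudes of a and b.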
- static inline VecPair twoSum(__m256d a, __m256d b)
- {
- __m256d s = _mm256_add_pd(a, b);
- __m256d bb = _mm256_sub_pd(s, a);
- __m256d e = _mm256_add_pd(_mm256_sub_pd(a, _mm256_sub_pd(s, bb)), _mm256_sub_pd(b, bb));
- return { s, e };
- }
- static inline VecPair twoDiff(__m256d a, __m256d b)
- {
- __m256d s = _mm256_sub_pd(a, b);
- __m256d bb = _mm256_sub_pd(s, a);
- __m256d e = _mm256_sub_pd(_mm256_sub_pd(a, _mm256_sub_pd(s, bb)), _mm256_add_pd(b, bb));
- return { s, e };
- }
- // Two-product via FMA: p = fl(a*b) and e = fma(a, b, -p) is the exact
- // rounding error; this is why the generator requires the FMA extension.
- // (A Dekker-style split-based fallback would be needed on non-FMA CPUs.)
- static inline VecPair twoProd(__m256d a, __m256d b)
- {
- __m256d p = _mm256_mul_pd(a, b);
- __m256d e = _mm256_fmsub_pd(a, b, p);
- return { p, e };
- }
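- // Four double-double numbers in SoA layout: x[0] holds the high words,
- // x[1] the low words. operator+, - and * implement standard double-double
- // arithmetic on top of the transformations above.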
- struct AvxDoubleDouble
- {
- __m256d x[2];
- inline AvxDoubleDouble(__m256d a, __m256d b) :
- x{ a, b }
- {}
- inline AvxDoubleDouble operator + (const AvxDoubleDouble& sm) const
- {
- auto[s, e] = twoSum(x[0], sm.x[0]);
- e = _mm256_add_pd(e, _mm256_add_pd(x[1], sm.x[1]));
- auto[r1, r2] = quickTwoSum(s, e);
- return AvxDoubleDouble{ r1, r2 };
- }
- inline AvxDoubleDouble operator - (const AvxDoubleDouble& sm) const
- {
- auto[s, e] = twoDiff(x[0], sm.x[0]);
- e = _mm256_add_pd(e, x[1]);
- e = _mm256_sub_pd(e, sm.x[1]);
- auto[r1, r2] = quickTwoSum(s, e);
- return AvxDoubleDouble{ r1, r2 };
- }
- inline AvxDoubleDouble operator * (const AvxDoubleDouble& sm) const
- {
- auto[p1, p2] = twoProd(this->x[0], sm.x[0]);
- p2 = _mm256_add_pd(p2,
- _mm256_add_pd(_mm256_mul_pd(sm.x[1], x[0]), _mm256_mul_pd(sm.x[0], x[1])) );
- auto[r1, r2] = quickTwoSum(p1, p2);
- return AvxDoubleDouble{ r1, r2 };
- }
- };
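- // Renders the Mandelbrot iteration in double-double precision (about 32
- // significant decimal digits), computing four pixels at a time per set of
- // AVX registers.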
- template<bool parallel>
- void CpuGenerator<mnd::DoubleDouble, mnd::X86_AVX_FMA, parallel>::generate(const mnd::MandelInfo& info, float* data)
- {
- const MandelViewport& view = info.view;
- using T = DoubleDouble;
- T viewx = mnd::convert<T>(view.x);
- T viewy = mnd::convert<T>(view.y);
- T wpp = mnd::convert<T>(view.width / info.bWidth);
- T hpp = mnd::convert<T>(view.height / info.bHeight);
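- // wpp/hpp are the complex-plane step per pixel in x and y direction.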
- if constexpr(parallel)
- omp_set_num_threads(2 * omp_get_num_procs());
- #pragma omp parallel for schedule(static, 1) if (parallel)
- for (long j = 0; j < info.bHeight; j++) {
- T y = viewy + T(double(j)) * hpp;
- AvxDoubleDouble ys{ _mm256_set1_pd(y.x[0]), _mm256_set1_pd(y.x[1]) };
- for (long i = 0; i < info.bWidth; i += 4) {
- T x1 = viewx + T(double(i)) * wpp;
- T x2 = x1 + wpp;
- T x3 = x2 + wpp;
- T x4 = x3 + wpp;
- AvxDoubleDouble xs{
- _mm256_setr_pd(x1.x[0], x2.x[0], x3.x[0], x4.x[0]),
- _mm256_setr_pd(x1.x[1], x2.x[1], x3.x[1], x4.x[1])
- };
- __m256d threshold = _mm256_set1_pd(16.0);
- __m256d counter = _mm256_setzero_pd();
- __m256d adder = _mm256_set1_pd(1.0);
- AvxDoubleDouble a = xs;
- AvxDoubleDouble b = ys;
- // Initialize so lanes that escape on the very first iteration do not read
- // indeterminate values in the smooth-coloring blend below.
- __m256d resultsa = _mm256_setzero_pd();
- __m256d resultsb = _mm256_setzero_pd();
- for (int k = 0; k < info.maxIter; k++) {
- AvxDoubleDouble aa = a * a;
- AvxDoubleDouble bb = b * b;
- AvxDoubleDouble abab = a * b; abab = abab + abab;
- a = aa - bb + xs;
- b = abab + ys;
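- // cmp is all-ones for lanes whose |z|^2 (high words only) is still within
- // the bailout radius of 16; adder is masked with it, so counter stops
- // increasing for escaped lanes and ends up holding each lane's escape count.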
- __m256d cmp = _mm256_cmp_pd(_mm256_add_pd(aa.x[0], bb.x[0]), threshold, _CMP_LE_OQ);
- if (info.smooth) {
- resultsa = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsa), _mm256_and_pd(cmp, a.x[0]));
- resultsb = _mm256_or_pd(_mm256_andnot_pd(cmp, resultsb), _mm256_and_pd(cmp, b.x[0]));
- }
- adder = _mm256_and_pd(adder, cmp);
- counter = _mm256_add_pd(counter, adder);
- if (_mm256_testz_si256(_mm256_castpd_si256(cmp), _mm256_castpd_si256(cmp)) != 0) {
- break;
- }
- }
- // Write the per-lane results back to memory; alignas provides the 32-byte
- // alignment required by _mm256_store_pd.
- alignas(32) double ftRes[4];
- alignas(32) double resa[4];
- alignas(32) double resb[4];
- _mm256_store_pd(ftRes, counter);
- _mm256_store_pd(resa, resultsa);
- _mm256_store_pd(resb, resultsb);
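- // Smooth coloring: n + 1 - log2(log(|z|^2) / 2) gives a continuous
- // iteration count; lanes that never escaped are clamped to maxIter.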
- for (int k = 0; k < 4 && i + k < info.bWidth; k++) {
- if (info.smooth)
- data[i + k + j * info.bWidth] = ftRes[k] <= 0 ? info.maxIter :
- ftRes[k] >= info.maxIter ? info.maxIter :
- ((float)ftRes[k]) + 1 - ::log(::log(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::log(2.0f);
- else
- data[i + k + j * info.bWidth] = ftRes[k] > 0 ? float(ftRes[k]) : info.maxIter;
- }
- }
- }
- }