@@ -16,6 +16,9 @@ namespace mnd
     template class CpuGenerator<double, mnd::X86_AVX, false>;
     template class CpuGenerator<double, mnd::X86_AVX, true>;
+
+    template class CpuGenerator<DoubleDouble, mnd::X86_AVX, false>;
+    template class CpuGenerator<DoubleDouble, mnd::X86_AVX, true>;
 }
 
 
 template<bool parallel>
@@ -163,3 +166,196 @@ void CpuGenerator<double, mnd::X86_AVX, parallel>::generate(const mnd::MandelInfo
     }
 }
+
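+// Holds two packed vectors; the error-free transformations below use it to
+// return a (result, error) pair of __m256d values.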
+struct VecPair
+{
+    __m256d a;
+    __m256d b;
+};
+
+
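+// Error-free transformations on packed doubles: each returns the correctly
+// rounded result together with the exact rounding error. quickTwoSum and
+// quickTwoDiff assume |a| >= |b|; twoSum and twoDiff work for arbitrary inputs.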
+static inline VecPair quickTwoSum(__m256d a, __m256d b)
+{
+    __m256d s = _mm256_add_pd(a, b);
+    __m256d e = _mm256_sub_pd(b, _mm256_sub_pd(s, a));
+    return { s, e };
+}
+
+static inline VecPair quickTwoDiff(__m256d a, __m256d b)
+{
+    __m256d s = _mm256_sub_pd(a, b);
+    __m256d e = _mm256_sub_pd(_mm256_sub_pd(a, s), b);
+    return { s, e };
+}
+
+static inline VecPair twoSum(__m256d a, __m256d b)
+{
+    __m256d s = _mm256_add_pd(a, b);
+    __m256d bb = _mm256_sub_pd(s, a);
+    __m256d e = _mm256_add_pd(_mm256_sub_pd(a, _mm256_sub_pd(s, bb)), _mm256_sub_pd(b, bb));
+    return { s, e };
+}
+
+static inline VecPair twoDiff(__m256d a, __m256d b)
+{
+    __m256d s = _mm256_sub_pd(a, b);
+    __m256d bb = _mm256_sub_pd(s, a);
+    __m256d e = _mm256_sub_pd(_mm256_sub_pd(a, _mm256_sub_pd(s, bb)), _mm256_add_pd(b, bb));
+    return { s, e };
+}
+
+
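+// Veltkamp splitting: splits each lane into high and low parts with
+// non-overlapping mantissas (the splitter constant is 2^27 + 1). The scalar
+// reference kept in the comment below includes the overflow guard omitted here.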
+static inline VecPair split(__m256d a)
+{
+    /*
+    // -- this should never happen when doing mandelbrot calculations,
+    // so we omit this check.
+    if (a > _QD_SPLIT_THRESH || a < -_QD_SPLIT_THRESH) {
+        a *= 3.7252902984619140625e-09;  // 2^-28
+        temp = _QD_SPLITTER * a;
+        hi = temp - (temp - a);
+        lo = a - hi;
+        hi *= 268435456.0;          // 2^28
+        lo *= 268435456.0;          // 2^28
+    } else {
+        temp = _QD_SPLITTER * a;
+        hi = temp - (temp - a);
+        lo = a - hi;
+    }
+    */
+
+    static const __m256d SPLITTER = { 134217729.0, 134217729.0, 134217729.0, 134217729.0 };
+    __m256d temp = _mm256_mul_pd(SPLITTER, a);
+    __m256d hi = _mm256_sub_pd(temp, _mm256_sub_pd(temp, a));
+    __m256d lo = _mm256_sub_pd(a, hi);
+    return { hi, lo };
+}
+
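+// Dekker's product: p is the rounded product, err recovers the rounding
+// error from the split high/low parts (no FMA required).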
+static inline VecPair twoProd(__m256d a, __m256d b)
+{
+    __m256d p = _mm256_mul_pd(a, b);
+    auto[a_hi, a_lo] = split(a);
+    auto[b_hi, b_lo] = split(b);
+    __m256d err = _mm256_add_pd(_mm256_add_pd(_mm256_sub_pd(_mm256_mul_pd(a_hi, b_hi), p), _mm256_add_pd(_mm256_mul_pd(a_hi, b_lo), _mm256_mul_pd(a_lo, b_hi))), _mm256_mul_pd(a_lo, b_lo));
+    return { p, err };
+}
+
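+// Four double-double numbers in SoA layout: lane-wise value = x[0] + x[1],
+// where x[1] holds the low-order correction term.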
+struct AvxDoubleDouble
+{
+    __m256d x[2];
+
+    inline AvxDoubleDouble(__m256d a, __m256d b) :
+        x{ a, b }
+    {}
+
+
+    inline AvxDoubleDouble operator + (const AvxDoubleDouble& sm) const
+    {
+        auto[s, e] = twoSum(x[0], sm.x[0]);
+        e = _mm256_add_pd(e, _mm256_add_pd(x[1], sm.x[1]));
+        auto[r1, r2] = quickTwoSum(s, e);
+        return AvxDoubleDouble{ r1, r2 };
+    }
+
+    inline AvxDoubleDouble operator - (const AvxDoubleDouble& sm) const
+    {
+        auto[s, e] = twoDiff(x[0], sm.x[0]);
+        e = _mm256_add_pd(e, x[1]);
+        e = _mm256_sub_pd(e, sm.x[1]);
+        auto[r1, r2] = quickTwoSum(s, e);
+        return AvxDoubleDouble{ r1, r2 };
+    }
+
+    inline AvxDoubleDouble operator * (const AvxDoubleDouble& sm) const
+    {
+        auto[p1, p2] = twoProd(this->x[0], sm.x[0]);
+        p2 = _mm256_add_pd(p2,
+            _mm256_add_pd(_mm256_mul_pd(sm.x[1], x[0]), _mm256_mul_pd(sm.x[0], x[1])) );
+        auto[r1, r2] = quickTwoSum(p1, p2);
+        return AvxDoubleDouble{ r1, r2 };
+    }
+};
+
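+// generate() specialization for double-double precision: computes four
+// pixels per loop iteration using the AVX double-double arithmetic above.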
+template<bool parallel>
+void CpuGenerator<mnd::DoubleDouble, mnd::X86_AVX, parallel>::generate(const mnd::MandelInfo& info, float* data)
+{
+    const MandelViewport& view = info.view;
+
+    using T = DoubleDouble;
+
+    T viewx = mnd::convert<T>(view.x);
+    T viewy = mnd::convert<T>(view.y);
+    T wpp = mnd::convert<T>(view.width / info.bWidth);
+    T hpp = mnd::convert<T>(view.height / info.bHeight);
+
+//    if constexpr(parallel)
+//        omp_set_num_threads(2 * omp_get_num_procs());
+//#pragma omp parallel for schedule(static, 1) if (parallel)
+
+    for (long j = 0; j < info.bHeight; j++) {
+        T y = viewy + T(double(j)) * hpp;
+        __m256d y0s = { y.x[0], y.x[0], y.x[0], y.x[0] };
+        __m256d y1s = { y.x[1], y.x[1], y.x[1], y.x[1] };
+        AvxDoubleDouble ys{ y0s, y1s };
+        long i = 0;
+        for (; i < info.bWidth; i += 4) {
+            T x1 = viewx + T(double(i)) * wpp;
+            T x2 = x1 + wpp;
+            T x3 = x2 + wpp;
+            T x4 = x3 + wpp;
+
+            __m256d x0s = {
+                x1.x[0], x2.x[0], x3.x[0], x4.x[0],
+            };
+
+            __m256d x1s = {
+                x1.x[1], x2.x[1], x3.x[1], x4.x[1],
+            };
+
+            AvxDoubleDouble xs{ x0s, x1s };
+
+            int itRes[4] = { 0, 0, 0, 0 };
+
+            __m256d threshold = { 16.0, 16.0, 16.0, 16.0 };
+            __m256d counter = { 0, 0, 0, 0 };
+            __m256d adder = { 1, 1, 1, 1 };
+
+            AvxDoubleDouble a = xs;
+            AvxDoubleDouble b = ys;
+
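+            // Iterate z = z^2 + c for all four pixels until every lane has
+            // exceeded the escape threshold or maxIter is reached; 'counter'
+            // accumulates the per-lane iteration counts.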
+            for (int k = 0; k < info.maxIter; k++) {
+                AvxDoubleDouble aa = a * a;
+                AvxDoubleDouble bb = b * b;
+                AvxDoubleDouble abab = a * b; abab = abab + abab;
+                a = aa - bb + xs;
+                b = abab + ys;
+                __m256i cmp = _mm256_castpd_si256(_mm256_cmp_pd(_mm256_add_pd(aa.x[0], bb.x[0]), threshold, _CMP_LE_OQ));
+                /*if (info.smooth) {
+                    resultsa = _mm256_or_pd(_mm256_andnot_ps(cmp, resultsa), _mm256_and_ps(cmp, a));
+                    resultsb = _mm256_or_ps(_mm256_andnot_ps(cmp, resultsb), _mm256_and_ps(cmp, b));
+                }*/
+                adder = _mm256_and_pd(adder, _mm256_castsi256_pd(cmp));
+                counter = _mm256_add_pd(counter, adder);
+                if (_mm256_testz_si256(cmp, cmp) != 0) {
+                    break;
+                }
+            }
+
+            auto alignVec = [](double* data) -> double* {
+                void* aligned = data;
+                ::size_t length = 64;
+                std::align(32, 4 * sizeof(double), aligned, length);
+                return static_cast<double*>(aligned);
+            };
+
+            double resData[8];
+            double* ftRes = alignVec(resData);
+            _mm256_store_pd(ftRes, counter);
+            for (int k = 0; k < 4 && i + k < info.bWidth; k++)
+                data[i + k + j * info.bWidth] = ftRes[k] > 0 ? float(ftRes[k]) : info.maxIter;
+        }
+    }
+}
+
+
+