- #include "CpuGenerators.h"
- #include <immintrin.h>
- #include <omp.h>
- #include <cmath>
- #include <utility>
- #include <memory>
- using mnd::CpuGenerator;
namespace mnd
{
    template class CpuGenerator<float, mnd::X86_AVX, false>;
    template class CpuGenerator<float, mnd::X86_AVX, true>;
    template class CpuGenerator<double, mnd::X86_AVX, false>;
    template class CpuGenerator<double, mnd::X86_AVX, true>;
}
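
// The four specializations above are instantiated explicitly, presumably so that
// all AVX code stays in this translation unit and only this file has to be
// compiled with AVX enabled (e.g. -mavx); other translation units just link
// against these instantiations.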
- template<bool parallel>
- void CpuGenerator<float, mnd::X86_AVX, parallel>::generate(const mnd::MandelInfo& info, float* data)
- {
    using T = float;
    const MandelViewport& view = info.view;

    if constexpr(parallel)
        omp_set_num_threads(2 * omp_get_num_procs());
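    // Each image row is an independent work item. Oversubscribing with twice the
    // number of logical processors and schedule(static, 1) interleaves rows across
    // threads, presumably to balance the very uneven per-row cost near the set.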
#pragma omp parallel for schedule(static, 1) if (parallel)
    for (long j = 0; j < info.bHeight; j++) {
        T y = T(view.y) + T(j) * T(view.height / info.bHeight);
        __m256 ys = { y, y, y, y, y, y, y, y };
        for (long i = 0; i < info.bWidth; i += 8) {
            __m256 xs = {
                float(view.x + double(i) * view.width / info.bWidth),
                float(view.x + double(i + 1) * view.width / info.bWidth),
                float(view.x + double(i + 2) * view.width / info.bWidth),
                float(view.x + double(i + 3) * view.width / info.bWidth),
                float(view.x + double(i + 4) * view.width / info.bWidth),
                float(view.x + double(i + 5) * view.width / info.bWidth),
                float(view.x + double(i + 6) * view.width / info.bWidth),
                float(view.x + double(i + 7) * view.width / info.bWidth)
            };
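
            // Each lane of xs/ys holds the complex parameter c for one of eight
            // horizontally adjacent pixels; the coordinates are computed in double
            // and only narrowed to float at the end to limit rounding error.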
            __m256 counter = { 0, 0, 0, 0, 0, 0, 0, 0 };
            __m256 adder = { 1, 1, 1, 1, 1, 1, 1, 1 };
            __m256 resultsa = { 0, 0, 0, 0, 0, 0, 0, 0 };
            __m256 resultsb = { 0, 0, 0, 0, 0, 0, 0, 0 };
            __m256 threshold = { 16.0f, 16.0f, 16.0f, 16.0f, 16.0f, 16.0f, 16.0f, 16.0f };

            __m256 a = xs;
            __m256 b = ys;
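
            // Vectorized escape-time iteration: z <- z^2 + c is evaluated per lane as
            // a' = a^2 - b^2 + x and b' = 2ab + y. A lane counts as "still inside"
            // while a^2 + b^2 <= 16, i.e. an escape radius of 4 rather than the
            // minimal 2, presumably to give the smooth-coloring formula below headroom.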
            for (int k = 0; k < info.maxIter; k++) {
                __m256 aa = _mm256_mul_ps(a, a);
                __m256 bb = _mm256_mul_ps(b, b);
                __m256 abab = _mm256_mul_ps(a, b); abab = _mm256_add_ps(abab, abab);
                a = _mm256_add_ps(_mm256_sub_ps(aa, bb), xs);
                b = _mm256_add_ps(abab, ys);
                __m256 cmp = _mm256_cmp_ps(_mm256_add_ps(aa, bb), threshold, _CMP_LE_OQ);
                if (info.smooth) {
                    resultsa = _mm256_or_ps(_mm256_andnot_ps(cmp, resultsa), _mm256_and_ps(cmp, a));
                    resultsb = _mm256_or_ps(_mm256_andnot_ps(cmp, resultsb), _mm256_and_ps(cmp, b));
                }
                adder = _mm256_and_ps(adder, cmp);
                counter = _mm256_add_ps(counter, adder);
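                // "adder" is 1.0f in lanes that are still iterating and 0.0f in lanes
                // that have escaped, so "counter" stops advancing per lane once it
                // escapes. Every 8th iteration, bail out early if all lanes escaped.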
- if ((k & 0x7) == 0 && _mm256_testz_ps(cmp, cmp) != 0) {
- break;
- }
- }
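
            // _mm256_store_ps requires a 32-byte aligned destination, so the counter
            // vector is written through an oversized stack buffer that is aligned
            // manually via std::align.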
            auto alignVec = [](float* data) -> float* {
                void* aligned = data;
                ::size_t length = 64;
                std::align(32, 8 * sizeof(float), aligned, length);
                return static_cast<float*>(aligned);
            };

            float resData[16];
            float* ftRes = alignVec(resData);
            float* resa = (float*) &resultsa;
            float* resb = (float*) &resultsb;
            _mm256_store_ps(ftRes, counter);
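
            // Per-pixel writeback. With smoothing enabled, the integer iteration count
            // is refined with the usual continuous-coloring term
            // n + 1 - log2(log|z|), where log|z| = log(a^2 + b^2) / 2.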
- for (int k = 0; k < 8 && i + k < info.bWidth; k++) {
- if (info.smooth) {
- data[i + k + j * info.bWidth] = ftRes[k] <= 0 ? info.maxIter :
- ftRes[k] >= info.maxIter ? info.maxIter :
- ((float)ftRes[k]) + 1 - ::log(::log(resa[k] * resa[k] + resb[k] * resb[k]) / 2) / ::log(2.0f);
- }
- else {
- data[i + k + j * info.bWidth] = ftRes[k] <= 0 ? info.maxIter : ftRes[k];
- }
- }
- }
- }
- }
- template<bool parallel>
- void CpuGenerator<double, mnd::X86_AVX, parallel>::generate(const mnd::MandelInfo& info, float* data)
- {
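    // Double-precision kernel: same escape-time structure as the float version, but
    // with four pixels per __m256d vector. Note that the output buffer stays float*;
    // only the iteration itself runs in double precision.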
    using T = double;
    const MandelViewport& view = info.view;

    if constexpr(parallel)
        omp_set_num_threads(2 * omp_get_num_procs());
#pragma omp parallel for schedule(static, 1) if (parallel)
    for (long j = 0; j < info.bHeight; j++) {
        T y = T(view.y + T(j) * view.height / info.bHeight);
        __m256d ys = { y, y, y, y };
        for (long i = 0; i < info.bWidth; i += 4) {
            __m256d xs = {
                double(view.x + double(i) * view.width / info.bWidth),
                double(view.x + double(i + 1) * view.width / info.bWidth),
                double(view.x + double(i + 2) * view.width / info.bWidth),
                double(view.x + double(i + 3) * view.width / info.bWidth)
            };
            __m256d threshold = { 16.0, 16.0, 16.0, 16.0 };
            __m256d counter = { 0, 0, 0, 0 };
            __m256d adder = { 1, 1, 1, 1 };

            __m256d a = xs;
            __m256d b = ys;
            for (int k = 0; k < info.maxIter; k++) {
                __m256d aa = _mm256_mul_pd(a, a);
                __m256d bb = _mm256_mul_pd(b, b);
                __m256d abab = _mm256_mul_pd(a, b); abab = _mm256_add_pd(abab, abab);
                a = _mm256_add_pd(_mm256_sub_pd(aa, bb), xs);
                b = _mm256_add_pd(abab, ys);
                __m256i cmp = _mm256_castpd_si256(_mm256_cmp_pd(_mm256_add_pd(aa, bb), threshold, _CMP_LE_OQ));
                // Smooth coloring is not implemented in the double kernel; a _pd
                // analogue of the float path's resultsa/resultsb masking would go here.
                adder = _mm256_and_pd(adder, _mm256_castsi256_pd(cmp));
                counter = _mm256_add_pd(counter, adder);
                if ((k & 0x7) == 0 && _mm256_testz_si256(cmp, cmp) != 0) {
                    break;
                }
            }
            auto alignVec = [](double* data) -> double* {
                void* aligned = data;
                ::size_t length = 64;
                std::align(32, 4 * sizeof(double), aligned, length);
                return static_cast<double*>(aligned);
            };

            double resData[8];
            double* ftRes = alignVec(resData);
            _mm256_store_pd(ftRes, counter);
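
            // Writeback without smoothing: lanes whose counter never advanced past
            // zero are written as maxIter, as in the float kernel above.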
- for (int k = 0; k < 4 && i + k < info.bWidth; k++)
- data[i + k + j * info.bWidth] = ftRes[k] > 0 ? float(ftRes[k]) : info.maxIter;
- }
- }
- }